id
stringlengths 1
8
| text
stringlengths 6
1.05M
| dataset_id
stringclasses 1
value |
---|---|---|
9673383
|
<reponame>tugot17/Pytorch-Lightning-Templates-
import albumentations as A
import pytorch_lightning as pl
from albumentations.pytorch import ToTensorV2
from torch.utils.data import DataLoader
from .dataset import ImageClassificationDataset
class ImageClassificationDatamodule(pl.LightningDataModule):
def __init__(self, batch_size, train_transform, val_transform):
super().__init__()
self.batch_size = batch_size
self.train_transform = train_transform
self.val_transform = val_transform
def setup(self, stage=None):
self.train_set = ImageClassificationDataset(
file_paths=[
"./images/image_1.jpg",
"./images/image_2.jpg",
"./images/image_3.jpg",
],
labels=[1, 2, 3],
transform=self.train_transform,
)
self.val_set = ImageClassificationDataset(
file_paths=[
"./images/image_1.jpg",
"./images/image_2.jpg",
"./images/image_3.jpg",
],
labels=[1, 2, 3],
transform=self.val_transform,
)
def train_dataloader(self):
return DataLoader(
self.train_set, batch_size=self.batch_size, shuffle=True, num_workers=4
)
def val_dataloader(self):
return DataLoader(
self.val_set, batch_size=self.batch_size, shuffle=False, num_workers=4
)
if __name__ == "__main__":
val_transform = A.Compose(
[
A.Resize(224, 224),
A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
ToTensorV2(),
]
)
train_transform = A.Compose(
[
A.Resize(400, 400),
A.RandomCrop(224, 224),
A.HorizontalFlip(),
A.RandomRotate90(),
A.ShiftScaleRotate(
shift_limit=0.0625, scale_limit=0.50, rotate_limit=45, p=0.75
),
A.Blur(blur_limit=3),
A.OpticalDistortion(),
A.GridDistortion(),
A.HueSaturationValue(),
A.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
),
ToTensorV2(),
]
)
dm = ImageClassificationDatamodule(16, train_transform, val_transform)
|
StarcoderdataPython
|
6491097
|
<reponame>arsho/Hackerrank_30_Days_of_Code_Solutions
'''
Title : Day 2: Operators
Domain : Tutorials
Author : <NAME>
Created : 03 April 2019
'''
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the solve function below.
def solve(meal_cost, tip_percent, tax_percent):
tip = meal_cost * (tip_percent / 100)
tax = meal_cost * (tax_percent / 100)
return round(meal_cost + tip + tax)
if __name__ == '__main__':
meal_cost = float(input())
tip_percent = int(input())
tax_percent = int(input())
total_cost = solve(meal_cost, tip_percent, tax_percent)
print(total_cost)
|
StarcoderdataPython
|
6659803
|
<filename>number_theory/primality_test/naive.py<gh_stars>0
from math import sqrt
from parameterized import parameterized
import unittest
class Test(unittest.TestCase):
@parameterized.expand([
(11, True),
(15, False),
(1, False),
(5, True),
(4, False),
(49, False)
])
def test(self, target, is_prime_expected):
self.assertEqual(is_prime(target), is_prime_expected)
def is_prime(target):
if target < 2:
return False
if not target%2 or not target%3:
return False
for i in range(5, int(sqrt(target)) + 1, 6):
if not target%i or not target%(i+2):
return False
return True
|
StarcoderdataPython
|
3550063
|
import sys, json;
with open(sys.argv[1]) as f:
data = json.load(f)
for x in range(2,len(sys.argv)):
data = data[sys.argv[x]]
print data
|
StarcoderdataPython
|
109964
|
from django.apps import apps
from django.core.management.base import BaseCommand
from rayures.events import dispatch
class Command(BaseCommand):
help = 'Sync stripe events'
def handle(self, *args, **options):
# TODO: option to select only the one that have only failed or never processed
cls = apps.get_model('rayures', 'Event')
qs = cls.objects.all()
# qs = qs.filter(type__startswith='coupon.')
for event in qs.order_by('created_at'):
print(event, event.created_at, event.type)
dispatch(event)
|
StarcoderdataPython
|
82531
|
<reponame>monk-after-90s/diyblog
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
# Create your views here.
from django.urls import reverse
from django.views.generic import DetailView, ListView, CreateView
from blog.forms import BlogUserModelForm
from blog.models import BlogUser, Blog, Comment
def index(request):
return render(request, 'blog/index.html')
class BlogUserDetailView(ListView):
model = Blog
template_name = 'blog/bloguser_detail.html'
def get_queryset(self):
return Blog.objects.filter(author_id=int(self.kwargs['pk']))
def get_context_data(self, *, object_list=None, **kwargs):
context = super().get_context_data(object_list=object_list, **kwargs)
context['bloguser'] = BlogUser.objects.get(pk=int(self.kwargs['pk']))
return context
def register_view(request):
if request.method == 'GET':
form = BlogUserModelForm()
else:
form = BlogUserModelForm(request.POST)
if form.is_valid():
new_blog_user = BlogUser.objects.create_user(
**{key: request.POST[key] for key in BlogUserModelForm.Meta.fields})
new_blog_user.save()
return HttpResponseRedirect(reverse('bloguser-detail', args=[str(new_blog_user.pk)]))
context = {
'form': form
}
return render(request, 'blog/bloguser_form.html', context=context)
class BlogListView(ListView):
model = Blog
paginate_by = 5
class BlogUserListView(ListView):
model = BlogUser
template_name = 'blog/blogger_list.html'
def get_queryset(self):
return BlogUser.objects.filter(blog__id__gt=0)
class BlogDetailView(DetailView):
model = Blog
class CommentDetailView(DetailView):
model = Comment
# @login_required
# def comment_create_view(request, blog_pk: int):
# blog = Blog.objects.get(pk=blog_pk)
# if request.method == 'GET':
# blog = Blog.objects.get(pk=blog_pk)
# form = CommentModelForm()
# context = {
# 'form': form,
# 'blog': blog
# }
# return render(request, 'blog/comment_form.html', context=context)
# else:
# form = CommentModelForm(request.POST)
# if form.is_valid():
# new_comment = Comment.objects.create(
# content=form.cleaned_data['content'],
# blog=blog,
# bloguser=BlogUser.objects.get(pk=(request.user.pk))
# )
# new_comment.save()
# return HttpResponseRedirect(reverse('blog', args=(blog_pk,)))
class CommentCreateView(LoginRequiredMixin, CreateView):
fields = ['content']
model = Comment
def get_context_data(self, **kwargs):
context = super(CommentCreateView, self).get_context_data(**kwargs)
context['blog'] = Blog.objects.get(pk=int(self.kwargs['blog_pk']))
return context
def form_valid(self, form):
form.instance.bloguser = BlogUser.objects.get(pk=int(self.request.user.pk))
form.instance.blog = get_object_or_404(Blog, pk=int(self.kwargs['blog_pk']))
return super(CommentCreateView, self).form_valid(form)
def get_success_url(self):
return reverse('blog', args=(str(self.kwargs['blog_pk']),))
|
StarcoderdataPython
|
6571999
|
<filename>python/fitfitparse.py
#!/usr/bin/env python3
import fitparse
import sys
import time
import os
start = time.time()
fitfile = fitparse.FitFile( sys.argv[1] )
records = []
laps = []
for record in fitfile.get_messages( 'record' ):
records.append( record )
for lap in fitfile.get_messages( 'lap' ):
laps.append( lap )
print( 'record: {} messages'.format( len( records) ) )
print( 'laps: {} messages'.format( len( laps) ) )
end = time.time()
print( "| python | {} | {} | {:.3f} seconds | [python-fitparse](https://github.com/dtcooper/python-fitparse) |".format( os.path.basename(sys.argv[0]), sys.argv[1], end-start ) )
|
StarcoderdataPython
|
3279543
|
from sklearn.model_selection import train_test_split;
from sklearn.datasets import make_moons
from supervised_learning.trees.random_forests import RandomForestClassifier
from misc.plot_functions import plot_decision
def main():
#We sample 100 points from the make_moons function in order to classify them.
X, y = make_moons(n_samples=100, noise=0.25, random_state=42);
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
model = RandomForestClassifier()
model.fit(X_train, y_train)
print("Test score: ", model.score(X_test, y_test))
plot_decision(model, X, y, "Random forest classifier", "X0", "X1")
if __name__ == "__main__":
main();
|
StarcoderdataPython
|
5188548
|
import logging
from typing import Optional
import docker # type: ignore[import]
from docker.client import DockerClient # type: ignore[import]
from docker.models.networks import Network # type: ignore[import]
from docker.errors import APIError # type: ignore[import]
import psutil # type: ignore[import]
from .errors import AlreadyStoppedError, ContainerRuntimeError
from .commons import DOCKER_LABEL_KEY, get_docker_label_value
from .implementations import (
FHIRImplementation,
HAPIFHIRImplementation,
MicrosoftFHIRImplemention,
)
from .models import Configuration, FHIRFlavor, RunningFHIR
LOGGER = logging.getLogger(__name__)
def _kill_orphaned_containers(docker_client: DockerClient):
containers = docker_client.containers.list(
filters={
"label": DOCKER_LABEL_KEY,
},
)
for container in containers:
parent_pid = int(container.labels[DOCKER_LABEL_KEY])
if not psutil.pid_exists(parent_pid):
LOGGER.info(
f"Found orphaned container {container.id}, \
created by pid {parent_pid}, killing..."
)
container.kill()
def _kill_orphaned_networks(docker_client: DockerClient):
networks = docker_client.networks.list(
filters={
"label": DOCKER_LABEL_KEY,
},
)
for network in networks:
parent_pid = int(network.attrs["Labels"][DOCKER_LABEL_KEY])
if not psutil.pid_exists(parent_pid):
LOGGER.info(
f"Found orphaned network {network.id}, \
created by pid {parent_pid}, killing..."
)
network.remove()
def _create_implementation(flavor: FHIRFlavor) -> FHIRImplementation:
if flavor == FHIRFlavor.HAPI:
return HAPIFHIRImplementation()
elif flavor == FHIRFlavor.MICROSOFT:
return MicrosoftFHIRImplemention()
else:
raise NotImplementedError()
class FHIRRunner(object):
"""A class responsible for running a selected FHIR implementation.
Can be used in one of two ways:
* Directly, using the ``running_fhir`` property and the ``stop`` method.
* As a context manager: ``with FHIRRunner(configuration) as running_fhir:``
:param flavor: Selected FHIR implementation.
:type flavor: FHIRFlavor
:param host_ip: Host IP used to expose the service externally
, defaults to None
:type host_ip: str, optional
:param kill_orphans: Whether to destroy orphaned Docker objects
from previous runs, defaults to True
:type kill_orphans: bool, optional
:param network_id: A Docker network id to attach to, defaults to None
:type network_id: Optional[str], optional
:param startup_timeout: Number of seconds to wait for server startup,
defaults to 120
:type startup_timeout: float, optional
:param docker_client: A Docker client, will be created
using ``docker.from_env()`` if not set, defaults to None
:type docker_client: Optional[DockerClient], optional
:ivar running_fhir: Descriptor of the running FHIR server.
:vartype running_fhir: RunningFHIR
:raises NotImplementedError: Selected implementation is not supported.
:raises StartupTimeoutError: An error caused by exceeding the time limit.
:raises ContainerRuntimeError: An error related to container runtime.
"""
running_fhir: RunningFHIR
_implementation: FHIRImplementation
_configuration: Configuration
_network: Network
_stopped: bool = False
def __init__(
self,
flavor: FHIRFlavor,
host_ip: Optional[str] = None,
kill_orphans: bool = True,
network_id: Optional[str] = None,
startup_timeout: float = 120,
docker_client: Optional[DockerClient] = None,
) -> None:
"""A constructor of ``RunningFHIR``."""
self._configuration = Configuration(
host_ip=host_ip,
kill_orphans=kill_orphans,
network_id=network_id,
startup_timeout=startup_timeout,
docker_client=docker_client,
)
self._implementation = _create_implementation(flavor)
self.running_fhir = self._start()
def _start(self) -> RunningFHIR:
try:
configuration = self._configuration
if configuration.docker_client:
docker_client = configuration.docker_client
else:
docker_client = docker.from_env()
if configuration.kill_orphans:
_kill_orphaned_containers(docker_client)
_kill_orphaned_networks(docker_client)
new_network_created = configuration.network_id is None
if new_network_created:
network = docker_client.networks.create(
name="pyembeddedfhir",
driver="bridge",
labels={DOCKER_LABEL_KEY: get_docker_label_value()},
)
else:
network = docker_client.networks.get(configuration.network_id)
self._network = network
try:
return self._implementation.start(
docker_client,
configuration,
network,
)
except: # noqa: E722 (intentionally using bare except)
if new_network_created:
network.remove()
raise
except APIError as e:
raise ContainerRuntimeError(e)
def _stop(self) -> None:
try:
if self._stopped:
raise AlreadyStoppedError(
"Tried stopping FHIR, but it was already stopped."
)
self._implementation.stop()
self._network.remove()
self._stopped = True
except APIError as e:
raise ContainerRuntimeError(e)
def stop(self) -> None:
"""Stop the FHIR server and perform cleanup.
:raises ContainerRuntimeError: An error related to container runtime.
:raises AlreadyStoppedError: If the runner was already stopped.
"""
self._stop()
def __enter__(self) -> RunningFHIR:
return self.running_fhir
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
self._stop()
|
StarcoderdataPython
|
3464044
|
<reponame>rsgit95/med_kg_txt_multimodal
from rdflib import Graph, URIRef
from tqdm import tqdm
import pickle
# kg 로드 1분걸려요
kg = Graph()
kg.parse('mimic_sparqlstar_kg.xml', format='xml', publicID='/')
def build_dict(triples, nodes, edges):
h, r, t = triples
#for (h,r,t) in triples:
if h not in nodes:
nodes[h]=1
else:
nodes[h]+=1
if t not in nodes:
nodes[t]=1
else:
nodes[t]+=1
if r not in edges:
edges[r]=1
else:
edges[r]+=1
return nodes, edges
# triple 확인
nodes = dict()
edges = dict()
for triple in tqdm(kg):
triples = [x.n3() for x in triple]
#print(triples)
nodes, edges = build_dict(triples, nodes, edges)
#matching = [s for s in tqdm(list(nodes.keys())) if "hadm_id" in list(nodes.keys())]
print(len(nodes))
print(len(edges))
f = open('node_dict','w')
g = open('edge_dict','w')
for node in list(nodes.keys()):
f.write('{}\n'.format(node))
for edge in list(edges.keys()):
g.write('{}\n'.format(edge))
|
StarcoderdataPython
|
214186
|
import pydsm
import pydsm.similarity
from scipy.stats import spearmanr
from pkg_resources import resource_stream
import pickle
import os
def synonym_test(matrix, synonym_test, sim_func=pydsm.similarity.cos):
"""
Evaluate DSM using a synonym test.
:param matrix: A DSM matrix.
:param synonym_test: A dictionary where the key is the word in focus,
and the value is a list of possible word choices.
The first word in the dict is the correct choice.
:param sim_func: The similarity function to use for evaluation.
:return: Accuracy of synonym test.
"""
correct = []
incorrect = []
unknown_focus_words = []
unknown_synonyms = []
for focus_word, candidates in synonym_test.items():
if focus_word not in matrix.word2row:
unknown_focus_words.append(focus_word)
continue
known_words = [w for w in candidates if w in matrix.word2row]
unknown_words = [w for w in candidates if w not in matrix.word2row]
if candidates[0] in unknown_words:
unknown_synonyms.append(focus_word)
continue
word_sims = sim_func(matrix[focus_word], matrix[known_words], assure_consistency=False).transpose().sort(ascending=False)
if word_sims.row2word[0] == candidates[0]:
correct.append(focus_word)
else:
incorrect.append(focus_word)
accuracy = len(correct) / len(synonym_test)
print("Evaluation report")
print("Accuracy: {}".format(accuracy))
print("Number of words: {}".format(len(synonym_test)))
print("Correct words: {}".format(correct))
print("Incorrect words: {}".format(incorrect))
print("Unknown words: {}".format(unknown_focus_words))
print("Unknown correct synonym: {}".format(unknown_synonyms))
return accuracy
def simlex(matrix, sim_func=pydsm.similarity.cos):
"""
Evaluate DSM using simlex-999 evaluation test [1].
:param matrix: A DSM matrix.
:param sim_func: The similarity function to use for evaluation.
:return: Spearman correlation coefficient.
[1] SimLex-999: Evaluating Semantic Models with (Genuine) Similarity Estimation. 2014.
<NAME>, <NAME> and <NAME>. Preprint pubslished on arXiv. arXiv:1408.3456
"""
wordpair_sims = pickle.load(resource_stream(__name__, os.path.join('resources', 'simlex.pickle')))
simlex_vals = []
sim_vals = []
skipped = []
for (w1, w2), value in wordpair_sims.items():
if w1 not in matrix.word2row or w2 not in matrix.word2row:
skipped.append((w1, w2))
continue
sim_vals.append(sim_func(matrix[w1], matrix[w2])[0,0])
simlex_vals.append(value)
spearman = spearmanr(simlex_vals, sim_vals)
print("Evaluation report")
print("Spearman correlation: {}".format(spearman[0]))
print("P-value: {}".format(spearman[1]))
print("Skipped the following word pairs: {}".format(skipped ))
return spearman[0]
def toefl(matrix, sim_func=pydsm.similarity.cos):
"""
Evaluate DSM using TOEFL synonym test [1].
:param matrix: A DSM matrix.
:param sim_func: The similarity function to use for evaluation.
:return: Accuracy of TOEFL test.
[1] http://aclweb.org/aclwiki/index.php?title=TOEFL_Synonym_Questions_%28State_of_the_art%29
"""
synonym_dict = pickle.load(resource_stream(__name__, os.path.join('resources', 'toefl.pickle')))
return synonym_test(matrix, synonym_dict, sim_func=sim_func)
|
StarcoderdataPython
|
6470549
|
import stats_batch as sb
import numpy as np
import numpy.testing as npt
# Test mean_batch returns the mean if prior_mean and prior_sample_size are missing
def test_mean_batch_missing_prior_mean_prior_sample_size():
x = list(range(1, 100))
assert sb.mean_batch(x)[0] == np.mean(x)
assert sb.mean_batch(x)[1] == len(x)
# Test mean_batch returns the correct mean from multiple batches
def test_mean_batch_multiple_batches():
n = 10_000
x = np.random.normal(size=n)
# First batch
batch_1 = x[:100]
b1_mean, b1_n = sb.mean_batch(batch_1)
# Second batch
batch_2 = x[100:n]
b2_mean, b2_n = sb.mean_batch(batch_2, b1_mean, b1_n)
npt.assert_approx_equal(b2_mean, np.mean(x))
assert b2_n == n
|
StarcoderdataPython
|
11314598
|
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
@receiver(post_save, sender=User)
def create_auth_token(sender, instance=None, created=False, **kwargs):
if created:
Token.objects.create(user=instance)
def user_directory_path(instance, filename):
return 'user_{0}/{1}'.format(instance.user.id, filename)
# use the default User model and this Profile model to represent user info
class Profile(models.Model):
user = models.OneToOneField(User, related_name='profile', on_delete=models.CASCADE)
avatar = models.FileField(upload_to=user_directory_path, blank=True, null=True)
def __str__(self):
return f"Profile [id:{self.id}, user_id:{self.user.id}, username:{self.user.username}]"
class Meta:
app_label = "monopoly"
|
StarcoderdataPython
|
265318
|
<filename>model/wordrep.py<gh_stars>1-10
from __future__ import print_function
from __future__ import absolute_import
import torch
import torch.nn as nn
import numpy as np
class WordRep(nn.Module):
def __init__(self, data):
super(WordRep, self).__init__()
print("build word representation...")
self.gpu = data.HP_gpu
self.batch_size = data.HP_batch_size
self.embedding_dim = data.word_emb_dim
self.drop = nn.Dropout(data.HP_dropout)
# word embedding
self.word_embedding = nn.Embedding(data.word_alphabet.size(), self.embedding_dim)
if data.pretrain_word_embedding is not None:
self.word_embedding.weight.data.copy_(torch.from_numpy(data.pretrain_word_embedding))
else:
self.word_embedding.weight.data.copy_(
torch.from_numpy(self.random_embedding(data.word_alphabet.size(), self.embedding_dim)))
# label embedding
self.label_dim = data.HP_hidden_dim
self.label_embedding = nn.Embedding(data.label_alphabet_size, self.label_dim)
self.label_embedding.weight.data.copy_(torch.from_numpy(
self.random_embedding_label(data.label_alphabet_size, self.label_dim, data.label_embedding_scale)))
# whether to use gpu
if self.gpu:
self.drop = self.drop.cuda()
self.word_embedding = self.word_embedding.cuda()
self.label_embedding = self.label_embedding.cuda()
def random_embedding(self, vocab_size, embedding_dim):
pretrain_emb = np.empty([vocab_size, embedding_dim])
scale = np.sqrt(3.0 / embedding_dim)
for index in range(vocab_size):
pretrain_emb[index, :] = np.random.uniform(-scale, scale, [1, embedding_dim])
return pretrain_emb
def random_embedding_label(self, vocab_size, embedding_dim, scale):
pretrain_emb = np.empty([vocab_size, embedding_dim])
# scale = np.sqrt(3.0 / embedding_dim)
# scale = 0.025
for index in range(vocab_size):
pretrain_emb[index, :] = np.random.uniform(-scale, scale, [1, embedding_dim])
return pretrain_emb
def forward(self, word_inputs, input_label_seq_tensor):
"""
input:
word_inputs: (batch_size, sent_len)
word_seq_lengths: list of batch_size, (batch_size,1)
input_label_seq_tensor: (batch_size, number of label)
output:
Variable(batch_size, sent_len, hidden_dim)
"""
word_embs = self.word_embedding(word_inputs)
# label embedding
label_embs = self.label_embedding(input_label_seq_tensor)
word_represent = self.drop(word_embs)
# label_embs = self.drop(label_embs)
return word_represent, label_embs
|
StarcoderdataPython
|
11246563
|
<reponame>cfnyu/distributed_db
# -*- coding: utf-8 -*-
""" Site
This module represents a site
"""
from enum import IntEnum
from sites.data_manager import DataManager
from objects.variable import Variable
class SiteStatus(IntEnum):
""" Represents the possible status of a Site """
UP = 1,
DOWN = 2
class Site:
""" Represents a single Site """
def __init__(self, site_id, time, logger):
self.identifer = site_id # TODO: Fix typo
self.status = SiteStatus.UP
self.create_time = 0
variables = {}
# Load all variables
for i in range(1, 21):
if i % 2 == 0 or 1 + (i % 10) == site_id:
new_variable = Variable(time, i)
logger.log("Adding %s at time %s" % (new_variable.identifier, str(time)))
variables[new_variable.identifier] = new_variable
self.data_manager = DataManager(variables, logger, site_id)
def dump(self):
""" Dump the results of all commits values to stdout """
return self.data_manager.variables
def recover(self, time):
""" Recover this site """
self.status = SiteStatus.UP
self.create_time = time
for variable in self.data_manager.variables.values():
variable.readable = not variable.replicated
def fail(self):
""" Fail this site """
self.status = SiteStatus.DOWN
#clear lock list
self.data_manager.locks = {}
self.data_manager.entries = {} # clear entries log
|
StarcoderdataPython
|
3283034
|
<reponame>knownmed/opentrons<gh_stars>0
import logging
import numpy as np # type: ignore
from dataclasses import dataclass
from typing import Optional, List
from opentrons import config
from opentrons.config.robot_configs import get_legacy_gantry_calibration
from opentrons.calibration_storage import modify, types, get
from opentrons.types import Mount
from opentrons.util import linal
from .util import DeckTransformState
log = logging.getLogger(__name__)
@dataclass
class RobotCalibration:
deck_calibration: types.DeckCalibration
def build_temporary_identity_calibration() -> RobotCalibration:
"""
Get a temporary identity deck cal suitable for use during
calibration
"""
return RobotCalibration(
deck_calibration=types.DeckCalibration(
attitude=linal.identity_deck_transform().tolist(),
source=types.SourceType.default,
status=types.CalibrationStatus(),
)
)
def validate_attitude_deck_calibration(deck_cal: types.DeckCalibration):
"""
This function determines whether the deck calibration is valid
or not based on the following use-cases:
TODO(lc, 8/10/2020): Expand on this method, or create
another method to diagnose bad instrument offset data
"""
curr_cal = np.array(deck_cal.attitude)
row, _ = curr_cal.shape
rank = np.linalg.matrix_rank(curr_cal)
if row != rank:
# Check that the matrix is non-singular
return DeckTransformState.SINGULARITY
elif not deck_cal.last_modified:
# Check that the matrix is not an identity
return DeckTransformState.IDENTITY
else:
# Transform as it stands is sufficient.
return DeckTransformState.OK
def validate_gantry_calibration(gantry_cal: List[List[float]]):
"""
This function determines whether the gantry calibration is valid
or not based on the following use-cases:
"""
curr_cal = np.array(gantry_cal)
row, _ = curr_cal.shape
rank = np.linalg.matrix_rank(curr_cal)
id_matrix = linal.identity_deck_transform()
z = abs(curr_cal[2][-1])
outofrange = z < 16 or z > 34
if row != rank:
# Check that the matrix is non-singular
return DeckTransformState.SINGULARITY
elif np.array_equal(curr_cal, id_matrix):
# Check that the matrix is not an identity
return DeckTransformState.IDENTITY
elif outofrange:
# Check that the matrix is not out of range.
return DeckTransformState.BAD_CALIBRATION
else:
# Transform as it stands is sufficient.
return DeckTransformState.OK
def migrate_affine_xy_to_attitude(
gantry_cal: List[List[float]],
) -> types.AttitudeMatrix:
masked_transform = np.array(
[
[True, True, True, False],
[True, True, True, False],
[False, False, False, False],
[False, False, False, False],
]
)
masked_array = np.ma.masked_array(gantry_cal, ~masked_transform)
attitude_array = np.zeros((3, 3))
np.put(attitude_array, [0, 1, 2], masked_array[0].compressed())
np.put(attitude_array, [3, 4, 5], masked_array[1].compressed())
np.put(attitude_array, 8, 1)
return attitude_array.tolist()
def save_attitude_matrix(
expected: linal.SolvePoints,
actual: linal.SolvePoints,
pipette_id: str,
tiprack_hash: str,
):
attitude = linal.solve_attitude(expected, actual)
modify.save_robot_deck_attitude(attitude, pipette_id, tiprack_hash)
def load_attitude_matrix() -> types.DeckCalibration:
calibration_data = get.get_robot_deck_attitude()
gantry_cal = get_legacy_gantry_calibration()
if not calibration_data and gantry_cal:
if validate_gantry_calibration(gantry_cal) == DeckTransformState.OK:
log.debug(
"Attitude deck calibration matrix not found. Migrating "
"existing affine deck calibration matrix to {}".format(
config.get_opentrons_path("robot_calibration_dir")
)
)
attitude = migrate_affine_xy_to_attitude(gantry_cal)
modify.save_robot_deck_attitude(
transform=attitude,
pip_id=None,
lw_hash=None,
source=types.SourceType.legacy,
)
calibration_data = get.get_robot_deck_attitude()
if calibration_data:
return calibration_data
else:
# load default if deck calibration data do not exist
return types.DeckCalibration(
attitude=config.robot_configs.DEFAULT_DECK_CALIBRATION_V2,
source=types.SourceType.default,
status=types.CalibrationStatus(),
)
def load_pipette_offset(
pip_id: Optional[str], mount: Mount
) -> types.PipetteOffsetByPipetteMount:
# load default if pipette offset data do not exist
pip_cal_obj = types.PipetteOffsetByPipetteMount(
offset=config.robot_configs.DEFAULT_PIPETTE_OFFSET,
source=types.SourceType.default,
status=types.CalibrationStatus(),
)
if pip_id:
pip_offset_data = get.get_pipette_offset(pip_id, mount)
if pip_offset_data:
return pip_offset_data
return pip_cal_obj
def load() -> RobotCalibration:
return RobotCalibration(deck_calibration=load_attitude_matrix())
|
StarcoderdataPython
|
8186508
|
<gh_stars>10-100
from Instrucciones.Excepcion import Excepcion
from tkinter.constants import FALSE
from Instrucciones.Sql_create.ShowDatabases import ShowDatabases
from Instrucciones.TablaSimbolos.Instruccion import *
from Instrucciones.Tablas.BaseDeDatos import BaseDeDatos
from storageManager.jsonMode import *
class CreateDatabase(Instruccion):
def __init__(self, base, tipo, existe, owner, mode, strGram, linea, columna):
Instruccion.__init__(self,tipo,linea,columna, strGram)
self.base=base
self.tipo=tipo
self.existe = existe
self.owner=owner
self.mode=mode
def ejecutar(self, tabla, arbol):
super().ejecutar(tabla,arbol)
bandera = False
#SE OBTIENE LA LISTA DE BD
lb=showDatabases()
#SE RECORRE LA BD PARA VERIFICAR QUE NO EXISTA
for bd in lb:
if bd== self.base:
#SI SE ENCUENTRA LA BD SE TERMINA EL RECORRIDO
bandera = True
break
if self.existe=="IF NOT EXISTS" and bandera==True:
arbol.consola.append(f"La Base de Datos ya existe: {self.base}.")
print(f"LA BASE DE DATOS: {self.base} YA EXISTE.")
elif self.existe=="IF NOT EXISTS" and bandera==False:
arbol.consola.append(f"Se Creo la base de datos: {self.base} correctamente.")
print(f"SE CREO LA BASE DE DATOS: {self.base} CORRECTAMENTE.")
createDatabase(str(self.base))
nueva = BaseDeDatos(str(self.base))
arbol.setListaBd(nueva)
elif self.existe=="NULL" and bandera==True:
error = Excepcion("42P04","Semantico",f"La Base de Datos {self.base} ya Existe.",self.linea,self.columna)
print(f"LA BASE DE DATOS: {self.base} YA EXISTE.")
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
elif self.existe=="NULL" and bandera==False:
#AVISOS
arbol.consola.append(f"Se Creo la base de datos: {self.base} correctamente.")
print(f"SE CREO LA BASE DE DATOS: {self.base} CORRECTAMENTE.")
createDatabase(str(self.base))
nueva = BaseDeDatos(str(self.base))
arbol.setListaBd(nueva)
def getCodigo(self, tabla, arbol):
base = self.base
tipo = self.tipo
existe = self.existe
owner = f"\"{self.owner}\"" if self.owner else None
mode = self.mode
strGram = self.strGram
linea = self.linea
columna = self.columna
num_params = 8
temp_param_base = arbol.getTemporal()
temp_param_tipo = arbol.getTemporal()
temp_param_existe = arbol.getTemporal()
temp_param_owner = arbol.getTemporal()
temp_param_mode = arbol.getTemporal()
temp_param_strGram = arbol.getTemporal()
temp_param_linea = arbol.getTemporal()
temp_param_columna = arbol.getTemporal()
temp_tam_func = arbol.getTemporal()
temp_index_param_base = arbol.getTemporal()
temp_index_param_tipo = arbol.getTemporal()
temp_index_param_existe = arbol.getTemporal()
temp_index_param_owner = arbol.getTemporal()
temp_index_param_mode = arbol.getTemporal()
temp_index_param_strGram = arbol.getTemporal()
temp_index_param_linea = arbol.getTemporal()
temp_index_param_columna = arbol.getTemporal()
temp_return = arbol.getTemporal()
temp_result = arbol.getTemporal()
codigo = f"\t#CREATE DATABASE 3D\n"
codigo += f"\t{temp_param_base} = \"{base}\"\n"
codigo += f"\t{temp_param_tipo} = {tipo}\n"
codigo += f"\t{temp_param_existe} = \"{existe}\"\n"
codigo += f"\t{temp_param_owner} = {owner}\n"
codigo += f"\t{temp_param_mode} = {mode}\n"
codigo += f"\t{temp_param_strGram} = \"{strGram}\"\n"
codigo += f"\t{temp_param_linea} = {linea}\n"
codigo += f"\t{temp_param_columna} = {columna}\n"
codigo += f"\t{temp_tam_func} = pointer + {num_params}\n"
codigo += f"\t{temp_index_param_base} = {temp_tam_func} + 1\n"
codigo += f"\tstack[{temp_index_param_base}] = {temp_param_base}\n"
codigo += f"\t{temp_index_param_tipo} = {temp_tam_func} + 2\n"
codigo += f"\tstack[{temp_index_param_tipo}] = {temp_param_tipo}\n"
codigo += f"\t{temp_index_param_existe} = {temp_tam_func} + 3\n"
codigo += f"\tstack[{temp_index_param_existe}] = {temp_param_existe}\n"
codigo += f"\t{temp_index_param_owner} = {temp_tam_func} + 4\n"
codigo += f"\tstack[{temp_index_param_owner}] = {temp_param_owner}\n"
codigo += f"\t{temp_index_param_mode} = {temp_tam_func} + 5\n"
codigo += f"\tstack[{temp_index_param_mode}] = {temp_param_mode}\n"
codigo += f"\t{temp_index_param_strGram} = {temp_tam_func} + 6\n"
codigo += f"\tstack[{temp_index_param_strGram}] = {temp_param_strGram}\n"
codigo += f"\t{temp_index_param_linea} = {temp_tam_func} + 7\n"
codigo += f"\tstack[{temp_index_param_linea}] = {temp_param_linea}\n"
codigo += f"\t{temp_index_param_columna} = {temp_tam_func} + 8\n"
codigo += f"\tstack[{temp_index_param_columna}] = {temp_param_columna}\n"
codigo += f"\tpointer = pointer + {num_params}\n"
codigo += f"\tinter_createDataBase()\n"
#codigo += f"\t{temp_return} = pointer + 0\n"
#codigo += f"\t{temp_result} = stack[{temp_return}]\n"
codigo += f"\tpointer = pointer - {num_params}\n"
#codigo += f"\tprint({temp_result})\n"
#arbol.consola.append(codigo)
return codigo
'''
instruccion = CreateDatabase("hola mundo",None, 1,2)
instruccion.ejecutar(None,None)
'''
|
StarcoderdataPython
|
12827807
|
"""
Save certstream data into Elasticsearch so that it can be queried by Kibana
later on.
"""
from datetime import datetime
from elasticsearch_dsl import connections, analyzer
from elasticsearch_dsl import Document, Date, Text, Keyword
from .base import Storage
ANALYZER = analyzer('standard_analyzer',
tokenizer='standard_tokenizer',
filter=['lowercase'])
# pylint: disable=too-few-public-methods
class ElasticsearchStorage(Storage):
"""
An experiment Elasticsearch storage to keep and index the received records.
"""
class Record(Document):
"""
An Elasticsearch record as it is.
"""
timestamp = Date(default_timezone='UTC')
# As reported by certstream
seen = Date(default_timezone='UTC')
# The domain time to live
not_before = Date(default_timezone='UTC')
not_after = Date(default_timezone='UTC')
# The domain and its alternative names
domain = Text(analyzer=ANALYZER, fields={'raw': Keyword()})
san = Text(analyzer=ANALYZER, fields={'raw': Keyword()})
# The issuer
chain = Text(analyzer=ANALYZER, fields={'raw': Keyword()})
class Index:
"""
Use daily indices.
"""
name = 'certstream-*'
# pylint: disable=arguments-differ
def save(self, **kwargs):
"""
Magically save the record in Elasticsearch.
"""
self.timestamp = datetime.now()
# Override the index to go to the proper timeslot
kwargs['index'] = self.timestamp.strftime('certstream-%Y.%m.%d')
return super().save(**kwargs)
def __init__(self, hosts, timeout=10):
"""
Provide the Elasticsearch hostname (Defaults to localhost).
"""
connections.create_connection(hosts=hosts, timeout=timeout)
def save(self, record):
"""
Save the certstream record in Elasticsearch.
"""
elasticsearch_record = ElasticsearchStorage.Record(meta={'id': record['cert_index']})
# In miliseconds
elasticsearch_record.seen = int(record['seen'] * 1000)
elasticsearch_record.not_before = int(record['not_before'] * 1000)
elasticsearch_record.not_after = int(record['not_after'] * 1000)
# Elasticsearch will parse and index the domain and all its alternative names
elasticsearch_record.domain = record['all_domains'][0]
elasticsearch_record.san = record['all_domains'][1:]
elasticsearch_record.save()
|
StarcoderdataPython
|
1648346
|
<gh_stars>1-10
#!/usr/bin/env python
# Script that generates triangle specifications for all
# possible combinations of corner signs for the marching
# cubes algorithm.
#
# Assumes that the corner signs are encoded as integers
# where bit i indicates whether corner i (as given by the
# corner index specification below) is positive or not.
#
# Outputs two Java arrays code corresponding to:
# - The number of triangles for each sign combination.
# - The edges of the vertices for these triangles (as
# given by the edge index specification below).
#
# Operates by taking a minimal set of pre-specified
# triangles and applying three operations (two rotations
# and an inversion) to them until all combinations have
# been generated.
#
# Corner indices:
# 6--------------7
# /: /|
# / : / |
# / : / |
# 4--------------5 |
# | : | |
# | : | |
# | 2 - - - - -|- -3
# | / | /
# | / | /
# |/ |/
# 0--------------1
#
# Edge indices:
# -------10-------
# /: /|
# 11 : 9 |
# / 6 / 5
# -------8-------- |
# | : | |
# | : | |
# | - - - -2- -|- -|
# 7 / 4 /
# | 3 | 1
# |/ |/
# -------0--------
import itertools
import pprint
import ipdb
# Which corners indices map to which under the operations
X_ROTATION_CORNER_MAP = [1, 5, 3, 7, 0, 4, 2, 6]
Y_ROTATION_CORNER_MAP = [4, 5, 0, 1, 6, 7, 2, 3]
Z_ROTATION_CORNER_MAP = [2, 0, 3, 1, 6, 4, 7, 5]
INVERSION_CORNER_MAP = [1, 0, 3, 2, 5, 4, 7, 6]
# Which edges indices map to which under the operations
X_ROTATION_EDGE_MAP = [7, 3, 6, 11, 0, 2, 10, 8, 4, 1, 5, 9]
Y_ROTATION_EDGE_MAP = [2, 5, 10, 6, 1, 9, 11, 3, 0, 4, 8, 7]
Z_ROTATION_EDGE_MAP = [1, 2, 3, 0, 5, 6, 7, 4, 9, 10, 11, 8]
INVERSION_EDGE_MAP = [0, 3, 2, 1, 7, 6, 5, 4, 8, 11, 10, 9]
MAX_PERMUTATIONS = 8
# Initial triangle specification, given as a dict {a: (b, c), ...} where:
# - a is an integer containing the corner flags for the cube.
# - b is the number of corresponding triangles
# - c is a list of the edge indices of the corresponding triangles
INITIAL_SPECIFICATIONS = {0b00000000: (0, []),
0b00000001: (1, [0,3,7]),
0b00000011: (2, [4,1,7,7,1,3]),
0b00100001: (2, [0,3,7,8,9,4]),
0b00001110: (3, [0,4,3,3,4,6,6,4,5]),
0b00001111: (2, [4,5,7,7,5,6]),
0b00011110: (4, [0,4,3,3,4,6,6,4,5,11,8,7]),
0b01101001: (4, [0,3,7,1,5,2,8,9,4,10,11,6]),
0b01001101: (4, [1,5,0,0,5,10,0,10,7,7,10,11]),
0b01001110: (4, [4,5,0,0,5,11,10,11,5,11,3,0]),
0b10000001: (2, [0,3,7,5,9,10]),
0b10000011: (3, [4,1,7,7,1,3,9,10,5]),
0b10010010: (3, [4,1,0,11,8,7,9,10,5]),
0b10011001: (4, [8,0,11,11,0,3,9,10,1,1,10,2]),
0b10001101: (4, [1,9,0,0,9,6,6,9,10,0,6,7])}
def apply_maps(corner_flags, edges, corner_map, edge_map):
new_corner_flags = 0
for i in range(8):
new_corner_flags += ((corner_flags >> corner_map[i]) & 1) << i
new_triangles = [edge_map[edge] for edge in edges]
return new_corner_flags, new_triangles
# Takes lists of corner flags and edge indices and returns two lists
# containing the elements they are mapped to under a rotation about the
# x axis.
def x_rotation(corner_flags, edges):
return apply_maps(corner_flags, edges, X_ROTATION_CORNER_MAP, X_ROTATION_EDGE_MAP)
def y_rotation(corner_flags, edges):
return apply_maps(corner_flags, edges, Y_ROTATION_CORNER_MAP, Y_ROTATION_EDGE_MAP)
def z_rotation(corner_flags, edges):
return apply_maps(corner_flags, edges, Z_ROTATION_CORNER_MAP, Z_ROTATION_EDGE_MAP)
# Takes lists of corner flags and edge indices and returns two lists
# containing the elements they are mapped to under the inversion.
def inversion(corner_flags, edges):
new_corner_flags, new_edges = apply_maps(corner_flags, edges, INVERSION_CORNER_MAP, INVERSION_EDGE_MAP)
new_edges = flip_normals(new_edges)
return new_corner_flags, new_edges
def sign_flip(corner_flags, edges):
return corner_flags ^ 0b11111111, flip_normals(edges)
def flip_normals(edges):
if len(edges) == 0:
return edges
for i in range(0, len(edges), 3):
temp = edges[i]
edges[i] = edges[i + 1]
edges[i + 1] = temp
return edges
def print_specifications(specifications):
triangle_counts = []
edges = []
for i in range(256):
triangle_counts.append(specifications[i][0] if specifications.has_key(i) else 0)
edges.append(specifications[i][1] if specifications.has_key(i) else [])
print "final static int[] TRIANGLE_COUNTS = %s;" % str(triangle_counts).replace('[','{').replace(']','}')
print "final static int[][] TRIANGLE_SPECIFICATIONS = %s;" % str(edges).replace('[','{').replace(']','}')
specifications = INITIAL_SPECIFICATIONS.copy()
available_operations = [x_rotation, z_rotation, y_rotation, sign_flip]
# generate all combinations by brute force
for permutation in itertools.combinations_with_replacement(available_operations, MAX_PERMUTATIONS):
for corner_flags, (triangle_count, edges) in INITIAL_SPECIFICATIONS.iteritems():
for operation in permutation:
corner_flags, edges = operation(corner_flags, edges)
specifications[corner_flags] = (triangle_count, edges)
print_specifications(specifications)
|
StarcoderdataPython
|
1911946
|
<filename>examples/autobahn-twisted-flask/app.py
import argparse
import json
import msgpack
from flask import Flask, render_template
from twisted.internet import reactor
from twisted.web.server import Site
from twisted.web.resource import Resource
from twisted.web.wsgi import WSGIResource
from autobahn.websocket import WebSocketServerFactory, WebSocketServerProtocol
from autobahn.resource import WebSocketResource
import myapp
app = Flask(__name__)
app.debug = True
@app.route("/")
def index():
return render_template("index.html")
class WebSocketWrapperProtocol(WebSocketServerProtocol):
def onConnect(self, req):
P = "wswrapper_msgpack"
if P in req.protocols:
self._is_msgpack_used = True
return (P, {})
P = "wswrapper_json"
if P in req.protocols:
self._is_msgpack_used = False
return (P, {})
def onMessage(self, msg, binary):
if self._is_msgpack_used:
data = msgpack.unpackb(msg)
else:
data = json.loads(msg)
name, payload = data
try:
getattr(myapp, "on_%s" % name)(self, payload)
except AttributeError as e:
self.close()
def emit(self, name, payload):
data = [name, payload]
if self._is_msgpack_used:
self.sendMessage(msgpack.packb(data), True)
else:
self.sendMessage(json.dumps(data))
class Root(Resource):
wsgi = WSGIResource(reactor, reactor.getThreadPool(), app)
def getChild(self, child, req):
req.prepath.pop()
req.postpath.insert(0, child)
return self.wsgi
def render(self, req):
return self.wsgi.render(req)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--port", metavar="PORT=3000", type=int, default=3000)
myapp.setup_argparser(parser)
args = parser.parse_args()
myapp.setup_app(args)
factory = WebSocketServerFactory("ws://localhost:%d" % args.port)
factory.protocol = WebSocketWrapperProtocol
root = Root()
root.putChild("ws", WebSocketResource(factory))
reactor.listenTCP(args.port, Site(root))
reactor.run()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3374095
|
import pytest
from cell import Cell
def _as_digit(digit: int):
return digit
def test_init_with_invalid_value():
with pytest.raises(Exception):
Cell(10)
@pytest.mark.parametrize("repr_fun", [_as_digit, Cell])
def test_exclude_and_solve(repr_fun):
cell = Cell()
cell.exclude([repr_fun(i) for i in range(1, 8)]) # 1, ..., 7
assert cell.value is None
assert cell.is_solved is False
cell.exclude(repr_fun(8))
assert cell.value == 9
assert cell.is_solved is True
def test_set_unexpected_value():
cell = Cell()
cell.exclude(1)
with pytest.raises(Exception):
cell.value = 1
|
StarcoderdataPython
|
1993952
|
<gh_stars>0
# coding=utf-8
"""
Collect HAProxy Stats
#### Dependencies
* urlparse
* urllib2
"""
import re
import urllib2
import base64
import csv
import diamond.collector
class HAProxyCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(HAProxyCollector, self).get_default_config_help()
config_help.update({
'url': "Url to stats in csv format",
'user': "Username",
'pass': "Password",
'ignore_servers': "Ignore servers, just collect frontend and "
+ "backend stats",
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(HAProxyCollector, self).get_default_config()
config.update({
'path': 'haproxy',
'url': 'http://localhost/haproxy?stats;csv',
'user': 'admin',
'pass': 'password',
'ignore_servers': False,
})
return config
def get_csv_data(self):
"""
Request stats from HAProxy Server
"""
metrics = []
req = urllib2.Request(self.config['url'])
try:
handle = urllib2.urlopen(req)
return handle.readlines()
except Exception, e:
if not hasattr(e, 'code') or e.code != 401:
self.log.error("Error retrieving HAProxy stats. %s", e)
return metrics
# get the www-authenticate line from the headers
# which has the authentication scheme and realm in it
authline = e.headers['www-authenticate']
# this regular expression is used to extract scheme and realm
authre = (r'''(?:\s*www-authenticate\s*:)?\s*'''
+ '''(\w*)\s+realm=['"]([^'"]+)['"]''')
authobj = re.compile(authre, re.IGNORECASE)
matchobj = authobj.match(authline)
if not matchobj:
# if the authline isn't matched by the regular expression
# then something is wrong
self.log.error('The authentication header is malformed.')
return metrics
scheme = matchobj.group(1)
# here we've extracted the scheme
# and the realm from the header
if scheme.lower() != 'basic':
self.log.error('Invalid authentication scheme.')
return metrics
base64string = base64.encodestring(
'%s:%s' % (self.config['user'], self.config['pass']))[:-1]
authheader = 'Basic %s' % base64string
req.add_header("Authorization", authheader)
try:
handle = urllib2.urlopen(req)
metrics = handle.readlines()
return metrics
except IOError, e:
# here we shouldn't fail if the USER/PASS is right
self.log.error("Error retrieving HAProxy stats. (Invalid username "
+ "or password?) %s", e)
return metrics
def _generate_headings(self, row):
headings = {}
for index, heading in enumerate(row):
headings[index] = self._sanitize(heading)
return headings
def collect(self):
"""
Collect HAProxy Stats
"""
csv_data = self.get_csv_data()
data = list(csv.reader(csv_data))
headings = self._generate_headings(data[0])
for row in data:
if (self.config['ignore_servers']
and row[1].lower() not in ['frontend', 'backend']):
continue
part_one = self._sanitize(row[0].lower())
part_two = self._sanitize(row[1].lower())
metric_name = '%s.%s' % (part_one, part_two)
for index, metric_string in enumerate(row):
try:
metric_value = float(metric_string)
except ValueError:
continue
stat_name = '%s.%s' % (metric_name, headings[index])
self.publish(stat_name, metric_value, metric_type='GAUGE')
def _sanitize(self, s):
"""Sanitize the name of a metric to remove unwanted chars
"""
return re.sub('[^\w-]', '_', s)
|
StarcoderdataPython
|
4866838
|
"""
The :mod:`part` package is designed to maintain subsets of sorted spaces.
It defines several classes.
For atomic values:
* :class:`TotallyOrdered` which represents totally ordered type;
* :class:`TO` which represents a generic totally ordered type;
* :class:`Atomic` which represents any convex subset of a totally ordered space;
* :class:`Empty` which represents the empty subset. There is only one instance of
this class;
* :class:`Interval` which represents a non-empty subset of a totally ordered space.
There is a special instance representing the whole space;
For set classes:
* :class:`IntervalSet` is an abstract class representing all interval sets;
* :class:`FrozenIntervalSet` is a frozen version of :class:`IntervalSet`;
* :class:`MutableIntervalSet` is a mutable version of :class:`IntervalSet`.
For dictionary classes:
* :class:`IntervalDict` is an abstract class representing all interval dictionaries;
* :class:`FrozenIntervalDict` is a frozen version of :class:`IntervalDict`;
* :class:`MutableIntervalDict` is a mutable version of :class:`IntervalDict`.
It also defines one constant:
* :const:`INFINITY` to hold the infinity value. (-:const:`INFINITY` is also a valid
expression);
"""
from .atomic import Atomic, Empty, Interval, TO, TotallyOrdered
from .dicts import IntervalDict, FrozenIntervalDict, MutableIntervalDict
from .sets import IntervalSet, FrozenIntervalSet, MutableIntervalSet
from .values import INFINITY
__all__ = (
"INFINITY",
"TO",
"TotallyOrdered",
"Atomic",
"Empty",
"Interval",
"IntervalSet",
"FrozenIntervalSet",
"MutableIntervalSet",
"IntervalDict",
"FrozenIntervalDict",
"MutableIntervalDict",
)
|
StarcoderdataPython
|
8194751
|
# -*- coding: utf-8 -*-
"""
test_parse_haadf_stem
~~~~~~~~~~~~~
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from lxml import etree
from chemdataextractor.doc.text import Sentence
from chemdataextractor.parse.mp import mp_phrase
class TestParseMp(unittest.TestCase):
def test_mp1(self):
# Declaration
s = Sentence('Colorless solid (81% yield, 74.8 mg, 0.22 mmol); mp 77.2–77.5 °C.')
expected = '<mp_phrase><mp><value>77.2–77.5</value><units>°C</units></mp></mp_phrase>'
# Testing
result = next(mp_phrase.scan(s.tagged_tokens))[0]
#Assertion
self.assertEqual(expected, etree.tostring(result, encoding='unicode'))
|
StarcoderdataPython
|
1643766
|
from ubiquiti.unifi import API as Unifi_API
import json
import paho.mqtt.client as mqtt
import time
class anybody_home():
def __init__(self):
self.n_devices = 4
self.dev_1 = '192.168.0.51'
self.dev_2 = '192.168.0.52'
self.dev_3 = '192.168.0.53'
self.dev_4 = '192.168.0.54'
self.devices = ['192.168.0.51','192.168.0.52','192.168.0.53','192.168.0.54']
self.last_seen_prev = [(time.time() - 7200)] * self.n_devices
def time_seen(self, dev_list):
#last_seen = [0] * self.n_devices
max_devices = len(dev_list)
for i in range(0,max_devices):
for j in range(0,self.n_devices):
try:
if dev_list[i]['ip'] == self.devices[j]:
self.last_seen_prev[j] = device_list[i]['last_seen']
except:
continue
def time_since_seen(self):
duration = [0] * self.n_devices
for i in range(0, self.n_devices):
duration[i] = round((time.time() - self.last_seen_prev[i])/60)
if duration[i] > 999:
duration[i] = 999
return duration
def anybody_home(self, last_seen_curr):
somebody_home = 0
if any(t < 20 for t in last_seen_curr):
somebody_home = 1
return somebody_home
if __name__ == "__main__":
anybody_home = anybody_home()
topic = "home/inside/sensor/presence"
# Broker details:
server_address="192.168.0.10"
client = mqtt.Client("docker_anybody_home")
client.connect(server_address, keepalive=60)
while True:
api = Unifi_API(username="davideo.ezy", password="<PASSWORD>", baseurl="https://192.168.0.10:8443", verify_ssl=False)
api.login()
device_list = (api.list_clients(order_by="ip"))
api.logout()
anybody_home.time_seen(device_list)
last_seen_curr = anybody_home.time_since_seen()
somebody_home = anybody_home.anybody_home(last_seen_curr)
dict_msg = {"somebody_home": somebody_home, "dev_1":last_seen_curr[0], "dev_2":last_seen_curr[1], "dev_3":last_seen_curr[2], "dev_4":last_seen_curr[3]}
msg = json.dumps(dict_msg)
client.publish(topic,msg)
time.sleep(30)
|
StarcoderdataPython
|
9625941
|
import re
class EvalNode:
"""An eval node"""
def __init__(self, name):
self.name = name;
self.cached = -1
def parse(self):
if(self.cached == -1):
#print("parsing", self.name)
self.cached = applyOp(self.l.parse(), self.op, self.r.parse())
return self.cached;
class ConstNode:
def __init__(self, const):
self.const = const;
self.name = str(const)
def parse(self):
return self.const
def applyOp(l, op, r):
if op == "NOT":
return ~r
elif op == "AND":
return l & r
elif op == "OR":
return l | r
elif op == "LSHIFT":
return l << r
elif op == "RSHIFT":
return l >> r
#const
else:
return r;
#empty strings are matched, yes. We want them to be evaluated to 0
re_isConst = re.compile("\d*$")
def isConst(str):
return re_isConst.match(str);
def parseLine(line):
args = line.strip().split(" ")
args.reverse()
args += [''] * (5 - len(args))
(out, arrow, right, op, left) = args;
#print(out, " = ", left, op, right )
if(isConst(left)):
l = ConstNode( int('0'+left) )
else:
l = wires[left]
if(isConst(right)):
r = ConstNode( int('0'+right) )
else:
r = wires[right]
#print(wires[out].name, " = " , l.name, r.name );
wires[out].l = l;
wires[out].r = r;
wires[out].op = op;
#build wire list
wires = { "":{} }
for i in range(ord('a'), ord('z')+1):
wires[chr(i)] = EvalNode(chr(i));
for j in range(ord('a'), ord('z')+1):
wires[chr(i)+chr(j)] = EvalNode(chr(i)+chr(j));
#add wire tree
with open("input/input7.txt") as f:
for line in f:
parseLine( line );
aay = wires['a'].parse();
print ( aay );
for i in range(ord('a'), ord('z')+1):
wires[chr(i)].cached = -1
for j in range(ord('a'), ord('z')+1):
wires[chr(i)+chr(j)].cached = -1;
wires['b'].cached = aay
print ( wires['a'].parse() );
|
StarcoderdataPython
|
323458
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import textwrap
import pytest
from pants.backend.kotlin import target_types
from pants.backend.kotlin.dependency_inference import kotlin_parser
from pants.backend.kotlin.dependency_inference.kotlin_parser import (
KotlinImport,
KotlinSourceDependencyAnalysis,
)
from pants.backend.kotlin.target_types import KotlinSourceField, KotlinSourceTarget
from pants.build_graph.address import Address
from pants.core.util_rules import source_files
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.engine.rules import QueryRule
from pants.engine.target import SourcesField
from pants.jvm import jdk_rules
from pants.jvm import util_rules as jvm_util_rules
from pants.jvm.resolve import jvm_tool
from pants.testutil.rule_runner import PYTHON_BOOTSTRAP_ENV, RuleRunner, logging
from pants.util.frozendict import FrozenDict
@pytest.fixture
def rule_runner() -> RuleRunner:
rule_runner = RuleRunner(
rules=[
*kotlin_parser.rules(),
*jvm_tool.rules(),
*source_files.rules(),
*jdk_rules.rules(),
*target_types.rules(),
*jvm_util_rules.rules(),
QueryRule(SourceFiles, (SourceFilesRequest,)),
QueryRule(KotlinSourceDependencyAnalysis, (SourceFiles,)),
],
target_types=[KotlinSourceTarget],
)
rule_runner.set_options(args=[], env_inherit=PYTHON_BOOTSTRAP_ENV)
return rule_runner
def _analyze(rule_runner: RuleRunner, source: str) -> KotlinSourceDependencyAnalysis:
rule_runner.write_files(
{
"BUILD": """kotlin_source(name="source", source="Source.kt")""",
"Source.kt": source,
}
)
target = rule_runner.get_target(address=Address("", target_name="source"))
source_files = rule_runner.request(
SourceFiles,
[
SourceFilesRequest(
(target.get(SourcesField),),
for_sources_types=(KotlinSourceField,),
enable_codegen=True,
)
],
)
return rule_runner.request(KotlinSourceDependencyAnalysis, [source_files])
@logging
def test_parser_simple(rule_runner: RuleRunner) -> None:
analysis = _analyze(
rule_runner,
textwrap.dedent(
"""\
package org.pantsbuild.backend.kotlin
import java.io.File
open class Foo {
fun grok() {
val x = X()
val y = Y()
}
}
class Bar {}
fun main(args: Array<String>) {
}
"""
),
)
assert analysis.imports == {KotlinImport(name="java.io.File", alias=None, is_wildcard=False)}
assert analysis.named_declarations == {
"org.pantsbuild.backend.kotlin.Bar",
"org.pantsbuild.backend.kotlin.Foo",
"org.pantsbuild.backend.kotlin.main",
}
assert analysis.consumed_symbols_by_scope == FrozenDict(
{
"org.pantsbuild.backend.kotlin.Foo": frozenset(
{
"X",
"Y",
}
),
"org.pantsbuild.backend.kotlin": frozenset(
{
"Array",
"String",
}
),
}
)
assert analysis.scopes == {
"org.pantsbuild.backend.kotlin",
"org.pantsbuild.backend.kotlin.Foo",
"org.pantsbuild.backend.kotlin.Bar",
}
|
StarcoderdataPython
|
1675890
|
import codecs
import gzip
from lxmls.sequences.label_dictionary import *
from lxmls.sequences.sequence import *
from lxmls.sequences.sequence_list import *
from os.path import dirname
import numpy as np # This is also needed for theano=True
# from nltk.corpus import brown
# Directory where the data files are located.
data_dir = dirname(__file__) + "/../../data/"
# Train and test files for english WSJ part of the Penn Tree Bank
en_train = data_dir + "train-02-21.conll"
en_dev = data_dir + "dev-22.conll"
en_test = data_dir + "test-23.conll"
# Train and test files for portuguese Floresta sintatica
pt_train = data_dir + "pt_train.txt"
pt_dev = ""
pt_test = data_dir + "pt_test.txt"
def compacify(train_seq, test_seq, dev_seq, theano=False):
"""
Create a map for indices that is be compact (do not have unused indices)
"""
# REDO DICTS
new_x_dict = LabelDictionary()
new_y_dict = LabelDictionary(['noun'])
for corpus_seq in [train_seq, test_seq, dev_seq]:
for seq in corpus_seq:
for index in seq.x:
word = corpus_seq.x_dict.get_label_name(index)
if word not in new_x_dict:
new_x_dict.add(word)
for index in seq.y:
tag = corpus_seq.y_dict.get_label_name(index)
if tag not in new_y_dict:
new_y_dict.add(tag)
# REDO INDICES
# for corpus_seq in [train_seq2, test_seq2, dev_seq2]:
for corpus_seq in [train_seq, test_seq, dev_seq]:
for seq in corpus_seq:
for i in seq.x:
if corpus_seq.x_dict.get_label_name(i) not in new_x_dict:
pass
for i in seq.y:
if corpus_seq.y_dict.get_label_name(i) not in new_y_dict:
pass
seq.x = [new_x_dict[corpus_seq.x_dict.get_label_name(i)] for i in seq.x]
seq.y = [new_y_dict[corpus_seq.y_dict.get_label_name(i)] for i in seq.y]
# For compatibility with GPUs store as numpy arrays and cats to int
# 32
if theano:
seq.x = np.array(seq.x, dtype='int32')
seq.y = np.array(seq.y, dtype='int32')
# Reinstate new dicts
corpus_seq.x_dict = new_x_dict
corpus_seq.y_dict = new_y_dict
# Add reverse indices
corpus_seq.word_dict = {v: k for k, v in new_x_dict.items()}
corpus_seq.tag_dict = {v: k for k, v in new_y_dict.items()}
# SANITY CHECK:
# These must be the same
# tmap = {v: k for k, v in train_seq.x_dict.items()}
# tmap2 = {v: k for k, v in train_seq2.x_dict.items()}
# [tmap[i] for i in train_seq[0].x]
# [tmap2[i] for i in train_seq2[0].x]
return train_seq, test_seq, dev_seq
class PostagCorpus(object):
def __init__(self):
# Word dictionary.
self.word_dict = LabelDictionary()
# POS tag dictionary.
# Initialize noun to be tag zero so that it the default tag.
self.tag_dict = LabelDictionary(['noun'])
# Initialize sequence list.
self.sequence_list = SequenceList(self.word_dict, self.tag_dict)
# Read a text file in conll format and return a sequence list
#
def read_sequence_list_conll(self, train_file,
mapping_file=("%s/en-ptb.map"
% dirname(__file__)),
max_sent_len=100000,
max_nr_sent=100000):
# Build mapping of postags:
mapping = {}
if mapping_file is not None:
for line in open(mapping_file):
coarse, fine = line.strip().split("\t")
mapping[coarse.lower()] = fine.lower()
instance_list = self.read_conll_instances(train_file,
max_sent_len,
max_nr_sent, mapping)
seq_list = SequenceList(self.word_dict, self.tag_dict)
for sent_x, sent_y in instance_list:
seq_list.add_sequence(sent_x, sent_y)
return seq_list
# ----------
# Reads a conll file into a sequence list.
# ----------
def read_conll_instances(self, file, max_sent_len, max_nr_sent, mapping):
if file.endswith("gz"):
zf = gzip.open(file, 'rb')
reader = codecs.getreader("utf-8")
contents = reader(zf)
else:
contents = codecs.open(file, "r", "utf-8")
nr_sent = 0
instances = []
ex_x = []
ex_y = []
nr_types = len(self.word_dict)
nr_pos = len(self.tag_dict)
for line in contents:
toks = line.split()
if len(toks) < 2:
# print "sent n %i size %i"%(nr_sent,len(ex_x))
if len(ex_x) < max_sent_len and len(ex_x) > 1:
# print "accept"
nr_sent += 1
instances.append([ex_x, ex_y])
# else:
# if(len(ex_x) <= 1):
# print "refusing sentence of len 1"
if nr_sent >= max_nr_sent:
break
ex_x = []
ex_y = []
else:
pos = toks[4]
word = toks[1]
pos = pos.lower()
if pos not in mapping:
mapping[pos] = "noun"
print "unknown tag %s" % pos
pos = mapping[pos]
if word not in self.word_dict:
self.word_dict.add(word)
if pos not in self.tag_dict:
self.tag_dict.add(pos)
ex_x.append(word)
ex_y.append(pos)
# ex_x.append(self.word_dict[word])
# ex_y.append(self.tag_dict[pos])
return instances
# Read a text file in brown format and return a sequence list
#
# def read_sequence_list_brown(self,mapping_file="readers/en-ptb.map",max_sent_len=100000,max_nr_sent=100000,categories=""):
# ##Build mapping of postags:
# mapping = {}
# if(mapping_file != None):
# for line in open(mapping_file):
# coarse,fine = line.strip().split("\t")
# mapping[coarse.lower()] = fine.lower()
# if(categories == ""):
# sents = brown.tagged_sents()
# else:
# sents = brown.tagged_sents(categories=categories)
# seq_list = Sequence_List(self.word_dict,self.int_to_word,self.tag_dict,self.int_to_tag)
# nr_types = len(self.word_dict)
# nr_tag = len(self.tag_dict)
# for sent in sents:
# if(len(sent) > max_sent_len or len(sent) <= 1):
# continue
# ns_x = []
# ns_y = []
# for word,tag in sent:
# tag = tag.lower()
# if(tag not in mapping):
# ##Add unk tags to dict
# mapping[tag] = "noun"
# c_t = mapping[tag]
# if(word not in self.word_dict):
# self.word_dict[word] = nr_types
# c_word = nr_types
# self.int_to_word.append(word)
# nr_types += 1
# else:
# c_word = self.word_dict[word]
# if(c_t not in self.tag_dict):
# self.tag_dict[c_t] = nr_tag
# c_pos_c = nr_tag
# self.int_to_tag.append(c_t)
# nr_tag += 1
# else:
# c_pos_c = self.tag_dict[c_t]
# ns_x.append(c_word)
# ns_y.append(c_pos_c)
# seq_list.add_sequence(ns_x,ns_y)
# return seq_list
# Dumps a corpus into a file
def save_corpus(self, dir):
if not os.path.isdir(dir + "/"):
os.mkdir(dir + "/")
word_fn = codecs.open(dir + "word.dic", "w", "utf-8")
for word_id, word in enumerate(self.int_to_word):
word_fn.write("%i\t%s\n" % (word_id, word))
word_fn.close()
tag_fn = open(dir + "tag.dic", "w")
for tag_id, tag in enumerate(self.int_to_tag):
tag_fn.write("%i\t%s\n" % (tag_id, tag))
tag_fn.close()
word_count_fn = open(dir + "word.count", "w")
        for word_id, counts in self.word_counts.items():
word_count_fn.write("%i\t%s\n" % (word_id, counts))
word_count_fn.close()
self.sequence_list.save(dir + "sequence_list")
# Loads a corpus from a file
def load_corpus(self, dir):
word_fn = codecs.open(dir + "word.dic", "r", "utf-8")
for line in word_fn:
word_nr, word = line.strip().split("\t")
self.int_to_word.append(word)
self.word_dict[word] = int(word_nr)
word_fn.close()
tag_fn = open(dir + "tag.dic", "r")
for line in tag_fn:
tag_nr, tag = line.strip().split("\t")
if tag not in self.tag_dict:
self.int_to_tag.append(tag)
self.tag_dict[tag] = int(tag_nr)
tag_fn.close()
word_count_fn = open(dir + "word.count", "r")
for line in word_count_fn:
word_nr, word_count = line.strip().split("\t")
self.word_counts[int(word_nr)] = int(word_count)
word_count_fn.close()
self.sequence_list.load(dir + "sequence_list")
|
StarcoderdataPython
|
1690007
|
from django.contrib import admin
from .models import librarian
admin.site.register(librarian)
|
StarcoderdataPython
|
8059926
|
<filename>tests/examples/testcases.py
#!/usr/bin/env python3
import argparse
import unittest
import itertools
import json
import subprocess
import os
import sys
import shutil
import gzip
import aug_out_filter as afilter
import aug_comparator as comp
# This script executes AUGUSTUS test cases based on the examples
# folder and compares the current results with reference results
# if the option --compare is set. It is expected that both results
# are identical for a successful test.
# This script must be called from "tests/examples"!
# Python version 3.6 or higher is required for execution.
parser = argparse.ArgumentParser(description='Execute Augustus test cases.')
parser.add_argument('--mysql',
action='store_true',
help='cgp test cases are also executed with a MySQL database.')
parser.add_argument('--compare',
action='store_true',
help='Compare generated results with reference results.')
parser.add_argument('--html',
action='store_true',
help='Save diff results in html file.')
parser.add_argument('--clean',
action='store_true',
help='Remove all files created during the tests. If this option is set, no tests are executed.')
args = parser.parse_args()
# only import mysql connector if testcases using mysql should be executed
# MySQL Connector must be installed in this case
if args.mysql:
import mysql.connector
resultdir = 'results/'
refdir = 'expected_results/'
htmldir = 'output_html/'
tmpdir = 'data/tmp/'
exampledir = '../../examples/'
bindir = '../../bin/'
augustusbin = f'{bindir}augustus'
datadir = exampledir + 'chr2L/'
default_wd = os.getcwd()
def create_initial_resultdir():
clean(False)
os.mkdir(resultdir)
def clean(withtmpdir=True):
print('Removing generated test files...')
if os.path.exists(htmldir):
shutil.rmtree(htmldir)
if os.path.exists(resultdir):
shutil.rmtree(resultdir)
if withtmpdir and os.path.exists(tmpdir):
shutil.rmtree(tmpdir)
def check_working_dir(clean):
wd = os.getcwd()
if not (wd.endswith('tests/examples')):
errstr = 'Wrong working directory!' + '\n'
errstr += 'This script must be called from "tests/examples"!'
sys.exit(errstr)
if not clean and not (os.path.exists(augustusbin)):
errstr = 'Missing augustus binaries!' + '\n'
errstr += f'The augustus binaries must be accessible in this path: "{bindir}"!'
sys.exit(errstr)
class TestAugustus(unittest.TestCase):
dbname = None
dbhost = None
dbuser = None
dbpasswd = None
cpuno = 2
opt_compare = False
opt_html = False
opt_mysql = False
@classmethod
def read_config(cls):
with open('testconfig.json', 'r') as file:
config = json.load(file)
cls.dbname = config['dbname']
cls.dbhost = config['dbhost']
cls.dbuser = config['dbuser']
cls.dbpasswd = config['dbpasswd']
cls.cpuno = int(config['cpuno'])
@classmethod
def init_test_data(cls):
if not os.path.exists(tmpdir):
os.mkdir(tmpdir)
inputfile = os.path.join(tmpdir, 'chr2L.sm.fa.gz')
testfile = os.path.join(tmpdir, 'chr2L.sm.fa')
shutil.copyfile(os.path.join(datadir, 'chr2L.sm.fa.gz'), inputfile)
# '../../docs/tutorial2015/data/chr2L.sm.fa.gz', inputfile)
with gzip.open(inputfile, 'rb') as f_in:
with open(testfile, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(inputfile)
@classmethod
def init_sqlite_db(cls):
if not os.path.exists(tmpdir):
os.mkdir(tmpdir)
cmd_list = [[
f'{bindir}load2sqlitedb', '--species=hg19',
f'--dbaccess={tmpdir}vertebrates.db', '--clean',
f'{exampledir}cgp/human.fa'
],
[
f'{bindir}load2sqlitedb', '--species=mm9',
f'--dbaccess={tmpdir}vertebrates.db', '--clean',
f'{exampledir}cgp/mouse.fa'
],
[
f'{bindir}load2sqlitedb', '--species=bosTau4',
f'--dbaccess={tmpdir}vertebrates.db', '--clean',
f'{exampledir}cgp/cow.fa'
],
[
f'{bindir}load2sqlitedb', '--species=galGal3',
f'--dbaccess={tmpdir}vertebrates.db', '--clean',
f'{exampledir}cgp/chicken.fa'
],
[
f'{bindir}load2sqlitedb', '--noIdx', '--species=hg19',
f'--dbaccess={tmpdir}vertebrates.db', '--clean',
f'{exampledir}cgp/human.hints.gff'
],
[
f'{bindir}load2sqlitedb', '--noIdx', '--species=mm9',
f'--dbaccess={tmpdir}vertebrates.db', '--clean',
f'{exampledir}cgp/mouse.hints.gff'
],
[
f'{bindir}load2sqlitedb', '--makeIdx',
f'--dbaccess={tmpdir}vertebrates.db', '--clean'
]]
print('Creating SQLite database for cgp test cases...')
cls.init_db(cmd_list)
@classmethod
def init_mysql_db(cls):
cmd_list = [[
f'{bindir}load2db', '--species=hg19', '--dbaccess=' + cls.dbname +
',' + cls.dbhost + ',' + cls.dbuser + ',' + cls.dbpasswd,
f'{exampledir}cgp/human.fa'
],
[
f'{bindir}load2db', '--species=mm9',
'--dbaccess=' + cls.dbname + ',' + cls.dbhost + ',' +
cls.dbuser + ',' + cls.dbpasswd,
f'{exampledir}cgp/mouse.fa'
],
[
f'{bindir}load2db', '--species=bosTau4',
'--dbaccess=' + cls.dbname + ',' + cls.dbhost + ',' +
cls.dbuser + ',' + cls.dbpasswd,
f'{exampledir}cgp/cow.fa'
],
[
f'{bindir}load2db', '--species=galGal3',
'--dbaccess=' + cls.dbname + ',' + cls.dbhost + ',' +
cls.dbuser + ',' + cls.dbpasswd,
f'{exampledir}cgp/chicken.fa'
],
[
f'{bindir}load2db', '--species=hg19',
'--dbaccess=' + cls.dbname + ',' + cls.dbhost + ',' +
cls.dbuser + ',' + cls.dbpasswd,
f'{exampledir}cgp/human.hints.gff'
],
[
f'{bindir}load2db', '--species=mm9',
'--dbaccess=' + cls.dbname + ',' + cls.dbhost + ',' +
cls.dbuser + ',' + cls.dbpasswd,
f'{exampledir}cgp/mouse.hints.gff'
]]
print(' -' +
'Inserting data into MySQL database for testing purposes...')
cls.init_db(cmd_list)
@classmethod
def init_db(cls, cmd_list):
for cmd in cmd_list:
output = TestAugustus().process(cmd)
# print(output)
@classmethod
def cleanup(cls):
os.chdir(default_wd)
# remove generated SQLite database
if os.path.isfile(f'{tmpdir}vertebrates.db'):
os.remove(f'{tmpdir}vertebrates.db')
# remove copied/unzipped files
if os.path.isfile(f'{tmpdir}chr2L.sm.fa'):
os.remove(f'{tmpdir}chr2L.sm.fa')
@classmethod
def cleanup_mysqldb(cls):
mysqldb = mysql.connector.connect(host=cls.dbhost,
user=cls.dbuser,
passwd=cls.dbpasswd,
database=cls.dbname)
print('\n' + ' -' + 'Clean up MySQL database...')
augcursor = mysqldb.cursor()
augcursor.execute('DROP TABLE IF EXISTS genomes;')
augcursor.execute('DROP TABLE IF EXISTS speciesnames;')
augcursor.execute('DROP TABLE IF EXISTS seqnames;')
augcursor.execute('DROP TABLE IF EXISTS hints;')
augcursor.execute('DROP TABLE IF EXISTS featuretypes;')
@classmethod
def setUpClass(cls):
cls.read_config()
# check config
missing_arguments = False
if (cls.opt_mysql):
if TestAugustus.dbname is None:
print('The database name is missing!')
missing_arguments = True
if TestAugustus.dbhost is None:
print('The host name is missing!')
missing_arguments = True
if TestAugustus.dbuser is None:
print('The db user name is missing!')
missing_arguments = True
if TestAugustus.dbpasswd is None:
print('The db user passwd is missing!')
missing_arguments = True
if missing_arguments:
                assert False, 'Test cases using MySQL are not executed.'
cls.init_test_data()
cls.init_sqlite_db()
if (cls.opt_mysql):
cls.cleanup_mysqldb()
cls.init_mysql_db()
@classmethod
def tearDownClass(cls):
cls.cleanup()
if (cls.opt_mysql):
cls.cleanup_mysqldb()
def assertEqualFolders(self, reffolder, resfolder, html=None, outputfolder=None):
if TestAugustus.opt_compare:
if html is None:
html = self.opt_html
if outputfolder is None:
diff = comp.compare_folder(reffolder,
resfolder,
html=html)
else:
diff = comp.compare_folder(reffolder,
resfolder,
html=html,
outputfolder=outputfolder)
self.assertEqual(diff, '', diff)
def get_ref_folder(self, folder_name=None, path_to_wd=None):
if folder_name is None:
folder_name = self._testMethodName
if path_to_wd is None:
return os.path.join(refdir, folder_name)
else:
return os.path.join(path_to_wd, refdir, folder_name)
def get_res_folder(self, folder_name=None, path_to_wd=None):
if folder_name is None:
folder_name = self._testMethodName
if path_to_wd is None:
return os.path.join(resultdir, folder_name)
else:
return os.path.join(path_to_wd, resultdir, folder_name)
def process(self, cmd_list, out=subprocess.PIPE):
isFile = isinstance(out, str)
output = out
if isFile:
output = open(out, 'w')
p = subprocess.Popen(cmd_list,
stdout=output,
stderr=subprocess.PIPE,
universal_newlines=True)
rc = p.wait()
error = p.stderr.read()
p.stderr.close()
self.assertEqual(error, '', error)
self.assertEqual(rc, 0, f'Returncode not 0! Error: {error}')
if isFile:
self.assertTrue(os.path.isfile(out),
'Output file was not created as expected!')
else:
stdout = p.stdout.read()
p.stdout.close()
return stdout
return ''
def test_utr_on(self):
os.chdir(default_wd)
reffolder = self.get_ref_folder()
resfolder = self.get_res_folder()
testtmpfile = os.path.join(resfolder, 'aug_utr_on_tmp.gff')
testfile = os.path.join(resfolder, 'aug_utr_on.gff')
os.mkdir(resfolder)
self.process([
augustusbin, '--species=human', '--UTR=on', '--softmasking=0',
f'{exampledir}example.fa'
], testtmpfile)
# filter output file
afilter.pred(testtmpfile, testfile)
os.remove(testtmpfile)
# compare results
self.assertEqualFolders(reffolder, resfolder)
def test_iterative_prediction(self):
os.chdir(default_wd)
reffolder = self.get_ref_folder()
resfolder = self.get_res_folder()
os.mkdir(resfolder)
species_list = ['nasonia', 'zebrafish', 'tomato']
# run augustus several times with different parameter sets
for species in species_list:
testtmpfile = os.path.join(
resfolder, 'aug.' + species + '.1-1M_tmp.gff')
self.process([
augustusbin, '--species=' + species,
f'{tmpdir}chr2L.sm.fa', '--softmasking=on',
'--predictionEnd=1000000'
], testtmpfile)
# filter output
testfile = os.path.join(resfolder, 'aug.' + species + '.1-1M.gff')
afilter.pred(testtmpfile, testfile)
os.remove(testtmpfile)
# compare results
self.assertEqualFolders(reffolder, resfolder)
def test_iterative_prediction_with_hints(self):
os.chdir(default_wd)
reffolder = self.get_ref_folder()
resfolder = self.get_res_folder()
os.mkdir(resfolder)
if not os.path.isfile('data/tmp/chr2L.sm.fa'):
TestAugustus.init_test_data()
for i in range(0, 3):
testtmpfile = os.path.join(
resfolder, f'aug.nasonia.hints.{str(i)}_tmp.gff')
self.process([
augustusbin, '--species=nasonia',
f'{tmpdir}chr2L.sm.fa', '--softmasking=on',
'--predictionStart=' + str(i * 2000000),
'--predictionEnd=' + str((i + 1) * 2000000 + 50000),
f'--hintsfile={datadir}/hints.gff',
'--extrinsicCfgFile=extrinsic.M.RM.E.W.cfg'
], testtmpfile)
# filter output
testfile = os.path.join(
resfolder, f'aug.nasonia.hints.{str(i)}.gff')
afilter.pred(testtmpfile, testfile)
os.remove(testtmpfile)
# compare results
self.assertEqualFolders(reffolder, resfolder)
def test_training_new_species(self):
self.training_new_species(False)
def test_training_new_species_crf(self):
self.training_new_species(True)
def training_new_species(self, crf):
os.chdir(default_wd)
speciesname = 'test_aug_dev_species'
        # Remove the test species folder in case a previous run
        # failed to delete it for whatever reason.
if os.path.exists('../../config/species/' + speciesname):
shutil.rmtree('../../config/species/' + speciesname)
resfolder = self.get_res_folder()
reffolder = self.get_ref_folder()
testtmpfile = os.path.join(resfolder, 'test_tmp.out')
testfile = os.path.join(resfolder, 'test.out')
os.mkdir(resfolder)
# call script to initialize new species
self.process([
'perl', '../../scripts/new_species.pl', '--species=' + speciesname,
'--AUGUSTUS_CONFIG_PATH=../../config'
])
# training
self.process([
f'{bindir}etraining', os.path.join(datadir, 'genes.gb.train'),
'--species=' + speciesname
])
# test
cmd = [
augustusbin, os.path.join(datadir, 'genes.gb.test'),
'--species=' + speciesname, '--softmasking=0',
'--AUGUSTUS_CONFIG_PATH=../../config'
]
if (crf):
cmd.append('--CRF=on')
cmd.append('--CRF_N=2')
cmd.append('--UTR=off')
self.process(cmd, testtmpfile)
# filter output file
afilter.eval(testtmpfile, testfile)
os.remove(testtmpfile)
# move new species to result folder
shutil.move('../../config/species/' + speciesname, resfolder)
# compare results
self.assertEqualFolders(reffolder, resfolder)
def test_ab_initio_prediction(self):
os.chdir(default_wd)
reffolder = self.get_ref_folder()
resfolder = self.get_res_folder()
testtmpfile = os.path.join(resfolder, 'augustus_tmp.gff')
testfile = os.path.join(resfolder, 'augustus.gff')
os.mkdir(resfolder)
self.process([
augustusbin, f'{exampledir}autoAug/genome.fa', '--softmasking=1',
'--species=caenorhabditis'
], testtmpfile)
# filter output file
afilter.pred(testtmpfile, testfile)
os.remove(testtmpfile)
# compare results
self.assertEqualFolders(reffolder, resfolder)
def test_format_and_error_out(self):
os.chdir(default_wd)
reffolder = self.get_ref_folder()
resfolder = self.get_res_folder()
testtmpfile = os.path.join(resfolder, 'augustus_tmp.gff3')
testfile = os.path.join(resfolder, 'augustus.gff3')
os.mkdir(resfolder)
cmd = [
augustusbin, f'{exampledir}autoAug/genome.fa',
'--species=caenorhabditis', '--gff3=on', '--softmasking=1',
'--outfile=' + testtmpfile,
'--errfile=' + resfolder + '/augustus.err'
]
self.process(cmd)
# filter output file
self.assertTrue(os.path.isfile(testtmpfile),
'Output file was not created as expected!')
afilter.pred(testtmpfile, testfile)
os.remove(testtmpfile)
# compare results
self.assertEqualFolders(reffolder, resfolder)
def test_alternatives_from_sampling(self):
os.chdir(default_wd)
reffolder = self.get_ref_folder()
resfolder = self.get_res_folder()
testtmpfile = os.path.join(resfolder, 'augustus_tmp.gff')
testfile = os.path.join(resfolder, 'augustus.gff')
os.mkdir(resfolder)
cmd = [
augustusbin, f'{exampledir}autoAug/genome.fa',
'--species=caenorhabditis', '--alternatives-from-sampling=on',
'--minexonintronprob=0.08', '--minmeanexonintronprob=0.4',
'--maxtracks=3'
]
self.process(cmd, testtmpfile)
# filter output file
afilter.pred(testtmpfile, testfile)
os.remove(testtmpfile)
# compare results
self.assertEqualFolders(reffolder, resfolder)
def test_cgp(self):
reffolder = self.get_ref_folder(path_to_wd='../../tests/examples')
resfolder = self.get_res_folder(path_to_wd='../../tests/examples')
testtmpfile = os.path.join(resfolder, 'output_tmp.txt')
testfile = os.path.join(resfolder, 'output.txt')
os.chdir(os.path.join(default_wd, f'{exampledir}cgp'))
os.mkdir(resfolder)
cmd = [
augustusbin,
'--species=human',
'--speciesfilenames=genomes.tbl',
'--treefile=tree.nwk',
'--alnfile=aln.maf',
'--softmasking=0',
'--alternatives-from-evidence=0', # removes warning
'--/CompPred/outdir=' + resfolder + '/'
]
self.process(cmd, testtmpfile)
# filter output files
for file in os.listdir(resfolder):
filename = os.fsdecode(file)
if filename.endswith('.gff'):
afilter.cgp(os.path.join(resfolder, filename),
os.path.join(resfolder, filename.replace('.gff', '.filtered.gff')))
os.remove(os.path.join(resfolder, filename))
afilter.cgp_out(testtmpfile, testfile)
os.remove(testtmpfile)
# compare results
self.assertEqualFolders(reffolder, resfolder,
outputfolder=default_wd + '/output_html/')
def test_cgp_sqlite(self):
self.cgp_with_db_preparation(False, False)
def test_cgp_sqlite_hints(self):
self.cgp_with_db_preparation(True, False)
def test_cgp_mysql(self):
self.cgp_with_db_preparation(False, True)
def test_cgp_mysql_hints(self):
self.cgp_with_db_preparation(True, True)
def cgp_with_db_execution(self, resfolder, reffolder, *args):
os.mkdir(resfolder)
proc_list = []
# create groups according to the configured number of cpus
grouped_args = [iter(args)] * TestAugustus.cpuno
# parallel execution of the commands of each group
for arg_list in itertools.zip_longest(*grouped_args):
proc_list = []
for cmd, filename in filter(None, arg_list):
with open(filename, 'w') as file:
proc_list.append(
subprocess.Popen(cmd,
stdout=file,
stderr=subprocess.PIPE,
universal_newlines=True))
for p in proc_list:
p.wait()
for p in proc_list:
error = p.stderr.read()
p.stderr.close()
self.assertEqual(error, '', error)
# filter output prediction files
for subdir, dirs, files in os.walk(resfolder):
for file in files:
filename = os.fsdecode(file)
if filename.endswith('.gff'):
afilter.cgp(
subdir + '/' + filename, subdir + '/' +
filename.replace('.gff', '.filtered.gff'))
os.remove(subdir + '/' + filename)
elif filename.endswith('.out'):
afilter.cgp_out(
subdir + '/' + filename,
subdir + '/' + filename.replace('_tmp', ''))
os.remove(subdir + '/' + filename)
# compare results
self.assertEqualFolders(reffolder, resfolder,
outputfolder=default_wd + '/output_html/')
def cgp_with_db_preparation(self, hints, mysql):
os.chdir(os.path.join(default_wd, f'{exampledir}cgp'))
testname = 'test_cgp_with_db'
if mysql:
testname += '_mysql'
if hints:
testname += '_hints'
resfolder = self.get_res_folder(testname, '../../tests/examples')
reffolder = self.get_ref_folder(testname, '../../tests/examples')
cmd = [
augustusbin,
'--species=human',
'--speciesfilenames=genomes.tbl',
'--treefile=tree.nwk',
'--alnfile=aln.maf',
'--softmasking=0',
'--alternatives-from-evidence=0', # removes warning
'--/CompPred/outdir=' + resfolder + '/pred'
]
if mysql:
cmd.append('--dbaccess=' + TestAugustus.dbname + ',' +
TestAugustus.dbhost + ',' + TestAugustus.dbuser + ',' +
TestAugustus.dbpasswd)
else:
cmd.append(
'--dbaccess=../../tests/examples/data/tmp/vertebrates.db')
if hints:
cmd.append('--dbhints=true')
cmd.append('--extrinsicCfgFile=cgp.extrinsic.cfg')
args = [[cmd, resfolder + '/aug_tmp.out']]
self.cgp_with_db_execution(resfolder, reffolder, *args)
def test_cgp_denovo_tutorial(self):
os.chdir(default_wd)
os.chdir('../../docs/tutorial-cgp/results/mafs')
resfolder = self.get_res_folder('test_cgp_with_db')
reffolder = self.get_ref_folder('test_cgp_with_db')
args = []
# create command list for all alignment files
for idx, alin in enumerate(os.listdir(os.curdir), 1):
args.append([
[
'../../../' + augustusbin,
'--species=human',
'--softmasking=1',
'--speciesfilenames=../../../../examples_test/data/cgp_genomes.tbl',
'--treefile=../../data/tree.nwk',
'--alnfile=' + alin.__str__(),
'--alternatives-from-evidence=0', # removes warning
'--dbaccess=../vertebrates.db',
'--/CompPred/outdir=' + resfolder + '/pred' + str(idx)
],
resfolder + '/aug-' + str(idx) + '_tmp.out'
])
self.cgp_with_db_execution(resfolder, reffolder, *args)
def test_cgp_rna_hint_tutorial(self):
os.chdir(default_wd)
os.chdir('../../docs/tutorial-cgp/results/mafs')
reffolder = self.get_ref_folder(path_to_wd='../../../../tests/examples')
resfolder = self.get_res_folder(path_to_wd='../../../../tests/examples')
args = []
# create command list for all alignment files
for idx, alin in enumerate(os.listdir(os.curdir), 1):
args.append([
[
'../../../' + augustusbin,
'--species=human',
'--softmasking=1',
'--speciesfilenames=../../../../tests/examples_test/data/cgp_genomes.tbl',
'--treefile=../../data/tree.nwk',
'--alnfile=' + alin.__str__(),
'--alternatives-from-evidence=0', # removes warning
'--dbaccess=../vertebrates.db',
'--dbhints=1',
'--UTR=1',
'--allow_hinted_splicesites=atac',
'--extrinsicCfgFile=../extrinsic-rnaseq.cfg',
'--/CompPred/outdir=' + resfolder + '/pred' + str(idx)
],
resfolder + '/aug-' + str(idx) + '_tmp.out'
])
self.cgp_with_db_execution(resfolder, reffolder, *args)
def test_hints_MPE(self):
reffolder = self.get_ref_folder()
resfolder = self.get_res_folder()
testtmpfile = os.path.join(resfolder, 'aug_hints_MPE_tmp.gff')
testfile = os.path.join(resfolder, 'aug_hints_MPE.gff')
os.chdir(default_wd)
os.mkdir(resfolder)
self.process([
augustusbin, '--species=human', f'--hintsfile={exampledir}hints.gff',
'--extrinsicCfgFile=../../config/extrinsic/extrinsic.MPE.cfg',
f'{exampledir}example.fa'
], testtmpfile)
# filter output file
afilter.pred(testtmpfile, testfile)
os.remove(testtmpfile)
# compare results
self.assertEqualFolders(reffolder, resfolder)
def default_test_suite():
suite = unittest.TestSuite()
suite.addTest(TestAugustus('test_utr_on'))
suite.addTest(TestAugustus('test_hints_MPE'))
suite.addTest(TestAugustus('test_iterative_prediction'))
suite.addTest(TestAugustus('test_iterative_prediction_with_hints'))
suite.addTest(TestAugustus('test_training_new_species'))
suite.addTest(TestAugustus('test_training_new_species_crf'))
suite.addTest(TestAugustus('test_ab_initio_prediction'))
suite.addTest(TestAugustus('test_format_and_error_out'))
suite.addTest(TestAugustus('test_alternatives_from_sampling'))
suite.addTest(TestAugustus('test_cgp'))
suite.addTest(TestAugustus('test_cgp_sqlite'))
suite.addTest(TestAugustus('test_cgp_sqlite_hints'))
return suite
def small_test_suite():
suite = unittest.TestSuite()
suite.addTest(TestAugustus('test_utr_on'))
suite.addTest(TestAugustus('test_hints_MPE'))
suite.addTest(TestAugustus('test_training_new_species'))
suite.addTest(TestAugustus('test_ab_initio_prediction'))
suite.addTest(TestAugustus('test_format_and_error_out'))
# suite.addTest(TestAugustus('test_alternatives_from_sampling'))
suite.addTest(TestAugustus('test_cgp'))
suite.addTest(TestAugustus('test_cgp_sqlite'))
suite.addTest(TestAugustus('test_cgp_sqlite_hints'))
return suite
def mysql_test_suite():
suite = unittest.TestSuite()
suite.addTest(TestAugustus('test_cgp_mysql'))
suite.addTest(TestAugustus('test_cgp_mysql_hints'))
return suite
def print_tc_header(tc_name):
print(
'----------------------------------------------------------------------'
)
print('Executing ' + tc_name)
print(
'----------------------------------------------------------------------'
)
if __name__ == '__main__':
check_working_dir(args.clean)
default_wd = os.getcwd()
# Remove only generated test files and do not execute test
# cases if option --clean is set.
if args.clean:
clean()
sys.exit()
create_initial_resultdir()
TestAugustus.opt_compare = args.compare
TestAugustus.opt_html = args.html
TestAugustus.opt_mysql = args.mysql
runner = unittest.TextTestRunner(verbosity=2)
#print_tc_header('default test suite')
#result = runner.run(default_test_suite())
print_tc_header('small test suite')
result = runner.run(small_test_suite())
mysql_was_successful = True
if args.mysql:
os.chdir(default_wd)
print_tc_header('MySQL test suite')
result_mysql = runner.run(mysql_test_suite())
mysql_was_successful = result_mysql.wasSuccessful()
if result.wasSuccessful() and mysql_was_successful:
sys.exit()
else:
sys.exit(1)
|
StarcoderdataPython
|
8118051
|
<filename>main.py
import os
import time
import datetime as dt
import schedule
from threading import Timer
from weather import Weather, Unit
weather = Weather(unit=Unit.CELSIUS)
lookup = weather.lookup(2487365)
condition = lookup.condition
needToWaterEarly = True
needToWaterMore = True
triggerConditions = ["tropical storm", "showers", "hail", "sleet", "mixed rain and hail", "scattered showers", "thundershowers"]
def updateWeather():
global needToWaterEarly
global needToWaterMore
global lookup
global condition
lookup = weather.lookup(2487365)
condition = lookup.condition
lowerCaseCondition = condition.text.lower()
if triggerConditions.count(lowerCaseCondition) > 0 and dt.datetime.today().hour == 7:
needToWaterEarly = False
else:
needToWaterEarly = True
if triggerConditions.count(lowerCaseCondition) > 0 and dt.datetime.today().hour == 18:
needToWaterMore = False
else:
needToWaterMore = True
def startWatering(stack, relayNum):
if needToWaterEarly and dt.datetime.today().hour == 7:
os.system("megaio " + str(stack) + " rwrite " + str(relayNum) + " on")
print("NOW WATERING THE PLANTS AT 7:00 AM")
needToStop = Timer(150.0, stopWatering, [stack, relayNum])
needToStop.start()
else:
print("SKIPPING EARLY WATER CYCLE: " + condition.text)
if needToWaterMore and dt.datetime.today().hour == 18:
os.system("megaio " + str(stack) + " rwrite " + str(relayNum) + " on")
print("NOW WATERING THE PLANTS AT 6:30 PM")
needToStop = Timer(150.0, stopWatering, [stack, relayNum])
needToStop.start()
else:
print("SKIPPING LATE WATER CYCLE: " + condition.text)
def stopWatering(stack, relayNum):
os.system("megaio " + str(stack) + " rwrite " + str(relayNum) + " off")
print("FINISHED WATERING THE PLANTS")
schedule.every().day.at("7:10").do(startWatering, 0, 1)
schedule.every().day.at("18:30").do(startWatering, 0, 1)
schedule.every().hour.do(updateWeather)
while True:
schedule.run_pending()
time.sleep(1)
|
StarcoderdataPython
|
9778132
|
# TODO(matt): Reformat script.
# flake8: noqa
"""
Big Data Training
=================
"""
###############################################################################
# train
###############################################################################
import argparse
import os
import sys
import time
from typing import Tuple
import boto3
import dask
import dask.dataframe as dd
import mlflow
import pandas as pd
import ray
import torch
import torch.nn as nn
import torch.optim as optim
from dask_ml.compose import ColumnTransformer
from dask_ml.model_selection import train_test_split
from dask_ml.preprocessing import OneHotEncoder
from dask_ml.preprocessing import StandardScaler
from ray import train
from ray.train import Trainer
from ray.train import TrainingCallback
from ray.train.callbacks import TBXLoggerCallback
from ray.util.dask import ray_dask_get
from torch.nn.parallel import DistributedDataParallel
from torch.utils.tensorboard import SummaryWriter
# TODO(amogkam): Upstream this into Ray Train.
class MLflowCallback(TrainingCallback):
def __init__(self, config):
self.config = config
def handle_result(self, results, **info):
# For each result that's being reported by ``train.report()``,
# we get the result from the rank 0 worker (i.e. first worker) and
# report it to MLflow.
rank_zero_results = results[0]
mlflow.log_metrics(rank_zero_results)
# TODO: fix type hint for logdir
def start_training(self, logdir, **info):
mlflow.start_run(run_name=str(logdir.name))
        mlflow.log_params(self.config)
# TODO: Update TrainCallback to provide logdir in finish_training.
self.logdir = logdir
def finish_training(self, error: bool = False, **info):
# Save the Trainer checkpoints as artifacts to mlflow.
mlflow.log_artifacts(self.logdir)
def read_dask_dataframe(path: str) -> "dask.DataFrame":
print(f"reading data from {path}")
return ray.data.read_parquet(path).random_shuffle().to_dask().reset_index()
class DataPreprocessor:
def __init__(self):
self.column_transformer = None
self.scaler = None
def preprocess_train_data(self, df: "dask.DataFrame"
) -> Tuple["dask.DataFrame", "dask.DataFrame"]:
return self._preprocess(df, False)
def preprocess_inference_data(
self,
df: "dask.DataFrame") -> Tuple["dask.DataFrame", "dask.DataFrame"]:
return self._preprocess(df, True)[0]
def _preprocess(self, df: "dask.DataFrame", inferencing: bool
) -> Tuple["dask.DataFrame", "dask.DataFrame"]:
df = df.loc[:, df.columns != "index"]
# remove nulls and/or NaNs scalably with dask
print(f"step1: drop nulls from rows")
df = df.dropna(subset=["nullable_feature"])
print(f"step2: creating new_col and updatingfeature_1")
df["new_col"] = (
df["feature_1"] - 2 * df["feature_2"] + df["feature_3"]) / 3.
df["feature_1"] = 2. * df["feature_1"] + 0.1
# TODO: this doesn't work with more than 1 parquet file
# df['mean_by_fruit'] = df.groupby('fruit')['feature_1'].transform('mean')
print(f"step3: one-hot encoding fruit")
df = df.astype({"fruit": "category"})
df = df.categorize()
df.persist()
if inferencing:
assert self.column_transformer is not None
df_fruits = self.column_transformer.transform(df)
else:
assert self.column_transformer is None
self.column_transformer = ColumnTransformer(
[("one-hot", OneHotEncoder(sparse=False), ["fruit"])])
df_fruits = self.column_transformer.fit_transform(df)
df_data = df.loc[:, (df.columns != "label") & (df.columns != "fruit")]
df_data = dd.concat([df_data, df_fruits], axis=1)
assert df_data.isnull().sum().sum().compute(
) == 0, "There are nulls or Nans in the data!"
if inferencing:
print(f"step4: standardrize inference dataset")
assert self.scaler is not None
df_data_inference = self.scaler.transform(df_data)
return df_data_inference, None
else:
print(f"step4: standardrize train dataset")
df_labels = df.loc[:, df.columns == "label"]
df_data_train, df_data_test, df_label_train, df_label_test = train_test_split(
df_data, df_labels)
df_data_train.persist()
assert self.scaler is None
self.scaler = StandardScaler(
) # this just turns col values to z-scores
df_data_train = self.scaler.fit_transform(df_data_train)
df_data_test = self.scaler.transform(df_data_test)
df_train = dd.concat([df_data_train, df_label_train], axis=1)
df_test = dd.concat([df_data_test, df_label_test], axis=1)
return df_train, df_test
def inference(dataset, model_cls: type, batch_size: int, result_path: str,
use_gpu: bool):
print("inferencing...")
num_gpus = 1 if use_gpu else 0
dataset.map_batches(model_cls, compute="actors", batch_size=batch_size, num_gpus=num_gpus). \
write_parquet(result_path)
"""
TODO: Define neural network code in pytorch
P0:
1) can take arguments to change size of net arbitrarily so we can stress test against distributed training on cluster
2) has a network (nn.module?), optimizer, and loss function for binary classification
3) has some semblence of regularization (ie: via dropout) so that this artificially gigantic net doesn't just overfit horrendously
4) works well with pytorch dataset we'll create from Ray data .to_torch_dataset()
P1:
1) also tracks AUC for training, testing sets and records to tensorboard to
"""
class Net(nn.Module):
def __init__(self, n_layers, n_features, num_hidden, dropout_every,
drop_prob):
super().__init__()
self.n_layers = n_layers
self.dropout_every = dropout_every
self.drop_prob = drop_prob
self.fc_input = nn.Linear(n_features, num_hidden)
self.relu_input = nn.ReLU()
for i in range(self.n_layers):
layer = nn.Linear(num_hidden, num_hidden)
relu = nn.ReLU()
dropout = nn.Dropout(p=self.drop_prob)
setattr(self, f"fc_{i}", layer)
setattr(self, f"relu_{i}", relu)
if i % self.dropout_every == 0:
# only apply every few layers
setattr(self, f"drop_{i}", dropout)
self.add_module(f"drop_{i}", dropout)
self.add_module(f"fc_{i}", layer)
self.add_module(f"relu_{i}", relu)
# self.register_parameter(name=f"fc_{i}", param=getattr(self, f"fc_{i}"))
# self.register_parameter(name=f"relu_{i}", param=getattr(self, f"relu_{i}"))
self.fc_output = nn.Linear(num_hidden, 1)
def forward(self, x):
x = self.fc_input(x)
x = self.relu_input(x)
for i in range(self.n_layers):
x = getattr(self, f"fc_{i}")(x)
x = getattr(self, f"relu_{i}")(x)
if i % self.dropout_every == 0:
x = getattr(self, f"drop_{i}")(x)
x = self.fc_output(x)
return x
"""
TODO: training loop for NN
P0 Requirements:
1) should iterate through epochs, inner loop per batch
2) should keep running total of accuracy, loss (training & test) and record those to tensorboard
3) should perform windowing / shuffling per epoch
P1:
1) use Ray Tune for tuning / checkpointing
"""
def train_epoch(dataset, model, device, criterion, optimizer):
num_correct = 0
num_total = 0
running_loss = 0.0
for i, (inputs, labels) in enumerate(dataset):
inputs = inputs.to(device)
labels = labels.to(device)
# Zero the parameter gradients
optimizer.zero_grad()
# Forward + backward + optimize
outputs = model(inputs.float())
loss = criterion(outputs, labels.float())
loss.backward()
optimizer.step()
# how are we doing?
predictions = (torch.sigmoid(outputs) > 0.5).int()
num_correct += (predictions == labels).sum().item()
num_total += len(outputs)
# Save loss to plot
running_loss += loss.item()
if i % 100 == 0:
print(f"training batch [{i}] loss: {loss.item()}")
return (running_loss, num_correct, num_total)
def test_epoch(dataset, model, device, criterion):
num_correct = 0
num_total = 0
running_loss = 0.0
with torch.no_grad():
for i, (inputs, labels) in enumerate(dataset):
inputs = inputs.to(device)
labels = labels.to(device)
# Forward + backward + optimize
outputs = model(inputs.float())
loss = criterion(outputs, labels.float())
# how are we doing?
predictions = (torch.sigmoid(outputs) > 0.5).int()
num_correct += (predictions == labels).sum().item()
num_total += len(outputs)
# Save loss to plot
running_loss += loss.item()
if i % 100 == 0:
print(f"testing batch [{i}] loss: {loss.item()}")
return (running_loss, num_correct, num_total)
def train_func(config):
is_distributed = config.get("is_distributed", False)
use_gpu = config["use_gpu"]
num_epochs = config["num_epochs"]
batch_size = config["batch_size"]
num_layers = config["num_layers"]
num_hidden = config["num_hidden"]
dropout_every = config["dropout_every"]
dropout_prob = config["dropout_prob"]
num_features = config["num_features"]
print("Defining model, loss, and optimizer...")
# Setup device.
if is_distributed:
device = torch.device(f"cuda:{train.local_rank()}" if use_gpu
and torch.cuda.is_available() else "cpu")
else:
device = torch.device("cuda:0" if use_gpu
and torch.cuda.is_available() else "cpu")
print(f"Device: {device}")
# Setup data.
if is_distributed:
train_dataset_pipeline = train.get_dataset_shard("train_dataset")
train_dataset_epoch_iterator = train_dataset_pipeline.iter_epochs()
test_dataset = train.get_dataset_shard("test_dataset")
else:
train_dataset_epoch_iterator = config["train_dataset"].iter_epochs()
test_dataset = config["test_dataset"]
test_torch_dataset = test_dataset.to_torch(
label_column="label", batch_size=batch_size)
# Setup Tensorboard and MLflow.
if is_distributed:
# Setup is done through Callback.
pass
else:
writer = SummaryWriter()
mlflow.start_run()
mlflow_config = config.copy()
mlflow_config.pop("test_dataset")
mlflow_config.pop("train_dataset")
mlflow.log_params(mlflow_config)
net = Net(
n_layers=num_layers,
n_features=num_features,
num_hidden=num_hidden,
dropout_every=dropout_every,
drop_prob=dropout_prob,
).to(device)
print(net.parameters)
if is_distributed:
net = DistributedDataParallel(net)
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.Adam(net.parameters(), weight_decay=0.0001)
print("Starting training...")
for epoch in range(num_epochs):
train_dataset = next(train_dataset_epoch_iterator)
train_torch_dataset = train_dataset.to_torch(
label_column="label", batch_size=batch_size)
train_running_loss, train_num_correct, train_num_total = train_epoch(
train_torch_dataset, net, device, criterion, optimizer)
train_acc = train_num_correct / train_num_total
print(
f"epoch [{epoch + 1}]: training accuracy: {train_num_correct} / {train_num_total} = {train_acc:.4f}"
)
test_running_loss, test_num_correct, test_num_total = test_epoch(
test_torch_dataset, net, device, criterion)
test_acc = test_num_correct / test_num_total
print(
f"epoch [{epoch + 1}]: testing accuracy: {test_num_correct} / {test_num_total} = {test_acc:.4f}"
)
# Record and log stats.
if is_distributed:
train.report(
train_acc=train_acc,
train_loss=train_running_loss,
test_acc=test_acc,
test_loss=test_running_loss)
else:
writer.add_scalar("Accuracy/train", train_acc, epoch)
writer.add_scalar("Loss/train", train_running_loss, epoch)
writer.add_scalar("Accuracy/test", test_acc, epoch)
writer.add_scalar("Loss/test", test_running_loss, epoch)
writer.flush()
mlflow.log_metrics({
"train_acc": train_acc,
"train_loss": train_running_loss,
"test_acc": test_acc,
"test_loss": test_running_loss
})
# Checkpoint model.
if is_distributed:
train.save_checkpoint(model_state_dict=net.module.state_dict())
else:
torch.save(net.state_dict(), f"models/model-epoch-{epoch}.torch")
# Shutdown Tensorboard and MLflow.
if is_distributed:
pass
else:
writer.close()
# mlflow.end_run()
if is_distributed:
if train.world_rank() == 0:
return net.module
else:
return None
else:
return net
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--dir-path",
default=".",
type=str,
help="Path to read and write data from")
parser.add_argument(
"--use-s3",
action="store_true",
default=False,
help="Use data from s3 for testing.")
parser.add_argument(
"--smoke-test",
action="store_true",
default=False,
help="Finish quickly for testing.")
parser.add_argument(
"--address",
required=False,
type=str,
help="The address to use for Ray. `auto` if running through `ray submit"
)
parser.add_argument(
"--num-workers",
default=1,
type=int,
help="If > 1, number of Ray workers to use for distributed training")
parser.add_argument(
"--use-gpu",
action="store_true",
default=False,
help="Use GPU for training.")
parser.add_argument(
"--mlflow-register-model",
action="store_true",
help="Whether to use mlflow model registry. If set, a local MLflow "
"tracking server is expected to have already been started.")
args = parser.parse_args()
smoke_test = args.smoke_test
address = args.address
num_workers = args.num_workers
use_gpu = args.use_gpu
use_s3 = args.use_s3
dir_path = args.dir_path
start_time = time.time()
ray.init(address=address)
dask.config.set(scheduler=ray_dask_get)
###############################################################################
# make_and_upload_dataset
###############################################################################
import random
import os
import shutil
import pandas as pd
import sklearn.datasets
NUM_EXAMPLES = 2_000_000
NUM_FEATURES = 20
PARQUET_FILE_CHUNK_SIZE = 50_000
NUM_FILES = NUM_EXAMPLES // PARQUET_FILE_CHUNK_SIZE
def create_data_chunk(n, d, seed, include_label=False):
X, y = sklearn.datasets.make_classification(
n_samples=n,
n_features=d,
n_informative=10,
n_redundant=2,
n_repeated=0,
n_classes=2,
n_clusters_per_class=3,
weights=None,
flip_y=0.03,
class_sep=0.8,
hypercube=True,
shift=0.0,
scale=1.0,
shuffle=False,
random_state=seed)
# turn into dataframe with column names
col_names = ['feature_%0d' % i for i in range(1, d + 1, 1)]
df = pd.DataFrame(X)
df.columns = col_names
# add some bogus categorical data columns
options = ['apple', 'banana', 'orange']
df['fruit'] = df.feature_1.map(
lambda x: random.choice(options)
) # bogus, but nice to test categoricals
# add some nullable columns
options = [None, 1, 2]
df['nullable_feature'] = df.feature_1.map(
lambda x: random.choice(options)
        )  # bogus, but nice to test nullable-column handling
# add label column
if include_label:
df['label'] = y
return df
# create data files
print("Creating synthetic dataset...")
data_path = os.path.join(dir_path, "data")
os.makedirs(data_path, exist_ok=True)
for i in range(NUM_FILES):
path = os.path.join(data_path, f"data_{i:05d}.parquet.snappy")
if not os.path.exists(path):
tmp_df = create_data_chunk(
n=PARQUET_FILE_CHUNK_SIZE,
d=NUM_FEATURES,
seed=i,
include_label=True)
tmp_df.to_parquet(path, compression="snappy", index=False)
print(f"Wrote {path} to disk...")
# todo: at large enough scale we might want to upload the rest after first N files rather than write to disk
# to simulate a user with local copy of subset of data
print("Creating synthetic inference dataset...")
inference_path = os.path.join(dir_path, "inference")
os.makedirs(inference_path, exist_ok=True)
for i in range(NUM_FILES):
path = os.path.join(inference_path, f"data_{i:05d}.parquet.snappy")
if not os.path.exists(path):
tmp_df = create_data_chunk(
n=PARQUET_FILE_CHUNK_SIZE,
d=NUM_FEATURES,
seed=i,
include_label=False)
tmp_df.to_parquet(path, compression="snappy", index=False)
print(f"Wrote {path} to disk...")
# todo: at large enough scale we might want to upload the rest after first N files rather than write to disk
# to simulate a user with local copy of subset of data
# os.system("aws s3 sync ./data s3://cuj-big-data/data")
# os.system("aws s3 sync ./inference s3://cuj-big-data/inference")
###############################################################################
# train
###############################################################################
# Setup MLflow.
# By default, all metrics & artifacts for each run will be saved to disk
# in ./mlruns directory. Uncomment the below lines if you want to change
# the URI for the tracking uri.
# TODO: Use S3 backed tracking server for golden notebook.
if args.mlflow_register_model:
# MLflow model registry does not work with a local file system backend.
# Have to start a mlflow tracking server on localhost
mlflow.set_tracking_uri("http://127.0.0.1:5000")
# Set the experiment. This will create the experiment if not already
# exists.
mlflow.set_experiment("cuj-big-data-training")
if use_s3:
# Check if s3 data is populated.
BUCKET_NAME = 'cuj-big-data'
FOLDER_NAME = 'data/'
s3_resource = boto3.resource('s3')
bucket = s3_resource.Bucket(BUCKET_NAME)
count = bucket.objects.filter(Prefix=FOLDER_NAME)
if len(list(count)) == 0:
print("please run `python make_and_upload_dataset.py` first")
sys.exit(1)
data_path = "s3://cuj-big-data/data/"
inference_path = "s3://cuj-big-data/inference/"
inference_output_path = "s3://cuj-big-data/output/"
else:
data_path = os.path.join(dir_path, "data")
inference_path = os.path.join(dir_path, "inference")
inference_output_path = "/tmp"
if len(os.listdir(data_path)) <= 1 or len(
os.listdir(inference_path)) <= 1:
print("please run `python make_and_upload_dataset.py` first")
sys.exit(1)
if smoke_test:
# Only read a single file.
data_path = os.path.join(data_path, "data_00000.parquet.snappy")
inference_path = os.path.join(inference_path,
"data_00000.parquet.snappy")
preprocessor = DataPreprocessor()
train_df, test_df = preprocessor.preprocess_train_data(
read_dask_dataframe(data_path))
num_columns = len(train_df.columns)
num_features = num_columns - 1 # remove label column
train_dataset = ray.data.from_dask(train_df)
test_dataset = ray.data.from_dask(test_df)
NUM_EPOCHS = 5
BATCH_SIZE = 64
NUM_HIDDEN = 50 # 200
NUM_LAYERS = 3 # 15
DROPOUT_EVERY = 5
DROPOUT_PROB = 0.2
# Random global shuffle
train_dataset_pipeline = train_dataset.repeat().random_shuffle_each_window(
)
datasets = {
"train_dataset": train_dataset_pipeline,
"test_dataset": test_dataset
}
if num_workers <= 1:
config = {
"use_gpu": use_gpu,
"num_epochs": NUM_EPOCHS,
"batch_size": BATCH_SIZE,
"num_hidden": NUM_HIDDEN,
"num_layers": NUM_LAYERS,
"dropout_every": DROPOUT_EVERY,
"dropout_prob": DROPOUT_PROB,
"num_features": num_features,
}
config.update(datasets)
model = train_func(config=config)
else:
config = {
"is_distributed": True,
"use_gpu": use_gpu,
"num_epochs": NUM_EPOCHS,
"batch_size": BATCH_SIZE,
"num_hidden": NUM_HIDDEN,
"num_layers": NUM_LAYERS,
"dropout_every": DROPOUT_EVERY,
"dropout_prob": DROPOUT_PROB,
"num_features": num_features
}
# Create 2 callbacks: one for Tensorboard Logging and one for MLflow
# logging. Pass these into Trainer, and all results that are
# reported by ``train.report()`` will be logged to these 2 places.
# TODO: TBXLoggerCallback should create nonexistent logdir
# and should also create 1 directory per file.
tbx_logdir = "./runs"
os.makedirs("./runs", exist_ok=True)
callbacks = [
TBXLoggerCallback(logdir=tbx_logdir),
MLflowCallback(config)
]
trainer = Trainer(
backend="torch", num_workers=num_workers, use_gpu=use_gpu)
trainer.start()
results = trainer.run(
train_func=train_func,
config=config,
callbacks=callbacks,
dataset=datasets)
model = results[0]
trainer.shutdown()
if args.mlflow_register_model:
mlflow.pytorch.log_model(
model, artifact_path="models", registered_model_name="torch_model")
# Get the latest model from mlflow model registry.
client = mlflow.tracking.MlflowClient()
registered_model_name = "torch_model"
# Get the info for the latest model.
# By default, registered models are in stage "None".
latest_model_info = client.get_latest_versions(
registered_model_name, stages=["None"])[0]
latest_version = latest_model_info.version
def load_model_func():
model_uri = f"models:/torch_model/{latest_version}"
return mlflow.pytorch.load_model(model_uri)
else:
state_dict = model.state_dict()
def load_model_func():
num_layers = config["num_layers"]
num_hidden = config["num_hidden"]
dropout_every = config["dropout_every"]
dropout_prob = config["dropout_prob"]
num_features = config["num_features"]
model = Net(
n_layers=num_layers,
n_features=num_features,
num_hidden=num_hidden,
dropout_every=dropout_every,
drop_prob=dropout_prob)
model.load_state_dict(state_dict)
return model
class BatchInferModel:
def __init__(self, load_model_func):
self.device = torch.device("cuda:0"
if torch.cuda.is_available() else "cpu")
self.model = load_model_func().to(self.device)
def __call__(self, batch) -> "pd.DataFrame":
tensor = torch.FloatTensor(batch.to_pandas().values).to(
self.device)
return pd.DataFrame(self.model(tensor).cpu().detach().numpy())
inference_df = preprocessor.preprocess_inference_data(
read_dask_dataframe(inference_path))
inference_dataset = ray.data.from_dask(inference_df)
inference(inference_dataset, BatchInferModel(load_model_func), 100,
inference_output_path, use_gpu)
end_time = time.time()
total_time = end_time - start_time
print(f"Job finished in {total_time} seconds.")
|
StarcoderdataPython
|
4851379
|
from typing import Callable
class Solution:
def rotate(self, matrix: list[list[int]]) -> None:
"""
Do not return anything, modify matrix in-place instead.
"""
new_matrix = [[0 for _ in row] for row in matrix]
n = len(matrix)
for i, row in enumerate(matrix):
for j, _ in enumerate(row):
new_matrix[i][j] = matrix[n-1 - j][i]
for i, row in enumerate(new_matrix):
for j, cell in enumerate(row):
matrix[i][j] = cell
tests = [
(
([[1, 2, 3], [4, 5, 6], [7, 8, 9]],),
[[7, 4, 1], [8, 5, 2], [9, 6, 3]],
),
(
([[5, 1, 9, 11], [2, 4, 8, 10], [13, 3, 6, 7], [15, 14, 12, 16]],),
[[15, 13, 2, 5], [14, 3, 4, 1], [12, 6, 8, 9], [16, 7, 10, 11]],
),
(
([[1]],),
[[1]],
),
(
([[1, 2], [3, 4]],),
[[3, 1], [4, 2]],
),
]
def validator(
rotate: Callable[[list[list[int]]], None],
inputs: tuple[list[list[int]]],
expected: list[list[int]]
) -> None:
matrix, = inputs
rotate(matrix)
assert matrix == expected, (matrix, expected)
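# A minimal sketch of how the cases above might be exercised; this runner and
# its output message are additions, assuming the `tests`/`validator` pair above.
if __name__ == "__main__":
    solution = Solution()
    for inputs, expected in tests:
        validator(solution.rotate, inputs, expected)
    print(f"{len(tests)} rotate test case(s) passed")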
|
StarcoderdataPython
|
5034669
|
from .kanji_svg import KanjiSvg
from cached_property import cached_property
from requests_html import HTMLSession
import backoff
BASE_URL = "https://jisho.org/search/{}%20%23kanji"
MAX_TRIES = 4
SVG_SELECTOR = ".stroke_order_diagram--outer_container svg"
class ContentNotFound(Exception):
""" Represents an error when the content is not found at all """
class ContentNotReady(Exception):
""" Represents an early load of a page before the content is ready """
class KanjiStrokeScraper:
""" Helper class to scrape the Kanji Strokes from Jsiho.org """
def scrape(self, kanji, start, end):
""" Scrape the given Kanji """
url = BASE_URL.format(kanji)
page = self.html_session.get(url)
        svg = None
        try:
            svg = self.extract_svg(page.html, start, end)
        except ContentNotFound:
            print('No SVG found for {}'.format(kanji))
        except ContentNotReady:
            print('SVG not ready in the rendered page for {}'.format(kanji))
        return svg
@cached_property
def html_session(self):
""" Return the Html Session for this Scraper """
return HTMLSession()
def extract_svg(self, pageHtml, start, end):
""" Extract the SVG from the contents """
svg = self.load_element(pageHtml)
lxmlElement = svg.lxml[0] # the lxml element actually has html as the root element rather than the svg, so grab the first child
return KanjiSvg(lxmlElement, start=start, end=end)
@backoff.on_exception(backoff.expo, ContentNotReady, max_tries=MAX_TRIES)
def load_element(self, pageHtml):
""" Extract the SVG from the contents """
pageHtml.render()
svg = pageHtml.find(SVG_SELECTOR, first=True)
if not svg:
raise ContentNotFound()
if 'display: none' in svg.attrs['style']:
raise ContentNotReady()
return svg
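# Hypothetical usage sketch (not part of the original module): fetch the
# stroke-order SVG for a single kanji. The meaning of `start`/`end` follows
# KanjiSvg's constructor, which is not shown here; passing None is an assumption.
#
#   scraper = KanjiStrokeScraper()
#   kanji_svg = scraper.scrape('水', None, None)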
|
StarcoderdataPython
|
5024104
|
#-*- coding: utf-8 -*-
"""
CHOCO CLI(Command Line Interface) Handler
"""
import sys
import os
import shlex
import inspect
import imp
import psutil
import pickle
import traceback
import time
try:
import readline
except:
print >> sys.stderr, 'GNU Readline is not supported by this environment'
from datetime import datetime
from modules import modules
from .utils.number import sizeof_fmt
class ChocoCLI(object):
def __init__(self, choco):
self.choco = choco
self.first = True
# get cli methods
self.commands = dict(inspect.getmembers(self, predicate=inspect.ismethod))
del self.commands['open']
del self.commands['__init__']
def open(self):
while True:
if self.first:
self.first = False
print 'Welcome to CHOCO Shell'
print ' * Do not press CTRL + C.'
print ' If you want to exit choco, just enter "exit".'
print ' * When you need help, enter "help" or "?".'
print ''
command = raw_input('CHOCO ~# ')
try:
cmd = shlex.split(command)
if len(cmd) > 0 and len(cmd[0]) > 0:
c = cmd[0].replace('-', '_').replace('?', 'help')
if c in self.commands:
del cmd[0]
self.commands[c](*cmd)
else:
print 'choco-shell: command not found: {0}'.format(c)
except Exception, e:
traceback.print_exc()
def help(self):
print ' COMMAND LIST :'
for c in self.commands:
print ' {0}'.format(c.replace('_', '-'))
def send_all(self, message):
room_list = self.choco.cache.hgetall('choco:rooms')
for k, v in room_list.iteritems():
d = {
'command': 'ADMINMSG',
'room': pickle.loads(v),
'message': message,
}
self.choco.queue.put(d)
time.sleep(0.05)
print 'Success'
def app_status(self):
p = psutil.Process(os.getpid())
cpu_usage = p.get_cpu_percent(interval=1)
memory_info = p.get_memory_info()
rss = sizeof_fmt(memory_info.rss)
vms = sizeof_fmt(memory_info.vms)
thread_count = p.get_num_threads()
if os.name != 'nt':
fd_count = p.get_num_fds()
print "CPU Usage : {0}%".format(cpu_usage)
print "Memory : {0} (RSS), {1} (VMS)".format(rss, vms)
print "Threads : {0}".format(thread_count)
if os.name != 'nt':
print "FDs : {0}".format(fd_count)
def status(self):
now = datetime.now().strftime('%Y/%m/%d %H:%M:%S')
recv_count = self.choco.cache.get('choco:count:recv')
exec_count = self.choco.cache.get('choco:count:exec')
sent_count = self.choco.cache.get('choco:count:sent')
room_count = self.choco.cache.hlen('choco:rooms')
session_count = self.choco.cache.hlen('choco:sessions')
print '[C] {0}'.format(now)
print 'RECV : %s' % recv_count
print 'EXEC : %s' % exec_count
print 'SENT : %s' % sent_count
print 'ROOMS : %s' % room_count
print 'SESSIONS : %s' % session_count
print 'BUSY : %s/%s' % (self.choco.working_count.value, self.choco.config.THREAD_COUNT)
def reload(self):
print 'RELOADING MODULES ..'
self.choco.module.rules = []
self.choco.module.functions = {}
for m in modules:
imp.reload(m)
print 'RELOAD COMPLETE !'
def exit(self):
self.choco.exit.value = True
os._exit(1)
|
StarcoderdataPython
|
5146371
|
'''given a list of integers and a number n,
find a pair that sums to the number n'''
#Using hash table - O(n) time and O(n) space
def get_pair_using_hashmap(lst, num):
num_dict = {}
for each in lst:
if (num -each) in num_dict:
return [each,num-each]
else:
num_dict[each]=None
return []
# using sorted list. O(nlogn) time and O(1) space
def get_pair_using_sorted_list(lst,num):
lst.sort()
left = 0
right=len(lst)-1
while left < right:
curr_sum = lst[left] + lst[right]
if curr_sum < num:
left+=1
elif curr_sum > num:
right-=1
else:
return [lst[left],lst[right]]
return []
if __name__ == "__main__":
lst = [3,5,-4,8,11,1,-1,6]
print(get_pair_using_sorted_list(lst,10))
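    # The hash-map variant should find an equivalent pair (possibly in a
    # different order); shown here for comparison as an assumed usage.
    print(get_pair_using_hashmap(lst, 10))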
|
StarcoderdataPython
|
6435201
|
import logging
from functools import wraps
from data import model
from util.http import abort
logger = logging.getLogger(__name__)
def _raise_unauthorized(repository, scopes):
raise StandardError("Unauthorized acces to %s", repository)
def _get_reponame_kwargs(*args, **kwargs):
return [kwargs["namespace"], kwargs["package_name"]]
def disallow_for_image_repository(get_reponame_method=_get_reponame_kwargs):
def wrapper(func):
@wraps(func)
def wrapped(*args, **kwargs):
namespace_name, repo_name = get_reponame_method(*args, **kwargs)
image_repo = model.repository.get_repository(
namespace_name, repo_name, kind_filter="image"
)
if image_repo is not None:
logger.debug("Tried to invoked a CNR method on an image repository")
abort(
405,
message="Cannot push an application to an image repository with the same name",
)
return func(*args, **kwargs)
return wrapped
return wrapper
def require_repo_permission(
permission_class,
scopes=None,
allow_public=False,
raise_method=_raise_unauthorized,
get_reponame_method=_get_reponame_kwargs,
):
def wrapper(func):
@wraps(func)
@disallow_for_image_repository(get_reponame_method=get_reponame_method)
def wrapped(*args, **kwargs):
namespace_name, repo_name = get_reponame_method(*args, **kwargs)
logger.debug(
"Checking permission %s for repo: %s/%s",
permission_class,
namespace_name,
repo_name,
)
permission = permission_class(namespace_name, repo_name)
if permission.can() or (
allow_public and model.repository.repository_is_public(namespace_name, repo_name)
):
return func(*args, **kwargs)
repository = namespace_name + "/" + repo_name
raise_method(repository, scopes)
return wrapped
return wrapper
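# Example usage (hypothetical): guard a CNR route with a permission class from
# the surrounding codebase. `ReadRepositoryPermission` and the route signature
# below are illustrative assumptions, not part of this module.
#
#   @require_repo_permission(ReadRepositoryPermission, allow_public=True)
#   def show_package(namespace, package_name, **kwargs):
#       ...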
|
StarcoderdataPython
|
8016833
|
<filename>microutil/array_utils.py<gh_stars>1-10
__all__ = [
"zeros_like",
"not_xr",
"axis2int",
]
import dask.array as da
import numpy as np
import xarray as xr
import zarr
def zeros_like(arr):
"""
    Smooth over the differences between zeros_like implementations for
    different array types
Parameters
----------
arr : array-like
Returns
-------
    A zeroed array of the same array type as *arr*
"""
if isinstance(arr, np.ndarray):
return np.zeros_like(arr)
elif isinstance(arr, xr.DataArray):
return xr.zeros_like(arr)
elif isinstance(arr, zarr.Array):
return zarr.zeros_like(arr)
elif isinstance(arr, da.Array):
return da.zeros_like(arr)
def not_xr(arr):
"""
Make sure an array is not an xarray as that can
have major implications for indexing
"""
if isinstance(arr, xr.DataArray):
return arr.values
return arr
def axis2int(arr, axis, fallthrough=None):
"""
    Get the integer index of an axis for xarray or numpy arrays.
    If *arr* is an xarray.DataArray and *axis* is a string, the index is looked
    up in ``arr.dims``.
Parameters
----------
arr : ndarry or xarray
axis : int, str
The axis to find the index of
fallthrough : int or None, default: None
The value to return if the inference doesn't work.
Returns
-------
i : int
The index in dims.
"""
if isinstance(axis, int):
return axis
else:
if isinstance(arr, xr.DataArray) and isinstance(axis, str) and (axis in arr.dims):
return arr.dims.index(axis)
return fallthrough
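if __name__ == "__main__":
    # Minimal self-check sketch, not part of the original module; the dimension
    # names "t" and "x" are illustrative.
    arr = xr.DataArray(np.zeros((2, 3)), dims=("t", "x"))
    print(axis2int(arr, "x"))      # -> 1 (looked up in arr.dims)
    print(axis2int(arr, 0))        # -> 0 (ints pass straight through)
    print(not_xr(arr).shape)       # -> (2, 3), plain numpy values
    print(type(zeros_like(arr)))   # -> xarray.DataArray preserved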
|
StarcoderdataPython
|
9723778
|
<filename>submodular_optimization/algorithms/algorithm_driver.py<gh_stars>0
"""
This class runs an algorithm with given config
"""
import logging
import numpy as np
from copy import deepcopy
from timeit import default_timer as timer
from algorithms.distorted_greedy import DistortedGreedy
from algorithms.cost_scaled_greedy import CostScaledGreedy
from algorithms.greedy import Greedy
from algorithms.unconstrained_linear import UnconstrainedLinear
from algorithms.cost_scaled_lazy_greedy import CostScaledLazyGreedy
from algorithms.cost_scaled_partition_matroid_greedy import CostScaledPartitionMatroidGreedy
from algorithms.partition_matroid_greedy import PartitionMatroidGreedy
from algorithms.cost_scaled_partition_matroid_lazy_greedy import CostScaledPartitionMatroidLazyGreedy
from algorithms.stochastic_distorted_greedy import StochasticDistortedGreedy
from algorithms.unconstrained_distorted_greedy import UnconstrainedDistortedGreedy
from algorithms.scaled_single_threshold_greedy import ScaledSingleThresholdGreedy
from algorithms.baseline_topk import BaselineTopk
from algorithms.baseline_topk_matroid import BaselineTopkMatroid
from ordered_set import OrderedSet
class AlgorithmDriver(object):
"""
Creates experiment driver
"""
def __init__(self):
"""
Constructor
:param:
:return:
"""
self.logger = logging.getLogger("so_logger")
def create_sample(self, config, data, num_sampled_skills, rare_sample_fraction, popular_sample_fraction, rare_threshold,
popular_threshold, user_sample_ratio, seed):
"""
create the sample
"""
np.random.seed(seed=seed)
data.sample_skills_to_be_covered_controlled(num_sampled_skills, rare_sample_fraction,
popular_sample_fraction, rare_threshold,
popular_threshold, user_sample_ratio)
def create_partitions(self, data, num_of_partitions, partition_type, cardinality_constraint):
"""
create the partition matroids
"""
if partition_type == "random":
data.assign_ground_set_to_random_partitions(num_of_partitions, cardinality_constraint)
else:
data.assign_ground_set_to_equi_salary_partitions(num_of_partitions, cardinality_constraint)
def run(self, config, data, algorithm, sample_epsilon, error_epsilon, scaling_factor, num_sampled_skills,
rare_sample_fraction, popular_sample_fraction, rare_threshold, popular_threshold, user_sample_ratio, seed, k):
"""run
:param config:
:param data:
:param algorithm:
:param sample_epsilon:
        :param error_epsilon:
:param scaling_factor:
:param num_sampled_skills:
:param rare_sample_fraction:
:param popular_sample_fraction:
:param rare_threshold:
:param popular_threshold:
:param user_sample_ratio:
        :param seed:
        :param k:
"""
data.scaling_factor = scaling_factor
data.E = sorted(list(data.E))
data.E = OrderedSet(data.E)
if algorithm == "distorted_greedy":
alg = DistortedGreedy(config, data.init_submodular_func_coverage_caching, data.submodular_func_caching, data.cost_func, data.E, k)
elif algorithm == "cost_scaled_greedy":
alg = CostScaledGreedy(config, data.init_submodular_func_coverage_caching, data.submodular_func_caching, data.cost_func, data.E, k)
elif algorithm == "greedy":
alg = Greedy(config, data.init_submodular_func_coverage_caching, data.submodular_func_caching, data.cost_func, data.E, k)
elif algorithm == "unconstrained_linear":
alg = UnconstrainedLinear(config, data.init_submodular_func_coverage_caching, data.submodular_func_caching, data.cost_func, data.E)
elif algorithm == "cost_scaled_lazy_greedy":
alg = CostScaledLazyGreedy(config, data.init_submodular_func_coverage_caching, data.submodular_func_caching, data.cost_func, data.E, k)
elif algorithm == "unconstrained_distorted_greedy":
alg = UnconstrainedDistortedGreedy(config, data.init_submodular_func_coverage_caching, data.submodular_func_caching, data.cost_func, data.E)
elif algorithm == "stochastic_distorted_greedy":
config['algorithms']['stochastic_distorted_greedy_config']['epsilon'] = sample_epsilon
alg = StochasticDistortedGreedy(config, data.init_submodular_func_coverage_caching, data.submodular_func_caching, data.cost_func, data.E, k)
elif algorithm == "scaled_single_threshold_greedy":
config['algorithms']['scaled_single_threshold_greedy_config']['epsilon'] = error_epsilon
alg = ScaledSingleThresholdGreedy(config, data.init_submodular_func_coverage_caching, data.submodular_func_caching, data.cost_func, data.E, k)
elif algorithm == "baseline_topk":
alg = BaselineTopk(config, data.init_submodular_func_coverage_caching, data.submodular_func_caching, data.cost_func, data.E, k)
elif algorithm == "cost_scaled_partition_matroid_greedy":
alg = CostScaledPartitionMatroidGreedy(config, data.init_submodular_func_coverage_caching, data.submodular_func_caching, data.cost_func, data.E, deepcopy(data.partitions))
elif algorithm == "partition_matroid_greedy":
alg = PartitionMatroidGreedy(config, data.init_submodular_func_coverage_caching, data.submodular_func_caching, data.cost_func, data.E, deepcopy(data.partitions))
elif algorithm == "cost_scaled_partition_matroid_lazy_greedy":
alg = CostScaledPartitionMatroidLazyGreedy(config, data.init_submodular_func_coverage_caching, data.submodular_func_caching, data.cost_func, data.E, deepcopy(data.partitions))
elif algorithm == "baseline_topk_matroid":
alg = BaselineTopkMatroid(config, data.init_submodular_func_coverage_caching, data.submodular_func_caching, data.cost_func, data.E, deepcopy(data.partitions))
        else:
            self.logger.error("Algorithm %s is not implemented", algorithm)
            raise NotImplementedError("Algorithm %s is not implemented" % algorithm)
# Run algorithm
sol = alg.run()
submodular_val = data.submodular_func(sol)
cost = data.cost_func(sol)
val = submodular_val - cost
result = {'alg': algorithm,
'sol': sol,
'val': val,
'submodular_val': submodular_val,
'cost': cost,
'runtime': None,
'error_epsilon': error_epsilon,
'sample_epsilon': sample_epsilon,
'user_sample_ratio': user_sample_ratio,
'scaling_factor': scaling_factor,
'num_rare_skills': data.num_rare_skills,
'num_common_skills': data.num_common_skills,
'num_popular_skills': data.num_popular_skills,
'num_sampled_skills': num_sampled_skills,
'seed': seed,
'k': k
}
return result
|
StarcoderdataPython
|
6683254
|
'''
Stores all unique rooms in game, every type of room got its own description and features,
this is intended to be used as the building block for the dungeon generation function which
is not yet implemented
> Must-have room attributes:
NAME, USERDESC, DESC, NORTH, SOUTH, EAST, WEST, UP, DOWN, GROUND, SHOP, ENEMIES
> Optional room attributes:
SHOPINTRO (Greetings if room is shop)
'''
# Imports package namespace which contain all constants
from dicts import *
# Returns rooms exits as a dictionary of {DIRECTION: DESTINATION, ...}
def get_room_exits(room):
exits = {}
for dir in DIRECTIONS:
if room[dir]:
exits[dir] = ROOMS[room[dir]][NAME]
else:
pass
# exits[dir] = str(None)
return exits
ROOMS = {
'town_square': {
NAME: 'Town Square',
USERDESC: 'You are in the middle of the square, with streets surrounding you from all directions',
DESC: 'There are many people around, they all seem busy',
NORTH: 'courtyard',
SOUTH: 'butchery',
EAST: 'bakery',
WEST: 'house_63',
UP: None,
DOWN: None,
GROUND: ['fountain', 'apple', 'bread', 'coin'],
SHOP: [],
ENEMIES: [],
},
'house_63': {
NAME: 'House 63 (Ground)',
USERDESC: 'You are inside an old, deserted house. You see a dark staircase leading upward',
DESC: 'This house looks like it is going to collapse',
NORTH: None,
SOUTH: None,
EAST: 'town_square',
WEST: None,
UP: 'house_63_1',
DOWN: None,
GROUND: ['coin', 'apple'],
SHOP: [],
ENEMIES: [],
},
'house_63_1': {
NAME: 'House 63 (Attic)',
USERDESC: 'You are in a dark, gloomy attic, there is a staircase leading downward',
DESC: 'Everything is untouched, covered in dust.',
NORTH: None,
SOUTH: None,
EAST: None,
WEST: None,
UP: None,
DOWN: 'house_63',
GROUND: ['dagger'],
SHOP: [],
ENEMIES: ['spider', 'spider'],
},
'bakery': {
NAME: 'Bakery',
USERDESC: 'You are looking at various kinds of delicious pastry, making your stomach growl',
DESC: 'The air smells of warm, tasty bread',
NORTH: None,
SOUTH: None,
WEST: 'town_square',
EAST: None,
UP: None,
DOWN: None,
GROUND: [],
SHOP: ['flatbread', 'bread', 'cake'],
SHOPINTRO: 'The bakery has some freshly baked pastry for sale\n# Have a look:',
ENEMIES: [],
},
'butchery': {
NAME: 'Butchery',
USERDESC: "You are at the Butchery's entrance, You observe an old man as he sharpens his knife",
DESC: 'The air smells of meat and blood, it is unclean and stinky',
NORTH: 'town_square',
SOUTH: None,
EAST: None,
WEST: None,
UP: None,
DOWN: None,
GROUND: [],
SHOP: ['beef', 'sausage'],
SHOPINTRO: 'The butcher has some cuts ready to go\n# Have a look:',
ENEMIES: [],
},
'courtyard': {
NAME: 'Courtyard',
USERDESC: 'You come to a courtyard strewn with flowers, protected by small ornate cast-iron fences.',
DESC: 'A cardinal sings, sitting atop a distant fence. You faintly make out the smell of bread, and hear voices from the south',
NORTH: None,
SOUTH: 'town_square',
EAST: None,
WEST: None,
UP: None,
DOWN: None,
GROUND: [],
SHOP: [],
ENEMIES: [],
}
}
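if __name__ == '__main__':
    # Quick sketch, not part of the original module: assuming the NAME and
    # DIRECTIONS constants imported from `dicts` behave as used above, this
    # prints the exits reachable from the town square.
    print(get_room_exits(ROOMS['town_square']))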
|
StarcoderdataPython
|
6492812
|
# Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.model import Model, fields, get_model
from netforce.database import get_connection
from datetime import *
import time
class Campaign(Model):
_name = "mkt.campaign"
_string = "Campaign"
_fields = {
"name": fields.Char("Campaign Name", required=True, search=True),
"date": fields.Date("Date", required=True, search=True),
"target_lists": fields.Many2Many("mkt.target.list", "Target Lists"),
"email_tmpl_id": fields.Many2One("email.template", "Email Template"),
"mailbox_id": fields.Many2One("email.mailbox", "Email Mailbox"),
"comments": fields.One2Many("message", "related_id", "Comments"),
"state": fields.Selection([["active", "Active"], ["inactive", "Inactive"]], "Status", required=True),
"limit_day": fields.Integer("Daily Limit"),
"limit_hour": fields.Integer("Hourly Limit"),
"num_targets": fields.Integer("Number targets", function="get_stats", function_multi=True),
"num_create": fields.Integer("Number emails created", function="get_stats", function_multi=True),
"percent_create": fields.Float("% created", function="get_stats", function_multi=True),
"num_sent": fields.Integer("Number emails sent", function="get_stats", function_multi=True),
"percent_sent": fields.Float("% sent", function="get_stats", function_multi=True),
"num_delivered": fields.Integer("Number emails delivered", function="get_stats", function_multi=True),
"percent_delivered": fields.Float("% delivered", function="get_stats", function_multi=True),
"num_bounced": fields.Integer("Number emails bounced", function="get_stats", function_multi=True),
"percent_bounced": fields.Float("% bounced", function="get_stats", function_multi=True),
"num_rejected": fields.Integer("Number emails rejected", function="get_stats", function_multi=True),
"percent_rejected": fields.Float("% rejected", function="get_stats", function_multi=True),
"num_opened": fields.Integer("Number emails opened", function="get_stats", function_multi=True),
"percent_opened": fields.Float("% opened", function="get_stats", function_multi=True),
"num_clicked": fields.Integer("Number emails clicked", function="get_stats", function_multi=True),
"percent_clicked": fields.Float("% clicked", function="get_stats", function_multi=True),
"num_create_day": fields.Integer("Emails created within day", function="get_stats", function_multi=True),
"num_create_hour": fields.Integer("Emails created within hour", function="get_stats", function_multi=True),
"emails": fields.One2Many("email.message", "related_id", "Emails"),
"min_target_life": fields.Integer("Minimum Target Life (days)"),
}
_defaults = {
"date": lambda *a: time.strftime("%Y-%m-%d"),
"state": "active",
}
def create_emails_all(self, context={}):
for obj in self.search_browse([["state", "=", "active"]]):
obj.create_emails()
def create_emails(self, ids, context={}):
obj = self.browse(ids)[0]
if obj.state != "active":
raise Exception("Invalid state")
if not obj.email_tmpl_id:
raise Exception("Missing email template")
limit = None
if obj.limit_day:
limit = obj.limit_day - obj.num_create_day
if obj.limit_hour:
l = obj.limit_hour - obj.num_create_hour
if limit is None or l < limit:
limit = l
sent_emails = set()
for email in obj.emails:
if not email.name_id:
continue
if email.name_id._model != "mkt.target":
continue
target_id = email.name_id.id
res = get_model("mkt.target").search([["id", "=", email.name_id.id]]) # XXX
if not res:
continue
target = get_model("mkt.target").browse(target_id)
sent_emails.add(target.email)
count = 0
for tl in obj.target_lists:
for target in tl.targets:
if target.email in sent_emails:
continue
if obj.min_target_life and target.target_life < obj.min_target_life:
continue
if limit is not None and count >= limit:
break
settings = get_model("settings").browse(1)
data = {
"settings": settings,
"obj": target,
}
obj.email_tmpl_id.create_email(
data, name_id="mkt.target,%d" % target.id, related_id="mkt.campaign,%d" % obj.id, mailbox_id=obj.mailbox_id)
count += 1
db = get_connection()
db.commit()
return {
"next": {
"name": "campaign",
"mode": "form",
"active_id": obj.id,
},
"flash": "%d emails created" % count,
}
def get_stats(self, ids, context={}):
vals = {}
for obj_id in ids:
vals[obj_id] = {
"num_targets": 0,
"num_create": 0,
"num_sent": 0,
"num_delivered": 0,
"num_bounced": 0,
"num_rejected": 0,
"num_opened": 0,
"num_clicked": 0,
"num_create_day": 0,
"num_create_hour": 0,
}
db = get_connection()
res = db.query(
"SELECT c.id,COUNT(DISTINCT t.email) FROM mkt_campaign c JOIN m2m_mkt_campaign_mkt_target_list r ON r.mkt_campaign_id=c.id JOIN mkt_target t ON t.list_id=r.mkt_target_list_id WHERE c.id IN %s GROUP BY c.id", tuple(ids))
for r in res:
vals[r.id]["num_targets"] = r.count
res = db.query("SELECT related_id,COUNT(*) FROM email_message WHERE related_id IN %s GROUP BY related_id",
tuple(["mkt.campaign,%d" % x for x in ids]))
for r in res:
obj_id = int(r.related_id.split(",")[1])
v = vals[obj_id]
v["num_create"] = r.count
d = (datetime.now() - timedelta(hours=24)).strftime("%Y-%m-%d %H:%M:%S")
res = db.query("SELECT related_id,COUNT(*) FROM email_message WHERE related_id IN %s AND date>%s GROUP BY related_id",
tuple(["mkt.campaign,%d" % x for x in ids]), d)
for r in res:
obj_id = int(r.related_id.split(",")[1])
v = vals[obj_id]
v["num_create_day"] = r.count
d = (datetime.now() - timedelta(hours=1)).strftime("%Y-%m-%d %H:%M:%S")
res = db.query("SELECT related_id,COUNT(*) FROM email_message WHERE related_id IN %s AND date>%s GROUP BY related_id",
tuple(["mkt.campaign,%d" % x for x in ids]), d)
for r in res:
obj_id = int(r.related_id.split(",")[1])
v = vals[obj_id]
v["num_create_hour"] = r.count
res = db.query("SELECT related_id,COUNT(*) FROM email_message WHERE related_id IN %s AND state='sent' GROUP BY related_id",
tuple(["mkt.campaign,%d" % x for x in ids]))
for r in res:
obj_id = int(r.related_id.split(",")[1])
v = vals[obj_id]
v["num_sent"] = r.count
res = db.query("SELECT related_id,COUNT(*) FROM email_message WHERE related_id IN %s AND state='delivered' GROUP BY related_id",
tuple(["mkt.campaign,%d" % x for x in ids]))
for r in res:
obj_id = int(r.related_id.split(",")[1])
v = vals[obj_id]
v["num_delivered"] = r.count
res = db.query("SELECT related_id,COUNT(*) FROM email_message WHERE related_id IN %s AND state='bounced' GROUP BY related_id",
tuple(["mkt.campaign,%d" % x for x in ids]))
for r in res:
obj_id = int(r.related_id.split(",")[1])
v = vals[obj_id]
v["num_bounced"] = r.count
res = db.query("SELECT related_id,COUNT(*) FROM email_message WHERE related_id IN %s AND state='rejected' GROUP BY related_id",
tuple(["mkt.campaign,%d" % x for x in ids]))
for r in res:
obj_id = int(r.related_id.split(",")[1])
v = vals[obj_id]
v["num_rejected"] = r.count
res = db.query("SELECT related_id,COUNT(*) FROM email_message WHERE related_id IN %s AND opened GROUP BY related_id",
tuple(["mkt.campaign,%d" % x for x in ids]))
for r in res:
obj_id = int(r.related_id.split(",")[1])
v = vals[obj_id]
v["num_opened"] = r.count
res = db.query("SELECT related_id,COUNT(*) FROM email_message WHERE related_id IN %s AND clicked GROUP BY related_id",
tuple(["mkt.campaign,%d" % x for x in ids]))
for r in res:
obj_id = int(r.related_id.split(",")[1])
v = vals[obj_id]
v["num_clicked"] = r.count
for obj in self.browse(ids):
v = vals[obj.id]
v["percent_create"] = v["num_create"] * 100.0 / v["num_targets"] if v["num_targets"] else None
v["percent_sent"] = v["num_sent"] * 100.0 / v["num_create"] if v["num_create"] else None
v["percent_delivered"] = v["num_delivered"] * 100.0 / v["num_create"] if v["num_create"] else None
v["percent_bounced"] = v["num_bounced"] * 100.0 / v["num_create"] if v["num_create"] else None
v["percent_rejected"] = v["num_rejected"] * 100.0 / v["num_create"] if v["num_create"] else None
v["percent_opened"] = v["num_opened"] * 100.0 / v["num_create"] if v["num_create"] else None
v["percent_clicked"] = v["num_clicked"] * 100.0 / v["num_create"] if v["num_create"] else None
return vals
Campaign.register()
|
StarcoderdataPython
|
5065380
|
cores = {'limpo': '\033[m',
         'vermelho': '\033[1;31m',
         'verde': '\033[1;32m',
         'azul': '\033[1;34m'}
frase = str(input('Type a sentence: ')).strip().lower()
print(f'How many times does the letter A appear? {cores["vermelho"]}{frase.count("a")}{cores["limpo"]}')
print(f'Where does the letter A appear for the first time? {cores["verde"]}{frase.find("a")+1}{cores["limpo"]}')
print(f'Where does the letter A appear for the last time? {cores["azul"]}{frase.rfind("a")+1}{cores["limpo"]}')
|
StarcoderdataPython
|
6476451
|
<filename>h3mlcore/models/H3RNNSeqClassifier.py
'''
A bidirectional LSTM sequence model used for document classification.
It is basically a sequence classification model developed in mxnet.
Author: <NAME>
Group: Cognitive Security Technologies
Institute: Fraunhofer AISEC
Mail: <EMAIL>
Copyright@2017
'''
import mxnet as mx
import numpy as np
import os
import pickle
import logging
import yaml
import logging.config
from h3mlcore.models.H3BaseActor import H3BaseActor
from h3mlcore.utils.MxHelper import BasicArgparser
from h3mlcore.io.BucketSeqLabelIter import BucketSeqLabelIter
class H3RNNSeqClassifier(H3BaseActor):
""" """
def __init__(self,
num_hidden=256,
num_embed=128,
input_dim=None,
lstm_layer=1,
num_classes=2,
params_file='',
learning_rate=.1,
optimizer='sgd',
metric='acc',
use_gpus=[],
use_cpus=[],
logging_root_dir='logs/',
logging_config='configs/logging.yaml',
verbose=False
):
# setup logging
try:
# logging_root_dir = os.sep.join(__file__.split('/')[:-1])
logging_path = logging_root_dir + self.__class__.__name__ + '/'
if not os.path.exists(logging_path):
os.makedirs(logging_path)
logging_config = yaml.safe_load(open(logging_config, 'r'))
logging_config['handlers']['info_file_handler']['filename'] = logging_path + 'info.log'
logging_config['handlers']['error_file_handler']['filename'] = logging_path + 'error.log'
logging.config.dictConfig(logging_config)
except IOError:
logging.basicConfig(level=logging.INFO)
logging.warning(
"logging config file: %s does not exist." % logging_config)
finally:
self.logger = logging.getLogger('default')
# setup training parameters
self.num_hidden = num_hidden
self.num_embed = num_embed
self.input_dim = input_dim
self.lstm_layer = lstm_layer
self.num_classes = num_classes
self.params_file = params_file
self.learning_rate = learning_rate
self.optimizer = optimizer
if metric == 'acc':
self.metric = mx.metric.Accuracy()
elif metric == 'cross-entropy':
self.metric = mx.metric.CrossEntropy()
elif metric == 'topk':
self.metric = mx.metric.TopKAccuracy(top_k=3)
self.ctx = []
if use_gpus:
self.ctx = [mx.gpu(i) for i in use_gpus]
elif use_cpus:
self.ctx = [mx.cpu(i) for i in use_cpus]
else:
self.ctx = mx.cpu(0)
self.model = None
def _sym_gen(self, seq_len):
"""Dynamic symbol generator
        For a variable-length sequence model, we define a dynamic symbol generator
        that unrolls the network to the requested length based on different RNN cells.
Args:
seq_len(int): The sequence length to unroll
Returns:
mx.sym.Symbol: pred-> a symbol for the output of the sequence model
"""
data = mx.sym.Variable(name='data')
label = mx.sym.Variable(name='softmax_label')
embeds = mx.symbol.Embedding(
data=data, input_dim=self.input_dim, output_dim=self.num_embed, name='embed')
lstm_1 = mx.rnn.LSTMCell(prefix='lstm_1_', num_hidden=self.num_hidden)
outputs, _ = lstm_1.unroll(seq_len, inputs=embeds, layout='NTC')
for i in range(self.lstm_layer - 1):
new_lstm = mx.rnn.LSTMCell(
prefix='lstm_' + str(i + 2) + '_', num_hidden=self.num_hidden)
outputs, _ = new_lstm.unroll(seq_len, inputs=outputs, layout='NTC')
pred = mx.sym.FullyConnected(
data=outputs[-1], num_hidden=self.num_classes, name='logits')
pred = mx.sym.SoftmaxOutput(data=pred, label=label, name='softmax')
return pred, ('data',), ('softmax_label',)
def initialize(self, data_iter):
"""Initialize the neural network model
        This should be called right after construction. It tries to load network parameters
        from a file if one exists, otherwise it initializes them. It also sets up the
        optimizer and its optimization parameters.
Args:
data_iter(mx.io.NDArrayIter): initialize the model with data iterator, it should be
of type BucketSeqLabelIter.
"""
if not isinstance(data_iter, BucketSeqLabelIter):
            err_msg = "Data iterator for this model should be of type BucketSeqLabelIter."
            self.logger.error(err_msg)
            raise TypeError(err_msg)
self.model = mx.module.BucketingModule(
sym_gen=self._sym_gen, default_bucket_key=data_iter.default_bucket_key, context=self.ctx)
self.model.bind(data_iter.provide_data, data_iter.provide_label)
if os.path.isfile(self.params_file):
try:
self.model.load_params(self.params_file)
self.logger.info(
"LSTM model parameters loaded from file: %s." % (self.params_file))
except (IOError, ValueError):
self.logger.warning(
"Parameters file does not exist or not valid! please check the file.")
else:
self.model.init_params()
self.logger.info("LSTM Model initialized.")
self.model.init_optimizer(optimizer=self.optimizer,
optimizer_params=(('learning_rate', self.learning_rate),))
def step(self, data_batch):
"""Feed one data batch from data iterator to train model
This function is called when we feed one data batch to model to update parameters.
it can be used in train_epochs.
See also: train_epochs.
Args:
data_batch (mx.io.DataBatch): a data batch matches the model definition
"""
self.model.forward(data_batch=data_batch)
metric = mx.metric.CrossEntropy()
metric.update(data_batch.label, self.model.get_outputs())
self.logger.debug('train step %s: %f' % (metric.get()))
self.model.backward()
self.model.update()
def train_epochs(self, train_data,
eval_data=None,
num_epochs=10,
):
"""Train model for many epochs with training data.
The model will be trained in epochs and possibly evaluated with validation dataset. The
model parameters will be saved on disk. Note that for Bucketing model, only network parameters
will be saved in checkpoint, since model symbols need to be created according to buckets
which match the training data.
Args:
train_data (BucketSeqLabelIter): Training data iterator
eval_data (BucketSeqLabelIter): Validation data iterator
num_epochs (int): Number of epochs to train
"""
for e in range(num_epochs):
train_data.reset()
for batch in train_data:
self.step(data_batch=batch)
if eval_data:
eval_data.reset()
self.model.score(eval_data, self.metric)
self.logger.info("Training epoch %d -- Evaluate %s: %f"
% (e + 1, self.metric.name, self.metric.get()[1]))
def predict(self, test_data, batch_size=32):
"""Predict labels on test dataset which is a list of list of encoded tokens (integer).
Predict labels on a list of list of integers. As for training, test data sample is
a list of integers mapped from token.
Args:
test_data (list): A list of list of integers
Returns:
            labels (np.ndarray): predicted class index for each sample
            scores (np.ndarray): per-class softmax scores for each sample
"""
sample_ids = range(len(test_data))
labels = np.zeros(shape=(len(test_data, )), dtype=int)
scores = np.zeros(shape=(len(test_data), self.num_classes))
tt_iter = BucketSeqLabelIter(
test_data, sample_ids, batch_size=batch_size)
for batch in tt_iter:
self.model.forward(batch, is_train=False)
out = self.model.get_outputs()[0].asnumpy()
for logits, idx in zip(out, batch.label[0].asnumpy()):
labels[idx] = np.argmax(logits)
scores[idx] = logits
return labels, scores
def save(self, path, epoch=None):
"""Save model parameters for BucketingModel
This function saves model offline, either a checkpoint or parameters for Bucketing model.
Note that normally it can be saved as checkpoint, but for variable length model such as
BucketingModel, we can only save parameters and initialize the model with parameter loading,
since the unrolled version of models need to be determined by data iterator, which can be
any length.
Args:
path (str): a valid path to save the checkpoint/parameters
"""
if epoch:
path = path + '-' + str(epoch)
self.model.save_params(path)
self.logger.info('Network parameters saved in %s' % (path))
if __name__ == '__main__':
'''
Run from terminal
'''
# arg_parser = BasicArgparser(
# prog="LSTM Models with varying length inputs.").get_parser()
# args = arg_parser.parse_args()
# # basic parameters
# epochs = args.epochs
# batch_size = args.batch_size
# lr = args.learning_rate
# ctx = []
# if args.gpus:
# for gid in args.gpus:
# ctx.append(mx.gpu(args.gpus[gid]))
# elif args.cpus:
# for cid in args.cpus:
# ctx.append(mx.cpu(args.cpus[gid]))
# else:
# # default
# ctx = mx.cpu(0)
from termcolor import colored
# from nltk.tokenize import word_tokenize
from sklearn.cross_validation import train_test_split
from h3mlcore.utils.DatasetHelper import load_snp17, java_tokenize
# load data
# datafile = "../datasets/npc_chat_data2.p"
# data = pickle.load(open(datafile, 'r'))
# all_sents = data['Xtr']
# sents = [word_tokenize(sent) for sent in all_sents]
# labels = np.array(data['ytr'], dtype=int) - 1
# label_names = data['label_info']
all_sents, all_labels, _ = load_snp17(csv_file='/Users/hxiao/repos/h3lib/h3db/snp17/train/answer_snippets_coded.csv',
save_path='/Users/hxiao/repos/webdemo/datasets/snp17.p',
force_overwrite=False)
sents, labels, discard_snippets = java_tokenize(all_sents, all_labels)
sents_encoded, vocab = mx.rnn.encode_sentences(sents, vocab=None, invalid_key='\n',
invalid_label=-1, start_label=0)
word_map = dict([(index, word) for word, index in vocab.iteritems()])
print 'Total #encoded_snippets: %d, #issue_snippets: %d, total #tokens: %d' \
% (len(sents_encoded), discard_snippets, len(vocab))
tr_data, tt_data, tr_labels, tt_labels = train_test_split(
sents_encoded, labels, train_size=0.8)
buckets = [50, 100, 200, 1000]
tr_iter = BucketSeqLabelIter(
tr_data, tr_labels, buckets=buckets, batch_size=64)
tt_iter = BucketSeqLabelIter(
tt_data, tt_labels, buckets=buckets, batch_size=64)
clf = H3RNNSeqClassifier(input_dim=len(vocab), num_classes=np.unique(labels).size)
clf.initialize(tr_iter)
clf.train_epochs(tr_iter, tt_iter, num_epochs=50)
# test
# test_sents = [word_tokenize(sent) for sent in all_sents[100:400]]
# test_labels = labels[100:400]
# test_sents_encoded, _ = mx.rnn.encode_sentences(test_sents, vocab=vocab)
# preds, logits = clf.predict(test_sents_encoded, batch_size=50)
# for s, p, lgt, real in zip(all_sents[100:300], preds, logits, test_labels):
# if real == p:
# print colored(s, color='blue') + \
# colored(' -> ' + label_names[p] +
# ' <- ' + label_names[real], color='green')
# else:
# print colored(s, color='blue') + \
# colored(' -> ' + label_names[p] +
# ' <- ' + label_names[real], color='red')
# print 'Logits: ', lgt
|
StarcoderdataPython
|
1882224
|
from dreamcoder.program import Primitive, Program
from dreamcoder.type import arrow, baseType, tint
turtle = baseType("turtle")
tstate = baseType("tstate")
tangle = baseType("tangle")
tlength = baseType("tlength")
primitives = (
[
Primitive("logo_UA", tangle, ""),
Primitive("logo_UL", tlength, ""),
Primitive("logo_ZA", tangle, ""),
Primitive("logo_ZL", tlength, ""),
Primitive("logo_DIVA", arrow(tangle, tint, tangle), ""),
Primitive("logo_MULA", arrow(tangle, tint, tangle), ""),
Primitive("logo_DIVL", arrow(tlength, tint, tlength), ""),
Primitive("logo_MULL", arrow(tlength, tint, tlength), ""),
Primitive("logo_ADDA", arrow(tangle, tangle, tangle), ""),
Primitive("logo_SUBA", arrow(tangle, tangle, tangle), ""),
# Primitive("logo_ADDL", arrow(tlength,tlength,tlength), ""),
# Primitive("logo_SUBL", arrow(tlength,tlength,tlength), ""),
# Primitive("logo_PU", arrow(turtle,turtle), ""),
# Primitive("logo_PD", arrow(turtle,turtle), ""),
Primitive("logo_PT", arrow(arrow(turtle, turtle), arrow(turtle, turtle)), None),
Primitive("logo_FWRT", arrow(tlength, tangle, turtle, turtle), ""),
Primitive("logo_GETSET", arrow(arrow(turtle, turtle), turtle, turtle), ""),
]
+ [
Primitive("logo_IFTY", tint, ""),
Primitive("logo_epsA", tangle, ""),
Primitive("logo_epsL", tlength, ""),
Primitive(
"logo_forLoop",
arrow(tint, arrow(tint, turtle, turtle), turtle, turtle),
"ERROR: python has no way of expressing this hence you shouldn't eval on this",
),
]
+ [Primitive(str(j), tint, j) for j in range(10)]
)
if __name__ == "__main__":
expr_s = "(lambda (logo_forLoop 3 (lambda (lambda (logo_GET (lambda (logo_FWRT (logo_S2L (logo_I2S 1)) (logo_S2A (logo_I2S 0)) (logo_SET $0 (logo_FWRT (logo_S2L eps) (logo_DIVA (logo_S2A (logo_I2S 2)) (logo_I2S 3)) ($1)))))))) ($0)))"
x = Program.parse(expr_s)
print(x)
|
StarcoderdataPython
|
1974265
|
# Copyright 2014 Intel Corporation, All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the"License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from django.core import validators
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils.validators import validate_port_range
# from horizon.utils import fields
import logging
from vsm_dashboard.api import vsm as vsm_api
from vsm_dashboard.utils.validators import validate_storage_group_name
LOG = logging.getLogger(__name__)
class CreateStorageGroupForm(forms.SelfHandlingForm):
failure_url = 'horizon:vsm:storage-group-management:index'
name = forms.CharField(label=_("Storage Group name"),
max_length=255,
min_length=1,
error_messages={
'required': _('This field is required.'),
'invalid': _("The string may only contain"
" letters ,numbers, dot and underline")},
validators=[validate_storage_group_name])
friendly_name = forms.CharField(label=_("Storage Group Friendly name"),
max_length=255,
min_length=1,
error_messages={
'required': _('This field is required.'),
'invalid': _("The string may only contain"
" letters ,numbers, dot and underline")},
validators=[validate_storage_group_name])
storage_class = forms.CharField(label=_("Storage Class"),
max_length=24,
min_length=1,
error_messages={
'required': _('This field is required.'),
'invalid': _("The string may only contain"
" letters ,numbers, dot and underline")},
validators=[validate_storage_group_name])
def handle(self, request, data):
# TODO deliver a cluster id in data
data['cluster_id'] = 1
#try:
if True:
LOG.debug("DEBUG in storage groups, %s" % str(data))
body = {
'storage_group': {
'name': data['name'],
'friendly_name': data['friendly_name'],
'storage_class': data['storage_class'],
'cluster_id': data['cluster_id']
}
}
LOG.debug("DEBUG in handle body %s" % str(body))
rsp = vsm_api.storage_group_create(request, body=body)
LOG.debug("DEBUG in storage groups" + str(rsp))
messages.success(request,
_('Successfully created Storage Group: %s')
% data['name'])
return True
#except:
# redirect = reverse("horizon:vsm:storage-group-management:index")
# exceptions.handle(request,
# _('Unable to create Storage Group.'),
# redirect=redirect)
|
StarcoderdataPython
|
1958827
|
<gh_stars>1-10
import unittest
import tests
if __name__ == "__main__":
test_suite = unittest.TestLoader().discover(tests.__name__)
runner = unittest.TextTestRunner(verbosity=3)
runner.run(test_suite)
|
StarcoderdataPython
|
97630
|
<reponame>chocianowice/weatherQuery
import sys
import mariadb
import time
def export_to_db(dbconfig, temperature, weatherCode, windSpeed, windDirection):
# Connect to MariaDB Platform
try:
conn = mariadb.connect(
user=dbconfig['username'],
password=dbconfig['password'],
host=dbconfig['host'],
port=dbconfig['port'],
database=dbconfig['databaseName']
)
except mariadb.Error as e:
print(f"Error connecting to MariaDB: {e}")
return
# Get Cursor
cursor = conn.cursor()
cDate = time.strftime('%Y-%m-%d %H:%M:%S')
sql = "INSERT INTO " + dbconfig['measurementsTableName'] + \
" (SensorID, Date, Value1, Value2, Value3, Value4) VALUES (%s, %s, %s, %s, %s, %s)"
var = (str(dbconfig['sensorId']),
cDate,
temperature,
weatherCode,
windSpeed,
windDirection)
try:
cursor.execute(sql, var)
conn.commit()
except mariadb.Error as e:
print(f"Error: {e}")
conn.close()
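if __name__ == "__main__":
    # Example wiring only; every value below is a placeholder and not taken from
    # the original repository's configuration.
    dbconfig = {
        'username': 'weather_user',
        'password': 'secret',
        'host': 'localhost',
        'port': 3306,
        'databaseName': 'weather',
        'measurementsTableName': 'measurements',
        'sensorId': 1,
    }
    export_to_db(dbconfig, temperature=21.5, weatherCode=800,
                 windSpeed=3.2, windDirection=180)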
|
StarcoderdataPython
|
1827488
|
# --------------
# Code starts here
# Create the lists
class_1 = ['<NAME>','<NAME>','<NAME>','<NAME>' ]
class_2 = ['<NAME>','<NAME>','<NAME>']
# Concatenate both the lists
new_class = (class_1 + class_2)
print (new_class)
# Append the list
new_class.append('<NAME>')
# Print updated list
print(new_class)
# Remove the element from the list
new_class.remove('<NAME>')
# Print the list
print(new_class)
# Code ends here
# --------------
# Code starts here
courses={"Math":65,"English":70,"History":80,"French":70,"Science":60}
keys=courses.values()
print(keys)
total=(courses["Math"]+courses["English"]+courses["History"]+courses["French"]+courses["Science"])
print(total)
percentage=(total*100)/500
print(percentage)
# Code ends here
# --------------
# Code starts here
mathematics={"<NAME>":78,"<NAME>":95,
"<NAME>":65,"<NAME>":50,"<NAME>":70
,"<NAME>":66,"<NAME>":75}
topper=max(mathematics,key=mathematics.get)
print(topper)
# Code ends here
# --------------
# Given string
topper = 'andrew ng'
# Code starts here
first_name=topper.split()[0]
#print(first_name)
last_name=topper.split()[1]
full_name=last_name+" "+first_name
print(full_name)
certificate_name=full_name.upper()
print(certificate_name)
# Code ends here
|
StarcoderdataPython
|
3570805
|
# Copyright (c) 2014 mogoweb. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
# A hook that can be overridden in other repositories to add additional
# compilation targets to 'All'
'android_app_targets%': [],
'chromium_code': 1,
},
'targets': [
{
'target_name': 'All',
'type': 'none',
'dependencies': [
'libchromeview',
],
},
{
'target_name': 'libchromeview',
'type': 'shared_library',
'android_unmangled_name': 1,
'dependencies': [
'../android_webview/android_webview.gyp:android_webview_common',
],
'sources': [
'native/jni_entry_point.cpp',
'native/draw_gl_functor.cpp',
],
'include_dirs': [
'./native',
],
'cflags!': [
'-Werror',
],
},
],
}
|
StarcoderdataPython
|
367510
|
<gh_stars>0
# --------------
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
# Code starts here
df = pd.read_csv(path)
print(df.head())
print(df.info())
cols = ['INCOME','HOME_VAL','BLUEBOOK','OLDCLAIM','CLM_AMT']
df[cols] = df[cols].replace({'\$': '', ',': ''}, regex=True)
X = df.drop(['CLAIM_FLAG'], axis = 1)
y = df['CLAIM_FLAG']
count = y.value_counts()
X_train,X_test,y_train,y_test = train_test_split(X, y, test_size = 0.3 , random_state = 6)
# Code ends here
# --------------
# Code starts here
cols = ['INCOME','HOME_VAL','BLUEBOOK','OLDCLAIM','CLM_AMT']
X_train[cols] = X_train[cols].astype(float)
X_test[cols] = X_test[cols].astype(float)
print(X_train.isnull().sum())
print(X_test.isnull().sum())
# Code ends here
# --------------
# Code starts here
X_train.dropna(subset = ['YOJ','OCCUPATION'], inplace = True)
X_test.dropna(subset = ['YOJ','OCCUPATION'], inplace =True )
y_train = y_train[X_train.index]
y_test = y_test[X_test.index]
cols = ['AGE','CAR_AGE','INCOME', 'HOME_VAL']
for col in cols:
X_train[col].fillna((X_train[col].mean()),inplace=True)
X_test[col].fillna((X_test[col].mean()),inplace=True)
#X_train.isnull().sum()
print(X_train.shape)
# Code ends here
# --------------
from sklearn.preprocessing import LabelEncoder
columns = ["PARENT1","MSTATUS","GENDER","EDUCATION","OCCUPATION","CAR_USE","CAR_TYPE","RED_CAR","REVOKED"]
# Code starts here
for col in columns:
le = LabelEncoder()
X_train[col] = le.fit_transform(X_train[col].astype(str))
X_test[col] = le.transform(X_test[col].astype(str))
# Code ends here
# --------------
from sklearn.metrics import precision_score
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
# code starts here
model = LogisticRegression(random_state = 6)
model.fit(X_train,y_train)
y_pred = model.predict(X_test)
score = accuracy_score(y_test,y_pred)
precision = precision_score(y_test,y_pred)
print('Accuracy score : ',score)
print('Precision :',precision)
# Code ends here
# --------------
from sklearn.preprocessing import StandardScaler
from imblearn.over_sampling import SMOTE
# code starts here
smote = SMOTE(random_state = 9)
X_train, y_train = smote.fit_sample(X_train,y_train)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# Code ends here
# --------------
# Code Starts here
model = LogisticRegression()
model.fit(X_train,y_train)
y_pred = model.predict(X_test)
score = accuracy_score(y_test, y_pred)
print('Accuracy score :', score)
# Code ends here
|
StarcoderdataPython
|
6648388
|
# GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Component:
DESCRIPTION = "Kill or send a signal to the container"
class Input:
ID = "id"
SIGNAL = "signal"
class Output:
SUCCESS = "success"
class ContainerKillInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "Container ID",
"order": 1
},
"signal": {
"type": "string",
"title": "Signal",
"description": "Signal to send E.g. SIGKILL",
"default": "SIGKILL",
"order": 2
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class ContainerKillOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"success": {
"type": "boolean",
"title": "Success",
"description": "True if successful",
"order": 1
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
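if __name__ == '__main__':
    # Inspection sketch only, not emitted by the Komand SDK generator: the schema
    # attributes above are plain dicts, so they can be examined directly.
    print(sorted(ContainerKillInput.schema['properties'].keys()))       # ['id', 'signal']
    print(ContainerKillOutput.schema['properties']['success']['type'])  # boolean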
|
StarcoderdataPython
|
9681374
|
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import time
### DATA PREPROCESSING ###
#Importing data
data = pd.read_csv (r'NASDAQ.csv')
print (data)
print (data['LABEL'].unique().tolist()) #To check number of variables i.e. should be only 2 (0 and1)
print(data.isnull().sum())
#Deleting non-useful attributes
del data['Date']
#Setting targets
y = data.LABEL
print (y)
### SPLITTING DATA ###
#Splitting training & Testing data in 3:1 ratio (only for Naive Bayesian & AdaBoost)
X = data.drop('LABEL',axis=1)
print (X)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=42)
### CLASSIFYING ###
## 1. Gaussian Naive Bayesian Classifier
from sklearn.naive_bayes import GaussianNB
from sklearn import metrics
#Training
start_NB_train = time.time()
gnb = GaussianNB()
gnb.fit(X_train,y_train)
end_NB_train = time.time()
print ()
print ("GAUSSIAN NAIVE BAYESIAN CLASSIFIER:")
print ("GNB Processing time : Training = %0.3fs " % (end_NB_train - start_NB_train))
#Testing
start_NB_test = time.time()
y_pred_NB = gnb.predict(X_test)
end_NB_test = time.time()
print ("GNB Processing time : Testing = %0.3fs " % (end_NB_test - start_NB_test))
print ("GNB Accuracy percentage = ",round(metrics.accuracy_score(y_test, y_pred_NB)*100,2),"%")
print ()
## 2. AdaBoost Classifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn import metrics
#Training
start_AB_train = time.time()
ada = AdaBoostClassifier(DecisionTreeClassifier(), n_estimators=100, random_state = 0)
model = ada.fit(X_train, y_train)
end_AB_train = time.time()
print ("ADABOOST CLASSIFIER:")
print ("AdaBoost Processing time : Training = %0.3fs " % (end_AB_train - start_AB_train))
#Testing
start_AB_test = time.time()
y_pred_AB = ada.predict(X_test)
end_AB_test = time.time()
print ("AdaBoost Processing time : Testing = %0.3fs " % (end_AB_test - start_AB_test))
print ("AdaBoost Accuracy percentage = ",round(metrics.accuracy_score(y_test, y_pred_AB)*100,2),"%")
print ()
### Confusion Matrix
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import seaborn as sns
con_NB = confusion_matrix(y_test,y_pred_NB)
con_AB = confusion_matrix(y_test,y_pred_AB)
class_names=[0,1]
fig, ax = plt.subplots()
tick_marks = np.arange(len(class_names))
plt.xticks(tick_marks, class_names)
plt.yticks(tick_marks, class_names)
sns.heatmap(pd.DataFrame(con_NB), annot=True, cmap="PuRd" ,fmt='g')
ax.xaxis.set_label_position("top")
plt.tight_layout()
plt.title('Confusion matrix for Naive Bayesian', y=1.1)
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show()
sns.heatmap(pd.DataFrame(con_AB), annot=True, cmap="Blues" ,fmt='g')
ax.xaxis.set_label_position("top")
plt.tight_layout()
plt.title('Confusion matrix for AdaBoost', y=1.1)
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show()
### ROC
import sklearn.metrics as metrics
probs_gnb = gnb.predict_proba(X_test)
preds_gnb = probs_gnb[:,1]
fpr_gnb, tpr_gnb, threshold_gnb = metrics.roc_curve(y_test, preds_gnb)
roc_auc_gnb = metrics.auc(fpr_gnb, tpr_gnb)
import matplotlib.pyplot as plt
plt.title('Receiver Operating Characteristic')
plt.plot(fpr_gnb, tpr_gnb, 'b', label = 'AUC of Naive Bayesian = %0.2f' % roc_auc_gnb)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
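### Sketch, not in the original script: the same ROC/AUC computation applied to the
### AdaBoost model trained above; `ada`, `X_test` and `y_test` are reused as-is.
probs_ada = ada.predict_proba(X_test)
preds_ada = probs_ada[:,1]
fpr_ada, tpr_ada, threshold_ada = metrics.roc_curve(y_test, preds_ada)
print ("AdaBoost AUC = %0.2f" % metrics.auc(fpr_ada, tpr_ada))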
|
StarcoderdataPython
|
1972368
|
<filename>binding/python/test_leopard.py
#
# Copyright 2018-2022 Picovoice Inc.
#
# You may not use this file except in compliance with the license. A copy of the license is located in the "LICENSE"
# file accompanying this source.
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
import os
import struct
import sys
import unittest
import wave
from leopard import Leopard
from util import *
class LeopardTestCase(unittest.TestCase):
_AUDIO_PATH = os.path.join(os.path.dirname(__file__), '../../resources/audio_samples/test.wav')
_TRANSCRIPT = "MR QUILTER IS THE APOSTLE OF THE MIDDLE CLASSES AND WE ARE GLAD TO WELCOME HIS GOSPEL"
_o = None
@classmethod
def setUpClass(cls):
cls._o = Leopard(
access_key=sys.argv[1],
library_path=default_library_path('../..'),
model_path=default_model_path('../..')
)
@classmethod
def tearDownClass(cls):
cls._o.delete()
def test_process(self):
with wave.open(self._AUDIO_PATH, 'rb') as f:
buffer = f.readframes(f.getnframes())
pcm = struct.unpack('%dh' % (len(buffer) / struct.calcsize('h')), buffer)
self.assertEqual(self._o.process(pcm), self._TRANSCRIPT)
def test_process_file(self):
self.assertEqual(self._o.process_file(self._AUDIO_PATH), self._TRANSCRIPT)
def test_version(self):
self.assertIsInstance(self._o.version, str)
self.assertGreater(len(self._o.version), 0)
if __name__ == '__main__':
unittest.main(argv=sys.argv[:1])
|
StarcoderdataPython
|
1999270
|
<gh_stars>0
"""Test mixing sources of arguments/settings."""
from os.path import join
import pytest
from click.testing import CliRunner
from sphinxcontrib.versioning.cli import cli
from sphinxcontrib.versioning.git import IS_WINDOWS
@pytest.fixture(autouse=True)
def setup(monkeypatch, local_empty):
"""Set cli.NO_EXECUTE to True before every test in this module and sets CWD to an empty git repo.
:param monkeypatch: pytest fixture.
:param local_empty: conftest fixture.
"""
monkeypatch.setattr('sphinxcontrib.versioning.cli.NO_EXECUTE', True)
monkeypatch.chdir(local_empty)
@pytest.mark.parametrize('push', [False, True])
@pytest.mark.parametrize('source_cli', [False, True])
@pytest.mark.parametrize('source_conf', [False, True])
def test_overflow(local_empty, push, source_cli, source_conf):
"""Test -- overflow to sphinx-build.
:param local_empty: conftest fixture.
:param bool push: Run push sub command instead of build.
:param bool source_cli: Set value from command line arguments.
:param bool source_conf: Set value from conf.py file.
"""
if push:
args = ['push', 'docs', 'gh-pages', '.']
else:
args = ['build', 'docs', join('docs', '_build', 'html')]
# Setup source(s).
if source_cli:
args += ['--', '-D', 'setting=value']
if source_conf:
local_empty.ensure('docs', 'contents.rst')
local_empty.ensure('docs', 'conf.py').write('scv_overflow = ("-D", "key=value")')
# Run.
result = CliRunner().invoke(cli, args)
config = result.exception.args[0]
# Verify.
if source_cli:
assert config.overflow == ('-D', 'setting=value')
elif source_conf:
assert config.overflow == ('-D', 'key=value')
else:
assert config.overflow == tuple()
@pytest.mark.parametrize('push', [False, True])
def test_args(push):
"""Test positional arguments.
:param bool push: Run push sub command instead of build.
"""
# Single rel_source.
if push:
result = CliRunner().invoke(cli, ['push', 'docs', 'gh-pages', '.'])
rel_source, dest_branch, rel_dest = result.exception.args[1:]
assert dest_branch == 'gh-pages'
assert rel_dest == '.'
else:
result = CliRunner().invoke(cli, ['build', 'docs', join('docs', '_build', 'html')])
rel_source, destination = result.exception.args[1:]
assert destination == join('docs', '_build', 'html')
assert rel_source == ('docs',)
# Multiple rel_source.
if push:
result = CliRunner().invoke(cli, ['push', 'docs', 'docs2', 'documentation', 'dox', 'feature', 'html'])
rel_source, dest_branch, rel_dest = result.exception.args[1:]
assert dest_branch == 'feature'
assert rel_dest == 'html'
else:
result = CliRunner().invoke(cli, ['build', 'docs', 'docs2', 'documentation', 'dox', 'html'])
rel_source, destination = result.exception.args[1:]
assert destination == 'html'
assert rel_source == ('docs', 'docs2', 'documentation', 'dox')
@pytest.mark.parametrize('push', [False, True])
def test_global_options(monkeypatch, tmpdir, caplog, local_empty, push):
"""Test options that apply to all sub commands.
:param monkeypatch: pytest fixture.
:param tmpdir: pytest fixture.
:param caplog: pytest extension fixture.
:param local_empty: conftest fixture.
:param bool push: Run push sub command instead of build.
"""
if push:
args = ['push', 'docs', 'gh-pages', '.']
else:
args = ['build', 'docs', join('docs', '_build', 'html')]
# Defaults.
result = CliRunner().invoke(cli, args)
config = result.exception.args[0]
assert config.chdir == str(local_empty)
if IS_WINDOWS:
assert config.git_root.lower() == str(local_empty).lower()
else:
assert config.git_root == str(local_empty)
assert config.local_conf is None
assert config.no_colors is False
assert config.no_local_conf is False
assert config.verbose == 0
# Defined.
empty = tmpdir.ensure_dir('empty')
repo = tmpdir.ensure_dir('repo')
pytest.run(repo, ['git', 'init'])
local_empty.ensure('conf.py')
args = ['-L', '-l', 'conf.py', '-c', str(empty), '-g', str(repo), '-N', '-v', '-v'] + args
result = CliRunner().invoke(cli, args)
config = result.exception.args[0]
assert config.chdir == str(empty)
if IS_WINDOWS:
assert config.git_root.lower() == str(repo).lower()
else:
assert config.git_root == str(repo)
assert config.local_conf is None # Overridden by -L.
assert config.no_colors is True
assert config.no_local_conf is True
assert config.verbose == 2
# Set in conf.py. They'll be ignored.
monkeypatch.chdir(local_empty)
local_empty.ensure('docs', 'contents.rst')
local_empty.ensure('docs', 'conf.py').write(
'scv_chdir = ".."\n'
'scv_git_root = ".."\n'
'scv_no_colors = False\n'
'scv_verbose = 1\n'
)
args = args[7:] # Remove -L -l -c and -g.
result = CliRunner().invoke(cli, args)
records = [(r.levelname, r.message) for r in caplog.records]
config = result.exception.args[0]
assert config.chdir == str(local_empty)
if IS_WINDOWS:
assert config.git_root.lower() == str(local_empty).lower()
else:
assert config.git_root == str(local_empty)
assert config.local_conf == join('docs', 'conf.py')
assert config.no_colors is True
assert config.no_local_conf is False
assert config.verbose == 2
assert ('DEBUG', 'chdir already set in config, skipping.') in records
assert ('DEBUG', 'git_root already set in config, skipping.') in records
assert ('DEBUG', 'no_colors already set in config, skipping.') in records
assert ('DEBUG', 'verbose already set in config, skipping.') in records
@pytest.mark.parametrize('mode', ['bad filename', 'rel_source', 'override'])
@pytest.mark.parametrize('no_local_conf', [False, True])
@pytest.mark.parametrize('push', [False, True])
def test_global_options_local_conf(caplog, local_empty, mode, no_local_conf, push):
"""Test detection of local conf.py file.
:param caplog: pytest extension fixture.
:param local_empty: conftest fixture.
:param str mode: Scenario to test for.
:param no_local_conf: Toggle -L.
:param bool push: Run push sub command instead of build.
"""
args = ['-L'] if no_local_conf else []
if push:
args += ['push', 'docs', 'gh-pages', '.']
else:
args += ['build', 'docs', join('docs', '_build', 'html')]
# Run.
if mode == 'bad filename':
local_empty.ensure('docs', 'config.py')
args = ['-l', join('docs', 'config.py')] + args
elif mode == 'rel_source':
local_empty.ensure('docs', 'conf.py')
else:
local_empty.ensure('other', 'conf.py')
args = ['-l', join('other', 'conf.py')] + args
result = CliRunner().invoke(cli, args)
config = result.exception.args[0]
records = [(r.levelname, r.message) for r in caplog.records]
# Verify.
if no_local_conf:
assert config.local_conf is None
assert config.no_local_conf is True
return
if mode == 'bad filename':
assert config == 1 # SystemExit.
assert records[-2] == ('ERROR', 'Path "{}" must end with conf.py.'.format(join('docs', 'config.py')))
elif mode == 'rel_source':
assert config.local_conf == join('docs', 'conf.py')
assert config.no_local_conf is False
else:
assert config.local_conf == join('other', 'conf.py')
assert config.no_local_conf is False
@pytest.mark.parametrize('push', [False, True])
@pytest.mark.parametrize('source_cli', [False, True])
@pytest.mark.parametrize('source_conf', [False, True])
def test_sub_command_options(local_empty, push, source_cli, source_conf):
"""Test non-global options that apply to all sub commands.
:param local_empty: conftest fixture.
:param bool push: Run push sub command instead of build.
:param bool source_cli: Set value from command line arguments.
:param bool source_conf: Set value from conf.py file.
"""
if push:
args = ['push', 'docs', 'gh-pages', '.']
else:
args = ['build', 'docs', join('docs', '_build', 'html')]
# Setup source(s).
if source_cli:
args += ['-itT', '-p', 'branches', '-r', 'feature', '-s', 'semver', '-w', 'master', '-W', '[0-9]']
args += ['-aAb', '-B', 'x']
if push:
args += ['-e' 'README.md', '-P', 'rem']
if source_conf:
local_empty.ensure('docs', 'contents.rst')
local_empty.ensure('docs', 'conf.py').write(
'import re\n\n'
'scv_banner_greatest_tag = True\n'
'scv_banner_main_ref = "y"\n'
'scv_banner_recent_tag = True\n'
'scv_greatest_tag = True\n'
'scv_invert = True\n'
'scv_priority = "tags"\n'
'scv_push_remote = "origin2"\n'
'scv_recent_tag = True\n'
'scv_root_ref = "other"\n'
'scv_show_banner = True\n'
'scv_sort = ("alpha",)\n'
'scv_whitelist_branches = ("other",)\n'
'scv_whitelist_tags = re.compile("^[0-9]$")\n'
'scv_grm_exclude = ("README.rst",)\n'
)
# Run.
result = CliRunner().invoke(cli, args)
config = result.exception.args[0]
# Verify.
if source_cli:
assert config.banner_greatest_tag is True
assert config.banner_main_ref == 'x'
assert config.banner_recent_tag is True
assert config.greatest_tag is True
assert config.invert is True
assert config.priority == 'branches'
assert config.recent_tag is True
assert config.root_ref == 'feature'
assert config.show_banner is True
assert config.sort == ('semver',)
assert config.whitelist_branches == ('master',)
assert config.whitelist_tags == ('[0-9]',)
if push:
assert config.grm_exclude == ('README.md',)
assert config.push_remote == 'rem'
elif source_conf:
assert config.banner_greatest_tag is True
assert config.banner_main_ref == 'y'
assert config.banner_recent_tag is True
assert config.greatest_tag is True
assert config.invert is True
assert config.priority == 'tags'
assert config.recent_tag is True
assert config.root_ref == 'other'
assert config.show_banner is True
assert config.sort == ('alpha',)
assert config.whitelist_branches == ('other',)
assert config.whitelist_tags.pattern == '^[0-9]$'
if push:
assert config.grm_exclude == ('README.rst',)
assert config.push_remote == 'origin2'
else:
assert config.banner_greatest_tag is False
assert config.banner_main_ref == 'master'
assert config.banner_recent_tag is False
assert config.greatest_tag is False
assert config.invert is False
assert config.priority is None
assert config.recent_tag is False
assert config.root_ref == 'master'
assert config.show_banner is False
assert config.sort == tuple()
assert config.whitelist_branches == tuple()
assert config.whitelist_tags == tuple()
if push:
assert config.grm_exclude == tuple()
assert config.push_remote == 'origin'
@pytest.mark.parametrize('push', [False, True])
def test_sub_command_options_other(push):
"""Test additional option values for all sub commands.
:param bool push: Run push sub command instead of build.
"""
if push:
args = ['push', 'docs', 'gh-pages', '.']
else:
args = ['build', 'docs', join('docs', '_build', 'html')]
# Defined.
args += ['-p', 'tags', '-s', 'semver', '-s', 'time']
if push:
        args += ['-e', 'one', '-e', 'two', '-e', 'three', '-e', 'four']
result = CliRunner().invoke(cli, args)
config = result.exception.args[0]
assert config.priority == 'tags'
assert config.sort == ('semver', 'time')
if push:
assert config.grm_exclude == ('one', 'two', 'three', 'four')
|
StarcoderdataPython
|
1859699
|
<filename>python3/42.trapping-rain-water.201502705.ac.py
#
# @lc app=leetcode id=42 lang=python3
#
# [42] Trapping Rain Water
#
# https://leetcode.com/problems/trapping-rain-water/description/
#
# algorithms
# Hard (47.69%)
# Likes: 6376
# Dislikes: 111
# Total Accepted: 476.5K
# Total Submissions: 998.8K
# Testcase Example: '[0,1,0,2,1,0,1,3,2,1,2,1]'
#
# Given n non-negative integers representing an elevation map where the width
# of each bar is 1, compute how much water it is able to trap after raining.
#
#
# The above elevation map is represented by array [0,1,0,2,1,0,1,3,2,1,2,1]. In
# this case, 6 units of rain water (blue section) are being trapped. Thanks
# Marcos for contributing this image!
#
# Example:
#
#
# Input: [0,1,0,2,1,0,1,3,2,1,2,1]
# Output: 6
#
#
# @lc code=start
class Solution:
def trap(self, height):
"""
:type height: List[int]
:rtype: int
"""
l: int = 0
r: int = len(height) - 1
l_max: int = 0
r_max: int = len(height) - 1
water: int = 0
while l < r:
if height[l] < height[r]:
if height[l_max] <= height[l]:
l_max = l
else:
water += height[l_max] - height[l]
l += 1
else:
if height[r_max] <= height[r]:
r_max = r
else:
water += height[r_max] - height[r]
r -= 1
return water
# @lc code=end
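# Quick sanity check (illustrative addition, not part of the LeetCode submission):
# runs the two-pointer solution on the example from the problem statement above.
if __name__ == "__main__":
    assert Solution().trap([0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1]) == 6
    print(Solution().trap([0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1]))  # expected: 6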
|
StarcoderdataPython
|
6498211
|
# This is taken from:
# https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5
# i.e.
# Detectron2 Tutorial.ipynb
from detectron2.utils.visualizer import ColorMode
from detectron2.utils.visualizer import Visualizer
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.engine import DefaultTrainer
from detectron2 import model_zoo
from detectron2.data import DatasetCatalog, MetadataCatalog, Metadata
import random
import time
import cv2
# download, decompress the data
# !wget https://github.com/matterport/Mask_RCNN/releases/download/v2.1/balloon_dataset.zip
# !unzip balloon_dataset.zip > /dev/null
# if your dataset is in COCO format, this cell can be replaced by the following three lines:
# from detectron2.data.datasets import register_coco_instances
# register_coco_instances("my_dataset_train", {}, "json_annotation_train.json", "path/to/image/dir")
# register_coco_instances("my_dataset_val", {}, "json_annotation_val.json", "path/to/image/dir")
import os
import numpy as np
import json
from detectron2.structures import BoxMode
def get_balloon_dicts(img_dir: str) -> list[dict]:
json_file = os.path.join(img_dir, "via_region_data.json")
with open(json_file) as f:
imgs_anns = json.load(f)
dataset_dicts = []
for idx, v in enumerate(imgs_anns.values()):
record = {}
filename = os.path.join(img_dir, v["filename"])
height, width = cv2.imread(filename).shape[:2]
record["file_name"] = filename
record["image_id"] = idx
record["height"] = height
record["width"] = width
annos = v["regions"]
objs = []
for _, anno in annos.items():
assert not anno["region_attributes"]
anno = anno["shape_attributes"]
px = anno["all_points_x"]
py = anno["all_points_y"]
poly = [(x + 0.5, y + 0.5) for x, y in zip(px, py)]
poly = [p for x in poly for p in x]
obj = {
"bbox": [np.min(px), np.min(py), np.max(px), np.max(py)],
"bbox_mode": BoxMode.XYXY_ABS,
"segmentation": [poly],
"category_id": 0,
}
objs.append(obj)
record["annotations"] = objs
dataset_dicts.append(record)
return dataset_dicts
def register_dataset() -> Metadata:
if not os.path.isdir("balloon/"):
print("download and decompress the data first:")
print(
"https://github.com/matterport/Mask_RCNN/releases/download/v2.1/balloon_dataset.zip"
)
exit()
# registering the dataset in detectron:
print("registering dataset...")
for d in ["train", "val"]:
DatasetCatalog.register(
"balloon_" + d, lambda d=d: get_balloon_dicts("balloon/" + d)
)
MetadataCatalog.get("balloon_" + d).set(thing_classes=["balloon"])
return MetadataCatalog.get("balloon_train")
# training:
def train():
print("preparing training...")
cfg = get_cfg()
cfg.merge_from_file(
model_zoo.get_config_file(
"COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
)
)
cfg.DATASETS.TRAIN = ("balloon_train",)
cfg.DATASETS.TEST = ()
cfg.DATALOADER.NUM_WORKERS = 2
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
"COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
) # Let training initialize from model zoo
cfg.SOLVER.IMS_PER_BATCH = 2
cfg.SOLVER.BASE_LR = 0.00025 # pick a good LR
# 300 iterations seems good enough for this toy dataset;
# you may need to train longer for a practical dataset
cfg.SOLVER.MAX_ITER = 300
# faster, and good enough for this toy dataset (default: 512)
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1 # only has one class (ballon)
os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
trainer = DefaultTrainer(cfg)
trainer.resume_or_load(resume=False)
print("this is a good moment to launch Tensorboard..., namely:")
print(f"tensorboard --logdir {cfg.OUTPUT_DIR}")
print("starting in 5 s ...")
time.sleep(5)
trainer.train()
# testing:
def cv2_imshow(image):
cv2.imshow("result", image)
res = cv2.waitKey(0)
if res & 0xFF == ord("q"):
exit()
def testing():
print("testing...")
cfg = get_cfg()
cfg.merge_from_file(
model_zoo.get_config_file(
"COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
)
)
balloon_metadata = MetadataCatalog.get("balloon_train")
cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1 # now needed again...
# set the testing threshold for this model
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7
cfg.DATASETS.TEST = ("balloon_val",)
predictor = DefaultPredictor(cfg)
dataset_dicts = get_balloon_dicts("balloon/val")
for d in random.sample(dataset_dicts, 3):
im = cv2.imread(d["file_name"])
outputs = predictor(im)
v = Visualizer(
im[:, :, ::-1],
metadata=balloon_metadata,
scale=0.8,
instance_mode=ColorMode.IMAGE_BW, # remove the colors of unsegmented pixels
)
v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
try:
cv2_imshow(v.get_image()[:, :, ::-1])
except: # noqa
print(d["file_name"])
print(outputs["instances"].to("cpu"))
def main():
register_dataset()
train()
testing()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
391729
|
"""A layered graph, backed by redis.
Licensed under the 3-clause BSD License:
Copyright (c) 2013, <NAME> (<EMAIL>)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL NEERAJ KUMAR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os, sys, time
import redis
from nkpylib.redisutils import *
import nkpylib.utils as utils
import numpy as np
from line_profiler import LineProfiler
profile = LineProfiler()
GENERIC_NODE_KEY_FMT = 'nodes:%(layername)s:%(id)s'
def spark(vals, f=sys.stdout):
"""Custom spark that wraps to 201 and scales by 1000"""
return utils.spark(vals, wrap=201, scale=1000.0, f=f)
class RedisLayeredGraph(object):
"""A layered graph backed by redis.
Connections should only be within the same level or adjoining level.
However, you can have multiple "layers" at the same "level".
"""
def __init__(self, db=None, reset=0, symmetric=1, prefix='', callbacks=[], **kw):
"""Initializes ourself.
You can either pass in a redis connection in 'db', or give kw params.
They should include 'host', 'port', 'db', and 'password'.
Optionally, they can include 'charset' and 'socket_timeout'
If you set reset=1, then it will reset the database.
You can also add a prefix, which will be used whenever we need to save files.
"""
self.db = db if db else redis.Redis(**kw)
self.symmetric = symmetric
if prefix:
self.prefix = prefix
else:
# try to read it from the database
self.prefix = self.db.get('prefix')
self.edgematrices = {}
self.layerkeys = {}
self.dirty = {}
self.callbacks = callbacks[:]
if reset:
print 'RESETTING DATABASE!!! Press control-c to cancel, within 5 seconds!'
time.sleep(5)
self.db.flushdb()
self.db.set('prefix', self.prefix)
@memoize
def layerkey(self, layername):
"""Returns the key for a given layer name"""
return 'layers:%s' % (layername)
def getLayer(self, layername):
"""Returns a dict of the given layer"""
ret = self.db.hgetall(self.layerkey(layername))
ret = specializeDict(ret)
return ret
def layers(self):
"""Returns a list of (layername, level) pairs, in sorted order"""
layers = [(name, int(level)) for name, level in self.db.hgetall('layers').items()]
return sorted(layers, key=lambda pair: pair[1])
@memoize
def nodekeyfmt(self, layername):
"""Returns the node key format for the given layer"""
return self.db.hget(self.layerkey(layername), 'nodekeyfmt')
def nodekey(self, nodeprimary, layername):
"""Returns the node key given the primary id for the node and the layer.
If this does not exist, then returns None"""
id = self.db.hget('layers:%s:nodes' % (layername), nodeprimary)
if not id: return None
return self.nodekeyfmt(layername) % dict(layername=layername, id=id)
def nodekeyFromID(self, nodeid, layername):
"""Returns the node key given the node id and the layer"""
return self.nodekeyfmt(layername) % dict(layername=layername, id=nodeid)
@memoize
def splitNodekey(self, nodekey):
"""Splits a node key into (layername, node id)"""
_, layername, nodeid = nodekey.split(':')
return (layername, nodeid)
def nodes(self, layername):
"""Returns a list of nodekeys for a given layer"""
ids = sorted(map(int, self.db.hvals('layers:%s:nodes' % (layername))))
ret = [self.nodekeyFromID(id, layername) for id in ids]
return ret
def addcallback(self, callback, predfunc=None):
"""Adds the given node callback and optionally a predicate function.
The callback takes (nodekey, nodedict, layer, action), where:
nodekey - The key to the node
nodedict - The dict of elements in the node
layer - The layer the node is in
action - The action that was performed on this node. One of:
create - Created for the first time
update - Update a node that already existed
init - Called on first init of the current process
The predicate takes the same args and if False, the callback is not called.
With no predicate, the callback is always executed.
"""
self.callbacks.append((callback, predfunc))
def runcallbacks(self, nodekey, nodedict, layer, action):
"""Runs all applicable callbacks for the given args.
This does the appropriate checking with the predicate functions.
"""
for callback, predfunc in self.callbacks:
if not predfunc(nodekey, nodedict, layer, action): continue
callback(nodekey, nodedict, layer, action)
def addLayer(self, name, level, nodekeyfmt=GENERIC_NODE_KEY_FMT):
"""Creates a new layer with given name and level.
Optionally specify a fmt that generates nodekeys given a dict containing (id, layername).
Returns the layer name.
"""
# check if we already added this level
oldl = self.db.hget('layers', name)
if oldl is not None:
# we did, so check the level
if int(oldl) != level:
raise ValueError('Layer "%s" already exists at level %s, not %d!' % (name, oldl, level))
# level matches, so just return the name of the layer
#print 'Layer "%s" already existed' % (name)
return name
key = self.layerkey(name)
p = self.db.pipeline()
l = dict(name=name, level=level, key=key, nodekeyfmt=nodekeyfmt)
p.hmset(key, l)
p.hset('layers', name, level)
p.execute()
#print 'Added layer "%s" at level %s: %s' % (name, level, l)
return name
def addNode(self, layername, primary=None, checkexisting=1, pipeline=None, added=None, addindicator=None, **nodekw):
"""Adds a node to the given layer.
You can optionally pass in a "primary key", which can be used
to lookup the node id in O(1) time. Else, this becomes the node id.
If checkexisting is true (default), then it first checks
to see if a node with that primary exists. If so, it sets the given
nodekw on that existing node, and doesn't create a new id, etc.
If added is given, then checkexisting also tries to lookup the
'primary' value in the given dict, which should return a key.
If the key was not found, then the primary->key mapping is added to it.
The actual data to add should be given as key-value pairs in nodekw.
Note that the values should be serializable to fit in a redis hash.
Also, the data is ALWAYS set, even if 'checkexisting' was true.
If pipeline is given, then all db modifications are done within the
given pipeline. Else, creates a new pipeline for the duration of this
function, and then executes it at the end.
If addindicator is true, then it's assumed to be an array with at least 1 element.
This element is set to 1 if we actually added the node, else 0.
Returns the node keyname."""
# see if this key exists
key = None
if checkexisting and primary:
try:
primary = str(primary)
except Exception:
pass
if isinstance(added, dict) and primary in added:
# first check the given 'added' dict, if it exists
key = added[primary]
else:
# now check the database
key = self.nodekey(primary, layername=layername)
if not key or not self.db.exists(key):
#log(' Key %s did not exist!' % (key,))
key = None
p = pipeline if pipeline else self.db.pipeline()
if addindicator:
addindicator[0] = 0
# if we don't have a key yet, generate one
action = 'update'
if not key:
action = 'create'
lkey = self.layerkey(layername)
keyfmt = self.nodekeyfmt(layername)
id = self.db.incr('counters:layernodes:%s' % (layername))
key = keyfmt % dict(id=id, layername=layername)
# some bookkeeping
p.hincrby(lkey, 'nnodes', 1)
if not primary:
primary = id
try:
primary = str(primary)
except Exception: pass
#log('Actually adding node %s with primary %s to layer %s' % (key, primary, layername))
p.hset('layers:%(layername)s:nodes' % (locals()), primary, id)
# add to the 'added' cache of added keys
if isinstance(added, dict):
added[primary] = key
# mark layerkeys dirty for this layer
self.dirty['layerkeys-%s' % (layername)] = 1
# mark all edges connected to this layer as dirty
for l1, _ in self.layers():
for l2, _ in self.layers():
self.dirty['edgematrix-%s-%s' % (l1, l2)] = 1
# set the indicator
if addindicator:
addindicator[0] = 1
# also delete all flows and edgematrices
self.deletePickles()
# set the main kw for this node
p.hmset(key, nodekw)
if not pipeline:
p.execute()
# run callbacks
self.runcallbacks(key, nodekw, layername, action)
return key
def addScores(self, scorename, layername, hashkey=None, **kw):
"""Adds a zset for the given layername.
If you give a hashkey, then it initializes this using the given key
extracted from all nodes. Otherwise, it uses the kwargs to initialize it.
"""
if hashkey:
nodes = self.nodes(layername)
vals = pipefunc(self.db, nodes, 'hget', hashkey)
for n, v in zip(nodes, vals):
nodeid = n.rsplit(':', 1)[-1]
kw[nodeid] = v
if not kw: return
key = 'scores:%s:%s' % (layername, scorename)
p = self.db.pipeline()
p.zadd(key, **kw)
p.sadd('layers:%s:scores' % (layername), key)
p.execute()
def addEdges(self, edges, pipeline=None):
"""Adds edges, each of which is (nodekey1, nodekey2, weight).
This does various normalization and then adds relevant entries.
Returns a list of (edgekey, edgefield) entries.
Note that if self.symmetric is true, then adds the symmetric entries as well,
but will still return only as many return pairs as inputs.
Also, we will filter out any edges that connect a node to itself.
"""
p = pipeline if pipeline else self.db.pipeline()
ret = []
for nk1, nk2, w in edges:
if not nk1 or not nk2: continue
(l1, l2), (n1, n2) = zip(*[self.splitNodekey(nk) for nk in [nk1, nk2]])
if nk1 == nk2: continue
ekey1, efield1 = ('%s:edges:%s' % (nk1, l2), str(n2))
ret.append((ekey1, efield1))
p.zadd(ekey1, **{efield1:w})
p.hincrby('layeredges:%s' % (l1), l2, 1)
# mark this edgematrix as dirty
self.dirty['edgematrix-%s-%s' % (l1, l2)] = 1
if self.symmetric:
ekey2, efield2 = ('%s:edges:%s' % (nk2, l1), str(n1))
p.zadd(ekey2, **{efield2:w})
p.hincrby('layeredges:%s' % (l2), l1, 1)
# mark this edgematrix as dirty
self.dirty['edgematrix-%s-%s' % (l2, l1)] = 1
#print ' ', nk1, nk2, n1, n2, l1, l2, ekey1, efield1, ekey2, efield2, w
self.deletePickles()
if not pipeline:
p.execute()
return ret
def deletePickles(self):
"""Deletes all our pickles"""
from shutil import rmtree
rmtree(os.path.join(self.prefix, 'edgematrices'), ignore_errors=1)
rmtree(os.path.join(self.prefix, 'flows'), ignore_errors=1)
def deleteEdges(self, layer1, layer2, dosym=1):
"""Deletes edges from layer1 to layer2.
If self.symmetric and dosym, then also deletes edges the other way."""
p = self.db.pipeline()
l1keys = self.nodes(layer1)
for k in l1keys:
p.delete('%s:edges:%s' % (k, layer2))
self.dirty['edgematrix-%s-%s' % (layer1, layer2)] = 1
p.hdel('layeredges:%s' % (layer1), layer2)
p.execute()
if self.symmetric and dosym:
self.deleteEdges(layer2, layer1, dosym=0) # so that we don't keep iterating forever
def getEdges(self, nodekeys, valid=None, sort=1):
"""Gets edges from the given nodekeys, optionally limited to some layers.
Returns a dict mapping layer names to lists of results.
Each result list has the same length as 'nodekeys', and consists of
edge lists, which are (nodeid, weight) pairs.
If 'valid' is a string, then only returns edges that connect to that layer.
If 'valid' is a list, then only returns edges that connect to one of those layers.
if 'sort' is true (default), then each list is sorted by highest weight first.
All input nodekeys must be in the same layer."""
if not nodekeys: return []
# basic init and quick checks
layername, _ = self.splitNodekey(nodekeys[0])
elayers = self.db.hkeys('layeredges:%s' % (layername))
if valid:
if isinstance(valid, basestring): # single valid layer
elayers = [l for l in elayers if l==valid]
else: # list of layers
elayers = [l for l in elayers if l in valid]
if not elayers: return {}
ret = {}
for l in elayers:
edges = pipefunc(self.db, ['%s:edges:%s' % (nk, l) for nk in nodekeys], 'zrevrangebyscore', withscores=1, min=0.00001, max='inf')
assert len(edges) == len(nodekeys)
ret[l] = [[(int(k), float(v)) for k, v in e] for e in edges]
if sort:
for lst in ret[l]:
lst.sort(key=lambda pair: pair[1], reverse=1)
return ret
def summedEdges(self, keys, dstlayer):
"""Returns a summed list of edges out from the given key inputs.
Essentially one stage of a flow computation, but done without matrices.
The inputs are either a list of keys (assumed weight=1), a list of (key,score) pairs,
or a dict of key->weights. The edges to the given `dstlayer` are retrieved,
summed, and then multiplied by these scores.
The output is a dict of key->scores.
"""
from collections import defaultdict
if not keys: return {}
if isinstance(keys, dict): # already a dict
inputs = keys
else:
if isinstance(keys[0], basestring): # only keys
inputs = dict.fromkeys(keys, 1.0)
else: # (key,score) pairs
inputs = dict(keys)
ikeys = sorted(inputs)
edges = self.getEdges(ikeys, dstlayer, sort=0)[dstlayer]
ret = defaultdict(float)
for ikey, curedges in zip(ikeys, edges):
inscore = inputs[ikey]
#print ' %s : score %f, %d edges' % (ikey, inscore, len(curedges))
for nodeid, s in curedges:
ret[self.nodekeyFromID(nodeid, dstlayer)] += inscore * s
return dict(ret)
def recursiveFlow(self, keys, layers):
"""Repeated calls to summedEdges() with initial keys, going through many `layers`.
The outputs of one call are then fed to the next.
Returns a dict of key->scores at the last layer."""
if not keys: return {}
ret = keys
for dstlayer in layers:
ret = self.summedEdges(ret, dstlayer)
return ret
def updateLayerKeys(self, layername):
"""Updates the cached layer keys for the given layer"""
l = layername
dkey = 'layerkeys-%s' % (l)
if l not in self.layerkeys:
self.dirty[dkey] = 1
if self.dirty.get(dkey, 0):
#nnodes = self.db.hlen('layers:%s:nodes' % (l)) + 1
try:
nnodes = max(map(int, self.db.hvals('layers:%s:nodes' % (l)))) + 1
except Exception:
nnodes = 0
self.layerkeys[l] = [self.nodekeyFromID(id, l) for id in range(nnodes)]
#log(' Precached %d layerkeys for layer %s' % (len(self.layerkeys[l]), l))
if dkey in self.dirty:
del self.dirty[dkey]
#@timed
def getEdgeMatrix(self, srclayer, dstlayer, srckeys=None, dstkeys=None, usesparse=1):
"""Computes a matrix of weights that transforms from srclayer to dstlayer.
i.e., you have a vector V_s of values from srclayer, and this function
returns M_ds. Then you can do V_d = np.dot(M_ds, V_s).
Returns (M_ds, list of srclayer keys, list of dstlayer keys).
You can optionally pass in lists of srckeys and dstkeys.
If so, then only fills in values that exist in these lists.
If src and dst layers are the same, then initializes the matrix with identity.
Otherwise, initializes the matrix with 0s.
If usesparse is true (default), then uses sparse matrices. Notes:
- we initialize data using lil_matrix, because it's fastest to modify
- we convert to csr_matrix at the end, because that's fastest to multiply
"""
import scipy as sp
import scipy.sparse as sparse
from scipy.sparse import lil_matrix as sparsemat
times = [time.time()]
# init keys and matrix
if not srckeys:
srckeys = self.nodes(srclayer)
if not dstkeys:
dstkeys = self.nodes(dstlayer)
dstrows = dict((int(self.splitNodekey(dk)[1]), i) for i, dk in enumerate(dstkeys))
times.append(time.time())
ns, nd = len(srckeys), len(dstkeys)
assert ns > 0 and nd > 0
if srclayer == dstlayer:
if usesparse:
M = sparsemat((nd,nd))
M.setdiag(np.ones(nd))
else:
M = np.eye(nd)
else:
if usesparse:
M = sparsemat((nd,ns))
else:
M = np.zeros((nd, ns))
times.append(time.time())
# fill in the matrix, only if we have something to fill
if self.db.hexists('layeredges:%s' % (srclayer), dstlayer):
edges = self.getEdges(srckeys, valid=dstlayer, sort=0)[dstlayer]
for col, row in enumerate(edges):
for nodeid, w in row:
if nodeid not in dstrows: continue
row = dstrows[nodeid]
M[row, col] = w
times.append(time.time())
nz = len(M.nonzero()[0])
nels = M.shape[0]*M.shape[1]
if nz == 0:
M = None
else:
if ns == nd:
# check if it's identity
if usesparse:
eye = sparsemat((nd,nd))
eye.setdiag(np.ones(nd))
else:
eye = np.eye(nd)
eye -= M
iseye = (len(eye.nonzero()[0]) == 0)
if iseye:
M = None
else:
iseye = 0
log(' Matrix from %s (%d) to %s (%d) had %d/%d nonzeros (%0.5f%%) (iseye=%s)' % (srclayer, len(srckeys), dstlayer, len(dstkeys), nz, nels, nz*100.0/float(nels), iseye))
log(' Matrix took: %s' % (getTimeDiffs(times)))
if sparse.issparse(M):
M = M.tocsr()
return (M, srckeys, dstkeys)
def cachedEdgeMatrix(self, l1, l2):
"""Updates the cached edge matrix between the given layers (if needed).
Assumes associated layerkeys are already up-to-date.
Returns the matrix."""
import cPickle as pickle
#FIXME if things are symmetric, then only compute one half of the symmetries, and generate the others on-the-fly
dkey = 'edgematrix-%s-%s' % (l1, l2)
picklename = os.path.join(self.prefix, 'edgematrices', dkey+'.pickle')
try:
os.makedirs(os.path.dirname(picklename))
except OSError:
pass
if (l1, l2) not in self.edgematrices:
self.dirty[dkey] = 1
if self.dirty.get(dkey, 0): #FIXME the pickles are always assumed to be up-to-date right now!
try:
M = pickle.load(open(picklename))
#log(' Loaded %s of size %s' % (picklename, M.shape if M is not None else 0))
except Exception, e:
M, _, _ = self.getEdgeMatrix(l1, l2, self.layerkeys[l1], self.layerkeys[l2])
try:
os.makedirs(os.path.dirname(picklename))
except Exception: pass
#pickle.dump(M, open(picklename, 'wb'), -1)
#log(' Due to exception "%s", saved matrix of shape %s, with pickle size %d to "%s"' % (e, M.shape if M is not None else 0, os.stat(picklename).st_size, picklename))
self.edgematrices[(l1, l2)] = M #FIXME experiment to not use all this memory
#log(' Precached edgematrix %s x %s from layer %s to %s' % (M.shape[0], M.shape[1], l1, l2))
if dkey in self.dirty:
del self.dirty[dkey]
else:
M = self.edgematrices[(l1, l2)]
if 0 and l1 != l2 and M is not None: #debugging
log('Got M of type %s' % (type(M),))
import array
scores = array.array('f', [])
import numpy.random as random
for i in range(M.shape[1]):
scores.append(random.random())
log('Got %d scores: %s' % (len(scores), scores[:5]))
try:
t1 = time.time()
v = M.dot(scores)
t2 = time.time()
except Exception:
log('in exc')
t1 = time.time()
v = np.dot(M, scores)
t2 = time.time()
M = M.todense()
t3 = time.time()
v1 = np.dot(M, scores)
t4 = time.time()
log('Got %d in v, in %0.4f secs, compared to %0.4fs for dense: %s, %s, %s' % (len(v), t2-t1, t4-t3, v[:5], v1[:5], v==v1))
sys.exit()
return M
def nodeweights(self, layername, lkeys=None):
"""Returns the nodeweights for the given layer.
If lkeys is given, then the weights are returned in that order.
Otherwise, returns weights for all nodes in this layer, as returned by nodes()"""
if not lkeys:
lkeys = self.nodes(layername)
weights = np.ones(len(lkeys))
key = 'layers:%s:weights' % (layername)
if not self.db.exists(key): return weights
ids = [self.splitNodekey(k)[1] for k in lkeys]
for i, w in enumerate(self.db.hmget(key, ids)):
if w is None: continue
weights[i] = float(w)
#log('For layer %s, got %s' % (layername, zip(lkeys, ids, weights)))
#log('For layer %s, with %d lkeys, got %d weights: %s' % (layername, len(lkeys), len(weights), weights))
return weights
def updateCache(self):
"""Updates our cache"""
# update our list of layerkeys as needed
for l, _ in self.layers():
self.updateLayerKeys(l)
def createFlow(self, *args, **kw):
"""Creates a flow object.
If args and/or kw are given, then calls flow.add() with those params.
Note that for now, we precache layerkeys and edge matrices here."""
#log('In create flow, got dirty: %s' % (self.dirty,))
self.updateCache()
f = RGLFlow(self)
if args or kw:
f.add(*args, **kw)
return f
def updateIfDirty(self, dkey, func, *args, **kw):
"""Runs the given func if the dirty bit is set for the given key"""
if dkey in self.dirty:
func(*args, **kw)
del self.dirty[dkey]
else:
log('Got precached dkey %s' % (dkey))
class RGLFlow(object):
"""A flow object for a given RedisLayeredGraph (RGL)"""
def __init__(self, g, id=None, tempflow=0, debugfmt='str'):
"""Initialize this flow object from the given graph.
If an id is given, then tries to load the values from disk.
If tempflow is true (default false), then save() and load() become no-ops.
The debugfmt determines how to print out ourself:
'spark': using spark lines
'str': using get()
"""
self.g = g
self.db = g.db
self.scores = {}
self.tempflow = tempflow
self.debugfmt = debugfmt
if id:
# load from disk
self.id = id
try:
self.load()
except Exception:
# could not load, so just reset
self.reset()
else:
# create new id and reset
self.newid()
self.reset()
def reset(self, *layernames):
"""Resets the score arrays.
Optionally, you can give a list of layernames to reset.
Otherwise, it resets all layers."""
# update the cached list of layerkeys as needed
for lname, level in self.g.layers():
if layernames and lname not in layernames: continue
nnodes = len(self.g.layerkeys[lname])
a = self.scores[lname] = np.zeros(nnodes)
#print 'Reset scores for layer %s (level %d) with %d nodes' % (lname, level, len(a))
self.save()
def binop(self, other, binfunc):
"""Base function for binary operators.
Does all the necessary checks, and then calls the binfunc(v1, v2) to get the output.
'other' can be either another flow, or a scalar."""
ret = RGLFlow(g=self.g, debugfmt=self.debugfmt, tempflow=1)
assert sorted(self.scores) == sorted(ret.scores)
if isinstance(other, RGLFlow):
# combine two flows
assert self.g == other.g
assert sorted(self.scores) == sorted(other.scores)
for layer in self.scores:
s1, s2, out = self.scores[layer], other.scores[layer], ret.scores[layer]
assert len(s1) == len(s2) == len(out)
ret.scores[layer] = binfunc(s1, s2)
elif isinstance(other, (float, long, int)):
# apply the given scalar to this flow
for layer in self.scores:
s, out = self.scores[layer], ret.scores[layer]
assert len(s) == len(out)
ret.scores[layer] = binfunc(s, other)
else:
raise NotImplementedError('cannot handle type %s for RGLFlow.binop()' % (type(other)))
return ret
def __add__(self, other):
"""Adds 'other' rgl flow to this one and returns new RGLFlow"""
return self.binop(other, binfunc=lambda v1, v2: v1+v2)
def __radd__(self, other):
"""Addition with flipped order"""
return self.__add__(other)
def __iadd__(self, other):
"""Runs the normal __add__, and then resets our variables"""
temp = self+other
self.scores = temp.scores
self.save()
return self
def __sub__(self, other):
"""Subtracts 'other' flow from this one and returns the result.
Note that values are clamped to remain positive."""
def binfunc(v1, v2):
c = v1-v2
c[c < 0] = 0.0
return c
return self.binop(other, binfunc=binfunc)
def __mul__(self, other):
"""Multiplies two flows, or this flow and a scalar"""
return self.binop(other, binfunc=lambda v1, v2: v1*v2)
def __rmul__(self, other):
"""Multiplication with flipped order"""
return self.__mul__(other)
def __eq__(self, other):
"""Returns true if our layers are the same and the values are (almost) the same."""
if sorted(self.scores) != sorted(other.scores): return False
for l in self.scores:
s1 = self.scores[l]
s2 = other.scores[l]
if not np.allclose(s1, s2): return False
return True
def newid(self):
"""Changes our id"""
import uuid
self.id = uuid.uuid1().hex
def save(self):
"""Saves ourself to disk"""
import cPickle as pickle
#from scipy.sparse import lil_matrix as sparsemat
from scipy.sparse import csr_matrix as sparsemat
#M = M.tocsr()
if self.tempflow: return
fname = os.path.join(self.g.prefix, 'flows', self.id+'.pickle')
try:
os.makedirs(os.path.dirname(fname))
except OSError: pass
if 0:
todump = {}
for k in self.scores:
todump[k] = sparsemat(self.scores[k])
else:
todump = self.scores
pickle.dump(todump, open(fname, 'wb'), -1)
#log('Saving flow with id %s' % (self.id))
def load(self):
"""Loads ourself from disk. Our id must be set"""
import cPickle as pickle
fname = os.path.join(self.g.prefix, 'flows', self.id+'.pickle')
self.scores = pickle.load(open(fname))
# check if the lengths of these scores match RLG's list of layerkeys
for l in self.g.layerkeys:
alen, blen = len(self.g.layerkeys[l]), len(self.scores.get(l,[]))
if alen != blen:
log('### checking l "%s": exists %s, lens %d vs %d' % (l, l in self.scores, alen, blen))
for l in self.scores:
if l not in self.g.layerkeys:
log('### layer "%s" from flow %s does not exist in layerkeys' % (l, self.id))
pass#TODO HERE
#data = pickle.load(open(fname))
#self.scores = dict((k, v.todense()) for k, v in data.iteritems())
#log('Loading flow with id %s' % (self.id))
def __str__(self):
"""Returns our values as a string"""
from StringIO import StringIO
s = StringIO()
print >>s, 'Flow %s, tempflow %s, debugfmt %s' % (self.id, self.tempflow, self.debugfmt)
for lname, level in self.g.layers():
sc = self.get(lname)
if self.debugfmt == 'spark' or sc:
print >>s, '%s (%d nodes, %d nz):' % (lname, len(self.scores[lname]), len(sc))
s.flush()
if self.debugfmt == 'spark':
spark(self.scores[lname], f=s)
elif self.debugfmt == 'str':
if sc:
print >>s, sc
return s.getvalue()
def incr(self, op='add', **scores):
"""Increments scores, given as a mapping of nodekey=increment.
Increment type depends on 'op':
'add': add to existing [default]
'mul': multiply to existing
"""
for nk, incr in scores.iteritems():
lname, nodeid = self.g.splitNodekey(nk)
a = self.scores[lname]
if int(nodeid) >= len(a): continue
if op == 'add':
try:
a[int(nodeid)] += incr
except Exception:
log('Got nk %s, incr %s, lname %s, nodeid %s, a len %d, lkeys %d: %s' % (nk, incr, lname, nodeid, len(a), len(self.g.layerkeys[lname]), self.g.layerkeys[lname][-5:]))
raise
elif op == 'mul':
a[int(nodeid)] *= incr
else:
raise NotImplementedError()
@timed
#@profile
def add(self, dir, destlevel, layerfuncs=None, debug=0, **flows):
"""Adds the given flows.
The scores are calculated going in the given dir (+1 or -1).
Computation proceeds until the given destination level.
Each flow should be given as nodekey=score.
You can optionally pass in a dict of layerfuncs.
These are applied at the given layer as:
self.scores[layer] = layerfuncs[layer](self.scores[layer])
Returns self.
"""
import numpy as np
import operator as op
#if not flows: return
mylog = lambda s: log(s, funcindent=-1)
if debug > 0:
mylog('Adding %d flows in dir %d to destination level %d' % (len(flows), dir, destlevel))
# basic init
g = self.g
db = self.db
layers = dict((layer, g.getLayer(layer)) for layer, level in g.layers())
layerOrder = sorted(layers, key=lambda l: layers[l]['level'], reverse=(dir<0))
if debug > 0:
mylog('Layers: %s' % (layers,))
mylog('Layer order: %s' % (layerOrder,))
# add all todo flows
if flows:
self.incr(**flows)
# start accumulating flows
for l in layerOrder:
curlevel = layers[l]['level']
if dir > 0 and curlevel > destlevel: break
if dir < 0 and curlevel < destlevel: break
if debug > 0:
mylog('Adding flows to layer %s (level %d)' % (l, curlevel))
lkeys = g.layerkeys[l] # these are guaranteed to line up with our scores array.
# quick check for non-zero elements
nz = len(self.scores[l].nonzero()[0])
if nz == 0: continue
if 0: #FIXME temporarily disabled
# get the self-edge matrix
if debug > 1:
mylog(' Running local flows')
#M, lkeys2, _ = g.getEdgeMatrix(l, l, lkeys, lkeys)
M = g.cachedEdgeMatrix(l, l)
#assert lkeys == _ == lkeys2
#print M, M.shape, len(lkeys), lkeys[:5]
#FIXME see if we run into any issues due to M being None for identity
if M is not None:
# multiply scores by this matrix
try:
v = M.dot(self.scores[l])
except Exception, e:
v = np.dot(M, self.scores[l])
mylog(' ***** Hit exception %s: %s vs %s' % (e, M.shape, len(self.scores[l])))
mylog('%d Layerkeys: %s, %s' % (len(g.layerkeys[l]), g.layerkeys[l][:3], g.layerkeys[l][-3:]))
sys.exit()
#assert len(v) == len(lkeys) == M.shape[0] == M.shape[1] == len(self.scores[l])
assert len(v) == len(self.scores[l])
self.scores[l] = v
#print len(v), v, v.max(), v.argmax(), v.sum()
# at this point, multiply by our weights
self.scores[l] *= g.nodeweights(l, lkeys)
# now apply the layerfunc, if we have one
if layerfuncs and l in layerfuncs and layerfuncs[l]:
self.scores[l] = layerfuncs[l](self.scores[l])
# another quick check for nonzeros
nz = len(self.scores[l].nonzero()[0])
if nz == 0: continue
# now run flows from this layer to all others in dir
if debug > 1:
mylog(' Running neighboring flows')
for l2 in db.hkeys('layeredges:%s' % (l)):
if l2 == l: continue
l2level = layers[l2]['level']
if dir > 0 and (l2level > destlevel or l2level < curlevel): continue
if dir < 0 and (l2level < destlevel or l2level > curlevel): continue
l2keys = g.layerkeys[l2]
if debug > 2:
mylog(' Neighboring flow from %s (%d) to %s (%d), dir %s, destlevel %s' % (l, curlevel, l2, l2level, dir, destlevel))
# get the edge matrix
#M, _, _ = g.getEdgeMatrix(l, l2, lkeys, l2keys)
M = g.cachedEdgeMatrix(l, l2) #TODO most time spent here
if M is not None:
#print M, M.shape, len(l2keys), l2keys[:5]
# multiply scores by this matrix to get dst scores
try:
v = M.dot(self.scores[l])
except Exception, e:
v = np.dot(M, self.scores[l])
log(' **** In exception: %s' % (e,))
raise
assert len(v) == len(self.scores[l2])
# add these scores to existing scores at that level
self.scores[l2] += v
# at this point, multiply by the weights in the 2nd layer
#self.scores[l2] *= g.nodeweights(l2, l2keys) #FIXME I think this will cause a double-weighting
self.save()
return self
def get(self, layername, thresh=0.0, withscores=1, tokeys=0):
"""Returns (nodeid, score) pairs from the given layer, where score > thresh.
Results are sorted from high score to low.
If withscores is true (default) the returns scores as well.
If tokeys is true, then maps ids to keys.
"""
a = self.scores[layername]
#ret = [(i, score) for i, score in enumerate(a) if score > thresh]
#ret.sort(key=lambda pair: pair[1], reverse=1)
# only process rest if we have any values above the threshold
if not np.any(a > thresh): return []
inds = np.argsort(a)[::-1]
scores = a[inds]
scores = scores[scores > thresh]
if tokeys:
inds = [self.g.nodekeyFromID(id, layername) for id in inds]
if withscores:
ret = zip(inds, scores)
else:
ret = inds[:len(scores)]
return ret
def outliers(self, layers=None):
"""Returns the "outliers" amongst the given layers (or all if None)."""
if not layers:
layers = self.scores.keys()
ret = []
for l in layers:
scores = [(self.g.nodekeyFromID(id, l), score) for id, score in self.get(l)]
if not scores: continue
nk, s = scores[0]
oscore = 1.0 if len(scores) == 1 else 1.0-(scores[1][1]/s)
if oscore == 0: continue
#print l, scores, nk, s, oscore
ret.append((nk, oscore, s))
ret.sort(key=lambda r: (r[1], r[2]), reverse=1)
return ret
@classmethod
def combine(cls, tocomb, op='add', **kw):
"""Combines a list of (factor, flow) flows into one:
ret = op(factor*flow for factor, flow in tocomb)
Where 'op' is one of:
'add': adds flows
'mul': multiplies flows
If you given any other keywords, they are used in the initialization.
Returns None on error.
"""
if not tocomb: return None
first = tocomb[0][1]
defkw = dict(tempflow=1, debugfmt=first.debugfmt)
defkw.update(kw)
f = RGLFlow(first.g, **defkw)
for layer, a in f.scores.iteritems():
if op == 'mul': # re-initialize the layer to be ones if we're multiplying
a += 1.0
for fac, flow in tocomb:
if op == 'add':
a += fac*flow.scores[layer]
elif op == 'mul':
a *= fac*flow.scores[layer]
# clamp back to positive
a[a < 0] = 0.0
f.save()
return f
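# Minimal usage sketch (illustrative only; assumes a reachable redis server and
# relies solely on the API documented above -- addLayer/addNode/addEdges/createFlow):
#
#   g = RedisLayeredGraph(host='localhost', port=6379, db=0, prefix='/tmp/rlg')
#   g.addLayer('words', level=0)
#   g.addLayer('docs', level=1)
#   w = g.addNode('words', primary='hello')
#   d = g.addNode('docs', primary='doc1')
#   g.addEdges([(w, d, 1.0)])               # symmetric edges are added by default
#   flow = g.createFlow(1, 1, **{w: 1.0})   # dir=+1, destlevel=1: push scores upward
#   print flow.get('docs', tokeys=1)        # -> [(nodekey, score), ...]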
|
StarcoderdataPython
|
6465256
|
<reponame>coinForRich/coin-for-rich
# This module contains common number helpers
from decimal import Decimal
from typing import Union
def round_decimal(
number: Union[float, int, Decimal, str],
n_decimals: int=2
) -> Union[Decimal, None]:
'''
Rounds a `number` to `n_decimals` decimals
If number is None, returns None
:params:
`number`: float, int or Decimal type or str representing float
`n_decimals`: number of decimals
'''
if number is None:
return None
return round(Decimal(number), n_decimals)
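# Illustrative examples (added for clarity; not part of the original module).
# Decimal rounding uses ROUND_HALF_EVEN ("banker's rounding") by default.
if __name__ == "__main__":
    assert round_decimal("3.14159") == Decimal("3.14")
    assert round_decimal(2.5, n_decimals=0) == Decimal("2")
    assert round_decimal(None) is None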
|
StarcoderdataPython
|
92835
|
# -*- coding:utf-8 -*-
"""Sample training code
"""
import numpy as np
import pandas as pd
import argparse
import torch as th
import torch.nn as nn
from sch_qm import SchNetModel
# from mgcn import MGCNModel
# from mpnn import MPNNModel
from torch.utils.data import DataLoader
from Alchemy_dataset_qm import TencentAlchemyDataset, batcher
# def dataset_split(file):
# delaney = pd.read_csv("delaney.csv")
# test_set = delaney.sample(frac=0.1, random_state=0)
# train_set = delaney.drop(test_set.index)
# test_set.to_csv("delaney_test.csv", index=False)
# train_set.to_csv("delaney_train.csv", index=False)
def train(model="sch_qm", epochs=80, device=th.device("cpu"), train_file='', test_file='', save=''):
print("start")
# train_dir = "./"
# train_file = dataset+"_train.csv"
alchemy_dataset = TencentAlchemyDataset()
alchemy_dataset.mode = "Train"
alchemy_dataset.transform = None
alchemy_dataset.file_path = train_file
alchemy_dataset._load()
test_dataset = TencentAlchemyDataset()
# test_dir = train_dir
# test_file = dataset+"_valid.csv"
test_dataset.mode = "Train"
test_dataset.transform = None
test_dataset.file_path = test_file
test_dataset._load()
alchemy_loader = DataLoader(
dataset=alchemy_dataset,
batch_size=500,
collate_fn=batcher(),
shuffle=False,
num_workers=0,
)
test_loader = DataLoader(
dataset=test_dataset,
batch_size=500,
collate_fn=batcher(),
shuffle=False,
num_workers=0,
)
if model == "sch_qm":
model = SchNetModel(norm=False, output_dim=1)
print(model)
# if model.name in ["MGCN", "SchNet"]:
# model.set_mean_std(alchemy_dataset.mean, alchemy_dataset.std, device)
model.to(device)
# print("test_dataset.mean= %s" % (alchemy_dataset.mean))
# print("test_dataset.std= %s" % (alchemy_dataset.std))
loss_fn = nn.MSELoss()
MAE_fn = nn.L1Loss()
optimizer = th.optim.Adam(model.parameters(), lr=0.001)
# scheduler = th.optim.lr_scheduler.StepLR(optimizer, 30, gamma=0.9, eta_min=0.0001, last_epoch=-1)
scheduler = th.optim.lr_scheduler.ReduceLROnPlateau(
optimizer, mode='min', factor=0.9, patience=10, threshold=0.00001, threshold_mode='rel', cooldown=0, min_lr=0.0001, eps=1e-08, verbose=False)
def print_res(label, res, op):
size = len(res)
for i in range(size):
line = "%s,%s\n" % (label[i][0], res[i][0])
op.writelines(line)
log = open(save+'/train_log.txt', 'w')
for epoch in range(epochs):
w_loss, w_mae = 0, 0
model.train()
for idx, batch in enumerate(alchemy_loader):
batch.graph.to(device)
batch.label = batch.label.to(device)
batch.graph0 = batch.graph0.to(device)
batch.graph1 = batch.graph1.to(device)
batch.graph2 = batch.graph2.to(device)
batch.graph3 = batch.graph3.to(device)
batch.graph4 = batch.graph4.to(device)
batch.graph5 = batch.graph5.to(device)
batch.graph6 = batch.graph6.to(device)
batch.graph7 = batch.graph7.to(device)
batch.feature = batch.feature.to(device)
res = model(batch.graph, batch.graph0, batch.graph1, batch.graph2,
batch.graph3, batch.graph4, batch.graph5, batch.graph6, batch.graph7, batch.feature)
loss = loss_fn(res, batch.label)
mae = MAE_fn(res, batch.label)
optimizer.zero_grad()
mae.backward()
optimizer.step()
w_mae += mae.detach().item()
w_loss += loss.detach().item()
scheduler.step(w_mae)
w_mae /= idx + 1
w_loss /= idx + 1
print("Epoch {:2d}, loss: {:.7f}, mae: {:.7f}".format(
epoch, w_loss, w_mae))
log.write("Epoch {:2d}, loss: {:.7f}, mae: {:.7f} \n".format(
epoch, w_loss, w_mae))
model.eval()
with th.no_grad():
if epoch % 20 == 0:
res_op = open(save+'/Train_res_'+str(epoch)+'.csv', 'w')
for jdx, batch in enumerate(alchemy_loader):
batch.graph.to(device)
batch.label = batch.label.to(device)
batch.graph0 = batch.graph0.to(device)
batch.graph1 = batch.graph1.to(device)
batch.graph2 = batch.graph2.to(device)
batch.graph3 = batch.graph3.to(device)
batch.graph4 = batch.graph4.to(device)
batch.graph5 = batch.graph5.to(device)
batch.graph6 = batch.graph6.to(device)
batch.graph7 = batch.graph7.to(device)
batch.feature = batch.feature.to(device)
res = model(batch.graph, batch.graph0, batch.graph1, batch.graph2,
batch.graph3, batch.graph4, batch.graph5, batch.graph6, batch.graph7, batch.feature)
l = batch.label.cpu().detach().numpy()
r = res.cpu().detach().numpy()
print_res(l, r, res_op)
res_op.close()
# res_pd = pd.DataFrame(res_op)
# res_pd.to_csv(save+'/Train_res_'+str(epoch)+'.csv')
# Validation section
val_loss, val_mae = 0, 0
for jdx, batch in enumerate(test_loader):
batch.graph.to(device)
batch.label = batch.label.to(device)
batch.graph0 = batch.graph0.to(device)
batch.graph1 = batch.graph1.to(device)
batch.graph2 = batch.graph2.to(device)
batch.graph3 = batch.graph3.to(device)
batch.graph4 = batch.graph4.to(device)
batch.graph5 = batch.graph5.to(device)
batch.graph6 = batch.graph6.to(device)
batch.graph7 = batch.graph7.to(device)
batch.feature = batch.feature.to(device)
res = model(batch.graph, batch.graph0, batch.graph1, batch.graph2,
batch.graph3, batch.graph4, batch.graph5, batch.graph6, batch.graph7, batch.feature)
loss = loss_fn(res, batch.label)
mae = MAE_fn(res, batch.label)
# optimizer.zero_grad()
# mae.backward()
# optimizer.step()
val_mae += mae.detach().item()
val_loss += loss.detach().item()
val_mae /= jdx + 1
val_loss /= jdx + 1
print(
"Epoch {:2d}, val_loss: {:.7f}, val_mae: {:.7f}".format(
epoch, val_loss, val_mae
))
log.write("Epoch {:2d}, loss: {:.7f}, mae: {:.7f}\n".format(
epoch, w_loss, w_mae))
if epoch % 20 == 0:
# op_file = open('save/'+str(epoch)+'re')
res_op = open(save+'/Test_res_'+str(epoch)+'.csv', 'w')
th.save(model.state_dict(), save+'/model_'+str(epoch))
for jdx, batch in enumerate(test_loader):
batch.graph.to(device)
batch.label = batch.label.to(device)
batch.graph0 = batch.graph0.to(device)
batch.graph1 = batch.graph1.to(device)
batch.graph2 = batch.graph2.to(device)
batch.graph3 = batch.graph3.to(device)
batch.graph4 = batch.graph4.to(device)
batch.graph5 = batch.graph5.to(device)
batch.graph6 = batch.graph6.to(device)
batch.graph7 = batch.graph7.to(device)
batch.feature = batch.feature.to(device)
res = model(batch.graph, batch.graph0, batch.graph1, batch.graph2,
batch.graph3, batch.graph4, batch.graph5, batch.graph6, batch.graph7, batch.feature)
l = batch.label.cpu().detach().numpy()
r = res.cpu().detach().numpy()
print_res(l, r, res_op)
res_op.close()
# res_op.append([batch.label.cpu().detach().numpy(), res.cpu().detach().numpy()])
# res_pd = pd.DataFrame(res_op)
# res_pd.to_csv(save+'/Test_res_'+str(epoch)+'.csv')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-M",
"--model",
help="model name (sch_qm)",
default="sch_qm")
parser.add_argument("--epochs", help="number of epochs", default=10000)
parser.add_argument("--train_file", help="dataset to train", default="")
parser.add_argument("--test_file", help="dataset to test", default="")
parser.add_argument(
"--save", help="directory to save the model", default="")
device = th.device('cuda:0' if th.cuda.is_available() else 'cpu')
args = parser.parse_args()
assert args.model in ["sch_qm"]
# dataset_split("delaney.csv")
train(args.model, int(args.epochs), device,
args.train_file, args.test_file, args.save)
|
StarcoderdataPython
|
11336408
|
<reponame>LucasCarioca/split<gh_stars>10-100
import os
from typing import Dict, List, Optional
DEFINED_ACTION_OUTPUTS_NUMBER = 100
def set_action_output(name: str, value: str):
print(f'::set-output name={name}::{value}')
def print_action_error(msg: str):
print(f'::error file={__name__}::{msg}')
def get_action_input(
name: str, required: bool = False, default: Optional[str] = None
) -> str:
v = os.environ.get(f'INPUT_{name.upper()}', '')
if v == '' and default:
v = default
if required and v == '':
print_action_error(f'input required and not supplied: {name}')
exit(1)
return v
def split(msg: str, sep: str = ' ', maxsplit: int = -1) -> List[str]:
results = msg.split(sep=sep, maxsplit=maxsplit)
if len(results) > DEFINED_ACTION_OUTPUTS_NUMBER:
results = msg.split(
sep=sep, maxsplit=DEFINED_ACTION_OUTPUTS_NUMBER - 1
)
return results
def to_outputs(results: List[str]) -> Dict[str, str]:
outputs = {
'length': str(len(results)),
}
for i, result in enumerate(results):
outputs[f'_{i}'] = result
return outputs
def main():
msg = get_action_input('msg', required=True)
separator = get_action_input('separator', required=False, default=' ')
maxsplit = int(get_action_input('maxsplit', required=False, default='-1'))
results = split(msg, separator, maxsplit)
outputs = to_outputs(results)
for k, v in outputs.items():
set_action_output(k, v)
if __name__ == '__main__':
main()
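# Illustrative invocation (comment only; assumes the usual GitHub Actions
# convention of passing inputs via INPUT_* environment variables and a
# hypothetical script name "split.py"):
#
#   INPUT_MSG="a b c" python split.py
#
# would print:
#
#   ::set-output name=length::3
#   ::set-output name=_0::a
#   ::set-output name=_1::b
#   ::set-output name=_2::c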
|
StarcoderdataPython
|
6640737
|
<filename>aop/extensions/searchandreplace.py<gh_stars>1-10
import os
from distutils.util import strtobool
from aop.aop import extends
search_and_replace = bool(strtobool(os.getenv("USE_SEARCH_AND_REPLACE")))
if search_and_replace:
print("Using feature: Search and replace")
@extends('feature_states')
def search_and_replace_feature(features):
features['search-and-replace'] = True
@extends('feature_scripts')
def external_search_js(scripts):
scripts.append('/static/search_and_replace/index.js')
@extends('feature_modal_htmls')
def external_search_modal_html(htmls):
htmls.append('/search_and_replace/modal.html')
@extends('feature_dropdown_file_htmls')
def external_search_dropdown_file_html(htmls):
htmls.append('/search_and_replace/navbar.html')
|
StarcoderdataPython
|
3429803
|
'''
Created on 26 juil. 2017
@author: worm
'''
from django.urls.base import reverse
from snapshotServer.tests.views.Test_Views import TestViews
class Test_TestListView(TestViews):
def test_test_exists(self):
"""
Simple test to show that we get all test cases from session
"""
response = self.client.get(reverse('testlistView', args=[1]))
self.assertEqual(len(response.context['object_list']), 2)
self.assertEqual(list(response.context['object_list'])[0].testCase.name, "test1")
def test_session_id_added(self):
"""
Check that sessionId is available in context
"""
response = self.client.get(reverse('testlistView', args=[1]))
self.assertEqual(len(response.context['sessionId']), 1)
def test_no_test(self):
"""
Test no error raised when no test exist in session
"""
response = self.client.get(reverse('testlistView', args=[9]))
self.assertEqual(len(response.context['object_list']), 0)
|
StarcoderdataPython
|
3469448
|
from django.http import HttpResponseRedirect, HttpResponse, HttpResponseForbidden, JsonResponse
from django.views.generic import DetailView
from django.views import View
from django.db.models import Q
from django.conf import settings
from django.template.loader import render_to_string
from django.shortcuts import get_object_or_404, render
from django.contrib.auth.decorators import login_required
from django.utils.html import escape
import sys
from post.models import Post
from post.forms import CommentForm
from comment.models import Comment
from author.models import Author
from node.models import Node, build_comment
from urllib.parse import urlparse
import simplejson as json
import requests
import re
import base64
import uuid
from CommonMark import commonmark
APP_URL = settings.APP_URL
@login_required
def EditView(request, pk):
post = get_object_or_404(Post, id=pk)
if post.contentType.startswith("image"):
base64image = post.content
post.content = "<img alt=\"{}\" class=\"img-responsive\" src=\"{}\"/>".format(post.title,base64image)
authorContext = Author.objects.get(id=request.user)
if authorContext != post.author:
return HttpResponseRedirect('/post/{}/'.format(pk))
return render(request, 'edit.html', {'post':post})
@login_required
def EditPostView(request, pk):
# Only process the author's post if it is a POST request
if (request.method != 'POST'):
return HttpResponseRedirect('/post/{}/edit/'.format(pk))
if (request.POST['post_content'] == '' and 'image' not in request.FILES.keys()):
return HttpResponseRedirect('/post/{}/edit/'.format(pk))
try:
# Get the logged in user and the associated author object.
# userContext = User.objects.get(username=request.user.username)
# post_body = request.POST['post_content']
authorContext = Author.objects.get(id=request.user)
post = get_object_or_404(Post, id=pk)
if authorContext != post.author:
return HttpResponseRedirect('/post/{}/'.format(pk))
content = request.POST['post_content']
content = escape(content) # Should always be escaping HTML tags
if post.contentType.startswith("image"):
# Create and save a new post.
# encode image into base64 here and make nice image url too
if ('image' in request.FILES.keys()):
imgname = re.sub('[^._0-9a-zA-Z]+','',request.FILES['image'].name)
base64Image = base64.b64encode(request.FILES['image'].read())
post.content='data:' + str(post.contentType) + ',' + str(base64Image.decode('utf-8'))
post.contentType=request.FILES['image'].content_type + ";base64"
post.image_url = '{0}_{1}_{2}'.format(request.user, str(uuid.uuid4())[:8], imgname)
post.privacyLevel=request.POST['privacy_level']
post = setVisibility(request, post)
post.save()
elif request.POST['post_content'] != '':
#content = request.POST['post_content']
#content = escape(content)
post.content=content
post.privacyLevel=request.POST['privacy_level']
post.contentType=request.POST['contentType']
post = setVisibility(request, post)
post.save()
else:
return HttpResponseRedirect('/post/{}/edit/'.format(pk))
if request.POST['privacy_level'] == '5':
return HttpResponseRedirect('/post/{}/'.format(pk))
except:
        return HttpResponse(sys.exc_info()[0])  # sys.exc_info is a function; call it to get the exception type
return HttpResponseRedirect('/post/{}/'.format(pk))
def setVisibility(request, post):
if post.privacyLevel == '0':
post.visibility = 'PUBLIC'
elif post.privacyLevel == '1':
post.visibility = 'FRIENDS'
elif post.privacyLevel == '2':
post.visibility = 'FOAF'
elif post.privacyLevel == '3':
post.visibility = 'PRIVATE'
elif post.privacyLevel == '4':
post.visibility = 'PRIVATE'
elif post.privacyLevel == '5':
post.visibility = 'UNLISTED'
post.unlisted = True
if 'serverOnly' in request.POST:
post.serverOnly = True
else:
post.serverOnly = False
return post
@login_required
def AjaxComments(request, pk):
author_post = get_object_or_404(Post, id=pk)
comments = Comment.objects.filter(Q(post=author_post.id)).order_by('-publishDate')
parsed_post_url = urlparse(author_post.origin)
parsed_app_url = urlparse(APP_URL)
if parsed_post_url.netloc != parsed_app_url.netloc:
comment_ids = comments.values_list('UID',flat = True)
comment_ids = [str(x) for x in comment_ids]
host = "http://"+parsed_post_url.netloc + "/"
n = Node.objects.get(url=host)
r = requests.get(author_post.origin + "comments" +"/", auth = requests.auth.HTTPBasicAuth(n.username,n.password))
if r.status_code == requests.codes.ok:
post_objects = json.loads(r.text)
for o in post_objects['comments']:
if o['id'] not in comment_ids:
build_comment(o, author_post)
# get the new comments
comments = Comment.objects.filter(Q(post=author_post.id)).order_by('-publishDate')
context = dict()
html = dict()
context['comment_list'] = comments
html['comments'] = render_to_string('ajaxcomments.html', context, request=request)
return JsonResponse(html)
# Create your views here.
class PostView(DetailView):
model = Post
template_name = 'view.html'
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
context['comment_list'] = self.get_comment_list()
context['form'] = CommentForm()
context['content'] = self.get_content()
return context
def get_comment_list(self):
author_post = self.get_object()
comments = Comment.objects.filter(Q(post=author_post.id)).order_by('-publishDate')
parsed_post_url = urlparse(author_post.origin)
parsed_app_url = urlparse(APP_URL)
if parsed_post_url.netloc != parsed_app_url.netloc:
comment_ids = comments.values_list('UID',flat = True)
comment_ids = [str(x) for x in comment_ids]
host = author_post.author.host
try:
n = Node.objects.get(url=host)
except:
pass
r = requests.get(author_post.origin + "comments" +"/", auth = requests.auth.HTTPBasicAuth(n.username,n.password))
if r.status_code == requests.codes.ok:
post_objects = json.loads(r.text)
for o in post_objects['comments']:
if o['id'] not in comment_ids:
build_comment(o, author_post)
# get the new comments
comments = Comment.objects.filter(Q(post=author_post.id)).order_by('-publishDate')
return comments
elif r.status_code == requests.codes.forbidden:
# can't retrieve posts, just return servers post
return comments
else:
# can't retrieve posts, just return servers post
return comments
return comments
def dispatch(self, request, *args, **kwargs):
author = Author.objects.get(id=request.user)
if author.canViewPost(self.get_object()):
return super(DetailView, self).dispatch(request, *args, **kwargs)
else:
return HttpResponseRedirect("/author/")
def get_content(self):
if self.object.contentType.startswith("image"):
base64image = self.object.content
return "<img alt=\"{}\" class=\"img-responsive\" src=\"{}\"/>".format(self.object.title,base64image)
elif self.object.contentType == 'text/markdown':
return commonmark(self.object.content)
return self.object.content
class AddComment(View):
def post(self, request, pk):
form = CommentForm(request.POST)
if form.is_valid():
comment_author = Author.objects.get(id=request.user)
comment_post = Post.objects.get(id=pk)
comment = Comment(
content=form.cleaned_data['content'],
author=comment_author,
post=comment_post,
)
comment.setApiID()
parsed_comment_url = urlparse(comment_post.origin)
parsed_app_url = urlparse(APP_URL)
if parsed_comment_url.netloc == parsed_app_url.netloc:
# post is on server, proceed as normal
comment.save()
return HttpResponseRedirect("/post/" + pk)
else:
# we gotta send a request and see if it's successful
body = dict()
body['query'] = "addComment"
body['post'] = comment_post.origin
obj_author = dict()
obj_author['id'] = str(comment_author.UID)
obj_author['host'] = comment_author.host
obj_author['displayName'] = comment_author.displayName
obj_author['url'] = comment_author.url
obj_author['github'] = comment_author.github
obj_comment = dict()
obj_comment['author'] = obj_author
obj_comment['comment'] = form.cleaned_data['content']
obj_comment['contentType'] = 'text/markdown'
obj_comment['published'] = comment.publishDate.isoformat()
# commented out for T5 atm
#obj_comment['guid'] = str(comment.UID)
obj_comment['id'] = str(comment.UID)
body['comment'] = obj_comment
msg = json.dumps(body)
host = comment_post.author.host
try:
n = Node.objects.get(url=host)
except:
pass
r = requests.post(
comment_post.origin + "comments" +"/",
data=msg,
auth = requests.auth.HTTPBasicAuth(n.username,n.password),
headers={
"content-type": "application/json"
})
if r.status_code == requests.codes.ok:
return HttpResponseRedirect("/post/" + pk)
elif r.status_code == requests.codes.forbidden:
Comment.objects.filter(UID=comment.UID).delete()
return HttpResponseForbidden()
else:
Comment.objects.filter(UID=comment.UID).delete()
# return HttpResponse(str(r.status_code) + ':' + r.content.decode("utf-8"))
return HttpResponseRedirect("/post/" + pk)
return HttpResponseRedirect("/post/" + pk)
|
StarcoderdataPython
|
196178
|
import re
from collections import defaultdict
from django.db import migrations
def add_users_to_groups_based_on_users_permissions(apps, schema_editor):
"""Add every user to group with "user_permissions" if exists, else create new one.
For each user, if the group with the exact scope of permissions exists,
add the user to it, else create a new group with this scope of permissions
and add the user to it.
"""
User = apps.get_model("account", "User")
Group = apps.get_model("auth", "Group")
groups = Group.objects.all().prefetch_related("permissions")
counter = get_counter_value(Group)
mapping = create_permissions_mapping(User)
for perms, users in mapping.items():
group = get_group_with_given_permissions(perms, groups)
if group:
group.user_set.add(*users)
continue
group = create_group_with_given_permissions(perms, counter, Group)
group.user_set.add(*users)
counter += 1
def get_counter_value(Group):
"""Get the number of next potential group."""
pattern = r"^Group (\d+)$"
group = Group.objects.filter(name__iregex=pattern).order_by("name").last()
if not group:
return 1
return int(re.match(pattern, group.name).group(1)) + 1
def create_permissions_mapping(User):
"""Create mapping permissions to users and potential new group name."""
mapping = defaultdict(set)
users = User.objects.filter(user_permissions__isnull=False).distinct().iterator()
for user in users:
permissions = user.user_permissions.all().order_by("pk")
perm_pks = tuple([perm.pk for perm in permissions])
mapping[perm_pks].add(user.pk)
user.user_permissions.clear()
return mapping
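# Illustrative shape of the mapping returned above (editor's sketch, values are
# hypothetical): permission pk tuples map to the set of user pks holding exactly
# those permissions, e.g. {(3, 7): {12, 15}, (3,): {20}}. Each key later becomes
# one "Group NNN" unless an existing group already has that permission set.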
def get_group_with_given_permissions(permissions, groups):
"""Get group with given set of permissions."""
for group in groups:
group_perm_pks = {perm.pk for perm in group.permissions.all()}
if group_perm_pks == set(permissions):
return group
def create_group_with_given_permissions(perm_pks, counter, Group):
"""Create new group with given set of permissions."""
group_name = f"Group {counter:03d}"
group = Group.objects.create(name=group_name)
group.permissions.add(*perm_pks)
return group
class Migration(migrations.Migration):
dependencies = [
("account", "0040_auto_20200415_0443"),
]
operations = [
migrations.RunPython(
add_users_to_groups_based_on_users_permissions, migrations.RunPython.noop
),
]
|
StarcoderdataPython
|
4929469
|
<reponame>CrankySupertoon01/Toontown-2<filename>toontown/estate/GardenGameGlobals.py
acceptErrorDialog = 0
doneEvent = 'game Done'
colorRed = (1, 0, 0, 1)
colorBlue = (0, 0, 1, 1)
colorGreen = (0, 1, 0, 1)
colorGhostRed = (1, 0, 0, 0.5)
colorGhostGreen = (0, 1, 0, 0.5)
colorWhite = (1, 1, 1, 1)
colorBlack = (0.5, 0.5, 0.5, 1.0)
colorShadow = (0, 0, 0, 0.5)
running = 0
maxX = 0.46999999999999997
minX = -0.46999999999999997
maxZ = 0.75000000000000002
minZ = -0.00000000000000001
newBallX = 0.0
newBallZ = 0.69999999999999998
rangeX = (maxX - minX)
rangeZ = (maxZ - minZ)
size = 0.085000000000000006
sizeZ = (size * 0.80000000000000004)
gX = int((rangeX / size))
gZ = int((rangeZ / sizeZ))
maxX = (minX + (gX * size))
maxZ = (minZ + (gZ * sizeZ))
controlOffsetX = 0.0
controlOffsetZ = 0.0
queExtent = 3
gridDimX = gX
gridDimZ = gZ
gridBrick = False
newBallTime = 1.0
newBallCountUp = 0.0
cogX = 0
cogZ = 0
controlSprite = None
|
StarcoderdataPython
|
3389784
|
import os
import sys
import atexit
def daemon(pid_file=None):
if os.path.exists("/Users/admin/devops/socket_uvloop/{0}".format(pid_file)):
raise RuntimeError("Already running")
pid = os.fork()
if pid:
sys.exit(0)
os.chdir('/')
    # The child process inherits the parent's umask (file permission mask) by default; reset it to 0 so it does not restrict the program's own file reads and writes
os.umask(0)
    # Make the child process the new session leader and process group leader
os.setsid()
_pid = os.fork()
if _pid:
        # Exit the first child process; only the grandchild continues
sys.exit(0)
    # At this point the grandchild is already a daemon; next, redirect the stdin/stdout/stderr descriptors (redirect rather than close them, so the program does not raise errors when it prints)
    # Flush the buffers first, just to be safe
sys.stdout.flush()
sys.stderr.flush()
    # dup2 atomically closes and duplicates the file descriptor, redirecting it to /dev/null, i.e. discarding all input and output
with open('/dev/null') as read_null, open('/dev/null', 'w') as write_null:
os.dup2(read_null.fileno(), sys.stdin.fileno())
os.dup2(write_null.fileno(), sys.stdout.fileno())
os.dup2(write_null.fileno(), sys.stderr.fileno())
    # Write the pid file
if pid_file:
try:
with open("/Users/admin/devops/socket_uvloop/{0}".format(pid_file), 'w+') as f:
f.write(str(os.getpid()))
            # Register an exit handler to remove the pid file when the process exits
atexit.register(os.remove, "/Users/admin/devops/socket_uvloop/{0}".format(pid_file))
except Exception as e:
print(e)
daemon("pid_file.txt")
while 1:
    print("sub process is alive")
|
StarcoderdataPython
|
3527837
|
from discord.ext import commands
import discord
from discord_slash import SlashContext
from discord_slash.cog_ext import cog_slash as slash
from discord_slash.utils.manage_commands import create_option
from utils import command_option_type
from helpers import programmes_helper
from services import offers_service
import constants
class OffergraphCommand(commands.Cog):
def __init__(self, bot):
self.bot = bot
@slash(name='offergraph',
description='Show a graph of ranking numbers and the dates when they received offers',
options=[
create_option(
name='programme_id',
description='Study programme',
option_type=command_option_type.STRING,
required=True,
choices=programmes_helper.get_programme_choices()
),
create_option(
name='year',
description='Year of application',
option_type=command_option_type.INTEGER,
required=False,
choices=programmes_helper.get_year_choices()
),
create_option(
name='step',
description='Show only step graph',
option_type=command_option_type.BOOLEAN,
required=False
)
])
async def offergraph(self, ctx: SlashContext, programme_id: str, year: int = None, step: bool = False):
if year is None:
year = constants.current_year
# Show "Bot is thinking" message
await ctx.defer()
async with (await self.bot.get_db_conn()).acquire() as connection:
offers = offers_service.OffersService(connection)
await offers.generate_graph(programmes_helper.programmes[programme_id], step, year)
image = discord.File(offers_service.filename)
await ctx.send(file=image)
def setup(bot):
bot.add_cog(OffergraphCommand(bot))
|
StarcoderdataPython
|
2868
|
<reponame>Andy-Wilkinson/ChemMLToolk
import tensorflow as tf
class VariableScheduler(tf.keras.callbacks.Callback):
"""Schedules an arbitary variable during training.
Arguments:
variable: The variable to modify the value of.
schedule: A function that takes an epoch index (integer, indexed
from 0) and current variable value as input and returns a new
value to assign to the variable as output.
verbose: int. 0: quiet, 1: update messages.
"""
def __init__(self, variable, schedule, verbose=0):
super(VariableScheduler, self).__init__()
self.variable = variable
self.schedule = schedule
self.verbose = verbose
def on_epoch_begin(self, epoch, logs=None):
value = self.variable.read_value()
value = self.schedule(epoch, value)
self.variable.assign(value)
if self.verbose > 0:
print(f'\nEpoch {epoch + 1}: VariableScheduler assigning '
f'variable {self.variable.name} to {value}.')
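# Usage sketch (editor's note, not part of the original module; the model and data
# below are toy placeholders): linearly ramp a non-trainable variable over the
# first 10 epochs.
if __name__ == "__main__":
    import numpy as np

    kl_weight = tf.Variable(0.0, trainable=False, name="kl_weight")
    scheduler = VariableScheduler(
        kl_weight,
        schedule=lambda epoch, value: min(1.0, (epoch + 1) / 10.0),
        verbose=1,
    )
    model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
    model.compile(optimizer="adam", loss="mse")
    x = np.random.rand(64, 4).astype("float32")
    y = np.random.rand(64, 1).astype("float32")
    model.fit(x, y, epochs=3, verbose=0, callbacks=[scheduler])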
|
StarcoderdataPython
|
12838347
|
<reponame>kanson1996/IIMS
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Created by Kanson on 2020/1/12 16:03.
"""
import random
import datetime
def tid_maker():
    a = int(datetime.datetime.now().timestamp())
    b = int(''.join([str(random.randint(0, 9)) for i in range(3)]))
    return str(a) + str(b)
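if __name__ == "__main__":
    # Quick demonstration (editor's sketch, not part of the original module):
    # prints the current Unix timestamp followed by up to three random digits,
    # e.g. "1641973383427". Note the int() round-trip above can drop a leading
    # zero, so the random suffix is occasionally shorter than three characters.
    print(tid_maker())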
|
StarcoderdataPython
|
64565
|
from brownie import reverts
from fixtures import setup_wallet, owners_2
from eth_abi import encode_abi
from web3 import Web3
from fixtures import ACCOUNTS
from eth_account.messages import encode_defunct
def calculate_transaction_hash(nonce: int, to: str, value: int, data: str='00'):
encoded: bytes = nonce.to_bytes(32, 'big') + bytes.fromhex(to).rjust(32,b'\0') + value.to_bytes(32, 'big') + bytes.fromhex(data)
# '0x66aB6D9362d4F35596279692F0251Db635165871'
return Web3.keccak(encoded)
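# Byte layout sketch of the hashed payload above (editor's note): keccak256 of
#   32-byte big-endian nonce | address bytes left-padded to 32 bytes |
#   32-byte big-endian value | raw data bytes
# get_sigdata() then wraps this hash with encode_defunct() (the EIP-191 personal
# message prefix) before signing.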
def get_sigdata(nonce: int, to: str, value: int, data: str='00', accounts=[]):
message_to_be_signed = encode_defunct(calculate_transaction_hash(nonce, to, value, data))
sigdata = [[0,0,0]] * 10
for i, account in enumerate(accounts):
signature = account.sign_message(message_to_be_signed)
sigdata[i] = [signature['v'], signature['r'], signature['s']]
return sigdata
def test_execute(setup_wallet):
nonce: int = 0
to: str = '77aB6D9362d4F35596279692F0251Db635165871'
value: int = 1
sigdata = get_sigdata(nonce, to, value, accounts=[ACCOUNTS[0], ACCOUNTS[1]])
assert setup_wallet.execute(to, value, '', sigdata, {'value': value})
assert setup_wallet.nonce() == 1
def test_execute_non_owner(setup_wallet):
nonce: int = 0
to: str = '77aB6D9362d4F35596279692F0251Db635165871'
value: int = 1
sigdata = get_sigdata(nonce, to, value, accounts=[ACCOUNTS[2]])
with reverts():
setup_wallet.execute(to, value, '', sigdata, {'value': value})
def test_execute_threshold_not_reached(setup_wallet):
nonce: int = 0
to: str = '77aB6D9362d4F35596279692F0251Db635165871'
value: int = 1
sigdata = get_sigdata(nonce, to, value, accounts=[ACCOUNTS[0]])
with reverts():
setup_wallet.execute(to, value, '', sigdata, {'value': value})
|
StarcoderdataPython
|
8022562
|
import yaml
from beanstalkio.connection import Connection
from beanstalkio.errors import CommandError
class Client:
def __init__(self, host, port):
self.connection = Connection(host, port)
def _send_command(self, command):
self.connection.write(command)
def _read_server_response(self):
line = self.connection.read_line()
return line.split()
def _get_response_with_status(self):
response = self._read_server_response()
return response[0]
def _get_response_with_result(self):
response = self._read_server_response()
status = response.pop(0)
if response:
result = response.pop(0)
return result, status
return None, status
def _get_response_with_body(self):
body = status = None
response = self._read_server_response()
status = response.pop(0)
if response:
length = int(response.pop(0))
body = self.connection.read_bytes(length + 2)
return body, status
def _get_response_with_yaml_body(self):
body, status = self._get_response_with_body()
if body:
parsed = yaml.load(body, Loader=yaml.SafeLoader)
return parsed, status
return None, status
def _get_response_complex(self):
status = result = body = None
response = self._read_server_response()
status = response.pop(0)
if response:
result = response.pop(0)
if response:
length = int(response.pop(0))
body = self.connection.read_bytes(length + 2)[:-2]
return body, result, status
def stats(self):
self._send_command("stats\r\n")
body, status = self._get_response_with_yaml_body()
if not body:
raise CommandError(status)
return body
def stats_tube(self, tube: str):
self._send_command(f"stats-tube {tube}\r\n")
body, status = self._get_response_with_yaml_body()
if not body:
raise CommandError(status)
return body
def put(self, body: str, priority=2 ** 31, delay=0, ttr=120):
command = f"put {priority} {delay} {ttr} {len(body)}\r\n{body}\r\n"
self._send_command(command)
result, status = self._get_response_with_result()
if not result:
raise CommandError(status)
return result
def reserve(self):
self._send_command("reserve\r\n")
body, result, status = self._get_response_complex()
if not body:
raise CommandError(status)
return body, result
def release(self, job_id, priority=2 ** 31, delay=0):
command = f"release {job_id} {priority} {delay}\r\n"
self._send_command(command)
status = self._get_response_with_status()
if status != "RELEASED":
raise CommandError(status)
def delete(self, job_id):
self._send_command(f"delete {job_id}\r\n")
status = self._get_response_with_status()
if status != "DELETED":
raise CommandError(status)
def use(self, tube):
self._send_command(f"use {tube}\r\n")
result, status = self._get_response_with_result()
if not result:
raise CommandError(status)
return result
def watch(self, tube):
self._send_command(f"watch {tube}\r\n")
result, status = self._get_response_with_result()
if not result:
raise CommandError(status)
return result
def watching(self):
self._send_command(f"list-tubes-watched\r\n")
body, status = self._get_response_with_yaml_body()
if not body:
raise CommandError(status)
return body
def ignore(self, tube):
self._send_command(f"ignore {tube}\r\n")
result, status = self._get_response_with_result()
if not result:
raise CommandError(status)
return result
def disconnect(self):
self.connection.close()
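# Usage sketch (editor's note, not part of the original module). Assumes a
# beanstalkd server is listening on the given host and port:
#
#     client = Client("127.0.0.1", 11300)
#     client.use("emails")                 # produce into the "emails" tube
#     job_id = client.put("send welcome mail to user 42")
#     client.watch("emails")               # consume from the same tube
#     body, reserved_id = client.reserve()
#     client.delete(reserved_id)
#     client.disconnect()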
|
StarcoderdataPython
|
6570729
|
"""
Tools for testing this library
the name begins with an underscore so that the functions aren't run as tests
"""
import time
import contextlib
import joblib
import numpy as np
import nose.tools
from nose.plugins.attrib import attr
eq_ = nose.tools.eq_ # for compatibility
# TODO rename to assert_equal and all other functions similarly
equal = nose.tools.eq_
raises = nose.tools.raises
@contextlib.contextmanager
def assert_time(min_time, max_time):
"""Raise an exception if the time to process the block is within the
expected range of (min_time, max_time), measured in ms.
Example:
with assert_time(1, 10):
do_something_for_5ms()
"""
start_time = time.time()
yield
end_time = time.time()
actual_time = end_time - start_time
assert min_time <= actual_time <= max_time, dict(min=min_time,
max=max_time,
actual=actual_time)
def hash_equal(thing1, thing2, msg=None):
equal(joblib.hash(thing1, coerce_mmap=True),
joblib.hash(thing2, coerce_mmap=True),
msg)
hash_eq = hash_equal # for compatibility
numpy_equal = np.testing.assert_array_equal
numpy_almost_equal = np.testing.assert_array_almost_equal
def numpy_allclose(a, b, msg=None, **kwargs):
"""
can be better than numpy_almost_equal because you have more
fine-grained control over absolute and relative tolerance
"""
assert np.allclose(a, b, **kwargs), dict(
a=a,
b=b,
msg=msg,
)
def assertion_error_complement(fn):
"""
    returns a function that raises an assertion error if the inner function does not
NOTE: resulting functions are not serializable
TODO reimplement with class to make them serializable
"""
def inner(*args, **kwargs):
try:
fn(*args, **kwargs)
except AssertionError:
pass
else:
raise AssertionError(dict(
fn=fn,
args=args,
kwargs=kwargs,
))
return inner
not_equal = assertion_error_complement(equal)
numpy_not_almost_equal = assertion_error_complement(numpy_almost_equal)
numpy_not_equal = assertion_error_complement(numpy_equal)
numpy_not_allclose = assertion_error_complement(numpy_allclose)
# decorator to mark slow tests
# ---
# this allows you to run only non-slow tests with "nosetests -a '!slow'"
slow = attr("slow")
|
StarcoderdataPython
|
389398
|
"""AppleByte database support."""
from cryptoassets.core import models
from cryptoassets.core.coin.registry import CoinModelDescription
from cryptoassets.core.coin.validate import HashAddresValidator
coin_description = CoinModelDescription(
coin_name="aby",
wallet_model_name="cryptoassets.core.coin.applebyte.models.AppleByteWallet",
address_model_name="cryptoassets.core.coin.applebyte.models.AppleByteAddress",
account_model_name="cryptoassets.core.coin.applebyte.models.AppleByteAccount",
transaction_model_name="cryptoassets.core.coin.applebyte.models.AppleByteTransaction",
network_transaction_model_name="cryptoassets.core.coin.applebyte.models.AppleByteNetworkTransaction",
address_validator=HashAddresValidator())
class AppleByteAccount(models.GenericAccount):
coin_description = coin_description
class AppleByteAddress(models.GenericAddress):
coin_description = coin_description
class AppleByteTransaction(models.GenericConfirmationTransaction):
coin_description = coin_description
class AppleByteWallet(models.GenericWallet):
coin_description = coin_description
class AppleByteNetworkTransaction(models.GenericConfirmationNetworkTransaction):
coin_description = coin_description
|
StarcoderdataPython
|
11293040
|
<filename>lms/views/reports.py
from pyramid.view import view_config
from lms.models import ApplicationInstance, LtiLaunches
@view_config(
route_name="reports",
renderer="lms:templates/reports/application_report.html.jinja2",
permission="view",
)
def list_application_instances(request):
launches = request.db.execute(
"SELECT context_id, count(context_id), \
lms_url, requesters_email, consumer_key FROM lti_launches LEFT JOIN \
application_instances on \
lti_launches.lti_key=application_instances.consumer_key GROUP BY \
context_id, consumer_key, requesters_email, lms_url ORDER BY \
count(CONTEXT_ID) DESC;"
).fetchall()
return {
"apps": request.db.query(ApplicationInstance).all(),
"launches": launches,
"num_launches": request.db.query(LtiLaunches).count(),
"logout_url": request.route_url("logout"),
}
|
StarcoderdataPython
|
1986504
|
<gh_stars>0
__author__ = 'Andy'
import time
from whirlybird.server.position_contoller import PositionController
if __name__ == "__main__":
position_controller = PositionController()
position_controller.run()
time.sleep(10)
position_controller.kill()
|
StarcoderdataPython
|
5101387
|
<gh_stars>100-1000
#!/usr/bin/env python
"""
Kernels.
Authors:
- <NAME>, 2012 (<EMAIL>)
- <NAME>, 2016 (<EMAIL>)
Copyright 2016, Mindboggle team (http://mindboggle.info), Apache v2.0 License
"""
def rbf_kernel(x1, x2, sigma):
"""
    Compute the radial basis function (Gaussian) kernel between two vectors.
Parameters
----------
x1 : Nx1 numpy array
x2 : Nx1 numpy array
sigma : float
Returns
-------
rbf : float
Examples
--------
>>> import numpy as np
>>> from mindboggle.guts.kernels import rbf_kernel
>>> x1 = np.array([0.1,0.2,0.4,0])
>>> x2 = np.array([0.1,0.3,0.5,0])
>>> sigma = 0.5
>>> rbf = rbf_kernel(x1, x2, sigma)
>>> print('{0:0.5f}'.format(rbf))
0.96079
"""
import numpy as np
return np.exp(-np.linalg.norm(x1 - x2) ** 2 / (2 * sigma ** 2))
# def cotangent_kernel(Nodes, Meshes):
# """
# This function constructs weighted edges of a graph.
#
# Parameters
# ----------
# Nodes : numpy array
# Meshes : numpy array
#
# Returns
# -------
# W : N x N matrix
# weight matrix
#
# Examples
# --------
# >>> import numpy as np
# >>> from mindboggle.guts.kernels import cotangent_kernel
# >>> Nodes = np.array([0,1,2,3,4])
# >>> Meshes = np.array([[1,2,3],[0,1,2],[0,1,3],[0,1,4],[0,2,3],[0,3,4]])
# >>> cotangent_kernel(Nodes, Meshes)
# ValueError: 'axisa' out of bounds
#
# """
# import numpy as np
# from scipy.sparse import lil_matrix
#
# num_nodes = Nodes.shape[0]
# W = lil_matrix((num_nodes, num_nodes))
# #print('Constructing sparse affinity matrix...')
# for c in Meshes:
# # Obtain vertices which comprise face
# v0, v1, v2 = Nodes[c[0]], Nodes[c[1]], Nodes[c[2]]
#
# # Obtain cotangents of angles
# cot0 = np.dot(v1-v0, v2-v0) / np.linalg.norm(np.cross(v1-v0, v2-v0))
# cot1 = np.dot(v2-v1, v0-v1) / np.linalg.norm(np.cross(v2-v1, v0-v1))
# cot2 = np.dot(v0-v2, v1-v2) / np.linalg.norm(np.cross(v0-v2, v1-v2))
#
# # Update weight matrix accordingly
# W[c[1], c[2]] += cot0
# W[c[2], c[1]] += cot0
# W[c[0], c[2]] += cot1
# W[c[2], c[0]] += cot1
# W[c[0], c[1]] += cot2
# W[c[1], c[0]] += cot2
#
# return W
def inverse_distance(x1, x2, epsilon):
"""
This function constructs weighted edges of a graph,
where the weight is the inverse of the distance between two nodes.
Parameters
----------
x1 : Nx1 numpy array
x2 : Nx1 numpy array
epsilon : float
Returns
-------
d : float
Examples
--------
>>> import numpy as np
>>> from mindboggle.guts.kernels import inverse_distance
>>> x1 = np.array([0.1,0.2,0.4,0])
>>> x2 = np.array([0.1,0.3,0.5,0])
>>> epsilon = 0.05
>>> d = inverse_distance(x1, x2, epsilon)
>>> print('{0:0.5f}'.format(d))
5.22408
"""
import numpy as np
return 1.0/(np.linalg.norm(x1 - x2) + epsilon)
# ============================================================================
# Doctests
# ============================================================================
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # py.test --doctest-modules
|
StarcoderdataPython
|
11323764
|
r"""Perform continuous measure record.
The gRPC API is built from the C API. NI-DCPower documentation is installed with the driver at:
C:\Program Files (x86)\IVI Foundation\IVI\Drivers\niDCPower\Documentation\NIDCPowerCref.chm
Getting Started:
To run this example, install "NI-DCPower Driver" on the server machine:
https://www.ni.com/en-in/support/downloads/drivers/download.ni-dcpower.html
For instructions on how to use protoc to generate gRPC client interfaces, see our "Creating a gRPC
Client" wiki page:
https://github.com/ni/grpc-device/wiki/Creating-a-gRPC-Client
Refer to the NI DCPOWER gRPC Wiki for the latest C Function Reference:
https://github.com/ni/grpc-device/wiki/NI-DCPOWER-C-Function-Reference
Running from command line:
Server machine's IP address, port number, and resource name can be passed as separate command line
arguments.
> python measure-record.py <server_address> <port_number> <resource_name>
If they are not passed in as command line arguments, then by default the server address will be
"localhost:31763", with "SimulatedDCPower" as the resource name.
"""
import math
import sys
import time
import grpc
import matplotlib.pyplot as plt
import nidcpower_pb2 as nidcpower_types
import nidcpower_pb2_grpc as grpc_nidcpower
import numpy as np
SERVER_ADDRESS = "localhost"
SERVER_PORT = "31763"
SESSION_NAME = "NI-DCPower-Session"
# Resource name, channel name and options for a simulated 4147 client. Change them according to
# NI-DCPower model.
RESOURCE = "SimulatedDCPower"
OPTIONS = "Simulate=1,DriverSetup=Model:4147;BoardType:PXIe"
CHANNELS = "0"
# Parameters
RECORD_LENGTH = 10
BUFFER_MULTIPLIER = 10
VOLTAGE_LEVEL = 5.0
# Read in cmd args
if len(sys.argv) >= 2:
SERVER_ADDRESS = sys.argv[1]
if len(sys.argv) >= 3:
SERVER_PORT = sys.argv[2]
if len(sys.argv) >= 4:
RESOURCE = sys.argv[3]
OPTIONS = ""
def check_for_error(vi, status):
"""Raise an exception if the status indicates an error."""
if status != 0:
error_message_response = client.ErrorMessage(
nidcpower_types.ErrorMessageRequest(vi=vi, error_code=status)
)
raise Exception(error_message_response.error_message)
# Create the communication channel for the remote host and create connections to the NI-DCPower and
# session services.
channel = grpc.insecure_channel(f"{SERVER_ADDRESS}:{SERVER_PORT}")
client = grpc_nidcpower.NiDCPowerStub(channel)
try:
# Initialize the session.
initialize_with_channels_response = client.InitializeWithChannels(
nidcpower_types.InitializeWithChannelsRequest(
session_name=SESSION_NAME,
resource_name=RESOURCE,
channels=CHANNELS,
reset=False,
option_string=OPTIONS,
)
)
vi = initialize_with_channels_response.vi
check_for_error(vi, initialize_with_channels_response.status)
# Specify when the measure unit should acquire measurements.
configure_measure_when = client.SetAttributeViInt32(
nidcpower_types.SetAttributeViInt32Request(
vi=vi,
attribute_id=nidcpower_types.NiDCPowerAttribute.NIDCPOWER_ATTRIBUTE_MEASURE_WHEN,
attribute_value=nidcpower_types.NiDCPowerInt32AttributeValues.NIDCPOWER_INT32_MEASURE_WHEN_VAL_AUTOMATICALLY_AFTER_SOURCE_COMPLETE,
)
)
check_for_error(vi, configure_measure_when.status)
# set the voltage level.
configure_voltage_level = client.ConfigureVoltageLevel(
nidcpower_types.ConfigureVoltageLevelRequest(vi=vi, level=VOLTAGE_LEVEL)
)
check_for_error(vi, configure_voltage_level.status)
    # Specify how many measurements compose a measure record.
configure_measure_record_length = client.SetAttributeViInt32(
nidcpower_types.SetAttributeViInt32Request(
vi=vi,
attribute_id=nidcpower_types.NiDCPowerAttribute.NIDCPOWER_ATTRIBUTE_MEASURE_RECORD_LENGTH,
attribute_value=RECORD_LENGTH,
)
)
check_for_error(vi, configure_measure_record_length.status)
    # Specify whether the measure record length is finite. Setting it to False enables continuous measurements.
configure_measure_record_length_is_finite = client.SetAttributeViBoolean(
nidcpower_types.SetAttributeViBooleanRequest(
vi=vi,
attribute_id=nidcpower_types.NiDCPowerAttribute.NIDCPOWER_ATTRIBUTE_MEASURE_RECORD_LENGTH_IS_FINITE,
attribute_value=False,
)
)
check_for_error(vi, configure_measure_record_length_is_finite.status)
# commit the session.
commit_response = client.Commit(
nidcpower_types.CommitRequest(
vi=vi,
)
)
check_for_error(vi, commit_response.status)
# get measure_record_delta_time.
get_measure_record_delta_time = client.GetAttributeViReal64(
nidcpower_types.GetAttributeViReal64Request(
vi=vi,
attribute_id=nidcpower_types.NiDCPowerAttribute.NIDCPOWER_ATTRIBUTE_MEASURE_RECORD_DELTA_TIME,
)
)
check_for_error(vi, get_measure_record_delta_time.status)
# initiate the session.
initiate_response = client.Initiate(
nidcpower_types.InitiateRequest(
vi=vi,
)
)
check_for_error(vi, initiate_response.status)
# Setup a plot to draw the captured waveform.
fig = plt.figure("Waveform Graph")
fig.show()
fig.canvas.draw()
# Handle closing of plot window.
closed = False
def _on_close(event):
global closed
closed = True
fig.canvas.mpl_connect("close_event", _on_close)
print("\nReading values in loop. CTRL+C or Close window to stop.\n")
# Create a buffer for fetching the values.
y_axis = [0.0] * (RECORD_LENGTH * BUFFER_MULTIPLIER)
x_start = 0
try:
while not closed:
# Clear the plot and setup the axis.
plt.clf()
plt.axis()
plt.xlabel("Samples")
plt.ylabel("Amplitude")
fetch_multiple_response = client.FetchMultiple(
nidcpower_types.FetchMultipleRequest(vi=vi, timeout=10, count=RECORD_LENGTH)
)
check_for_error(vi, fetch_multiple_response.status)
# Append the fetched values in the buffer.
y_axis.extend(fetch_multiple_response.voltage_measurements)
y_axis = y_axis[RECORD_LENGTH:]
# Updating the precision of the fetched values.
y_axis_new = []
for value in y_axis:
if value < VOLTAGE_LEVEL:
y_axis_new.append(math.floor(value * 100) / 100)
else:
y_axis_new.append(math.ceil(value * 100) / 100)
# Plotting
y_axis = y_axis_new
x_axis = np.arange(
start=x_start, stop=x_start + RECORD_LENGTH * BUFFER_MULTIPLIER, step=1
)
x_start = x_start + RECORD_LENGTH
plt.plot(x_axis, y_axis)
plt.pause(0.001)
time.sleep(0.1)
except KeyboardInterrupt:
pass
print(f"Effective measurement rate : {1 / get_measure_record_delta_time.attribute_value}")
except grpc.RpcError as rpc_error:
error_message = rpc_error.details()
if rpc_error.code() == grpc.StatusCode.UNAVAILABLE:
error_message = f"Failed to connect to server on {SERVER_ADDRESS}:{SERVER_PORT}"
elif rpc_error.code() == grpc.StatusCode.UNIMPLEMENTED:
error_message = (
"The operation is not implemented or is not supported/enabled in this service"
)
print(f"{error_message}")
finally:
if "vi" in vars() and vi.id != 0:
# close the session.
check_for_error(vi, (client.Close(nidcpower_types.CloseRequest(vi=vi))).status)
|
StarcoderdataPython
|
4803294
|
#!/usr/bin/env python
u"""
nsidc_subset_altimetry.py
Written by <NAME> (07/2018)
Program to acquire and plot subsetted NSIDC data using the Valkyrie prototype
CALLING SEQUENCE:
to use a bounding box:
python nsidc_subset_altimetry.py --bbox=-29.25,69.4,-29.15,69.50 ILVIS2
to use start and end time:
python nsidc_subset_altimetry.py -T 2009-04-28T12:23:51,2009-04-28T12:24:00 ILATM1B
to use a polygon:
python nsidc_subset_altimetry.py --longitude=-48,-47.9,-48,-48 \
--latitude=69,69,69.1,69 ILATM1B
to create a plot (note polygon is same as bounding box above)
python nsidc_subset_altimetry.py --longitude=-29.25,-29.15,-29.15,-29.25,-29.25 \
--latitude=69.4,69.4,69.50,69.50,69.4 --plot ILATM1B
INPUTS:
ILATM2: Airborne Topographic Mapper Icessn Elevation, Slope, and Roughness
ILATM1B: Airborne Topographic Mapper QFIT Elevation
ILVIS1B: Land, Vegetation and Ice Sensor Geolocated Return Energy Waveforms
ILVIS2: Geolocated Land, Vegetation and Ice Sensor Surface Elevation Product
COMMAND LINE OPTIONS:
--help: list the command line options
-D X, --directory=X: Data directory
-B X, --bbox=X: Bounding box (lonmin,latmin,lonmax,latmax)
--longitude=X: Polygon longitudinal coordinates (comma-separated)
--latitude=X: Polygon latitudinal coordinates (comma-separated)
-T X, --time=X: Time range (comma-separated start and end)
-M X, --mode=X: Permissions mode of data
-V, --verbose: Verbose output of transferred file
-P, --plot: Check output with a scatter plot
UPDATE HISTORY:
Updated 06/2018: using python3 compatible octal, input and urllib
Written 06/2017
"""
from __future__ import print_function
import future.standard_library
import os
import sys
import time
import h5py
import shutil
import getopt
import inspect
import posixpath
import dateutil.parser
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
with future.standard_library.hooks():
import urllib.request
#-- PURPOSE: program to acquire (and plot) subsetted NSIDC data
def nsidc_subset_altimetry(PRODUCT, filepath, BOUNDS=None, LATITUDE=None,
LONGITUDE=None, TIME=None, VERBOSE=False, PLOT=False, MODE=0o775):
#-- create output directory if non-existent
os.makedirs(filepath) if not os.access(filepath, os.F_OK) else None
#-- if using latitude and longitude points
if LATITUDE and LONGITUDE:
ll = ','.join(['{0:f},{1:f}'.format(ln,lt) for ln,lt in zip(LONGITUDE,LATITUDE)])
poly_flag = '?polygon={0}'.format(ll)
else:
poly_flag = ''
#-- if using bounding box
if BOUNDS:
#-- min_lon,min_lat,max_lon,max_lat
bbox_flag = '?bbox={0:f},{1:f},{2:f},{3:f}'.format(*BOUNDS)
else:
bbox_flag = ''
#-- if using time start and end
if TIME:
#-- verify that start and end times are in ISO format
start_time = dateutil.parser.parse(TIME[0]).isoformat()
end_time = dateutil.parser.parse(TIME[1]).isoformat()
time_flag = '?time_range={0},{1}'.format(start_time, end_time)
else:
time_flag = ''
#-- full url for subset dataset
HOST = 'http://staging.valkyrie-vm.apps.nsidc.org'
remote_file = '{0}{1}{2}{3}'.format(PRODUCT,poly_flag,bbox_flag,time_flag)
#-- local file
today = time.strftime('%Y-%m-%dT%H-%M-%S',time.localtime())
local_file = os.path.join(filepath,'{0}_{1}.H5'.format(PRODUCT,today))
#-- Printing files transferred if VERBOSE
args = (posixpath.join(HOST,remote_file),local_file)
print('{0} -->\n\t{1}\n'.format(*args)) if VERBOSE else None
#-- Create and submit request. There are a wide range of exceptions
#-- that can be thrown here, including HTTPError and URLError.
request = urllib.request.Request(posixpath.join(HOST,remote_file))
response = urllib.request.urlopen(request)
#-- copy contents to local file using chunked transfer encoding
#-- transfer should work properly with ascii and binary data formats
CHUNK = 16 * 1024
with open(local_file, 'wb') as f:
shutil.copyfileobj(response, f, CHUNK)
#-- check output with a scatter plot
if PLOT:
#-- extract X, Y and Z variables from file
keys = {}
keys['ILATM1B'] = ['Latitude','Longitude','Elevation']
keys['ILVIS2'] = ['LATITUDE_LOW','LONGITUDE_LOW','ELEVATION_LOW']
dinput = {}
with h5py.File(local_file,'r') as fileID:
#-- map X, Y and Z to values from input dataset
for k,v in zip(['Y','X','Z'],keys[PRODUCT]):
dinput[k] = fileID[v][:]
#-- create scatter plot
fig, ax1 = plt.subplots(num=1, figsize=(8,6))
sc = ax1.scatter(dinput['X'], dinput['Y'], c=dinput['Z'], s=1,
cmap=plt.cm.Spectral_r)
#-- Add colorbar for elevation and adjust size
#-- add extension triangles to upper and lower bounds
#-- pad = distance from main plot axis
#-- shrink = percent size of colorbar
#-- aspect = lengthXwidth aspect of colorbar
cbar = plt.colorbar(sc, ax=ax1, extend='both', extendfrac=0.0375,
pad=0.025, drawedges=False, shrink=0.92, aspect=22.5)
#-- rasterized colorbar to remove lines
cbar.solids.set_rasterized(True)
#-- Add labels to the colorbar
cbar.ax.set_ylabel('Height Above Reference Ellipsoid', labelpad=10,
fontsize=12)
cbar.ax.set_xlabel('m', fontsize=12)
cbar.ax.xaxis.set_label_coords(0.5, 1.045)
#-- ticks lines all the way across
cbar.ax.tick_params(which='both', width=1, direction='in',
length=13, labelsize=12)
#-- set x and y labels, adjust tick labels and adjust tick label size
ax1.set_ylabel(u'Latitude [\u00B0]', fontsize=12)
ax1.set_xlabel(u'Longitude [\u00B0]', fontsize=12)
ax1.xaxis.set_major_formatter(ticker.FormatStrFormatter('%g'))
ax1.yaxis.set_major_formatter(ticker.FormatStrFormatter('%g'))
for tick in ax1.xaxis.get_major_ticks():
tick.label.set_fontsize(12)
for tick in ax1.yaxis.get_major_ticks():
tick.label.set_fontsize(12)
#-- show the plot
plt.show()
#-- PURPOSE: help module to describe the optional input command-line parameters
def usage():
print('\nHelp: {0}'.format(os.path.basename(sys.argv[0])))
print(' -D X, --directory=X\tData directory')
print(' -B X, --bbox=X\t\tBounding box (lonmin,latmin,lonmax,latmax)')
print(' --longitude=X\t\tPolygon longitudinal coordinates (comma-separated)')
print(' --latitude=X\t\tPolygon latitudinal coordinates (comma-separated)')
print(' -T X, --time=X\t\tTime range (comma-separated start and end)')
print(' -M X, --mode=X\t\tPermissions mode of data')
print(' -V, --verbose\t\tVerbose output of transferred file')
print(' -P, --plot\t\tCheck output with a scatter plot\n')
#-- program that calls nsidc_subset_altimetry with arguments listed
def main():
#-- Read the system arguments listed after the program
long_options = ['help','bbox=','longitude=','latitude=','time=',
'directory=','mode=','verbose','plot']
optlist,arglist = getopt.getopt(sys.argv[1:],'hB:T:D:M:VP',long_options)
#-- command line parameters
filename = inspect.getframeinfo(inspect.currentframe()).filename
filepath = os.path.dirname(os.path.abspath(filename))
BOUNDS = None
lat = None
lon = None
TIME = None
VERBOSE = False
PLOT = False
#-- permissions mode of the output files (number in octal)
MODE = 0o775
for opt, arg in optlist:
if opt in ('-h','--help'):
usage()
sys.exit()
elif opt in ('-D','--directory'):
filepath = os.path.expanduser(arg)
elif opt in ('-B','--bbox'):
BOUNDS = [float(i) for i in arg.split(',')]
elif opt in ('--longitude'):
lon = [float(i) for i in arg.split(',')]
elif opt in ('--latitude'):
lat = [float(i) for i in arg.split(',')]
elif opt in ('-T','--time'):
TIME = arg.split(',')
elif opt in ('-V','--verbose'):
VERBOSE = True
elif opt in ('-M','--mode'):
MODE = int(arg, 8)
elif opt in ('-P','--plot'):
PLOT = True
#-- IceBridge Products for the NSIDC subsetter
P = {}
P['ILATM2'] = 'Icebridge Airborne Topographic Mapper Icessn Product'
P['ILATM1B'] = 'Icebridge Airborne Topographic Mapper QFIT Elevation'
P['ILVIS1B'] = 'Icebridge LVIS Geolocated Return Energy Waveforms'
P['ILVIS2'] = 'Icebridge Land, Vegetation and Ice Sensor Elevation Product'
#-- enter dataset to transfer (ATM, LVIS, etc) as system argument
if not arglist:
        for key, val in P.items():
print('{0}: {1}'.format(key, val))
raise Exception('No System Arguments Listed')
#-- check that each data product entered was correctly typed
keys = ','.join(sorted([key for key in P.keys()]))
for p in arglist:
if p not in P.keys():
raise IOError('Incorrect Data Product Entered ({0})'.format(keys))
#-- run the program for each argument
for p in arglist:
nsidc_subset_altimetry(p, filepath, BOUNDS=BOUNDS, LATITUDE=lat,
LONGITUDE=lon, TIME=TIME, VERBOSE=VERBOSE, MODE=MODE, PLOT=PLOT)
#-- run main program
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
9737345
|
#!/usr/bin/env python
#
# This script is being used to find the deeply hidden bug in the move generator!
#
# It attempts to generate a position where ccore's perftdiv (at depth 1) is different
# to that given by critter.
#
# usage: find_buggy_pos.py [number|filename]
# If filename is specified then FENs are read from the file, else they are generated
# using ccore's randompos function.
#
import sys, os, subprocess, re, string
if sys.platform != "win32":
import signal
progname = ""
critterPath = "/Users/andy/Documents/ChessEngines/critter16a/critter-16a"
ccorePath = "build/ccore -q findbuggypos"
def sigHandler(signum, frame):
""" Handle signals """
sys.stderr.write("{0}: Terminated with signal {1}\n".format(progname, signum))
sys.exit(2)
def randomPos(ccore):
ccore.stdin.write("randompos\n")
fen = ccore.stdout.readline().strip()
#print "got fen", fen
return fen
def ccoreNodeCount(ccore, fen):
#print "writing fen {0}".format(fen)
ccore.stdin.write("setboard {0}\n".format(fen))
ccore.stdin.write("perftdiv 1\n")
nodes = ccore.stdout.readline().strip()
#print "got nodes", nodes
return int(nodes)
def critterNodeCount(critter, fen):
#print "writing fen {0}".format(fen)
critter.stdin.write("setboard {0}\n".format(fen))
critter.stdin.write("divide 1\n")
nodes = 0
while not nodes:
line = critter.stdout.readline()
#print line
match = re.match(r'^(\d+) nodes in.*$', line)
if match:
nodes = match.group(1)
return int(nodes)
def main(argv = None):
""" Program entry point """
global progname
filename = ""
count = 1000
if sys.platform != "win32":
for sig in (signal.SIGINT, signal.SIGHUP, signal.SIGTERM):
signal.signal(sig, sigHandler)
if argv is None:
argv = sys.argv
progname = os.path.basename(argv[0])
if len(argv) == 2:
if os.path.isfile(argv[1]):
filename = argv[1]
else:
count = int(argv[1])
try:
# Keep the ccore and criter sub-processes permanently open, to save time and resources
# continually starting them.
ccore = subprocess.Popen(ccorePath, stdin = subprocess.PIPE,
stdout = subprocess.PIPE, stderr = subprocess.STDOUT, shell=True)
critter = subprocess.Popen(critterPath, stdin = subprocess.PIPE,
stdout = subprocess.PIPE, stderr = subprocess.STDOUT, shell=True)
if filename:
print "Reading positions from", filename
i = 0
fenfile = open(filename)
for fen in fenfile:
fen = fen.strip()
i += 1
expected = critterNodeCount(critter, fen)
actual = ccoreNodeCount(ccore, fen)
if expected != actual:
print i+1, "MISMATCH:", fen, "(", actual, "!=", expected, ")"
return 1
else:
print i+1, "match", fen
else:
print "Using", count, "random positions"
for i in range(0, count):
fen = randomPos(ccore)
if fen:
expected = critterNodeCount(critter, fen)
actual = ccoreNodeCount(ccore, fen)
if expected != actual:
print i+1, "MISMATCH:", fen, "(", actual, "!=", expected, ")"
return 1
else:
print i+1, "match", fen
except IOError, msg:
print "{0}: exception {1}\n".format(progname, msg)
return 2
return 0
if __name__ == "__main__":
sys.exit(main())
|
StarcoderdataPython
|
3327636
|
#!/bin/python
# path to python
# where is your writable directory? Jobs will be managed in a .queue directory here.
SCRATCH="FULL_PATH_TO_YOUR_SCRATCH_SPACE"
# username
USER="YOUR_USERNAME_HERE"
# how big is one batch of jobs? ex 10 means there must be 10 free slots to run another batch.
JOB_ARRAY_MAX=20
# max total jobs to run in parallel with this tool (ideally multiple of JOB_ARRAY_MAX)
QUEUE_MAX=140
##
##
## Customize everything before this line
##
##
import os, sys, shutil
import glob
import pickle
import time
import re
from subprocess import check_output
import argparse
basedir = os.path.dirname(os.path.realpath(__file__))
def joinpath(*args):
build = args[0]
for iarg in range(1,len(args)):
build = os.path.join(build,args[iarg]);
return build;
def existingProjects():
projects = [];
confdir = joinpath(SCRATCH, ".queue");
if os.path.isdir(confdir):
for folder in os.walk(confdir).next()[1]:
projectdir = joinpath(confdir,folder);
projects.append(projectdir);
return projects
else:
return False
def quitWithMsg(msg):
print("\n");
print(msg);
sys.exit(1);
def runningJobs():
'''Returns list of job ids (just the 12345 number part)
for all running jobs of the specified user'''
list_running = check_output(["qstat -au "+USER+" | tail -n +6 | cut -d'.' -f1 | cut -d'[' -f1"], shell=True);
list_running = list_running.split('\n');
list_running = [int(e) for e in list_running if len(e) != 0];
return list_running;
def splitArrayJob(jobfile,jobtext,begin,end):
begin = int(begin);
end = int(end);
rxBegin = re.compile(r'''XBEGINX''');
rxEnd = re.compile(r'''XENDX''');
rxMultiplier = re.compile(r'''XMULTIPLIERX''');
rxSet = re.compile(r'''XSETX''');
rxOffset = re.compile(r'''XOFFSETX''');
SETS = (end - begin + 1) / JOB_ARRAY_MAX;
REMAINDER = (end - begin + 1) % JOB_ARRAY_MAX;
firstJobMade=False;
projectdir=joinpath(SCRATCH,".queue");
jobs_to_add = [];
projdirname = "array_"+jobfile;
if not os.path.exists(joinpath(SCRATCH,".queue",projdirname)):
os.makedirs(joinpath(SCRATCH,".queue",projdirname));
for set in range(SETS):
this_filename = str(set*JOB_ARRAY_MAX+begin)+"."+str(set*JOB_ARRAY_MAX+begin+JOB_ARRAY_MAX-1)+".sh";
jobs_to_add.append(this_filename);
open(joinpath(projectdir,"array_"+jobfile,this_filename),'w').write(rxOffset.sub(str(begin-1),rxSet.sub(str(JOB_ARRAY_MAX),rxMultiplier.sub(str(set),rxEnd.sub(str(JOB_ARRAY_MAX),rxBegin.sub(str(1),jobtext)))))); ## for each set, multiplier=set, begin=begin, end=begin+JOB_ARRAY_MAX-1
firstJobMade=True;
if (REMAINDER != 0):
this_filename = str(SETS*JOB_ARRAY_MAX+begin)+"."+str(end)+".sh";
jobs_to_add.append(this_filename);
if (firstJobMade == True):
open(joinpath(projectdir,"array_"+jobfile,this_filename),'w').write(rxOffset.sub(str(begin-1),rxSet.sub(str(JOB_ARRAY_MAX),rxMultiplier.sub(str(SETS),rxEnd.sub(str(REMAINDER),rxBegin.sub(str(1),jobtext)))))); ## at remainder, assume first job started: multiplier=SETS, begin=1, end=end
else:
open(joinpath(projectdir,"array_"+jobfile,this_filename),'w').write(rxOffset.sub(str(begin-1),rxSet.sub(str(JOB_ARRAY_MAX),rxMultiplier.sub(str(SETS),rxEnd.sub(str(end-begin+1),rxBegin.sub(str(1),jobtext)))))); ## at remainder, assume NOT first job started: multiplier=
pickle.dump(jobs_to_add, open(joinpath(SCRATCH,".queue",projdirname,".held"),"wb"));
submitted = [];
pickle.dump(submitted, open(joinpath(SCRATCH,".queue",projdirname,".submitted"),"wb")); #write empty array so file exists
def checkOnJobsForProjects():
"""Submits jobs for folders found in the .queue hidden folder.
Returns False if there were jobs found to submit or running, True otherwise."""
projects = existingProjects();
if (projects == False):
quitWithMsg("No projects found\n");
running_jobs = runningJobs();
available_slots = 0; #how many jobs we can submit at the end of evaluation
zeroJobsLeft = True;
for project in projects:
submitted_jobs = pickle.load(open(joinpath(project,".submitted"),"rb"));
held_jobs = pickle.load(open(joinpath(project,".held"),"rb"));
for job in submitted_jobs:
if (job not in running_jobs):
submitted_jobs.remove(job);
if (len(held_jobs) == 0 and len(submitted_jobs) == 0):
shutil.rmtree(project) #remove finished project
continue;
else:
zeroJobsLeft = False;
available = QUEUE_MAX - (len(running_jobs)*JOB_ARRAY_MAX);
if (available >= 0):
available_slots += (available/JOB_ARRAY_MAX);
while ((available_slots > 0) and (len(held_jobs) > 0)):
job = held_jobs.pop();
jobID = submitJobGetID(joinpath(project,job));
submitted_jobs.append(jobID);
available_slots -= 1;
pickle.dump(submitted_jobs, open(joinpath(project,".submitted"),"wb"));
pickle.dump(held_jobs, open(joinpath(project,".held"),"wb"));
return zeroJobsLeft;
def submitJobGetID(jobFileFullPath):
'''Submits a job file given the full path
and returns the integer job id part of 12345.job.mgr[] etc.'''
ID = check_output(["qsub "+jobFileFullPath], shell=True);
ID = int(ID.split('\n')[0].split('.')[0].split('[')[0]);
return ID;
def daemonIsRunning():
daemonfile = joinpath(SCRATCH,'.queue','.daemon');
if os.path.exists(daemonfile) and os.path.isfile(daemonfile):
return True;
else:
return False;
def markDaemonIsRunning():
daemonfile = joinpath(SCRATCH,'.queue','.daemon');
open(daemonfile, 'a').close();
def markDaemonNotRunning():
daemonfile = joinpath(SCRATCH,'.queue','.daemon');
if os.path.exists(daemonfile) and os.path.isfile(daemonfile):
os.remove(daemonfile);
def getArrayRangeAndTemplateFile(filename):
'''takes a filename of a qsub submit file.
returns the numbers used in this type of line: #PBS -t 1-100
and returns the new file contents replaced with template symbols: #PBS -t XBEGINX-XENDX
and a new line beneath that: PBS_ARRAYID=$[${PBS_ARRAYID}+XMULTIPLIERX*XSETX+XOFFSETX]'''
filetext = None;
with open(filename, 'r') as filehandle:
filetext = filehandle.read();
regex = re.compile(r'''PBS -t[\s]*(\d+)[\s]*-[\s]*(\d+)''');
match = regex.search(filetext);
filetext = regex.sub(r'''PBS -t XBEGINX-XENDX
PBS_ARRAYID=$[${PBS_ARRAYID}+XMULTIPLIERX*XSETX+XOFFSETX]''',filetext);
return (match.group(1), match.group(2), filetext);
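# Example (editor's sketch): an input qsub file containing "#PBS -t 1-100" yields
# ("1", "100", templated_text), where templated_text has that directive rewritten
# as "#PBS -t XBEGINX-XENDX" plus the PBS_ARRAYID offset line; splitArrayJob()
# then substitutes concrete values for each JOB_ARRAY_MAX-sized chunk.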
def submitSelf():
check_output(["qsub -j oe -o /dev/null -l mem=2gb,nodes=1:ppn=1,walltime=03:55:00 "+joinpath(SCRATCH,'.queue','queue.py')], shell=True);
def help():
    '''Print usage information for the command line interface.'''
print(" Usage: ./queue.py [qsub file or resume]")
print("")
print(" qsub file: A qsub array job.")
print(" resume: Resubmits self in job manager mode if necessary.")
print(" <no arguments>: Assumes run in automated job manager mode")
print(" and will kill itself and resubmit itself every 4 hours.")
print("")
print(" Warning: this script possibly produces many files.")
print(" take care to remove job directories no longer")
print(" needed by the end of the qsub file.")
print("")
print(" The working folder is "+joinpath(SCRATCH,".queue")+" which")
print(" contains a semaphore .daemon file, and a project directory")
print(" for each submitted qsub array file using this tool.")
print(" Each project directory contains prepared qsub files for")
print(" each smaller array segment, as well as two hidden files")
print(" .held and .submitted. .held is a list of qsub files yet")
print(" to be submitted. .submitted contains job ids for jobs")
print(" that are running.")
print("")
print(" Workflow: submit a qsub array job. queue.py is automatically")
print(" submitted as a 4 hour job which every minute checks if job")
print(" status has changed. If so then submits a new chunk of jobs")
print(" if there is room. A .daemon file is created at the beginning")
print(" of the job manager 4 hour run, and removed at the end.")
print(" This .daemon file helps prevent multiple job managers running.")
print(" However, if you kill the job manager, simpley ./queue.py resume")
print(" and the job manager will be forced into a running state after")
print(" submission. The project directories in")
print(" "+joinpath(SCRATCH,".queue")+" are each removed after")
print(" all jobs in them are completed.");
print("")
def main():
if (len(sys.argv) > 1): # Users wants to submit a new job
if (sys.argv[1] in ["-h","--h","--help","-?","--?"]):
help()
sys.exit(0);
if (sys.argv[1] == "resume"):
markDaemonIsRunning();
shutil.copy("queue.py", joinpath(SCRATCH,'.queue','queue.py'));
submitSelf();
sys.exit(0);
rangeBegin, rangeEnd, fileTemplated = getArrayRangeAndTemplateFile(sys.argv[1]);
splitArrayJob(sys.argv[1], fileTemplated, rangeBegin, rangeEnd);
shutil.copy("queue.py", joinpath(SCRATCH,'.queue','queue.py'));
checkOnJobsForProjects();
if not daemonIsRunning():
markDaemonIsRunning();
submitSelf();
else: # Not user-mode, but automated startup to maintain jobs
if not daemonIsRunning():
sys.stderr.write("Something is wrong, because we're running a new instance but the daemon flag is gone. Shutting down.\n");
sys.exit(0);
justUnder4Hours = 3600*4 - 60*10; #10 minutes under
timeStart = time.time();
while ((time.time() - timeStart) < justUnder4Hours):
done = checkOnJobsForProjects();
if (done == True):
markDaemonNotRunning();
sys.exit(0);
else:
time.sleep(60); # wait one minute
submitSelf();
sys.exit(0);
if __name__ == "__main__":
main();
|
StarcoderdataPython
|
191070
|
<reponame>schallerdavid/perses
from perses.bias.bias_engine import *
|
StarcoderdataPython
|
11212771
|
import logging
from scout.models.hgnc_map import HgncGene
LOG = logging.getLogger(__name__)
def build_phenotype(phenotype_info):
phenotype_obj = {}
phenotype_obj["mim_number"] = phenotype_info["mim_number"]
phenotype_obj["description"] = phenotype_info["description"]
phenotype_obj["inheritance_models"] = list(phenotype_info.get("inheritance", set()))
phenotype_obj["status"] = phenotype_info["status"]
return phenotype_obj
def build_hgnc_gene(gene_info, build="37"):
"""Build a hgnc_gene object
Args:
gene_info(dict): Gene information
Returns:
gene_obj(dict)
{
'_id': ObjectId(),
# This is the hgnc id, required:
'hgnc_id': int,
# The primary symbol, required
'hgnc_symbol': str,
'ensembl_id': str, # required
'build': str, # '37' or '38', defaults to '37', required
'chromosome': str, # required
'start': int, # required
'end': int, # required
'description': str, # Gene description
'aliases': list(), # Gene symbol aliases, includes hgnc_symbol, str
'entrez_id': int,
'omim_id': int,
'pli_score': float,
'primary_transcripts': list(), # List of refseq transcripts (str)
'ucsc_id': str,
'uniprot_ids': list(), # List of str
'vega_id': str,
'transcripts': list(), # List of hgnc_transcript
# Inheritance information
'inheritance_models': list(), # List of model names
'incomplete_penetrance': bool, # Acquired from HPO
# Phenotype information
'phenotypes': list(), # List of dictionaries with phenotype information
}
"""
try:
hgnc_id = int(gene_info["hgnc_id"])
except KeyError as err:
raise KeyError("Gene has to have a hgnc_id")
except ValueError as err:
raise ValueError("hgnc_id has to be integer")
try:
hgnc_symbol = gene_info["hgnc_symbol"]
except KeyError as err:
raise KeyError("Gene has to have a hgnc_symbol")
try:
ensembl_id = gene_info["ensembl_gene_id"]
except KeyError as err:
raise KeyError("Gene has to have a ensembl_id")
try:
chromosome = gene_info["chromosome"]
except KeyError as err:
raise KeyError("Gene has to have a chromosome")
try:
start = int(gene_info["start"])
except KeyError as err:
raise KeyError("Gene has to have a start position")
except TypeError as err:
raise TypeError("Gene start has to be a integer")
try:
end = int(gene_info["end"])
except KeyError as err:
raise KeyError("Gene has to have a end position")
except TypeError as err:
raise TypeError("Gene end has to be a integer")
gene_obj = HgncGene(
hgnc_id=hgnc_id,
hgnc_symbol=hgnc_symbol,
ensembl_id=ensembl_id,
chrom=chromosome,
start=start,
end=end,
build=build,
)
if gene_info.get("description"):
gene_obj["description"] = gene_info["description"]
# LOG.debug("Adding info %s", gene_info['description'])
if gene_info.get("previous_symbols"):
gene_obj["aliases"] = gene_info["previous_symbols"]
if gene_info.get("entrez_id"):
gene_obj["entrez_id"] = int(gene_info["entrez_id"])
if gene_info.get("omim_id"):
gene_obj["omim_id"] = int(gene_info["omim_id"])
if gene_info.get("pli_score"):
gene_obj["pli_score"] = float(gene_info["pli_score"])
if gene_info.get("ref_seq"):
gene_obj["primary_transcripts"] = gene_info["ref_seq"]
if gene_info.get("ucsc_id"):
gene_obj["ucsc_id"] = gene_info["ucsc_id"]
if gene_info.get("uniprot_ids"):
gene_obj["uniprot_ids"] = gene_info["uniprot_ids"]
if gene_info.get("vega_id"):
gene_obj["vega_id"] = gene_info["vega_id"]
if gene_info.get("incomplete_penetrance"):
gene_obj["incomplete_penetrance"] = True
if gene_info.get("inheritance_models"):
gene_obj["inheritance_models"] = gene_info["inheritance_models"]
phenotype_objs = []
for phenotype_info in gene_info.get("phenotypes", []):
phenotype_objs.append(build_phenotype(phenotype_info))
if phenotype_objs:
gene_obj["phenotypes"] = phenotype_objs
for key in list(gene_obj):
if gene_obj[key] is None:
gene_obj.pop(key)
return gene_obj
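# Minimal call sketch (editor's note; the values below are illustrative only and
# not verified coordinates):
#
#     gene_info = {
#         "hgnc_id": 5,
#         "hgnc_symbol": "A1BG",
#         "ensembl_gene_id": "ENSG00000121410",
#         "chromosome": "19",
#         "start": 58345178,
#         "end": 58353499,
#     }
#     gene_obj = build_hgnc_gene(gene_info, build="37")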
|
StarcoderdataPython
|
1855421
|
<reponame>danielfranca/find_duplicate_content
from pipeline_lib.pipeline import build_structure
from pipeline_lib.pipeline import get_duplicated_content
from pipeline_lib.pipeline import generate_content_hash
from pipeline_lib.pipeline import run_next_action
from pipeline_lib.utils import save_state
import unittest
from unittest import mock
import hashlib
import json
class DuplicateFinderTestCase(unittest.TestCase):
def test_build_structure_missing_parameter(self):
initial_state = {
"last_error": None
}
expected_state = {
"last_error": "Missing required parameter: root_path"
}
self.assertEqual(build_structure(initial_state, []), expected_state)
def test_build_structure(self):
initial_state = {
"next_action": 0,
"last_error": None,
"root_path": ".",
"save_file_path": None
}
expected_state = {
"next_action": 0,
"hash_structure": {
"92af19e633fe03482882098e4ba7619afe09c547": ["filename2"],
"f3390fe2e5546dac3d1968970df1a222a3a39c00": ["filename1", "filename1equal"],
"fee3dd4822f64d1a12c9b6b945fc1c07ecf4981c": ["singlefile"]
},
"root_path": ".",
"save_file_path": None,
"last_error": None
}
class MockFileReader():
once = True
def __init__(self, filename):
self.filename = filename
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def read(self, size):
if self.once:
self.once = False
if self.filename == 'filename1' or self.filename == 'filename1equal':
return b"FOOBAR"
elif self.filename == 'filename2':
return b"FILENAME2"
elif self.filename == 'singlefile':
return b"SINGLEFILE"
return None
def mock_open(filename, mode='r'):
return MockFileReader(filename)
self.maxDiff = None
with mock.patch('pipeline_lib.pipeline.open', mock_open), mock.patch('os.path.isfile', return_value=True), \
mock.patch('glob.iglob', return_value=['filename1', 'filename2', 'singlefile', 'filename1equal']):
self.assertEqual(build_structure(initial_state, []), expected_state)
def test_get_duplicated_content(self):
hash_structure = {
"da39a3ee5e6b4b0d3255bfef95601890afd80709": ["file1", "file2"],
"970093678b182127f60bb51b8af2c94d539eca3a": ["file3"],
"7c4a8d09ca3762af61e59520943dc26494f8941b": ["file4", "file5", "file6"],
"9a79be611e0267e1d943da0737c6c51be67865a0": ["file7"]
}
initial_state = {
"next_action": 0,
"duplicated_content": [],
"last_error": None,
"save_file_path": "",
"hash_structure": hash_structure
}
expected_state = {
"next_action": 0,
"hash_structure": hash_structure,
"duplicated_content": [["file1", "file2"], ["file4", "file5", "file6"]],
"last_error": None,
"save_file_path": ""
}
self.maxDiff = None
self.assertEqual(get_duplicated_content(initial_state, []), expected_state)
def test_get_duplicated_content_empty_hash(self):
# Empty hash structure
initial_state = {
"next_action": 0,
"duplicated_content": [],
"last_error": None,
"save_file_path": "",
"hash_structure": {}
}
expected_state = {
"next_action": 0,
"hash_structure": {},
"duplicated_content": [],
"last_error": None,
"save_file_path": ""
}
self.assertEqual(get_duplicated_content(initial_state, []), expected_state)
def test_generate_hash_small_file(self):
class MockFileReader():
content = b"ILM"
once = True
def read(self, size):
if self.once:
self.once = False
return self.content
else:
return None
f = MockFileReader()
hash = generate_content_hash(f)
self.assertEqual(len(hash), 40)
sha1 = hashlib.sha1()
sha1.update(b"ILM")
self.assertEqual(hash, sha1.hexdigest())
def test_generate_hash_huge_file(self):
FILE_SIZE = 524288
class MockFileReader():
idx = 0
def read(self, size):
if self.idx >= FILE_SIZE:
return None
else:
self.idx += 1
return b"A"
f = MockFileReader()
hash = generate_content_hash(f)
self.assertEqual(len(hash), 40)
sha1 = hashlib.sha1()
sha1.update(b"A" * FILE_SIZE)
self.assertEqual(hash, sha1.hexdigest())
def test_run_no_action(self):
initial_state = {
"next_action": 0
}
expected_state = {
"next_action": 0
}
self.assertEqual(run_next_action(initial_state, []), expected_state)
def test_run_next_action(self):
initial_state = {
"next_action": 0
}
expected_state = {
"next_action": 1
}
called = False
def action(state, actions):
nonlocal called
called = True
return state
actions = [action]
self.assertEqual(run_next_action(initial_state, actions), expected_state)
self.assertTrue(called)
def test_run_no_more_actions(self):
initial_state = {
"next_action": 1
}
expected_state = {
"next_action": 0
}
actions = []
self.assertEqual(run_next_action(initial_state, actions), expected_state)
def test_save_and_restore_new_state(self):
hash_structure = {
"da39a3ee5e6b4b0d3255bfef95601890afd80709": ["file1", "file2"],
"970093678b182127f60bb51b8af2c94d539eca3a": ["file3"],
"7c4a8d09ca3762af61e59520943dc26494f8941b": ["file4", "file5", "file6"],
"9a79be611e0267e1d943da0737c6c51be67865a0": ["file7"]
}
state = {
"next_action": 0,
"root_path": "~/dir",
"save_file_path": "da39a3ee5e6b4b0d3255bfef95601890afd80709/970093678b182127f60bb51b8af2c94d539eca3a",
"hash_structure": hash_structure,
"duplicated_content": [["file1", "file2"], ["file4", "file5", "file6"]],
"last_error": None,
"param1": "val1"
}
saved_data = None
self.maxDiff = None
def mock_json_dump(obj, fp, indent=4):
nonlocal saved_data
saved_data = json.dumps(obj, indent=indent, sort_keys=True)
with mock.patch('json.dump', new=mock_json_dump):
with mock.patch('pipeline_lib.utils.restore_state', return_value={}):
self.assertFalse(save_state(state))
state["save_file_path"] = "./saved_state.json"
self.assertTrue(save_state(state))
self.assertEqual(saved_data, json.dumps({
state["root_path"]: {
"state": {
"next_action": state["next_action"],
"hash_structure": state["hash_structure"],
"duplicated_content": state["duplicated_content"],
"save_file_path": state["save_file_path"],
"last_error": None,
"root_path": "~/dir"
}
}
}, indent=4, sort_keys=True))
def test_save_and_restore_extra_state(self):
hash_structure = {
"da39a3ee5e6b4b0d3255bfef95601890afd80709": ["file1", "file2"],
"970093678b182127f60bb51b8af2c94d539eca3a": ["file3"],
"7c4a8d09ca3762af61e59520943dc26494f8941b": ["file4", "file5", "file6"],
"9a79be611e0267e1d943da0737c6c51be67865a0": ["file7"]
}
state = {
"next_action": 0,
"root_path": "~/dir",
"save_file_path": "",
"hash_structure": hash_structure,
"duplicated_content": [["file1", "file2"], ["file4", "file5", "file6"]],
"last_error": None,
"param1": "val1"
}
saved_data = None
self.maxDiff = None
def mock_json_dump(obj, fp, indent=4):
nonlocal saved_data
saved_data = json.dumps(obj, indent=indent, sort_keys=True)
restored_state = {
"/old_dir": {
"state": {
"next_action": 1,
"root_path": "/old_dir",
"save_file_path": "./",
"hash_structure": hash_structure,
"last_error": "Something is broken"
}
}
}
with mock.patch('json.dump', new=mock_json_dump):
with mock.patch('pipeline_lib.utils.restore_state', return_value=restored_state):
self.assertFalse(save_state(state))
state["save_file_path"] = "./saved_state.json"
self.assertTrue(save_state(state))
restored_state.update({
state["root_path"]: {
"state": {
"next_action": state["next_action"],
"hash_structure": state["hash_structure"],
"duplicated_content": state["duplicated_content"],
"last_error": None,
"save_file_path": state["save_file_path"],
"root_path": "~/dir"
}
}
})
self.assertEqual(saved_data, json.dumps(restored_state, indent=4, sort_keys=True))
if __name__ == "__main__":
unittest.main()
|
StarcoderdataPython
|
27620
|
<filename>Neural Network/NNToyFx/python/activations.py<gh_stars>0
from simulation import *
def relu(ctx: SimulationContext, x: Connection):
relu1 = ctx.max(x, ctx.variable(0))
return relu1
|
StarcoderdataPython
|
4833554
|
# coding=utf-8
# python imports
from __future__ import unicode_literals, print_function
from ardy.core.triggers.driver import Trigger
from ardy.utils.log import logger
class Driver(Trigger):
_DEPLOY_KEYS_WHITELIST = ["Id", "LambdaFunctionArn", "Events", "Filter"]
_LAMBDA_ARN_KEY = "LambdaFunctionArn"
get_awsservice_method = "get_s3_resource"
trigget_type = "s3"
def put(self, *args, **kwargs):
triggers_conf = self.get_triggers()
for trigger_conf in triggers_conf:
logger.info("START to deploy S3 triggers for bucket {}".format(trigger_conf['bucket_name']))
bucket_notification = self.client.BucketNotification(trigger_conf['bucket_name'])
StatementId = "{}-{}".format(self.lambda_conf["FunctionName"], trigger_conf['bucket_name'])
if not self.lambda_exist_policy(self.lambda_conf["FunctionName"], StatementId):
self.awslambda.add_permission(
Action='lambda:InvokeFunction',
FunctionName=self.lambda_function_arn,
Principal='s3.amazonaws.com',
StatementId=StatementId
)
conf = self.get_deploy_conf(trigger_conf)
conf.update({self._LAMBDA_ARN_KEY: self.lambda_function_arn})
bucket_notification.put(
NotificationConfiguration={'LambdaFunctionConfigurations': [conf, ]}
)
|
StarcoderdataPython
|
5044702
|
import json
from typing import Optional
import pulumi_aws as aws
import pulumi_random as random
from infra.config import LOCAL_GRAPL
import pulumi
class JWTSecret(pulumi.ComponentResource):
""" Represents the frontend's JWT secret stored in Secretsmanager. """
def __init__(self, opts: Optional[pulumi.ResourceOptions] = None) -> None:
super().__init__("grapl:JWTSecret", "jwt-secret", None, opts)
self.secret = aws.secretsmanager.Secret(
"edge-jwt-secret",
# TODO: Ultimately we don't want to care about this... it's
# just what the local services expect at the moment. As we
# move more things over to Pulumi, we'll be able to inject
# this automatically into, e.g., Lambda function environments.
name="JWT_SECRET_ID" if LOCAL_GRAPL else None,
description="The JWT secret that Grapl uses to authenticate its API",
opts=pulumi.ResourceOptions(parent=self),
)
self.random_uuid = random.RandomUuid(
"jwt-secret-uuid",
opts=pulumi.ResourceOptions(
parent=self, additional_secret_outputs=["result"]
),
)
# TODO: What do we do about rotation?
self.version = aws.secretsmanager.SecretVersion(
"secret",
secret_id=self.secret.id,
secret_string=self.random_uuid.result,
opts=pulumi.ResourceOptions(parent=self),
)
self.register_outputs({})
def grant_read_permissions_to(self, role: aws.iam.Role) -> None:
"""
Grants permission to the given `Role` to read this secret.
The name of the resource is formed from the Pulumi name of the `Role`.
"""
aws.iam.RolePolicy(
f"{role._name}-reads-secret",
role=role.name,
policy=self.secret.arn.apply(
lambda secret_arn: json.dumps(
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"secretsmanager:GetSecretValue",
"secretsmanager:DescribeSecret",
],
"Resource": secret_arn,
},
],
}
)
),
opts=pulumi.ResourceOptions(parent=self),
)
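

# Editor's hedged usage sketch, not part of the original module: it only
# illustrates how grant_read_permissions_to might be wired up inside a Pulumi
# program. The role name and its assume-role policy below are hypothetical.
def _example_wiring() -> JWTSecret:
    role = aws.iam.Role(
        "example-secret-reader",
        assume_role_policy=json.dumps(
            {
                "Version": "2012-10-17",
                "Statement": [
                    {
                        "Effect": "Allow",
                        "Principal": {"Service": "lambda.amazonaws.com"},
                        "Action": "sts:AssumeRole",
                    }
                ],
            }
        ),
    )
    secret = JWTSecret()
    secret.grant_read_permissions_to(role)
    return secret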
|
StarcoderdataPython
|
9737859
|
import re
from markdown.inlinepatterns import Pattern
from markdown.preprocessors import Preprocessor
from markdown.extensions import Extension
from markdown.util import etree, AtomicString
class HintedWikiLinkExtension(Extension):
def extendMarkdown(self, md, md_globals):
md.inlinePatterns.add("hintedwikilink", HintedWikiLinkPattern(), '>html')
class HintedWikiLinkPattern(Pattern):
Pattern = r'(?<!\\){(?P<main>[^{}|=]+)(\|(?P<hints>([^{}|=])*))?}'
def __init__(self, pattern=Pattern):
super().__init__(pattern)
def getCompiledRegExp(self):
return super().getCompiledRegExp()
def handleMatch(self, m):
ret = etree.Element('a')
ret.text = m['main']
ret.set('href', AtomicString(f'/bestmatch/{m["main"]}/{m["hints"] or ""}'))
return ret
class SizeEnabledImageExtension(Extension):
def extendMarkdown(self, md, md_globals):
md.inlinePatterns.add("sizeenbaleimage", SizeEnabledImagePattern(), '<image_link')
class SizeEnabledImagePattern(Pattern):
Pattern = \
r'!\[(?P<alt>[^\]]*)\]\((?P<src>\S+)(\s\"(?P<title>[^\"]*)\")?(\s=(?P<width>[0-9]+)x(?P<height>[0-9]+)?)?\)'
def __init__(self, pattern=Pattern):
super().__init__(pattern)
def getCompiledRegExp(self):
return super().getCompiledRegExp()
def handleMatch(self, m):
ret = etree.Element('img')
ret.set('src', AtomicString(m['src']))
ret.set('alt', AtomicString(m['alt']))
if m['title']:
ret.set('title', AtomicString(m['title']))
if m['width']:
ret.set('width', m['width'])
if m['height']:
ret.set('height', m['height'])
return ret
class AutoTitleExtension(Extension):
def extendMarkdown(self, md, md_globals):
md.preprocessors.add("autotitle", AutoTitleProcessor(md), ">meta")
class AutoTitleProcessor(Preprocessor):
def run(self, lines):
pre_title_len = 0
for l in lines:
if l.startswith('# '):
title = l[len('# '):]
break
else:
pre_title_len += 1
else:
return lines
'''
if not pre_title_len:
return ['titles: ' + title, ''] + lines
return ['titles: ' + title] + lines[:pre_title_len] + lines[pre_title_len:]
'''
self.markdown.Meta.setdefault('titles', []).append(title)
return lines
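

# Editor's hedged usage sketch, not part of the original module: it shows how
# these extensions could be registered with python-markdown 2.x (whose
# inlinePatterns.add / markdown.util.etree API this module targets). The input
# text is made up; "meta" is loaded first because AutoTitleExtension anchors
# its preprocessor after the meta preprocessor and reads md.Meta.
def _example_render():
    import markdown
    md = markdown.Markdown(extensions=[
        "meta",
        HintedWikiLinkExtension(),
        SizeEnabledImageExtension(),
        AutoTitleExtension(),
    ])
    html = md.convert('# My Page\n\nSee {Main Page|wiki} and ![logo](logo.png "Logo" =64x48)')
    return html, md.Meta.get("titles", [])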
|
StarcoderdataPython
|
1990299
|
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(42)
X = 2*np.random.rand(100, 1)
y = 4+3*X+np.random.randn(100, 1)
X_b = np.c_[np.ones((100, 1)), X]
X_new = np.array(([0], [2]))
X_new_b = np.c_[np.ones((2, 1)), X_new]
t0, t1 = 5, 50 # learning schedule hyperparameter
def learning_schedule(t):
return t0 / (t + t1)
theta_path_sgd = []
def plot_sgd(theta, theta_path=None):
m = len(X_b)
n_epochs = 50
plt.plot(X, y, "b.")
plt.axis([0, 2, 0, 15])
plt.xlabel("$x_1$", fontsize=18)
plt.ylabel("$y$", rotation=0, fontsize=18)
for epoch in range(n_epochs):
for i in range(m):
if epoch == 0 and i < 20:
y_predict = X_new_b.dot(theta)
style = "b-" if i > 0 else "r--"
plt.plot(X_new, y_predict, style)
random_index = np.random.randint(m)
xi = X_b[random_index:random_index+1]
yi = y[random_index:random_index+1]
gradients = 2 * xi.T.dot(xi.dot(theta) - yi)
eta = learning_schedule(epoch * m + i)
theta = theta - eta * gradients
theta_path_sgd.append(theta)
np.random.seed(42)
theta = np.random.randn(2, 1) # random initialization
plot_sgd(theta, theta_path=theta_path_sgd)
plt.show()
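
# Editor's hedged check, not part of the original script: the closed-form
# normal-equation solution for comparison with the last theta reached by SGD.
theta_closed_form = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y)
print("closed-form theta:", theta_closed_form.ravel())
print("final SGD theta:  ", theta_path_sgd[-1].ravel())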
|
StarcoderdataPython
|
6490083
|
#
# Create different sounds with a buzzer
#
# Credit for star wars song: https://gist.github.com/mandyRae/459ae289cdfcf6d98a6b
import time
import numpy as np
import RPi.GPIO as GPIO
sound_pin = 12
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(sound_pin, GPIO.OUT)
GPIO.output(sound_pin, 0)
tone1 = GPIO.PWM(sound_pin, 100)
tone1.start(50)
c = [32, 65, 131, 262, 523]
db= [34, 69, 139, 277, 554]
d = [36, 73, 147, 294, 587]
eb= [37, 78, 156, 311, 622]
e = [41, 82, 165, 330, 659]
f = [43, 87, 175, 349, 698]
gb= [46, 92, 185, 370, 740]
g = [49, 98, 196, 392, 784]
ab= [52, 104, 208, 415, 831]
a = [55, 110, 220, 440, 880]
bb= [58, 117, 233, 466, 932]
b = [61, 123, 246, 492, 984]
cmajor = [c, d, e, f, g, a, b]
aminor = [a, b, c, d, e, f, g]
o=2
starwars_notes = [c[1+o], g[1+o], f[1+o], e[1+o], d[1+o], c[2+o], g[1+o], f[1+o], e[1+o], d[1+o], c[2+o], g[1+o],
f[1+o], e[1+o], f[1+o], d[1+o]]
starwars_beats = [4,4,1,1,1,4,4,1,1,1,4,4,1,1,1,4]
def playScale(scale, pause):
'''
scale: scale name to be played
    pause: pause between each note played
This function plays the given scale in every available octave
I used this to test what was audible on the buzzer
'''
for i in range(0, 5):
for note in scale:
tone1.ChangeFrequency(note[i])
time.sleep(pause)
tone1.stop()
def playSong(songnotes, songbeats, tempo):
'''
songnotes: list of the melodies notes
songbeats: list of melodies beat times
    tempo: speed multiplier applied to each beat duration rather than a
    traditional bpm value; each note sleeps for beat * tempo seconds, so a
    larger tempo value plays the song more slowly. Adjust this by ear.
This function plays the melody, simply by iterating through the list.
'''
tone1.ChangeDutyCycle(50)
for i in range(0, len(songnotes)):
tone1.ChangeFrequency(songnotes[i])
time.sleep(songbeats[i]*tempo)
tone1.ChangeDutyCycle(0)
def play_tune(duration, freq):
tone1.ChangeDutyCycle(50)
tone1.ChangeFrequency(freq)
time.sleep(float(duration) / 1000)
tone1.ChangeDutyCycle(0)
def play_change(duration, low_freq, high_freq, start_pause=0, stop_pause=0, inverse=False):
""" duration in ms
"""
tone1.ChangeDutyCycle(50)
num_steps = 20
step_freq = (high_freq - low_freq) / num_steps
step_duration = duration / (num_steps * 1000)
start_pause = step_duration if start_pause == 0.0 else start_pause
stop_pause = step_duration if stop_pause == 0.0 else stop_pause
for i in range(num_steps):
f = low_freq + i * step_freq if not inverse else high_freq - i * step_freq
tone1.ChangeFrequency(f)
if(i == 0):
time.sleep(start_pause)
elif(i == num_steps-1):
time.sleep(stop_pause)
else:
time.sleep(step_duration)
tone1.ChangeDutyCycle(0)
def main():
#for i in range(1, 10000, 100):
# buzzPwm(20, i)
pause = 0.05
# play_change(300, 3000, 14000, inverse=False)
# play_change(300, 3000, 14000, inverse=False)
# play_change(300, 3000, 14000, inverse=False)
# time.sleep(1)
# play_change(300, 3000, 14000, inverse=True)
# play_change(300, 3000, 14000, inverse=True)
# play_change(300, 3000, 14000, inverse=True)
# time.sleep(1)
# play_change(300, 3000, 14000, inverse=True)
# play_change(300, 3000, 14000, inverse=False)
# play_change(300, 3000, 14000, inverse=True)
# play_change(300, 3000, 14000, inverse=False)
# time.sleep(1)
#play_change(100, 4000, 15000, inverse=True, stop_pause=0.0)
#play_change(100, 4000, 15000, stop_pause=0.0)
#time.sleep(0.1)
play_tune(100, 14000)
time.sleep(pause)
play_tune(100, 14000)
time.sleep(pause)
play_tune(100, 8000)
time.sleep(pause)
#play_tune(100, 14000)
play_change(300, 3000, 14000, inverse=True)
time.sleep(2)
play_tune(100, 8000)
time.sleep(pause)
play_tune(100, 8000)
time.sleep(pause)
play_tune(100, 14000)
time.sleep(pause)
#play_tune(100, 14000)
play_change(300, 3000, 14000, inverse=False)
time.sleep(2)
# time.sleep(pause)
# buzzPwm(500, 400)
# time.sleep(pause)
# buzzPwm(500, 800)
# time.sleep(pause)
# buzzPwm(500, 400)
#playSong(starwars_notes, starwars_beats, 0.2)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
5061696
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License, Version 2.0 for more details.
import os
import io
import logging
import re
import sys
import traceback
from pyang.context import Context
from pyang.repository import FileRepository
import pyang
ERR_TYPE_GRAMMA = "Gramma"
ERR_TYPE_LOST = "Lost"
ERR_INFO = {
    # yang file itself has grammatical errors; use pyang check to get detailed error information
    ERR_TYPE_GRAMMA: "***parse***: [yang file {0} has grammatical errors] Error 1",
    # yang module declared in the white list, but the corresponding yang file does not exist
    ERR_TYPE_LOST: "***check***: [yang file {0} doesn't exist] Error 1",
}
ERR_REPORT_STAT = {
ERR_TYPE_GRAMMA: [],
ERR_TYPE_LOST: []
}
start_error = r"#########yang_Error_start##########"
end_error = r"#######yang_Error_end##########"
def init_ctx(path='./yang'):
repos = FileRepository(path)
ctx = Context(repos)
return ctx
def parse_yang_module(yang_file_path, ctx):
r = re.compile(r"^(.*?)(\@(\d{4}-\d{2}-\d{2}))?\.(yang|yin)$")
r1 = re.compile(r"^(.*?)\.(yang|yin)$")
    fd = None
    try:
if sys.version_info[0] == 2:
fd = io.open(yang_file_path, "r", encoding="UTF-8")
else:
fd = open(yang_file_path, "r", encoding="UTF-8")
text = fd.read()
m1 = r1.search(yang_file_path)
if m1 is None:
return ctx
m = r.search(yang_file_path)
ctx.yin_module_map = {}
if m is not None:
(name, _dummy, rev, format) = m.groups()
name = os.path.basename(name)
ctx.add_module(yang_file_path, text, format, name, rev)
else:
ctx.add_module(yang_file_path, text)
except Exception:
yang_error_write("can not open the file %s" % yang_file_path)
finally:
if fd is not None:
fd.close()
logging.info('parse yang module %s success!', yang_file_path)
return ctx
def parse_yang_modules(yang_directory, ctx):
if os.path.isfile(yang_directory):
parse_yang_module(yang_directory, ctx)
else:
for yang_file in sorted(os.listdir(yang_directory)):
yang_file_path = os.path.join(yang_directory, yang_file)
if os.path.isdir(yang_file_path):
parse_yang_modules(yang_file_path, ctx)
parse_yang_module(yang_file_path, ctx)
ctx.validate()
return ctx
def get_moudle_name_from_annotations(annotations):
for annotation_obj in annotations:
pos = annotation_obj.str.find('module')
if pos != -1:
module_name = annotation_obj.str[pos + len('module'):].strip(' ').strip('\n')
return module_name
def yang_module_has_error(ctx,module_name=None):
for p,t,a in ctx.errors:
if module_name is None:
if is_error(t):
return True
else:
if (is_error(t) and check_error_if_need(p, module_name)):
return True
return False
def check_error_if_need(pos, module_name):
yang_error_name = ''
try:
yang_error_name = pos.top.arg
except:
pass
if yang_error_name == module_name:
return True
return False
def is_error(error_type):
error_level = pyang.error.err_level(error_type)
if pyang.error.is_error(error_level):
return True
else:
return False
def print_yang_errors(ctx, module_name=None):
for p, t, a in ctx.errors:
error_str = None
error_level = pyang.error.err_level(t)
if pyang.error.is_error(error_level):
error_level_str = "Error"
else:
error_level_str = "Warning"
if module_name is not None:
if is_error(t) and check_error_if_need(p, module_name):
error_str = ''.join([p.label(), " ", error_level_str,': ',pyang.error.err_to_str(t, a)])
else:
error_str = ''.join([p.label(), " ", error_level_str,': ',pyang.error.err_to_str(t, a)])
if error_str is not None:
print(error_str)
yang_error_write(error_str)
return
def yang_error_write(error_string):
exc_info = sys.exc_info()
exc_value = exc_info[1]
if exc_value and traceback.format_exc().strip() != 'None':
logging.error(traceback.format_exc())
logging.error("%s",error_string)
def log_serious_errors(err_type, **kwargs):
if err_type not in ERR_INFO:
logging.info("receive unsupported error type")
return
if kwargs["file_name"] in ERR_REPORT_STAT[err_type]:
return
ERR_REPORT_STAT[err_type].append(kwargs["file_name"])
logging.error(ERR_INFO[err_type].format(kwargs["file_name"]))
if __name__ == '__main__':
pass
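

# Editor's hedged usage sketch, not part of the original module: a minimal
# parse-and-report run built only from the functions above. The ./yang
# directory is a placeholder path.
def _example_check(yang_dir="./yang"):
    ctx = init_ctx(yang_dir)
    parse_yang_modules(yang_dir, ctx)
    if yang_module_has_error(ctx):
        print_yang_errors(ctx)
        return False
    return True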
|
StarcoderdataPython
|
1718077
|
<reponame>SkygearIO/py-skygear<gh_stars>1-10
# -*- coding: utf-8 -*-
#
# -- General configuration -----------------------------------------------------
import sphinx_rtd_theme
import os
import sys
from pkg_resources import find_distributions
from email import message_from_string
directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "..")
sys.path.append(directory)
from skygear import __version__ as version, __title__ as name
sys.path.insert(0, '../')
source_suffix = '.rst'
master_doc = 'index'
html_use_index = True
project = name
copyright = u'Copyright 2017 Oursky Ltd'
extensions = ['sphinx.ext.autodoc', 'sphinxcontrib.napoleon']
exclude_patterns =['_build', 'test']
autodoc_member_order = 'bysource'
autoclass_content = 'both'
napoleon_google_docstring = True
autodoc_inherit_docstrings = False
# -- Options for HTML output ---------------------------------------------------
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# -- HTML theme options for `dotted` style -------------------------------------
html_theme_options = {
}
|
StarcoderdataPython
|
8042855
|
from setuptools import setup, find_packages
setup(
name='django-jalali',
version='4.0.0',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
description=("Jalali Date support for Django model and admin interface"),
url='http://github.com/slashmili/django-jalali',
download_url='http://github.com/slashmili/django-jalali/tarball/master',
author='<NAME>',
author_email='<EMAIL>',
keywords="django jalali",
license='Python Software Foundation License',
platforms='any',
install_requires=["jdatetime>=2.0", "django>=2.0"],
long_description=open('README.rst', encoding="utf-8").read(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: CPython",
],
)
|
StarcoderdataPython
|
11229671
|
import ast
import json
import os
import random
from django.core import serializers
from django.core.cache import cache
from blog.models import Post
from commons.emoji import EMOJI
from constants import KEYS, CAPS
from survey import survey
def add_emoji(**args):
return [{
"thinking_emoji": ''.join(EMOJI["thinking"])
}]
def no_op(**args):
return {}
# def do_small_talk(**args):
# user_profile = get_profile(args[KEYS.FB_ID], args[KEYS.PAGE_ACCESS_TOKEN])
# print("user_profile: ", user_profile)
# restaurant_info = requests.post(os.environ.get('API_URL') + '/restaurant/get_details', json={
# "page_id": args['page_id']
# })
# return [{
# "first_name": user_profile.username,
# "happy_emoji": random.choice(EMOJI["happy"]),
# }]
def get_button_payload(item):
items_as_dict = json.loads(serializers.serialize('json', [item]))
item_as_dict = items_as_dict[0].get('fields')
item_as_dict.update({'item_id': item.id })
return "CART_UPDATE_ITEM/{}".format(item_as_dict)
def attach_emoji(**args):
return [{
"emoji": EMOJI["thinking"][0]
}]
def add_value_count(**args):
cache_key = '{}_{}'.format(args.get(KEYS.FB_ID), CAPS.CART_UPDATE_ITEM)
items = [ast.literal_eval(item) for item in ast.literal_eval(cache.get(cache_key))]
return [{
"item": items[-1].get('item_name'),
"item_count": str(len(items)),
"item_price": '{0:.2f}'.format(sum([float(item.get('item_price')) for item in items])),
"reaction": ''.join([random.choice(EMOJI["happy"]), random.choice(EMOJI["success"]), random.choice(EMOJI["heart"])])
}]
# def interact_with_username(**args):
# print("IAMHERENOW")
#
# user_profile = get_profile(args['fb_id'], args['page_access_token'])
# print("user_profile: ", user_profile)
#
# return [{
# "first_name": user_profile.username,
# "reaction": ''.join([random.choice(EMOJI["happy"]), random.choice(EMOJI["heart"])])
# }]
def add_sadness(**args):
return [{
'reactions': ''.join(EMOJI["sad"]),
'order_id': args['order_id']
}]
def add_joy(**args):
return [{
'reactions': '{}{}'.format(''.join(EMOJI["success"]), ''.join(EMOJI['heart'])),
'order_id': args['order_id']
}]
def add_happy(**args):
return [{
'reactions': '{}{}'.format(''.join(EMOJI["success"]), ''.join(EMOJI['happy'])),
'order_id': args['order_id']
}]
def attach_survey_text(**args):
survey_instance = args[KEYS.SURVEY_INSTANCE]
if survey_instance is not None:
return [{
"habit": survey_instance.get("post")
}]
else:
return [{}]
def create_collection(data, post_id):
collection = []
for doc in data:
for key, val in doc.items():
collection.append({
"title": key,
"payload": str(post_id) + "_" + str(val),
"content_type": "text"
})
return collection
def attach_survey_qr(**args):
survey_instance = args[KEYS.SURVEY_INSTANCE]
if survey_instance is not None:
next_post_id = survey_instance.get("post_id")
data = [{"Yes": "1"}, {"No": "0"}]
collection = create_collection(data, next_post_id)
return collection
else:
return {}
def attach_text(**args):
pass
node_function_map = {
'POST_SURVEY': {
"txt": attach_text,
"qr": no_op,
"at": no_op
},
'SURVEY': {
"txt": attach_survey_text,
"qr": attach_survey_qr,
"at": no_op
},
'GETTING_STARTED': {
"txt": no_op,
"qr": no_op,
"at": no_op
},
"NO_OP": {
"txt": no_op,
"qr": no_op,
"at": no_op
}
}
def function_data_map(node_name):
    """
    Returns the txt/qr/at handler mapping for the given chat node, falling
    back to no-op handlers when the node is unknown.
    :param node_name: <str>
    :return: <dict>
    """
if node_name not in node_function_map:
return {
"txt": no_op,
"qr": no_op,
"at": no_op
}
return node_function_map[node_name]
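

# Editor's hedged sketch, not part of the original module: how a caller might
# resolve and invoke the per-node handlers returned by function_data_map. The
# kwargs are whatever context the chosen handlers expect (hypothetical here).
def _example_dispatch(node_name, **kwargs):
    handlers = function_data_map(node_name)
    return {
        "txt": handlers["txt"](**kwargs),
        "qr": handlers["qr"](**kwargs),
        "at": handlers["at"](**kwargs),
    }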
|
StarcoderdataPython
|
8177181
|
import os
from setuptools import setup, find_packages
from setuptools.extension import Extension
from Cython.Build import cythonize
import Cython.Compiler.Options
Cython.Compiler.Options.annotate = True
extensions = cythonize([
Extension(
"skroute.heuristics.brute._base_brute_force._base_brute_force",
sources=["skroute/heuristics/brute/_base_brute_force/_base_brute_force.pyx"]),
Extension(
"skroute.metaheuristics.genetics._base_genetics._utils_genetic",
sources=["skroute/metaheuristics/genetics/_base_genetics/_utils_genetic.pyx"]),
Extension(
"skroute._utils._utils",
sources=["skroute/_utils/_utils.pyx"]),
Extension(
"skroute.metaheuristics.genetics._base_genetics._base_genetic",
sources=["skroute/metaheuristics/genetics/_base_genetics/_base_genetic.pyx"]),
Extension(
"skroute.metaheuristics.simulated_annealing._base_simulated_annealing._base_simulated_annealing",
sources=["skroute/metaheuristics/simulated_annealing/_base_simulated_annealing/_base_simulated_annealing.pyx"]),
Extension(
"skroute.metaheuristics.simulated_annealing._base_simulated_annealing._utils_sa",
sources=["skroute/metaheuristics/simulated_annealing/_base_simulated_annealing/_utils_sa.pyx"]),
])
HERE = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(HERE, "README.md")) as fid:
README = fid.read()
with open(os.path.join(HERE, "requirements.txt")) as f:
REQUIREMENTS = f.read().splitlines()
setup(
name="scikit-route",
version="1.0.0a2",
description="Compute Routes easy and fast",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/arubiales/scikit-route",
author="<NAME>",
author_email="<EMAIL>",
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9"
],
packages=find_packages(),
setup_requires=[
'setuptools>=18.0',
"cython"
],
install_requires=REQUIREMENTS,
ext_modules = extensions,
include_package_data=True,
package_data={"": ["datasets/_data/_latitude_longitude/*.tsp", "datasets/*.txt", "datasets/_data/_money_cost/*.pkl"]},
)
|
StarcoderdataPython
|
9664108
|
from es_connection import es_connect
import pandas as pd
import iso8601
import datetime as dt
from hashlib import sha1
from es_request_total_json import json_query
from es_request_protocol_json import json_protocol_query
from es_request_external_json import json_external_query
from es_request_2tags_3aggs_json import json_internal_to_internal
from es_request_2tags_3aggs_json import json_internal_to_external
from es_aggregate_structure_json import agg_firewall_external_dest
#private API --> three Level --> split server_name to domain name
def domain_name(name):
    parts = name.split(".")
    if len(parts) >= 2:
        return parts[-2] + "." + parts[-1]
    return name
#private API --> three Level --> flatten nest aggs response from elastic
def elasticAggsToDataframe(elasticResult, aggStructure, record=None, fulllist=None):
"""
https://stackoverflow.com/questions/29280480/pandas-dataframe-from-a-nested-dictionary-elasticsearch-result
recursively flatten elastic result into dataframe
takes elastic response as input along with aggregate structure
aggStructure defines what to expect from response
"""
    record = record if record is not None else {}
    fulllist = fulllist if fulllist is not None else []
    for agg in aggStructure:
buckets = elasticResult[agg['key']]['buckets']
for bucket in buckets:
record = record.copy()
record[agg['key']] = bucket['key']
if 'aggs' in agg:
elasticAggsToDataframe(bucket,agg['aggs'],record,fulllist)
else:
for var in agg['variables']:
record[var['dfName']] = bucket[var['elasticName']]
fulllist.append(record)
df = pd.DataFrame(fulllist)
return df
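
# Editor's illustrative constant, not from the original module: aggStructure is
# a nested spec shaped like this, mirroring the aggregation names used in the
# elasticsearch query body. The key and variable names below are hypothetical;
# the real spec comes from agg_firewall_external_dest().
_EXAMPLE_AGG_STRUCTURE = [
    {"key": "dest_ip", "aggs": [
        {"key": "dest_port", "aggs": [
            {"key": "server_name", "variables": [
                {"elasticName": "doc_count", "dfName": "hits"},
            ]},
        ]},
    ]},
]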
# private API --> one Level --> convert es response to dataframe, only one level agg
def get_pandas_dataframe(aggsList):
"""
take aggregate response and convert to pandas dataframe
timeseries dataframe with
timeseries index,
protocol used and
total traffic for the protocol
"""
df = pd.DataFrame(aggsList)
df.columns = ['Total', 'unix_time', 'iso_time']
df["iso_time"] = df.iso_time.apply(iso8601.parse_date)
df["Date"] = df["iso_time"].dt.date
df["Time"] = df["iso_time"].dt.time
df.index = df["Time"]
df = df[['Date','Total'] ]
return df
# public API --> one Level --> connect to es, get one level agg and save to csv
def es_traffic_pandas_csv(ind, start, end):
"""
figure out which JSON file to use
Connect to elasticsearch
search elastic using JSON File and body
tease out the aggs and to be converted to dataframe
save to csv file
"""
if ind == "*":
q = json_query(ind, start , end)
elif ind == "external":
q = json_external_query(ind, start, end)
else:
q = json_protocol_query(ind, start , end)
es = es_connect()
es_response = es.search(index="*", body=q)
agg_list = es_response['aggregations']['total_traffic']['buckets']
df = get_pandas_dataframe(agg_list)
return df
#public API -->three Level -->
def es_nested_agg_pandas(start, end, fn_for_json_query, traffic_type):
"""connect to elastic
import aggregate structure and json structure for stackoverflow function
    the returned dataframe from that function is cleaned up
SHA1 hash of destination ip is used as index
"""
es = es_connect()
aggStructure = agg_firewall_external_dest()
q = fn_for_json_query(start, end)
es_response = es.search(index="*", body=q)
agg_list = es_response['aggregations']
df = elasticAggsToDataframe(agg_list, aggStructure)
if traffic_type == "iSrc_eDst":
df['URL'] = [domain_name(val) for val in df['server_name'] ]
elif traffic_type == "iSrc_iDst":
df['URL'] = [val.lower().replace(".kphc.org","") for val in df['server_name']]
df['sha1'] = [sha1(str(val).encode('utf-8')).hexdigest() for val in df['dest_ip']]
df.index = df['sha1']
df = df[['dest_ip', 'dest_port', 'URL']]
df.drop_duplicates(keep = 'first', inplace=True)
return df
#testing only
def test_main():
start = "now-1d"
end = "now"
ind = "brp_dhcp"
q = json_external_query(ind, start, end)
es = es_connect()
es_response = es.search(index="*", body=q)
agg_list = es_response['aggregations']['total_traffic']['buckets']
df = get_pandas_dataframe(agg_list)
df = es_traffic_pandas_csv("*", start, end)
import pprint
pprint.pprint(es_response)
def main():
start = "now-2d"
end = "now"
df1 = es_nested_agg_pandas(start, end, json_internal_to_external, "iSrc_eDst")
# df2 = es_nested_agg_pandas(start, end, json_internal_to_internal, "iSrc_iDst")
print(df1)
# print(df1)
if __name__ == "__main__":
main()
# test_main()
|
StarcoderdataPython
|
8067380
|
import os
from defcon import Font
def combineGlyphs(path1, path2, destPath):
"""
Combines the glyphs of two UFOs and saves result to a new ufo.
This only combines glyphs, so the first UFO path should be the one
that you want all the metadata from.
"""
ufo1 = Font(path1)
ufo2 = Font(path2)
added_glyphs = []
for glyph in ufo2:
if glyph.name not in ufo1:
            print("Inserting %s" % glyph.name)
ufo1.insertGlyph(glyph)
added_glyphs.append(glyph.name)
ufo1.save(destPath)
path1 = "Path to UFO"
path2 = "Path to UFO"
destPath = "Path to final UFO"
combineGlyphs(path1, path2, destPath)
|
StarcoderdataPython
|
11352740
|
<reponame>JHP4911/Quantum-Computing-UK
from qiskit import QuantumRegister, ClassicalRegister
from qiskit import QuantumCircuit, execute,IBMQ
from qiskit.tools.monitor import job_monitor
from qiskit.circuit.library import QFT
import numpy as np
pi = np.pi
IBMQ.enable_account('ENTER API KEY HERE')
provider = IBMQ.get_provider(hub='ibm-q')
backend = provider.get_backend('ibmq_qasm_simulator')
q = QuantumRegister(5,'q')
c = ClassicalRegister(5,'c')
circuit = QuantumCircuit(q,c)
circuit.x(q[4])
circuit.x(q[2])
circuit.x(q[0])
circuit += QFT(num_qubits=5, approximation_degree=0, do_swaps=True, inverse=False, insert_barriers=False, name='qft')
circuit.measure(q,c)
circuit.draw(output='mpl', filename='qft1.png')
print(circuit)
job = execute(circuit, backend, shots=1000)
job_monitor(job)
counts = job.result().get_counts()
print("\n QFT Output")
print("-------------")
print(counts)
input()
q = QuantumRegister(5,'q')
c = ClassicalRegister(5,'c')
circuit = QuantumCircuit(q,c)
circuit.x(q[4])
circuit.x(q[2])
circuit.x(q[0])
circuit += QFT(num_qubits=5, approximation_degree=0, do_swaps=True, inverse=False, insert_barriers=True, name='qft')
circuit += QFT(num_qubits=5, approximation_degree=0, do_swaps=True, inverse=True, insert_barriers=True, name='qft')
circuit.measure(q,c)
circuit.draw(output='mpl',filename='qft2.png')
print(circuit)
job = execute(circuit, backend, shots=1000)
job_monitor(job)
counts = job.result().get_counts()
print("\n QFT with inverse QFT Output")
print("------------------------------")
print(counts)
input()
|
StarcoderdataPython
|
254893
|
import numpy as np
def filter_directionality(prq, list_veh_obj, nr_best_veh, routing_engine, selected_veh):
"""This function filters the nr_best_veh from list_veh_obj according to the difference in directionality between
request origin and destination and planned vehicle route. Vehicles with final position equal to current position
are treated like driving perpendicular to the request direction.
:param prq: plan request in question
:param list_veh_obj: list of simulation vehicle objects in question
:param nr_best_veh: number of vehicles that should be returned
:param routing_engine: required to get coordinates from network positions
:param selected_veh: set of vehicles that were already selected by another heuristic
:return: list of simulation vehicle objects
"""
if nr_best_veh >= len(list_veh_obj):
return list_veh_obj
prq_o_coord = np.array(routing_engine.return_position_coordinates(prq.o_pos))
prq_d_coord = np.array(routing_engine.return_position_coordinates(prq.d_pos))
tmp_diff = prq_d_coord - prq_o_coord
prq_norm_vec = tmp_diff / np.sqrt(np.dot(tmp_diff, tmp_diff))
tmp_list_veh_val = []
for veh_obj in list_veh_obj:
# vehicle already selected by other heuristic
if veh_obj in selected_veh:
continue
if veh_obj.assigned_route:
veh_coord = np.array(routing_engine.return_position_coordinates(veh_obj.pos))
last_position = veh_obj.assigned_route[-1].destination_pos
veh_final_coord = np.array(routing_engine.return_position_coordinates(last_position))
if not np.array_equal(veh_coord, veh_final_coord):
tmp_diff = veh_final_coord - veh_coord
veh_norm_vec = tmp_diff / np.sqrt(np.dot(tmp_diff, tmp_diff))
else:
veh_norm_vec = np.array([0, 0])
else:
veh_norm_vec = np.array([0, 0])
val = np.dot(prq_norm_vec, veh_norm_vec)
tmp_list_veh_val.append((val, veh_obj.vid, veh_obj))
# sort and return
tmp_list_veh_val.sort(reverse=True)
return_list = [x[2] for x in tmp_list_veh_val[:nr_best_veh]]
return return_list
def filter_least_number_tasks(list_veh_obj, nr_best_veh, selected_veh):
"""This function filters the vehicles according to the number of assigned tasks.
:param list_veh_obj: list of simulation vehicle objects in question (sorted by distance from destination)
:param nr_best_veh: number of vehicles that should be returned
:param selected_veh: set of vehicles that were already selected by another heuristic
:return: list of simulation vehicle objects
"""
if len(list_veh_obj) <= nr_best_veh:
return list_veh_obj
return_list = []
remaining_dict = {}
for veh_obj in list_veh_obj:
if veh_obj in selected_veh:
continue
if not veh_obj.assigned_route:
return_list.append(veh_obj)
else:
nr_vrl = len(veh_obj.assigned_route)
try:
remaining_dict[nr_vrl].append(veh_obj)
except KeyError:
remaining_dict[nr_vrl] = [veh_obj]
if len(return_list) == nr_best_veh:
break
if len(return_list) < nr_best_veh:
break_outer_loop = False
for nr_vrl in sorted(remaining_dict.keys()):
for veh_obj in remaining_dict[nr_vrl]:
return_list.append(veh_obj)
if len(return_list) == nr_best_veh:
break_outer_loop = True
break
if break_outer_loop:
break
return return_list
|
StarcoderdataPython
|
6669856
|
import utils.processing as proc
import numpy as np
import utils.template_match_target as tmt
from keras.models import load_model
import pandas as pd
import h5py
########################
def get_metrics(data, craters, dim, model, beta=1):
"""Function that prints pertinent metrics at the end of each epoch.
Parameters
----------
data : hdf5
Input images.
craters : hdf5
Pandas arrays of human-counted crater data.
dim : int
Dimension of input images (assumes square).
model : keras model object
Keras model
beta : int, optional
Beta value when calculating F-beta score. Defaults to 1.
"""
X, Y = data[0], data[1]
# Get csvs of human-counted craters
csvs = []
minrad, maxrad, cutrad, n_csvs = 3, 50, 0.8, len(X)
diam = 'Diameter (pix)'
for i in range(n_csvs):
csv = craters[proc.get_id(i,4)]
# remove small/large/half craters
csv = csv[(csv[diam] < 2 * maxrad) & (csv[diam] > 2 * minrad)]
csv = csv[(csv['x'] + cutrad * csv[diam] / 2 <= dim)]
csv = csv[(csv['y'] + cutrad * csv[diam] / 2 <= dim)]
csv = csv[(csv['x'] - cutrad * csv[diam] / 2 > 0)]
csv = csv[(csv['y'] - cutrad * csv[diam] / 2 > 0)]
if len(csv) < 3: # Exclude csvs with few craters
csvs.append([-1])
else:
csv_coords = np.asarray((csv['x'], csv['y'], csv[diam] / 2)).T
csvs.append(csv_coords)
# Calculate custom metrics
print("")
print("*********Custom Loss*********")
recall, precision, fscore = [], [], []
frac_new, frac_new2, maxrad = [], [], []
err_lo, err_la, err_r = [], [], []
frac_duplicates = []
preds=[]
for i in range(0, n_csvs, 5):
d= X[i:i + 5].copy()
pred = model.predict(d)
for j in range(len(pred)):
preds.append(pred[j])
print("predict over")
for i in range(n_csvs):
if len(csvs[i]) < 3:
continue
(N_match, N_csv, N_detect, maxr,
elo, ela, er, frac_dupes) = tmt.template_match_t2c(preds[i], csvs[i],
rmv_oor_csvs=0)
if N_match > 0:
p = float(N_match) / float(N_match + (N_detect - N_match))
r = float(N_match) / float(N_csv)
f = (1 + beta ** 2) * (r * p) / (p * beta ** 2 + r)
diff = float(N_detect - N_match)
fn = diff / (float(N_detect))
fn2 = diff / (float(N_csv) + diff)
recall.append(r)
precision.append(p)
fscore.append(f)
frac_new.append(fn)
frac_new2.append(fn2)
maxrad.append(maxr)
err_lo.append(elo)
err_la.append(ela)
err_r.append(er)
frac_duplicates.append(frac_dupes)
else:
print("skipping iteration %d,N_csv=%d,N_detect=%d,N_match=%d" %
(i, N_csv, N_detect, N_match))
xe=model.evaluate(X, Y)
print(xe)
#print("binary XE score loss= %f,acc=%f" % xe[0])
if len(recall) > 3:
print("mean and std of N_match/N_csv (recall) = %f, %f" %
(np.mean(recall), np.std(recall)))
print("""mean and std of N_match/(N_match + (N_detect-N_match)).
(precision) = %f, %f""" % (np.mean(precision), np.std(precision)))
print("mean and std of F_%d score = %f, %f" %
(beta, np.mean(fscore), np.std(fscore)))
print("""mean and std of (N_detect - N_match)/N_detect (fraction
of craters that are new) = %f, %f""" %
(np.mean(frac_new), np.std(frac_new)))
print("""mean and std of (N_detect - N_match)/N_csv (fraction of
"craters that are new, 2) = %f, %f""" %
(np.mean(frac_new2), np.std(frac_new2)))
print("median and IQR fractional longitude diff = %f, 25:%f, 75:%f" %
(np.median(err_lo), np.percentile(err_lo, 25),
np.percentile(err_lo, 75)))
print("median and IQR fractional latitude diff = %f, 25:%f, 75:%f" %
(np.median(err_la), np.percentile(err_la, 25),
np.percentile(err_la, 75)))
print("median and IQR fractional radius diff = %f, 25:%f, 75:%f" %
(np.median(err_r), np.percentile(err_r, 25),
np.percentile(err_r, 75)))
print("mean and std of frac_duplicates: %f, %f" %
(np.mean(frac_duplicates), np.std(frac_duplicates)))
print("""mean and std of maximum detected pixel radius in an image =
%f, %f""" % (np.mean(maxrad), np.std(maxrad)))
print("""absolute maximum detected pixel radius over all images =
%f""" % np.max(maxrad))
print("")
if __name__ == '__main__':
n_dev=3000
n_test =3000
dir = 'catalogues/'
dev = h5py.File('%sdev_images.hdf5' % dir, 'r')
test = h5py.File('%stest_images.hdf5' % dir, 'r')
Data = {
'dev': [dev['input_images'][:n_dev].astype('float32'),
dev['target_masks'][:n_dev].astype('float32')],
'test': [test['input_images'][:n_test].astype('float32'),
test['target_masks'][:n_test].astype('float32')]
}
proc.preprocess(Data)
Craters = {
'dev': pd.HDFStore('%sdev_craters.hdf5' % dir, 'r'),
'test': pd.HDFStore('%stest_craters.hdf5' % dir, 'r')
}
model = load_model('models/model_deepResUnet_30000_1.h5')
get_metrics(Data['dev'], Craters['dev'], 256,model)
|
StarcoderdataPython
|
9621487
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 10 11:05:53 2021
@author: alef
"""
# String unicode
u = u'<NAME>'
print(u, type(u))
# Convertendo para str
s = u.encode('latin1')
print(s, '=>', type(s))
t = s.decode('latin1')
print(t, ' => ', type(t))
|
StarcoderdataPython
|