id stringlengths 2-8 | text stringlengths 16-264k | dataset_id stringclasses 1 value |
---|---|---|
3364575
|
<filename>app/main/views.py
from flask import render_template, request, redirect, url_for
from . import main
from ..request import get_sources, get_articles
@main.route('/')
def index():
    '''
    View function for the root page. Returns the index page and its data.
    '''
business_news = get_sources('business')
entertainment_news = get_sources('entertainment')
technology_news = get_sources('technology')
health_news = get_sources('health')
title = 'News - Top News And Stories For You'
    return render_template('index.html', title=title, business_news=business_news,
                           entertainment_news=entertainment_news,
                           technology_news=technology_news, health_news=health_news)
@main.route('/sources/<id>')
def news(id):
    '''
    View function that displays articles from the news source with the given id.
    '''
    articles = get_articles(id)
    return render_template('articles.html', articles=articles)
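# For context, a minimal sketch of the `get_sources`/`get_articles` helpers
# imported from `..request` above. The real app/request.py is not shown, so
# everything here is an assumption: the News API endpoints, the response
# shape, and the `api_key` placeholder are hypothetical.
import requests

api_key = 'YOUR-NEWS-API-KEY'  # hypothetical; normally loaded from app config

def get_sources(category):
    # Fetch news sources for a category (assumed News API /sources endpoint).
    url = 'https://newsapi.org/v2/sources?category={}&apiKey={}'.format(category, api_key)
    return requests.get(url).json().get('sources', [])

def get_articles(source_id):
    # Fetch articles published by the given source id.
    url = 'https://newsapi.org/v2/everything?sources={}&apiKey={}'.format(source_id, api_key)
    return requests.get(url).json().get('articles', [])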
|
StarcoderdataPython
|
8158905
|
import __future__
import os
import sys
import types
def compile_source_file(source_file, flags):
with open(source_file, "r") as f:
source = f.read()
return compile(source, os.path.basename(source_file), 'exec', flags)
if __name__ == "__main__":
# Compile and run test_pathlib.py as if
# "from __future__ import unicode_literals" had been added at the top.
flags = __future__.CO_FUTURE_UNICODE_LITERALS
code = compile_source_file("test_pathlib.py", flags)
mod = types.ModuleType('test_pathlib')
mod.__file__ = "test_pathlib.py"
sys.modules[mod.__name__] = mod
eval(code, mod.__dict__)
mod.main()
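# A quick self-contained check of the same mechanism (assuming Python 2,
# where CO_FUTURE_UNICODE_LITERALS changes the type of string literals):
check = compile("s = 'abc'", '<inline>', 'exec', __future__.CO_FUTURE_UNICODE_LITERALS)
ns = {}
eval(check, ns)
assert isinstance(ns['s'], unicode)  # would be plain str without the flag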
|
StarcoderdataPython
|
6542971
|
from __future__ import unicode_literals
from django.db import models
from Global_Equipment_library.models import LABEL_SIZES
FONT_TYPES = [('Impact','Impact'),
('Palatino','Palatino'),
('Tahoma','Tahoma'),
('Century Gothic', 'Century Gothic'),
('Lucida Sans Unicode', 'Lucida Sans Unicode'),
('Arial Black', 'Arial Black'),
('Times New Roman', 'Times New Roman'),
('Arial Narrow', 'Arial Narrow'),
('Verdana', 'Verdana'),
('Copperplate', 'Copperplate'),
('Lucida Console', 'Lucida Console'),
('Gill Sans', 'Gill Sans'),
('Trebuchet MS', 'Trebuchet MS'),
('Courier', 'Courier'),
('Arial', 'Arial')]
class LabelTemplate(models.Model):
"""
used to keep track of Label types,
like Avery 5160, 5167 or the Australian labels and custom labels
"""
name = models.CharField(max_length=100, unique=True)
size = models.CharField(max_length=10, choices=LABEL_SIZES)
def __unicode__(self):
return self.name
class LabelTextBox(models.Model):
"""
a generic text box that allows settings to be overridden
"""
text = models.CharField(max_length=100, blank=True)
font = models.CharField(max_length=100, choices=FONT_TYPES, default='Arial')
bold = models.BooleanField(default=False)
italic = models.BooleanField(default=False)
underline = models.BooleanField(default=False)
fontSize = models.IntegerField(default=12)
xPoint = models.IntegerField(default=0)
yPoint = models.IntegerField(default=0)
def __unicode__(self):
return self.text
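# Hedged usage sketch for the models above (assumes LABEL_SIZES contains a
# '5160' choice; run inside a configured Django project shell):
#
#   template = LabelTemplate.objects.create(name='Avery 5160', size='5160')
#   box = LabelTextBox.objects.create(text='Sample', font='Arial', fontSize=10,
#                                     bold=True, xPoint=5, yPoint=12)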
|
StarcoderdataPython
|
9651997
|
# ==============================================================================
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
from time import time
# TODO: Let's switch to import logging in the future instead of print. [ebarsoum]
class ProgressPrinter:
'''
Accumulates training time statistics (loss and metric)
and pretty prints them as training progresses.
It provides the number of samples, average loss and average metric
since the last print or since the start of accumulation.
'''
def __init__(self, freq=None, first=0, tag=''):
        '''
        Constructor. The optional ``freq`` parameter determines how often
        printing occurs. A value of 0 means a geometric
        schedule (1,2,4,...), a value > 0 means an arithmetic schedule
        (freq, 2*freq, 3*freq,...), and a value of None means no per-minibatch log.
        '''
from sys import maxsize
if freq is None:
freq = maxsize
self.loss_since_start = 0
self.metric_since_start = 0
self.samples_since_start = 0
self.loss_since_last = 0
self.metric_since_last = 0
self.samples_since_last = 0
self.updates = 0
self.epochs = 0
self.freq = freq
self.first = first
self.tag = '' if not tag else "[{}] ".format(tag)
self.epoch_start_time = 0
if freq==0:
print(' average since average since examples')
print(' loss last metric last ')
print(' ------------------------------------------------------')
def avg_loss_since_start(self):
'''
Returns: the average loss since the start of accumulation
'''
return self.loss_since_start/self.samples_since_start
def avg_metric_since_start(self):
'''
Returns: the average metric since the start of accumulation
'''
return self.metric_since_start/self.samples_since_start
def avg_loss_since_last(self):
'''
Returns: the average loss since the last print
'''
return self.loss_since_last/self.samples_since_last
def avg_metric_since_last(self):
'''
Returns: the average metric since the last print
'''
return self.metric_since_last/self.samples_since_last
def reset_start(self):
'''
Resets the 'start' accumulators
Returns: tuple of (average loss since start, average metric since start, samples since start)
'''
ret = self.avg_loss_since_start(), self.avg_metric_since_start(), self.samples_since_start
self.loss_since_start = 0
self.metric_since_start = 0
self.samples_since_start = 0
return ret
def reset_last(self):
'''
Resets the 'last' accumulators
Returns: tuple of (average loss since last, average metric since last, samples since last)
'''
ret = self.avg_loss_since_last(), self.avg_metric_since_last(), self.samples_since_last
self.loss_since_last = 0
self.metric_since_last = 0
self.samples_since_last = 0
return ret
def epoch_summary(self, with_metric=False):
        '''
        If on an arithmetic schedule, prints an epoch summary using the 'start' accumulators.
        If on a geometric schedule, does nothing.
        Args:
            with_metric (`bool`): if `False` it only prints the loss, otherwise it prints both the loss and the metric
        '''
self.epochs += 1
if self.freq > 0:
self.updates = 0
avg_loss, avg_metric, samples = self.reset_start()
epoch_end_time = time()
time_delta = epoch_end_time - self.epoch_start_time
speed = 0
if (time_delta > 0):
speed = samples / time_delta
self.epoch_start_time = epoch_end_time
if with_metric:
print("Finished Epoch [{}]: {}loss = {:0.6f} * {}, metric = {:0.1f}% * {} {:0.3f}s ({:5.1f} samples per second)".format(self.epochs, self.tag, avg_loss, samples, avg_metric*100.0, samples, time_delta, speed))
else:
print("Finished Epoch [{}]: {}loss = {:0.6f} * {} {:0.3f}s ({:5.1f} samples per second)".format(self.epochs, self.tag, avg_loss, samples, time_delta, speed))
return avg_loss, avg_metric, samples # BUGBUG: for freq=0, we don't return anything here
def update(self, loss, minibatch_size, metric=None):
'''
Updates the accumulators using the loss, the minibatch_size and the optional metric.
Args:
loss (`float`): the value with which to update the loss accumulators
minibatch_size (`int`): the value with which to update the samples accumulator
metric (`float` or `None`): if `None` do not update the metric
accumulators, otherwise update with the given value
'''
self.updates += 1
self.samples_since_start += minibatch_size
self.samples_since_last += minibatch_size
self.loss_since_start += loss * minibatch_size
self.loss_since_last += loss * minibatch_size
if metric is not None:
self.metric_since_start += metric * minibatch_size
self.metric_since_last += metric * minibatch_size
if self.epoch_start_time == 0:
self.epoch_start_time = time()
if self.freq == 0 and (self.updates+1) & self.updates == 0:
avg_loss, avg_metric, samples = self.reset_last()
if metric is not None:
print(' {:8.3g} {:8.3g} {:8.3g} {:8.3g} {:10d}'.format(
self.avg_loss_since_start(), avg_loss,
self.avg_metric_since_start(), avg_metric,
self.samples_since_start))
else:
print(' {:8.3g} {:8.3g} {:8s} {:8s} {:10d}'.format(
self.avg_loss_since_start(), avg_loss,
'', '', self.samples_since_start))
elif self.freq > 0 and (self.updates % self.freq == 0 or self.updates <= self.first):
avg_loss, avg_metric, samples = self.reset_last()
if self.updates <= self.first: # printing individual MBs
first_mb = self.updates
else:
first_mb = max(self.updates - self.freq + 1, self.first+1)
if metric is not None:
print(' Minibatch[{:4d}-{:4d}]: loss = {:0.6f} * {:d}, metric = {:0.1f}% * {:d}'.format(
first_mb, self.updates, avg_loss, samples, avg_metric*100.0, samples))
else:
print(' Minibatch[{:4d}-{:4d}]: loss = {:0.6f} * {:d}'.format(
first_mb, self.updates, avg_loss, samples))
def update_with_trainer(self, trainer, with_metric=False):
'''
Updates the accumulators using the loss, the minibatch_size and optionally the metric
using the information from the ``trainer``.
Args:
trainer (:class:`cntk.trainer.Trainer`): trainer from which information is gathered
with_metric (`bool`): whether to update the metric accumulators
'''
self.update(trainer.previous_minibatch_loss_average,trainer.previous_minibatch_sample_count, trainer.previous_minibatch_evaluation_average if with_metric else None)
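# Hedged usage sketch for ProgressPrinter on an arithmetic schedule of 2,
# with made-up loss/metric values (illustration only, not part of the module):
#
#   pp = ProgressPrinter(freq=2, tag='Training')
#   for mb in range(6):
#       pp.update(loss=1.0 / (mb + 1), minibatch_size=32, metric=0.5)
#   pp.epoch_summary(with_metric=True)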
# print the total number of parameters to log
def log_number_of_parameters(model, trace_level=0):
parameters = model.parameters
from functools import reduce
from operator import add, mul
total_parameters = reduce(add, [reduce(mul, p1.shape) for p1 in parameters], 0)
# BUGBUG: If model has uninferred dimensions, we should catch that and fail here
print("Training {} parameters in {} parameter tensors.".format(total_parameters, len(parameters)))
if trace_level > 0:
print()
for p in parameters:
print ("\t{}".format(p.shape))
|
StarcoderdataPython
|
372179
|
# Copyright 2017 SrMouraSilva
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from application.dao.component_dao import ComponentDao
from application.controller.controller import Controller
class ComponentDataController(Controller):
"""
Maybe the pedalboard data it's not sufficient for management in a custom
:class:`Components`. For example, in a possible visual pedalboard manager is
necessary persist the effects positions.
:class:`ComponentDataController` offers a way to salve and restore data.
For your component, create a unique identifier (key) and use it for manage all
your Component data. For example::
>>> key = 'raspberry-p0'
>>> controller = application.controller(ComponentDataController)
>>> # If key not exists, returns {}
>>> controller[key]
{}
>>> controller[key] = {'pedalboard': 0}
>>> controller[key]
{'pedalboard': 0}
>>> # The new data overrides old data
>>> controller[key] = {'pedalboards': []}
>>> controller[key]
{'pedalboards': []}
>>> # Changes in returned object will not change the persisted data
>>> data = controller[key]
>>> data['component'] = 'Raspberry P0'
>>> data
{'pedalboards': [], 'component': 'Raspberry P0'}
>>> controller[key]
{'pedalboards': []}
>>> # Remove all content for 'raspberry-p0'
>>> del controller[key]
>>> controller[key]
{}
    .. warning::
        :class:`ComponentDataController` does not have access control,
        which means that any Component that eventually
        uses *ComponentDataController* may interfere with the content
        (accessing, changing or removing it).
    .. warning::
        It's an easy way to save simple data. Please don't save binaries or large content.
    """
dao = None
__data = None
def configure(self):
self.dao = self.app.dao(ComponentDao)
self.__data = self.dao.load()
def __getitem__(self, key):
"""
Returns the data for the informed `key`::
>>> component_data_controller[key]
{'any key': 'any data'}
If no data was saved for this key, an empty dictionary is returned::
>>> component_data_controller['a crazy key']
{}
:param string key:
:return dict: Content if exist for key informed, else empty `dict`
"""
try:
return dict(self.__data[key])
except KeyError:
return {}
def __setitem__(self, key, value):
"""
Change the `key` identifier content to `value`::
>>> component_data_controller[key] = {'any key': 'any data'}
:param string key: Identifier
:param value: Data will be persisted
"""
self.__data[key] = value
self.dao.save(self.__data)
def __delitem__(self, key):
"""
Remove all `item` identifier content::
>>> del component_data_controller[key]
:param string key: Identifier
"""
del self.__data[key]
self.dao.save(self.__data)
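# Hedged sketch of the dao dependency used above: a ComponentDao-like class
# that round-trips the data dict to JSON on disk. This is an illustration
# under assumptions; the real application.dao.component_dao.ComponentDao
# may persist data differently.
import json

class JsonComponentDao(object):
    def __init__(self, path='component_data.json'):
        self.path = path

    def load(self):
        # Return the persisted dict, or an empty one on first run / bad file.
        try:
            with open(self.path) as fp:
                return json.load(fp)
        except (IOError, ValueError):
            return {}

    def save(self, data):
        with open(self.path, 'w') as fp:
            json.dump(data, fp)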
|
StarcoderdataPython
|
6591883
|
from typing import Callable, Any
from core.exceptions import StateException
from core.message import Message
from core.node import Node
from core.topic import Topic
def mitm(node1: Node, node2: Node, topic: Topic, node_name: str, interface_func: Callable[[Message, Topic], Any]):
"""
Publish subscribe agnostic Create a mit, connection for the system
:param node1: First node
:param node2: Second node
:param topic: Topic to communicate on
:param node_name: Name to give the system to target
:param interface_func: Function to execute on all the data as the mitm
:return:
"""
(_, _, bus_info1) = node1.server.getBusStats(node_name)
(_, _, bus_info2) = node2.server.getBusStats(node_name)
publisher_info = None
publisher_number = 0
subscriber_info = None
    # Need to create a node to sit in the middle
# Checking if there's a publisher. If both are publishers then raise exception
if any(topic.name in s for s in bus_info1[0]):
publisher_info = (node1, bus_info1)
publisher_number = 1
if any(topic.name in s for s in bus_info2[0]):
if publisher_info:
raise StateException("Both nodes are publishers on the requested topic")
publisher_info = (node2, bus_info2)
publisher_number = 2
if any(topic.name in s for s in bus_info1[1]):
if publisher_number == 1:
raise StateException("Node is both publisher and subscriber. This is wrong somehow")
subscriber_info = (node1, bus_info1)
if any(topic.name in s for s in bus_info2[1]):
if subscriber_info:
raise StateException("Both nodes are subscribers on the requested topic")
subscriber_info = (node2, bus_info2)
if not publisher_info or not subscriber_info:
raise StateException("Cannot MITM this configuration", )
# Get the topic info
(_, _, publisher_topicinfo) = publisher_info[0].server.getBusInfo(node_name)
for topicinfo in publisher_topicinfo:
if topicinfo[4] == topic.name:
pub_topic_wrapper = Topic(topic_name=topicinfo[4], message=topic.message,
protocol=topicinfo[3])
            if topicinfo[2] != 'o':
raise StateException("Publisher isn't publishing this topic somehow, something has gone wrong")
(_, _, subscriber_topicinfo) = subscriber_info[0].server.getBusInfo(node_name)
subscriber_info[0].server.publisherUpdate(node_name, topic.name)
publisher_info[0].server.requestTopic(node_name, topic.name, topic.protocol)
print("TODO")
|
StarcoderdataPython
|
1818407
|
import logging
from autogluon.core.constants import REGRESSION
from autogluon.core.utils.try_import import try_import_rapids_cuml
from .knn_model import KNNModel
logger = logging.getLogger(__name__)
# FIXME: Benchmarks show that CPU KNN can be trained in ~3 seconds with 0.2 second validation time for CoverType on automlbenchmark (m5.2xlarge)
# This is over 100 seconds validation time on CPU with rapids installed, investigate how it was so fast on CPU.
# "2021_02_26/autogluon_hpo_auto.openml_s_271.1h8c.aws.20210228T000327/aws.openml_s_271.1h8c.covertype.0.autogluon_hpo_auto/"
# Noticed: different input data types, investigate locally with openml dataset version and dtypes.
# TODO: Given this is so fast, consider doing rapid feature pruning
class KNNRapidsModel(KNNModel):
"""
RAPIDS KNearestNeighbors model : https://rapids.ai/start.html
    NOTE: This code is experimental; it is recommended not to use this unless you are a developer.
This was tested on rapids-21.06 via:
conda create -n rapids-21.06 -c rapidsai -c nvidia -c conda-forge rapids=21.06 python=3.8 cudatoolkit=11.2
conda activate rapids-21.06
pip install --pre autogluon.tabular[all]
"""
def _get_model_type(self):
try_import_rapids_cuml()
from cuml.neighbors import KNeighborsClassifier, KNeighborsRegressor
if self.problem_type == REGRESSION:
return KNeighborsRegressor
else:
return KNeighborsClassifier
def _set_default_params(self):
default_params = {'weights': 'uniform'}
for param, val in default_params.items():
self._set_default_param_value(param, val)
@classmethod
def _get_default_ag_args_ensemble(cls, **kwargs) -> dict:
default_ag_args_ensemble = super()._get_default_ag_args_ensemble(**kwargs)
extra_ag_args_ensemble = {'use_child_oof': False}
default_ag_args_ensemble.update(extra_ag_args_ensemble)
return default_ag_args_ensemble
def _more_tags(self):
return {'valid_oof': False}
|
StarcoderdataPython
|
11322715
|
def uniqueValues(aDict):
'''
aDict: a dictionary
returns: a sorted list of keys that map to unique aDict values, empty list if none
'''
    # Your code here
    l = []
    values = aDict.values()
    for key in aDict:
        count = 0
        value = aDict[key]
        for v in values:
            if value == v:
                count += 1
        if count == 1:
            l.append(key)
    return sorted(l)
aDict2 = {1: 1, 3: 2, 6: 0, 7: 0, 8: 4, 10: 0}
aDict1 = {1: 1, 2: 1, 3: 1}
aDict = {0: 4, 9: 4, 3: 4, 5: 2, 1: 1}
print(uniqueValues(aDict))
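# A more direct alternative, sketched for comparison (same result as
# uniqueValues above, but counts each value in a single pass):
def uniqueValues_fast(aDict):
    counts = {}
    for v in aDict.values():
        counts[v] = counts.get(v, 0) + 1
    return sorted(k for k, v in aDict.items() if counts[v] == 1)

print(uniqueValues_fast(aDict))  # [1, 5] for the aDict defined above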
|
StarcoderdataPython
|
3516605
|
#! /usr/bin/env python
import ConfigParser
import os
import sys
from distutils import dir_util
from redshell.constants import *
# Set default values
REDSHELL_DIR = os.path.expanduser(DEFAULT_REDSHELL_DIR)
REDSHELL_HISTORY = os.path.expanduser(DEFAULT_REDSHELL_HISTORY)
REDSHELL_REPORTS = os.path.expanduser(DEFAULT_REDSHELL_REPORTS)
REDSHELL_NMAP = DEFAULT_REDSHELL_NMAP
REDSHELL_SUDO = DEFAULT_REDSHELL_SUDO
REDSHELL_UA = DEFAULT_REDSHELL_UA
REDSHELL_PROXY = DEFAULT_REDSHELL_PROXY
REDSHELL_COLLABORATOR = DEFAULT_REDSHELL_COLLABORATOR
REDSHELL_DIG = 'dig'  # assumed fallback: no DEFAULT_REDSHELL_DIG constant is imported above
REDSHELL_DIR_SUBDIRS = ['log','output','lists','tools']
def ConfigSectionMap(section):
dict1 = {}
options = Config.options(section)
for option in options:
try:
dict1[option] = Config.get(section, option)
if dict1[option] == -1:
print("skip: %s" % option)
except:
print("exception on %s!" % option)
dict1[option] = None
return dict1
try:
    Config = ConfigParser.ConfigParser()
    # Prefer the config file in the user's home directory; otherwise fall
    # back to one in the current working directory.
    home_conf = os.path.expanduser('~' + os.sep + 'redshell.conf')
    if os.path.isfile(home_conf):
        Config.read(home_conf)
    elif os.path.isfile("redshell.conf"):
        Config.read("redshell.conf")
except:
    pass
## CONFIG: Paths / redshell : Redshell Home directory
# Check if a value is provided in config file
try:
redshell_dir_conf = os.path.expanduser(ConfigSectionMap("Paths")['redshell'])
# if the path does not exist, create it and use it
if not os.path.isdir(redshell_dir_conf):
try:
os.makedirs(redshell_dir_conf)
except:
sys.stdout.write("Unable to create home directory: %s. Using default path: %s" % (redshell_dir_conf,DEFAULT_REDSHELL_DIR))
else:
REDSHELL_DIR = redshell_dir_conf
else:
REDSHELL_DIR = redshell_dir_conf
# Create subfolders in the homedir. if they exist just skip
try:
for subdir in REDSHELL_DIR_SUBDIRS:
os.makedirs(REDSHELL_DIR+os.sep+subdir)
except:
pass
# If no entry exists in config file or is not valid just ignore it
except:
pass
try:
# Set up the directory for templates
REDSHELL_TEMPLATES = REDSHELL_DIR+os.sep+'templates'
# If it is not present in the homedir, copy it there
if not os.path.isdir(REDSHELL_TEMPLATES):
dir_util.copy_tree('templates',REDSHELL_TEMPLATES)
except:
pass
## CONFIG: Paths / history : Redshell history file
# Check if a value is provided in config file
try:
redshell_history_conf = os.path.expanduser(ConfigSectionMap("Paths")['history'])
if os.path.isfile(redshell_history_conf):
REDSHELL_HISTORY = redshell_history_conf
# if no entry exists in config file or is not valid just ignore it
except:
pass
## CONFIG: Paths / report : Redshell directory for reports
# Check if a value is provided in config file
try:
redshell_reports_conf = os.path.expanduser(ConfigSectionMap("Paths")['reports'])
# If the path does not exist, create it and use it
if not os.path.isdir(redshell_reports_conf):
try:
os.makedirs(redshell_reports_conf)
except:
sys.stdout.write("Unable to create report directory: %s. Using default path: %s" % (redshell_reports_conf,DEFAULT_REDSHELL_REPORTS))
else:
REDSHELL_REPORTS = redshell_reports_conf
else:
REDSHELL_REPORTS = redshell_reports_conf
# If no entry exists in config file or is not valid just ignore it
except:
pass
## CONFIG: Bin / nmap : binary for nmap
# Check if a value is provided in config file
try:
redshell_nmap_conf = os.path.expanduser(ConfigSectionMap("Bin")['nmap'])
if os.path.isfile(redshell_nmap_conf) and os.access(redshell_nmap_conf, os.X_OK):
REDSHELL_NMAP = redshell_nmap_conf
# if no entry exists in config file or is not valid just ignore it
except:
pass
## CONFIG: Bin / sudo : binary for sudo
# Check if a value is provided in config file
try:
redshell_sudo_conf = os.path.expanduser(ConfigSectionMap("Bin")['sudo'])
if os.path.isfile(redshell_sudo_conf) and os.access(redshell_sudo_conf, os.X_OK):
REDSHELL_SUDO = redshell_sudo_conf
# if no entry exists in config file or is not valid just ignore it
except:
pass
## CONFIG: Bin / dig : binary for dig
# Check if a value is provided in config file
try:
redshell_dig_conf = os.path.expanduser(ConfigSectionMap("Bin")['dig'])
if os.path.isfile(redshell_dig_conf) and os.access(redshell_dig_conf, os.X_OK):
REDSHELL_DIG = redshell_dig_conf
# if no entry exists in config file or is not valid just ignore it
except:
pass
## CONFIG: Web / proxy : web proxy
# Check if a value is provided in config file
try:
redshell_proxy_conf = ConfigSectionMap("Web")['proxy']
if redshell_proxy_conf:
REDSHELL_PROXY = redshell_proxy_conf
# if no entry exists in config file or is not valid just ignore it
except:
pass
## CONFIG: Web / UA : web UA
# Check if a value is provided in config file
try:
redshell_ua_conf = ConfigSectionMap("Web")['user-agent']
if redshell_ua_conf:
REDSHELL_UA = redshell_ua_conf
# if no entry exists in config file or is not valid just ignore it
except:
pass
## CONFIG: Web / collaborator-host : your collaborator host (we use batata)
# Check if a value is provided in config file
try:
collaborator_host = ConfigSectionMap("Web")['collaborator-host']
if collaborator_host:
REDSHELL_COLLABORATOR = collaborator_host
# if no entry exists in config file or is not valid just ignore it
except:
pass
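# For reference, a hypothetical redshell.conf covering the sections read above.
# Section and option names match the ConfigSectionMap calls; the values are
# examples only, not defaults taken from redshell.constants:
#
# [Paths]
# redshell = ~/.redshell
# history = ~/.redshell/history
# reports = ~/.redshell/reports
#
# [Bin]
# nmap = /usr/bin/nmap
# sudo = /usr/bin/sudo
# dig = /usr/bin/dig
#
# [Web]
# proxy = http://127.0.0.1:8080
# user-agent = Mozilla/5.0 (compatible; redshell)
# collaborator-host = your-collaborator-host.example.net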
|
StarcoderdataPython
|
1787841
|
<reponame>vsocrates/medtype
from helper import *
from joblib import Parallel, delayed
import requests, re
######################### Dump Ground Truth in required format for evaluation
def groundtruth_dump(doc_list):
base_dir = './results/{}'.format(args.data); make_dir(base_dir)
fname = './results/{}/ground_{}.txt'.format(args.data, args.split)
if not checkFile(fname):
writer = csv.writer(open(fname, 'w'), delimiter='\t')
for doc in doc_list:
for men in doc['mentions']:
writer.writerow([doc['_id'], men['start_offset'], men['end_offset'], men['link_id'], 1.0, 'O'])
######################### SCISPACY
def scispacy(doc_list):
import scispacy, spacy
from scispacy.abbreviation import AbbreviationDetector
from scispacy.umls_linking import UmlsEntityLinker
def process_data(pid, doc_list):
nlp = spacy.load("en_core_sci_sm")
        nlp.add_pipe(AbbreviationDetector(nlp)) # Add abbreviation detection module
linker = UmlsEntityLinker(resolve_abbreviations=True); nlp.add_pipe(linker) # Add Entity linking module
data = []
for i, doc in enumerate(doc_list):
sci_res = nlp(doc['text'])
res_list = {}
for ent in sci_res.ents:
start, end = ent.start_char, ent.end_char
res_list[(start, end)] = ent._.umls_ents
doc['result'] = res_list
data.append(doc)
if i % 10 == 0:
print('Completed [{}] {}, {}'.format(pid, i, time.strftime("%d_%m_%Y") + '_' + time.strftime("%H:%M:%S")))
return data
num_procs = args.workers
chunks = partition(doc_list, num_procs)
data_list = mergeList(Parallel(n_jobs = num_procs)(delayed(process_data)(i, chunk) for i, chunk in enumerate(chunks)))
base_dir = './results/{}'.format(args.data); make_dir(base_dir)
dump_pickle(data_list, '{}/{}_{}.pkl'.format(base_dir, args.model, args.split))
######################### QUICK-UMLS
def quickumls(doc_list):
from quickumls import QuickUMLS
    assert args.quickumls_path is not None, "Provide path where QuickUMLS is installed"
def process_data(pid, doc_list):
data = []
matcher = QuickUMLS(args.quickumls_path, 'score', threshold=0.6)
for i, doc in enumerate(doc_list):
qumls_res = matcher.match(doc['text'])
res_list = ddict(list)
for men in qumls_res:
for cand in men:
start, end = cand['start'], cand['end']
umls_cui = cand['cui']
score = cand['similarity']
res_list[(start, end)].append((umls_cui, score))
doc['result'] = dict(res_list)
data.append(doc)
if i % 10 == 0:
print('Completed [{}] {}, {}'.format(pid, i, time.strftime("%d_%m_%Y") + '_' + time.strftime("%H:%M:%S")))
return data
num_procs = 1
chunks = partition(doc_list, num_procs)
data_list = mergeList(Parallel(n_jobs = num_procs)(delayed(process_data)(i, chunk) for i, chunk in enumerate(chunks)))
base_dir = './results/{}'.format(args.data); make_dir(base_dir)
dump_pickle(data_list, '{}/{}_{}.pkl'.format(base_dir, args.model, args.split))
######################### CTAKES
def ctakes(doc_list):
import ctakes_parser as cparser
ctakes_url = 'http://{}:{}/ctakes'.format(args.ctakes_host, args.ctakes_port)
def process_data(pid, doc_list):
data = []
for i, doc in enumerate(doc_list):
text = clean_text(doc['text'])
res = requests.get(ctakes_url, params={"text": text});
res_list = ddict(set)
            if res.status_code == 200:
                annotations = res.json()  # separate name so the accumulator list `data` is not clobbered
                for dat in annotations:
if 'ontologyConceptArr' in dat['annotation'] and not dat['annotation']['ontologyConceptArr'] is None:
start = dat['annotation']['begin']
end = dat['annotation']['end']
for cands in dat['annotation']['ontologyConceptArr']:
umls_cui = cands['annotation']['cui']
score = cands['annotation']['score'] # 0.0 as lookup
res_list[(start, end)].add((umls_cui, round(score, 3)))
res_list = {k: list(v) for k, v in res_list.items()}
doc['result'] = dict(res_list)
data.append(doc)
if i % 10 == 0:
print('Completed [{}] {}, {}'.format(pid, i, time.strftime("%d_%m_%Y") + '_' + time.strftime("%H:%M:%S")))
return data
num_procs = args.workers
chunks = partition(doc_list, num_procs)
data_list = mergeList(Parallel(n_jobs = num_procs)(delayed(process_data)(i, chunk) for i, chunk in enumerate(chunks)))
base_dir = './results/{}'.format(args.data); make_dir(base_dir)
dump_pickle(data_list, '{}/{}_{}.pkl'.format(base_dir, args.model, args.split))
######################### METAMAP
def metamap(doc_list):
from pymetamap import MetaMap
mm = MetaMap.get_instance(args.metamap_path)
def process_data(pid, doc_list):
data = []
for i, doc in enumerate(doc_list):
try:
text = clean_text(doc['text'])
concepts, error = mm.extract_concepts([text],[doc['_id']])
assert len(text) == len(doc['text']), 'Text length does not match after pre-processing'
res_list = ddict(list)
for k, concept in enumerate(concepts):
if concept[1] !='MMI': continue
pos_info = [list(map(int, x.split('/'))) for x in concept.pos_info.replace(',', ';').replace('[', '').replace(']', '').split(';')]
men_cnt = [len(x.split(',')) for x in concept.pos_info.split(';')]
men_sing = replace(concept.trigger, '"').split('"')[1::2][1::2]
mentions = mergeList([[men]*men_cnt[j] for j, men in enumerate(men_sing)])
for j, (start, offset) in enumerate(pos_info):
end = start + offset
res_list[(start, end)].append((concept.cui, concept.score))
doc['result'] = dict(res_list)
data.append(doc)
if i % 10 == 0:
print('Completed [{}] {}, {}'.format(pid, i, time.strftime("%d_%m_%Y") + '_' + time.strftime("%H:%M:%S")))
except Exception as e:
print('\nException Cause: {}'.format(e.args[0]))
continue
print('All work done {}!!'.format(pid))
return data
num_procs = args.workers
chunks = partition(doc_list, num_procs)
data_list = mergeList(Parallel(n_jobs= num_procs)(delayed(process_data)(i,chunk) for i, chunk in enumerate(chunks)))
base_dir = './results/{}'.format(args.data); make_dir(base_dir)
dump_pickle(data_list, '{}/{}_{}.pkl'.format(base_dir, args.model, args.split))
######################### METAMAP LITE
def metamaplite(doc_list):
from pymetamap import MetaMapLite
mm = MetaMapLite.get_instance(args.metamaplite_path)
def process_data(pid, doc_list):
data, miss = [], 0
for i, doc in enumerate(doc_list):
try:
text = clean_text(doc['text'])
concepts, error = mm.extract_concepts([text],[doc['_id']])
assert len(text) == len(doc['text']), 'Text length does not match after pre-processing'
res_list = ddict(list)
for k, concept in enumerate(concepts):
if concept.mm !='MMI': continue
pos_info = [list(map(int, x.split('/'))) for x in concept.pos_info.split(';')]
mentions = replace(concept.trigger, '"').split('"')[0::2][1::2]
for j, (start, offset) in enumerate(pos_info):
end = start + offset
res_list[(start, end)].append((concept.cui, concept.score))
doc['result'] = dict(res_list)
data.append(doc)
if i % 10 == 0:
print('Completed [{}] {}, {}'.format(pid, i, time.strftime("%d_%m_%Y") + '_' + time.strftime("%H:%M:%S")))
except Exception as e:
print('\nException Cause: {}'.format(e.args[0]))
miss += 1
continue
print('All work done {} | Miss: {}!!'.format(pid, miss))
return data
num_procs = args.workers
chunks = partition(doc_list, num_procs)
data_list = mergeList(Parallel(n_jobs= num_procs)(delayed(process_data)(i,chunk) for i, chunk in enumerate(chunks)))
base_dir = './results/{}'.format(args.data); make_dir(base_dir)
dump_pickle(data_list, '{}/{}_{}.pkl'.format(base_dir, args.model, args.split))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='')
parser.add_argument('--data', default='ncbi', help='Dataset on which evaluation has to be performed.')
    parser.add_argument('--model', default='metamap', help='Entity linking system to use. Options: [scispacy, quickumls, ctakes, metamap, metamaplite]')
parser.add_argument('--split', default='test', help='Dataset split to evaluate on')
parser.add_argument('--quickumls_path', default=None, help='QuickUMLS installation directory')
parser.add_argument('--metamap_path', default=None, help='Location where MetaMap executable is installed, e.g .../public_mm/bin/metamap18')
parser.add_argument('--metamaplite_path',default=None, help='Location where MetaMapLite is installed, e.g .../public_mm_lite')
parser.add_argument('--ctakes_host', default='localhost', help='IP at which cTakes server is running')
parser.add_argument('--ctakes_port', default=9999,type=int, help='Port at which cTakes server is running')
parser.add_argument('--workers', default=1, type=int, help='Number of processes to use for parallelization')
args = parser.parse_args()
# Reading dataset
doc_list = []
for line in open('../datasets/{}.json'.format(args.data)):
doc = json.loads(line.strip())
if doc['split'] != args.split: continue
doc_list.append(doc)
# Dump Ground truth
groundtruth_dump(doc_list)
# Dump Model's output
if args.model == 'quickumls': quickumls(doc_list)
elif args.model == 'scispacy': scispacy(doc_list)
elif args.model == 'ctakes': ctakes(doc_list)
elif args.model == 'metamap': metamap(doc_list)
elif args.model == 'metamaplite': metamaplite(doc_list)
else: raise NotImplementedError
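# Example invocation (hedged: the script's filename and the install paths
# below are placeholders, not taken from the repository):
#
#   python eval.py --data ncbi --model metamap --split test \
#       --metamap_path /opt/public_mm/bin/metamap18 --workers 4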
|
StarcoderdataPython
|
9791127
|
<reponame>GeorgeKandamkolathy/LocaltoSpotify
import eyed3
import os
import requests
import json
import webbrowser
import http.server
import sys
from io import StringIO
import base64
import urllib.parse
import re
CLIENT_ID = "128418d86c274651af8cdc709df1c143"
CLIENT_SECRET = "<KEY>"
def main():
    unsuccesful = [] # Unsuccessful track transfers
    store = [] # Track locations
    p = 0 # Progress bar iterator
    i = 0 # Reused iterator
    songlist = ""
    auth = '' # Authorization code
    empty = 0 # Used to check for failed track transfers
    listlen = 0 # Number of tracks
    untagged = [] # Files without proper ID3 tagging
directory = input('Music directory: ')
existingstate = input('Existing or New Playlist: ')
pattern = re.compile("(^(e|E)(xist)?(s)?(ing)?|^(n|N)(ew|EW)?)")
while pattern.match(existingstate) == None:
existingstate = input('Existing or New Playlist: ')
playlistname = input('Playlist name: ')
result = authenticate()
result_string = result.getvalue()
#Store file locations
directoryencode = os.fsencode(directory)
getTracks(directoryencode,store)
printProgressBar(0, len(store), prefix = 'Progress:', suffix = 'Complete', length = 50)
    # Iterate through server output to retrieve authentication code
while result_string[i] != '=':
i += 1
i += 1
while result_string[i] != ' ':
auth = auth + result_string[i]
i += 1
i = 0
    # Send request to retrieve access token using auth code
params = {
"grant_type": "authorization_code",
"code": auth,
"redirect_uri": "http://localhost:8000",
}
headers = {
"Content-Type": "application/x-www-form-urlencoded",
"Authorization" : "Basic " + base64.b64encode("{}:{}".format(CLIENT_ID, CLIENT_SECRET).encode('UTF-8')).decode('ascii')
}
html = requests.request('post', "https://accounts.spotify.com/api/token", headers=headers, params=params, data=None)
token = html.json()
# Final access token form for use in api calls
auth = 'Bearer ' + token['access_token']
while i < len(store):
printProgressBar(p + 1, len(store), prefix = 'Progress:', suffix = 'Complete', length = 50)
p += 1
        # If track failed, don't add comma separator to list
if i > 0 and empty != 1:
songlist = songlist + "%2C"
empty = 0
        # Stop eyed3 error outputs
sys.stderr = result
#Get search terms through tags
song = eyed3.core.load(store[i])
songname = song.tag.title
album = song.tag.album
artist = song.tag.artist
#Skip files without tags
if songname == None or album == None:
untagged.append(store[i])
empty += 1
i += 1
continue
sys.stderr = sys.__stderr__
        # Encode search terms for URL usage
        songname = urllib.parse.quote(songname, safe='')
        album = urllib.parse.quote(album, safe='')
        if artist is not None:
            artist = urllib.parse.quote(artist, safe='')
#Api call for track search using track name and album
response = requests.get("https://api.spotify.com/v1/search?q=track:"+ songname +"%20album:"+ album +"&type=track&limit=1", headers = {'Authorization' : auth})
dictionary = response.json()
#If previous check failed replace album with artist
if dictionary['tracks']['items'] == []:
if artist == None:
untagged.append(store[i])
i += 1
continue
response = requests.get("https://api.spotify.com/v1/search?q="+ songname +"%20artist:"+ artist +"&type=track&limit=1", headers = {'Authorization' : auth})
dictionary = response.json()
            # If both failed, add track to the unsuccessful list and continue
if dictionary['tracks']['items'] == []:
unsuccesful.append(urllib.parse.unquote(songname))
i += 1
empty = 1
continue
        # Add retrieved track id to list
trackid = dictionary['tracks']['items'][0]['id']
songlist = songlist + "spotify%3Atrack%3A" + trackid
listlen += 1
i += 1
i = 0
#Api call for user ID
response = requests.get("https://api.spotify.com/v1/me", headers = {'Authorization' : auth})
response = response.json()
userid = response['id']
    # If user chose a new playlist, make api call to create playlist
new_check = re.compile("^(n|N)(ew|EW)?")
exist_check = re.compile("^(e|E)(xist|XIST)?(s)?(ing|ING)?")
if new_check.match(existingstate) != None:
params = {
"name": playlistname,
"description": "Playlist transfer from local",
"public": 'false'
}
response = requests.request('post', "https://api.spotify.com/v1/users/"+ userid +"/playlists", headers= {'Authorization' : auth,"Content-Type": "application/json","Accept": "application/json"}, params=None ,data=json.dumps(params))
response = response.json()
playlistid= response['id']
    # Else api call to retrieve existing playlists
elif exist_check.match(existingstate) != None:
response = requests.get("https://api.spotify.com/v1/me/playlists", headers = {'Authorization' : auth})
response = response.json()
while response['items'][i]['name'] != playlistname:
if i + 1 == len(response['items']):
print("Playlist Not Found")
return
i += 1
playlistid = response['items'][i]['id']
    # Batch processing api calls for track uploads
current = 0
trackbuffer = ''
j = 0
if listlen > 30:
while j < len(songlist):
if j + 1 == len(songlist):
trackbuffer = trackbuffer + songlist[j]
response = requests.request('post', "https://api.spotify.com/v1/playlists/"+ playlistid +"/tracks?uris=" + trackbuffer, headers= {'Authorization' : auth})
trackbuffer = ''
if songlist[j] == '%':
if songlist[j + 1] == '2' and songlist[j + 2] == 'C':
current += 1
if current == 30:
response = requests.request('post', "https://api.spotify.com/v1/playlists/"+ playlistid +"/tracks?uris=" + trackbuffer, headers= {'Authorization' : auth})
trackbuffer = ''
current = 0
j = j + 3
trackbuffer = trackbuffer + songlist[j]
j += 1
else:
#Api call to add songs to playlist
response = requests.request('post', "https://api.spotify.com/v1/playlists/"+ playlistid +"/tracks?uris=" + songlist, headers= {'Authorization' : auth})
    print(str(len(unsuccesful)) + " Unsuccessful tracks: " + str(unsuccesful))
print(str(len(untagged)) + " Untagged tracks: "+ str(untagged))
if len(unsuccesful) > 0 or len(untagged) > 0 :
retry_check = re.compile("^(y|Y)(es|ES)?")
retry_attempt = input("Would you like to attempt to add individual failed tracks?: ")
if retry_check.match(retry_attempt) != None:
reattempt(playlistid, auth, unsuccesful)
reattempt(playlistid, auth, untagged)
#Recursive calls to search subdirectories
def getTracks(directoryencode,store):
for entry in os.scandir(directoryencode):
filename = os.fsdecode(entry)
if entry.is_file() and filename.endswith(".mp3"):
store.append(filename)
elif entry.is_dir():
getTracks(entry.path,store)
# Create temp server to handle URI redirect and retrieve Auth Code
def wait_for_request(server_class=http.server.HTTPServer,
handler_class=http.server.BaseHTTPRequestHandler):
server_address = ('', 8000)
httpd = server_class(server_address, handler_class)
httpd.handle_request()
def authenticate():
old_stderr = sys.stderr
result = StringIO()
sys.stderr = result
url = 'https://accounts.spotify.com/en/authorize?response_type=code&client_id=128418d86c274651af8cdc709df1c143&redirect_uri=http://localhost:8000&scope=user-read-private%20user-read-email%20playlist-modify-private%20playlist-modify-public%20playlist-read-private'
webbrowser.open(url)
wait_for_request()
sys.stderr = old_stderr
return result
def reattempt(playlistid, auth, retry = [], *args):
i = 0
while i < len(retry):
        retry[i] = re.sub(r'\(.+\)$', '', retry[i])
        retry[i] = re.sub(r'^.+ -', '', retry[i])
retry[i] = os.path.splitext(retry[i])[0]
response = requests.get("https://api.spotify.com/v1/search?q=track:"+ retry[i] +"&type=track&limit=3", headers = {'Authorization' : auth})
dictionary = response.json()
if len(dictionary['tracks']['items']) != 0 :
z = 0
print(retry[i])
while z < len(dictionary['tracks']['items']) :
print(str(int(z) + 1) + ". " + dictionary['tracks']['items'][z]['name'] + ' - ' + dictionary['tracks']['items'][z]['artists'][0]['name'] + "\n")
z += 1
            choice = int(input('Add which song - 0 for none: '))
            while choice > z:
                choice = int(input('Add which song - 0 for none: '))
            if choice != 0:
                track = re.sub(':', '%3A', dictionary['tracks']['items'][choice - 1]['uri'])
                response = requests.request('post', "https://api.spotify.com/v1/playlists/"+ playlistid +"/tracks?uris=" + track, headers= {'Authorization' : auth})
i += 1
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = "\r"):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
printEnd - Optional : end character (e.g. "\r", "\r\n") (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = printEnd)
# Print New Line on Complete
if iteration == total:
print()
if __name__ == "__main__":
main()
input()
|
StarcoderdataPython
|
9754739
|
import matplotlib.pyplot as plt
import numpy as np
with open('/home/ganesh/Desktop/class_acc.txt', 'r') as myfile:
    data = myfile.read()
actor_list = list()
accuracy_list = list()
x = data.strip().split('\n')
for i in range(len(x)):
    y = x[i].split(',')
    print(y)
    actor_list.append(y[0])
    accuracy_list.append(float(y[1]))  # convert from str so the y-axis is numeric
plt.scatter(np.arange(0, len(accuracy_list)), accuracy_list)
plt.show()
|
StarcoderdataPython
|
8034953
|
<reponame>andela/ah-django-unchained<filename>authors/apps/usernotifications/views.py<gh_stars>0
import jwt
from django.conf import settings
from django.shortcuts import get_object_or_404
from django.http import Http404
from rest_framework import status
from rest_framework.generics import (
ListAPIView,
UpdateAPIView
)
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from .serializers import NotificationSerializer, UnsubscribeSerializer
from authors.apps.authentication.models import User
class NotificationAPIView(ListAPIView):
permission_classes = (IsAuthenticated,)
serializer_class = NotificationSerializer
def get(self, request, *args, **kwargs):
notifications = self.notifications(request)
serializer = self.serializer_class(
notifications, many=True, context={'request': request}
)
return Response(
{"count": notifications.count(), "notifications": serializer.data}
)
def notifications(self, request):
# this method will be overridden by the following methods
pass
class AllNotificationsAPIView(NotificationAPIView):
"""
list all the notifications for this user
"""
def notifications(self, request):
request.user.notifications.mark_as_sent()
return request.user.notifications.active()
class UnsubscribeAPIView(ListAPIView, UpdateAPIView):
"""
class for allowing users to unsubscribe from notifications
"""
permission_classes = [AllowAny]
serializer_class = UnsubscribeSerializer
def get(self, request, token):
"""
unsubscribe from email notifications
"""
try:
email = jwt.decode(token, settings.SECRET_KEY)['email']
user = User.objects.get(email=email)
        except (TypeError, ValueError, OverflowError, Exception):
raise Http404
user.email_notification_subscription = False
user.save()
message = {
"message": "You have successfully unsubscribed from notifications",
"email": user.email_notification_subscription,
"app": user.app_notification_subscription
}
return Response(message, status=status.HTTP_200_OK)
def put(self, request):
"""
unsubscribe from app notifications
"""
        self.permission_classes.append(IsAuthenticated)
user = get_object_or_404(User, email=request.user.email)
user.app_notification_subscription = False
message = {
"message": "You have successfully unsubscribed from app notifications",
"email": user.email_notification_subscription,
"app": user.app_notification_subscription
}
user.save()
return Response(message, status=status.HTTP_200_OK)
class SubscribeAPIView(UpdateAPIView):
"""
allow users to subscribe to notifications
"""
permission_classes = [IsAuthenticated]
def post(self, request):
"""
subscribe to all notifications
"""
user = get_object_or_404(User, email=request.user.email)
user.email_notification_subscription = True
user.app_notification_subscription = True
user.save()
message = {
"message": "You have successfully subscribed to notifications",
"email": user.email_notification_subscription,
"app": user.app_notification_subscription
}
return Response(message, status=status.HTTP_200_OK)
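# Hedged usage sketch with DRF's test client (the URL paths are assumptions;
# the project's urls.py is not shown here):
#
#   from rest_framework.test import APIClient
#   client = APIClient()
#   client.force_authenticate(user=some_user)
#   client.put('/api/notifications/unsubscribe/')   # app notifications off
#   client.post('/api/notifications/subscribe/')    # everything back on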
|
StarcoderdataPython
|
3544386
|
"""Setup the package."""
# Parse requirements
# ------------------
import pkg_resources
import pathlib
def parse_requirements(path: str) -> 'list[str]':
with pathlib.Path(path).open() as requirements:
return [str(req) for req in pkg_resources.parse_requirements(requirements)]
# Setup package
# -------------
from setuptools import setup
DRIVERS = ['asyncpg', 'aiopg', 'aiomysql', 'aiosqlite', 'triopg', 'trio_mysql']
setup(
install_requires=parse_requirements('requirements/requirements.txt'),
extras_require=dict(
{driver: [f"aio-databases[{driver}]"] for driver in DRIVERS},
tests=parse_requirements('requirements/requirements-tests.txt'),
)
)
# pylama:ignore=E402,D
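# For illustration: with DRIVERS as above, extras_require maps each driver to
# the matching aio-databases extra, so `pip install <package>[asyncpg]`
# (package name assumed) pulls in aio-databases[asyncpg], while
# `pip install <package>[tests]` installs the test requirements.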
|
StarcoderdataPython
|
1664194
|
<reponame>RivtLib/replit01
import operator
from typing import Union
from .exceptions import InvalidVersion
from .legacy_version import LegacyVersion
from .version import Version
OP_EQ = operator.eq
OP_LT = operator.lt
OP_LE = operator.le
OP_GT = operator.gt
OP_GE = operator.ge
OP_NE = operator.ne
_trans_op = {
"=": OP_EQ,
"==": OP_EQ,
"<": OP_LT,
"<=": OP_LE,
">": OP_GT,
">=": OP_GE,
"!=": OP_NE,
}
def parse(
version, # type: str
strict=False, # type: bool
): # type:(...) -> Union[Version, LegacyVersion]
"""
Parse the given version string and return either a :class:`Version` object
or a LegacyVersion object depending on if the given version is
a valid PEP 440 version or a legacy version.
If strict=True only PEP 440 versions will be accepted.
"""
try:
return Version(version)
except InvalidVersion:
if strict:
raise
return LegacyVersion(version)
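# Usage sketch (follows directly from the function above; the exact reprs of
# Version/LegacyVersion are assumptions):
#
#   parse("1.2.3")               # -> Version, a valid PEP 440 version
#   parse("2013b")               # -> LegacyVersion fallback
#   parse("2013b", strict=True)  # raises InvalidVersion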
|
StarcoderdataPython
|
270236
|
<reponame>bg459/gan-ensembling-loader<filename>data/data_cars.py
import torch
import numpy as np
import os
from data.image_dataset import ImageDataset
from torch.utils.data import Subset
from torchvision import transforms
from PIL import Image
import random
import math
from mat4py import loadmat
from collections import defaultdict
from .transforms import randomcrop_tensor, centercrop_tensor
###### utility functions ######
anno_path = 'dataset/cars/devkit/cars_meta.mat'
data = loadmat(anno_path)
class_names = data['class_names']
anno_path = 'dataset/cars/devkit/cars_train_annos.mat'
data = loadmat(anno_path)
class_labels = data['annotations']['class']
file_names = data['annotations']['fname']
bbox_annotations = (data['annotations']['bbox_x1'],
data['annotations']['bbox_y1'],
data['annotations']['bbox_x2'],
data['annotations']['bbox_y2'])
# for each class, which images are a member of it
indices_per_class = defaultdict(list)
for i in range(len(class_labels)):
indices_per_class[class_labels[i]].append(i)
train_indices = {k: v[:len(v)//2] for k, v in indices_per_class.items()}
val_indices = {k: v[len(v)//2:-len(v)//4] for k, v in indices_per_class.items()}
test_indices = {k: v[-len(v)//4:] for k, v in indices_per_class.items()}
car_types = ['-'] + [x.split(' ')[-2] for x in class_names] # - for 1-indexing
def get_partition_indices(part, valid_classes):
fine_to_coarse_label_map = {i: valid_classes.index(x) for
i, x in enumerate(car_types) if x in
valid_classes}
partition_indices = []
fine_labels = []
if part == 'train':
[partition_indices.extend(train_indices[k]) for k in fine_to_coarse_label_map]
[fine_labels.extend([k]*len(train_indices[k])) for k in fine_to_coarse_label_map]
if part == 'val':
[partition_indices.extend(val_indices[k]) for k in fine_to_coarse_label_map]
[fine_labels.extend([k]*len(val_indices[k])) for k in fine_to_coarse_label_map]
if part == 'test':
[partition_indices.extend(test_indices[k]) for k in fine_to_coarse_label_map]
[fine_labels.extend([k]*len(test_indices[k])) for k in fine_to_coarse_label_map]
return partition_indices, fine_labels, fine_to_coarse_label_map
###### dataset functions ######
class CarsDataset:
def __init__(self, partition, classes='threecars', load_w=True, **kwargs):
root = 'dataset/cars/images'
self.load_w = load_w
self.dset = ImageDataset(root, return_path=True, **kwargs)
if classes == 'sixcars':
valid_classes = ['SUV', 'Sedan', 'Hatchback', 'Convertible', 'Coupe', 'Cab']
elif classes == 'threecars':
valid_classes = ['SUV', 'Sedan', 'Cab']
elif classes == 'suvsedan':
valid_classes = ['SUV', 'Sedan']
else:
valid_classes = None
print("The valid classes are: %s" % valid_classes)
partition_idx, fine_labels, fine_to_coarse_label_map = get_partition_indices(partition, valid_classes)
self.dset = Subset(self.dset, partition_idx)
self.fine_to_coarse_label_map = fine_to_coarse_label_map
self.partition_idx = partition_idx
self.fine_labels = fine_labels
self.coarse_labels = valid_classes
def __len__(self):
return len(self.dset)
def __getitem__(self, idx):
data = self.dset[idx]
fine_label = self.fine_labels[idx]
img_filename = os.path.splitext(os.path.basename(data[2]))[0]
coarse_label = self.fine_to_coarse_label_map[fine_label]
remapped_index = self.dset.indices[idx] # remap since it is subset
assert(os.path.splitext(file_names[remapped_index])[0] == img_filename)
assert(class_labels[remapped_index] == fine_label)
# get bounding box
bbox = (bbox_annotations[0][remapped_index],
bbox_annotations[1][remapped_index],
bbox_annotations[2][remapped_index],
bbox_annotations[3][remapped_index])
if self.load_w:
path = data[2]
w_path = path.replace('images/images', 'latents').replace('jpg', 'pth')
# remove batch dimension
w_pth = torch.load(w_path, map_location='cpu')['w'][0].detach()
return (data[0], w_pth, coarse_label, bbox, *data[2:])
return (data[0], coarse_label, bbox, *data[2:])
###### transformation functions #####
def get_transform(dataset, transform_type):
assert(dataset.lower() == 'car')
if transform_type == 'imtrain':
return transforms.Compose([
transforms.RandomResizedCrop(256, scale=(0.8, 1.0),
interpolation=Image.ANTIALIAS),
transforms.RandomHorizontalFlip(p=0.5),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
elif transform_type == 'imval':
return transforms.Compose([
transforms.Resize(256, Image.ANTIALIAS),
transforms.CenterCrop(256),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
elif transform_type == 'im2tensor':
return transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
elif transform_type == 'imcrop':
return transforms.Compose([
transforms.Resize(256, Image.ANTIALIAS),
transforms.RandomCrop(256),
transforms.RandomHorizontalFlip(p=0.5),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
elif transform_type == 'tensorbase':
return TensorBaseTransform()
elif transform_type == 'tensormixed':
return TensorCombinedTransform()
elif transform_type == 'tensormixedtrain':
return TensorCombinedTransformTrain()
else:
raise NotImplementedError
class TensorBaseTransform(object):
def __init__(self):
self.load_size = (256, 342) # 1.5x = (384, 512) gan output size
self.crop_size = 256
self.resize = torch.nn.Upsample(self.load_size, mode='bilinear')
def __call__(self, image):
image = image[:, :, 64:-64, :] # crop off the black padding
image = self.resize(image)
image = centercrop_tensor(image, self.crop_size, self.crop_size)
return image
# mimics RandomCrop and RandomHorizontalFlip on Tensor inputs
class TensorCombinedTransform(object):
def __init__(self):
self.load_size = (256, 342) # 1.5x (512, 384)
self.crop_size = 256
self.resize = torch.nn.Upsample(self.load_size, mode='bilinear')
def __call__(self, image):
image = image[:, :, 64:-64, :] # crop off the black padding
# resize 512x384 --> (342x256) (about 1.5x scale)
image = self.resize(image)
# random crop at 256 (for each image individually)
image = torch.cat([randomcrop_tensor(im[None], self.crop_size)
for im in image])
# random horizontal flip
image = torch.stack([torch.flip(x, dims=(-1,)) if torch.rand(1) >
0.5 else x for x in image])
return image
# mimics RandomResizeCrop and RandomHorizontalFlip on Tensor inputs
class TensorCombinedTransformTrain(object):
def __init__(self):
# performs a tensor random-resize crop
self.crop_size = 256
self.resize = torch.nn.Upsample((self.crop_size, self.crop_size), mode='bilinear')
self.scale = (0.8, 1.0)
self.ratio = (3. / 4., 4. / 3.)
self.flip = True
def get_image_size(self, img):
assert(isinstance(img, torch.Tensor) and img.dim() > 2)
return img.shape[-2:][::-1]
def get_params(self, img):
scale = self.scale
ratio = self.ratio
width, height = self.get_image_size(img)
area = height * width
for _ in range(10):
target_area = random.uniform(*scale) * area
log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
aspect_ratio = math.exp(random.uniform(*log_ratio))
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if 0 < w <= width and 0 < h <= height:
i = random.randint(0, height - h)
j = random.randint(0, width - w)
return i, j, h, w
# Fallback to central crop
in_ratio = float(width) / float(height)
if (in_ratio < min(ratio)):
w = width
h = int(round(w / min(ratio)))
elif (in_ratio > max(ratio)):
h = height
w = int(round(h * max(ratio)))
else: # whole image
w = width
h = height
i = (height - h) // 2
j = (width - w) // 2
return i, j, h, w
def __call__(self, image):
        image = image[:, :, 64:-64, :] # crop off the black padding
# random resized crop, for each image independently
image_list = []
for im in image:
im = im[None]
i, j, h, w = self.get_params(image)
im = im[:, :, i:i+h, j:j+w] # random resize crop
im = self.resize(im) # resize to output size
image_list.append(im)
image = torch.cat(image_list, dim=0)
# horizontal flip for each image independently
image = torch.stack([torch.flip(x, dims=(-1,)) if torch.rand(1) >
0.5 else x for x in image])
return image
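# Hedged usage sketch (assumes the dataset/cars files referenced above exist):
#
#   dset = CarsDataset('train', classes='threecars', load_w=False)
#   tf = get_transform('car', 'imtrain')
#   image, coarse_label, bbox = dset[0][:3]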
|
StarcoderdataPython
|
1941564
|
<reponame>gdialektakis/Statistical-Dialogue-Systems-with-Adversarial-AutoEncoders<filename>autoencoder/adversarial_autoencoder/lib/precision.py
__author__ = """<NAME> (<EMAIL>)"""
# Copyright (C) 2016 by
# <NAME> <<EMAIL>>
# All rights reserved.
# Computer Science Department, University of Crete.
import tensorflow as tf
_FLOATX = tf.float32
|
StarcoderdataPython
|
360926
|
from flask import (
Blueprint,
Response,
render_template,
current_app,
request,
redirect,
jsonify
)
import requests
frontend = Blueprint('frontend', __name__, template_folder='templates')
headers = {'Content-type': 'application/json'}
@frontend.route('/')
def index():
country_register = current_app.config['COUNTRY_REGISTER']
return render_template('index.html', country_register=country_register)
@frontend.route('/country', methods=['POST'])
def country():
country = request.form.getlist('country')[0]
current_app.logger.info(country)
country_register = current_app.config['COUNTRY_REGISTER']
url = "%s/country/%s" % (country_register, country)
return redirect(url)
@frontend.route('/countries.json')
def countries():
country_register = current_app.config['COUNTRY_REGISTER']
url = "%s/records.json?page-size=300" % country_register
resp = requests.get(url, headers=headers)
countriesJson = resp.json()
countries = []
for key in countriesJson:
countries.append(countriesJson[key])
countries = sorted(countries, key=lambda country: country['name'])
return jsonify({'entries': countries})
|
StarcoderdataPython
|
6582067
|
<reponame>brentp/bcbio-nextgen<filename>bcbio/provenance/versioncheck.py<gh_stars>1-10
"""Check specific required program versions required during the pipeline.
"""
import subprocess
from bcbio.pipeline import config_utils
from bcbio.log import logger
def samtools(config):
"""Ensure samtools has parallel processing required for piped analysis.
"""
samtools = config_utils.get_program("samtools", config)
p = subprocess.Popen([samtools, "sort"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output, _ = p.communicate()
p.stdout.close()
if output.find("-@") == -1:
return ("Installed version of samtools sort does not have support for multithreading (-@ option) "
"required to support bwa piped alignment and BAM merging. "
"Please upgrade to the latest version "
"from http://samtools.sourceforge.net/")
def testall(items):
logger.info("Testing minimum versions of installed programs")
items = [x[0] for x in items]
config = items[0]["config"]
msgs = []
for fn in [samtools]:
out = fn(config)
if out:
msgs.append(out)
if msgs:
raise OSError("Program problems found. You can upgrade dependencies with:\n" +
"bcbio_nextgen.py upgrade -u skip --tooldir=/usr/local\n\n" +
"\n".join(msgs))
|
StarcoderdataPython
|
3524407
|
from flask_wtf import FlaskForm
from wtforms import StringField, IntegerField, SubmitField
from wtforms.validators import DataRequired
class LcdForm(FlaskForm):
lcd_text = StringField('LcdText', validators=[DataRequired()])
submit = SubmitField('Display')
class LcdRowForm(FlaskForm):
lcd_text = StringField('LcdText', validators=[DataRequired()])
lcd_row = IntegerField('LcdRow', validators=[DataRequired()])
submit = SubmitField('Display')
class GenericCPIOForm(FlaskForm):
pin = IntegerField('pin', validators=[DataRequired()])
value = StringField('value', validators=[DataRequired()])
class LedForm(FlaskForm):
pin = IntegerField('PIN', validators=[DataRequired()])
repetitions = IntegerField('Repetitions', validators=[DataRequired()])
submit = SubmitField('Blink!')
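# Hedged usage sketch (illustrative, not in the original module): wiring LcdForm
# into a Flask view; the route, template name, and display() helper are hypothetical.
# @app.route('/lcd', methods=['GET', 'POST'])
# def lcd():
#     form = LcdForm()
#     if form.validate_on_submit():
#         display(form.lcd_text.data)  # hypothetical hardware helper
#     return render_template('lcd.html', form=form)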
|
StarcoderdataPython
|
1743805
|
<gh_stars>0
import serial
import logging
port = "/dev/ttyUSB0"
baud = 115200
#log = logging.getLogger("serialM4")
class serialM4():
def __init__(self):
#log.info("SerialM4 constructor called")
self.com = serial.Serial(port, 115200)
def run(self):
#log.info("Receive thread started")
while True:
self.state = self.com.read(1)
print(self.state)
def cmd(self, char):
#log.info("Serial command: '" + char + "'")
self.com.write(str.encode(char))
def open(self):
self.cmd("o")
def close(self):
self.cmd("c")
def reagent(self):
self.cmd("r")
def waste(self):
self.cmd("w")
def stop(self):
self.cmd("s")
def inPos(self):
self.cmd("Q")
def inNeg(self):
self.cmd("A")
def outPos(self):
self.cmd("X")
def outNeg(self):
self.cmd("Y")
|
StarcoderdataPython
|
1652072
|
<filename>stack_overseer/question_monitor/config/api_config.example.py
# Rename this file to api_config.py (drop the ".example" suffix)
API_KEY = "GET API KEY HERE https://stackapps.com/apps/oauth/register" # for stackExchange API
GEO_CODER_API = "Whatever service endpoint APIKEY!" # GEOCODER
GEO_CODER_API_ENDPOINT = "A API Endpoint Url"
|
StarcoderdataPython
|
5061396
|
# -*- coding: utf-8 -*-
"""insult_train.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/11OTc2Q2mXQ1a3O0vljL4FhhZhqJqQzT_
"""
from google.colab import drive
drive.mount('/content/drive')
from numpy import array
from keras.preprocessing.text import one_hot
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers.core import Activation, Dropout, Dense
from keras.layers import Flatten, LSTM
from keras.layers import GlobalMaxPooling1D
from keras.models import Model
from keras.layers.embeddings import Embedding
from sklearn.model_selection import train_test_split
from keras.preprocessing.text import Tokenizer
from keras.layers import Input
from keras.layers.merge import Concatenate
import pandas as pd
import numpy as np
import re
import matplotlib.pyplot as plt
toxic_comments = pd.read_csv("/content/drive/My Drive/work/Keyword_Suggestion/Training/Datasets/hatekeyword.csv")
toxic_comments.head()
mask = toxic_comments["comment_text"] != ""
toxic_comments = toxic_comments[mask]
toxic_comments = toxic_comments.dropna()
toxic_comments_labels = toxic_comments[["toxic", "severe_toxic", "obscene","insult"]]
toxic_comments_labels.head()
fig_size = plt.rcParams["figure.figsize"]
fig_size[0] = 10
fig_size[1] = 8
plt.rcParams["figure.figsize"] = fig_size
toxic_comments_labels.sum(axis=0).plot.bar()
def preprocess_text(sen):
# Remove punctuations and numbers
sentence = re.sub('[^a-zA-Z]', ' ', sen)
# Single character removal
sentence = re.sub(r"\s+[a-zA-Z]\s+", ' ', sentence)
# Removing multiple spaces
sentence = re.sub(r'\s+', ' ', sentence)
return sentence
X = []
sentences = list(toxic_comments["comment_text"])
for sen in sentences:
X.append(preprocess_text(sen))
y = toxic_comments_labels.values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=42)
tokenizer = Tokenizer(num_words=5000)
tokenizer.fit_on_texts(X_train)
X_train = tokenizer.texts_to_sequences(X_train)
X_test = tokenizer.texts_to_sequences(X_test)
vocab_size = len(tokenizer.word_index) + 1
maxlen = 200
X_train = pad_sequences(X_train, padding='post', maxlen=maxlen)
X_test = pad_sequences(X_test, padding='post', maxlen=maxlen)
from numpy import array
from numpy import asarray
from numpy import zeros
embeddings_dictionary = dict()
glove_file = open('/content/drive/My Drive/work/Emotion Detection/glove.6B.100d.txt', encoding="utf8")
for line in glove_file:
records = line.split()
word = records[0]
vector_dimensions = asarray(records[1:], dtype='float32')
embeddings_dictionary[word] = vector_dimensions
glove_file.close()
embedding_matrix = zeros((vocab_size, 100))
for word, index in tokenizer.word_index.items():
embedding_vector = embeddings_dictionary.get(word)
if embedding_vector is not None:
embedding_matrix[index] = embedding_vector
deep_inputs = Input(shape=(maxlen,))
embedding_layer = Embedding(vocab_size, 100, weights=[embedding_matrix], trainable=False)(deep_inputs)
LSTM_Layer_1 = LSTM(128)(embedding_layer)
dense_layer_1 = Dense(4, activation='sigmoid')(LSTM_Layer_1)
model = Model(inputs=deep_inputs, outputs=dense_layer_1)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc'])
print(model.summary())
from keras.utils import plot_model
plot_model(model, to_file='model_plot4a.png', show_shapes=True, show_layer_names=True)
history = model.fit(X_train, y_train, batch_size=128, epochs=5, verbose=1, validation_split=0.2)
score = model.evaluate(X_test, y_test, verbose=1)
print("Test Score:", score[0])
print("Test Accuracy:", score[1])
import matplotlib.pyplot as plt
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
predictions = model.predict(X_test, verbose=1)
print(X_test)
from keras.models import load_model
model.save('insult_model.h5')
import pickle
# saving
with open('insult_tokenizer.pickle', 'wb') as handle:
pickle.dump(tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)
# loading
with open('insult_tokenizer.pickle', 'rb') as handle:
tokenizer = pickle.load(handle)
model1 = load_model('insult_model.h5')
score = model1.evaluate(X_test, y_test, verbose=1)
print("Test Score:", score[0])
print("Test Accuracy:", score[1])
|
StarcoderdataPython
|
9609614
|
from flask import Flask, url_for, render_template, redirect, jsonify, json, request
from requests import get
import urllib.request, urllib.error, urllib.parse, json, webbrowser, requests
# reference: https://github.com/pacman2020/Pokemon-flask-API
def pretty(obj):
return json.dumps(obj, sort_keys=True, indent=2)
#
def safe_get(url):
    r = requests.get(url)
    if r.status_code == 200:
        return r  # reuse the response instead of issuing a second request
    else:
        print("Failed to fulfill the request.")
        print("Error code: ", r.status_code)
#### Main Assignment ##############
#
def pokeAllREST(baseurl = 'https://pokeapi.co/api/v2/pokemon',
params={},
printurl = False
):
url = baseurl + "?" + urllib.parse.urlencode(params)
if printurl:
print(url)
return safe_get(url)
def pokeNameREST(baseurl = 'https://pokeapi.co/api/v2/pokemon/',
id = 1,
printurl = False
):
url = baseurl + str(id)
if printurl:
print(url)
return safe_get(url)
def get_poke_names(n=1000): # n is limit
results = pokeAllREST(params={"limit": n})
if results is None:
return None
else:
all_poke = results.json()
# print(pretty(all_poke))
return all_poke
def get_poke_info(poke_id):
result = pokeNameREST(id = poke_id)
if result is None:
return None
else:
this_poke = result.json()
# print(pretty(this_poke))
return this_poke
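# Hedged example (illustrative): get_poke_info(25) returns the PokeAPI JSON for
# Pikachu (id 25), e.g. get_poke_info(25)["name"] == "pikachu".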
app = Flask(__name__)
@app.route('/')
def home():
limit = 36
offset = request.args.get('offset')
search = request.args.get('search')
if search:
pokemonsJson = get('https://pokeapi.co/api/v2/pokemon/'+search.lower())
pokemon = json.loads(pokemonsJson.content)
for i in pokemon['stats']:
stat = i['base_stat']
if i['stat']['name'] == "hp":
hp = stat
if i['stat']['name'] == "attack":
attack = stat
if i['stat']['name'] == "defense":
defense = stat
if i['stat']['name'] == "speed":
speed = stat
pokemons = []
pokemons.append(
{
'id': pokemon['id'],
'name': pokemon['name'],
'type': pokemon['types'][0]['type']['name'],
'experience': pokemon['base_experience'],
'hp': hp,
'attack': attack,
'defense': defense,
'speed': speed
}
)
return render_template('home.html', pokemons=pokemons)
url = 'https://pokeapi.co/api/v2/pokemon?limit={}&offset={}'.format(limit, offset)
pokemonsJson = get(url)
all_pokemons = json.loads(pokemonsJson.content)
pokemons = []
id = 0
for pokemon in all_pokemons['results']:
# extracting pokemon id by URL
id_p = str(pokemon['url'])
id = id_p.split('/')[-2]
pokemon = get_poke_info(id)
for i in pokemon['stats']:
stat = i['base_stat']
if i['stat']['name'] == "hp":
hp = stat
if i['stat']['name'] == "attack":
attack = stat
if i['stat']['name'] == "defense":
defense = stat
if i['stat']['name'] == "speed":
speed = stat
pokemons.append(
{
'id': id,
'name': pokemon['name'],
'type': pokemon['types'][0]['type']['name'],
'experience': pokemon['base_experience'],
'hp': hp,
'attack': attack,
'defense': defense,
'speed': speed
}
)
return render_template('home.html', pokemons=pokemons)
@app.route('/<name>')
def detail(name):
pokemonsJson = get('https://pokeapi.co/api/v2/pokemon/'+name)
pokemon_data = json.loads(pokemonsJson.content)
for i in pokemon_data['stats']:
stat = i['base_stat']
if i['stat']['name'] == "hp":
hp = stat
if i['stat']['name'] == "attack":
attack = stat
if i['stat']['name'] == "defense":
defense = stat
if i['stat']['name'] == "speed":
speed = stat
pokemon ={
'id': pokemon_data['id'],
'name': pokemon_data['name'],
'type': pokemon_data['types'][0]['type']['name'],
'experience': pokemon_data['base_experience'],
'hp': hp,
'attack': attack,
'defense': defense,
'speed': speed
}
return render_template('detail.html', pokemon=pokemon)
if __name__ == '__main__':
app.run(debug=True)
|
StarcoderdataPython
|
4930627
|
import logging
import tempfile
import os
import torch
from collections import OrderedDict
from tqdm import tqdm
from maskrcnn_benchmark.modeling.roi_heads.mask_head.inference import Masker
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
from pycocotools.cocoeval import COCOeval, Params
def do_coco_evaluation(
dataset,
predictions,
box_only,
output_folder,
iou_types,
expected_results,
expected_results_sigma_tol,
):
logger = logging.getLogger("maskrcnn_benchmark.inference")
if box_only:
logger.info("Evaluating bbox proposals")
areas = {"all": "", "small": "s", "medium": "m", "large": "l"}
res = COCOResults("box_proposal")
for limit in [100, 1000]:
for area, suffix in areas.items():
stats = evaluate_box_proposals(
predictions, dataset, area=area, limit=limit
)
key = "AR{}@{:d}".format(suffix, limit)
res.results["box_proposal"][key] = stats["ar"].item()
logger.info(res)
check_expected_results(res, expected_results, expected_results_sigma_tol)
if output_folder:
torch.save(res, os.path.join(output_folder, "box_proposals.pth"))
return
logger.info("Preparing results for COCO format")
coco_results = {}
if "bbox" in iou_types:
logger.info("Preparing bbox results")
coco_results["bbox"] = prepare_for_coco_detection(predictions, dataset)
if "segm" in iou_types:
logger.info("Preparing segm results")
coco_results["segm"] = prepare_for_coco_segmentation(predictions, dataset)
if 'keypoints' in iou_types:
logger.info('Preparing keypoints results')
coco_results['keypoints'] = prepare_for_coco_keypoint(predictions, dataset)
if 'depth' in iou_types:
logger.info('Preparing depth results')
coco_results['depth'] = prepare_for_coco_regression(predictions, dataset)
results = COCOResults(*iou_types)
logger.info("Evaluating predictions")
for iou_type in iou_types:
with tempfile.NamedTemporaryFile() as f:
file_path = f.name
if output_folder:
file_path = os.path.join(output_folder, iou_type + ".json")
res = evaluate_predictions_on_coco(
dataset.coco, coco_results[iou_type], file_path, iou_type
)
results.update(res)
logger.info(results)
check_expected_results(results, expected_results, expected_results_sigma_tol)
if output_folder:
torch.save(results, os.path.join(output_folder, "coco_results.pth"))
return results, coco_results
def prepare_for_coco_detection(predictions, dataset):
# assert isinstance(dataset, COCODataset)
coco_results = []
for image_id, prediction in enumerate(predictions):
original_id = dataset.id_to_img_map[image_id]
if len(prediction) == 0:
continue
img_info = dataset.get_img_info(image_id)
image_width = img_info["width"]
image_height = img_info["height"]
prediction = prediction.resize((image_width, image_height))
prediction = prediction.convert("xywh")
boxes = prediction.bbox.tolist()
scores = prediction.get_field("scores").tolist()
labels = prediction.get_field("labels").tolist()
mapped_labels = [dataset.contiguous_category_id_to_json_id[i] for i in labels]
coco_results.extend(
[
{
"image_id": original_id,
"category_id": mapped_labels[k],
"bbox": box,
"score": scores[k],
}
for k, box in enumerate(boxes)
# if hasattr(dataset, "class_filter_list") and mapped_labels[k] in dataset.class_filter_list
]
)
return coco_results
def prepare_for_coco_segmentation(predictions, dataset):
import pycocotools.mask as mask_util
import numpy as np
masker = Masker(threshold=0.5, padding=1)
# assert isinstance(dataset, COCODataset)
coco_results = []
for image_id, prediction in tqdm(enumerate(predictions)):
original_id = dataset.id_to_img_map[image_id]
if len(prediction) == 0:
continue
img_info = dataset.get_img_info(image_id)
image_width = img_info["width"]
image_height = img_info["height"]
prediction = prediction.resize((image_width, image_height))
masks = prediction.get_field("mask")
# t = time.time()
# Masker is necessary only if masks haven't been already resized.
if list(masks.shape[-2:]) != [image_height, image_width]:
masks = masker(masks.expand(1, -1, -1, -1, -1), prediction)
masks = masks[0]
# logger.info('Time mask: {}'.format(time.time() - t))
# prediction = prediction.convert('xywh')
# boxes = prediction.bbox.tolist()
scores = prediction.get_field("scores").tolist()
labels = prediction.get_field("labels").tolist()
# rles = prediction.get_field('mask')
rles = [
mask_util.encode(np.array(mask[0, :, :, np.newaxis], order="F"))[0]
for mask in masks
]
for rle in rles:
rle["counts"] = rle["counts"].decode("utf-8")
mapped_labels = [dataset.contiguous_category_id_to_json_id[i] for i in labels]
coco_results.extend(
[
{
"image_id": original_id,
"category_id": mapped_labels[k],
"segmentation": rle,
"score": scores[k],
}
for k, rle in enumerate(rles)
]
)
return coco_results
def prepare_for_coco_keypoint(predictions, dataset):
# assert isinstance(dataset, COCODataset)
coco_results = []
for image_id, prediction in enumerate(predictions):
original_id = dataset.id_to_img_map[image_id]
if len(prediction.bbox) == 0:
continue
# TODO replace with get_img_info?
image_width = dataset.coco.imgs[original_id]['width']
image_height = dataset.coco.imgs[original_id]['height']
prediction = prediction.resize((image_width, image_height))
prediction = prediction.convert('xywh')
boxes = prediction.bbox.tolist()
scores = prediction.get_field('scores').tolist()
labels = prediction.get_field('labels').tolist()
keypoints = prediction.get_field('keypoints')
keypoints = keypoints.resize((image_width, image_height))
keypoints = keypoints.keypoints.view(keypoints.keypoints.shape[0], -1).tolist()
mapped_labels = [dataset.contiguous_category_id_to_json_id[i] for i in labels]
coco_results.extend([{
'image_id': original_id,
'category_id': mapped_labels[k],
'keypoints': keypoint,
'score': scores[k]} for k, keypoint in enumerate(keypoints)])
return coco_results
def prepare_for_coco_regression(predictions, dataset):
# assert isinstance(dataset, COCODataset)
coco_results = []
for image_id, prediction in enumerate(predictions):
original_id = dataset.id_to_img_map[image_id]
if len(prediction.bbox) == 0:
continue
# TODO replace with get_img_info?
image_width = dataset.coco.imgs[original_id]['width']
image_height = dataset.coco.imgs[original_id]['height']
prediction = prediction.resize((image_width, image_height))
prediction = prediction.convert('xywh')
boxes = prediction.bbox.tolist()
scores = prediction.get_field('scores').tolist()
labels = prediction.get_field('labels').tolist()
regs = prediction.get_field('depths').tolist() # .convert("depth")
mapped_labels = [dataset.contiguous_category_id_to_json_id[i] for i in labels]
coco_results.extend([{
'image_id': original_id,
'category_id': mapped_labels[k],
'bbox': boxes[k],
'depth': reg,
'score': scores[k]} for k, reg in enumerate(regs)])
return coco_results
# inspired from Detectron
def evaluate_box_proposals(
predictions, dataset, thresholds=None, area="all", limit=None
):
"""Evaluate detection proposal recall metrics. This function is a much
faster alternative to the official COCO API recall evaluation code. However,
it produces slightly different results.
"""
# Record max overlap value for each gt box
# Return vector of overlap values
areas = {
"all": 0,
"small": 1,
"medium": 2,
"large": 3,
"96-128": 4,
"128-256": 5,
"256-512": 6,
"512-inf": 7,
}
area_ranges = [
[0 ** 2, 1e5 ** 2], # all
[0 ** 2, 32 ** 2], # small
[32 ** 2, 96 ** 2], # medium
[96 ** 2, 1e5 ** 2], # large
[96 ** 2, 128 ** 2], # 96-128
[128 ** 2, 256 ** 2], # 128-256
[256 ** 2, 512 ** 2], # 256-512
[512 ** 2, 1e5 ** 2],
] # 512-inf
assert area in areas, "Unknown area range: {}".format(area)
area_range = area_ranges[areas[area]]
gt_overlaps = []
num_pos = 0
for image_id, prediction in enumerate(predictions):
original_id = dataset.id_to_img_map[image_id]
img_info = dataset.get_img_info(image_id)
image_width = img_info["width"]
image_height = img_info["height"]
prediction = prediction.resize((image_width, image_height))
# sort predictions in descending order
# TODO maybe remove this and make it explicit in the documentation
inds = prediction.get_field("objectness").sort(descending=True)[1]
prediction = prediction[inds]
ann_ids = dataset.coco.getAnnIds(imgIds=original_id)
anno = dataset.coco.loadAnns(ann_ids)
gt_boxes = [obj["bbox"] for obj in anno if obj["iscrowd"] == 0]
gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes
gt_boxes = BoxList(gt_boxes, (image_width, image_height), mode="xywh").convert(
"xyxy"
)
gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0])
if len(gt_boxes) == 0:
continue
valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1])
gt_boxes = gt_boxes[valid_gt_inds]
num_pos += len(gt_boxes)
if len(gt_boxes) == 0:
continue
if len(prediction) == 0:
continue
if limit is not None and len(prediction) > limit:
prediction = prediction[:limit]
overlaps = boxlist_iou(prediction, gt_boxes)
_gt_overlaps = torch.zeros(len(gt_boxes))
for j in range(min(len(prediction), len(gt_boxes))):
# find which proposal box maximally covers each gt box
# and get the iou amount of coverage for each gt box
max_overlaps, argmax_overlaps = overlaps.max(dim=0)
# find which gt box is 'best' covered (i.e. 'best' = most iou)
gt_ovr, gt_ind = max_overlaps.max(dim=0)
assert gt_ovr >= 0
# find the proposal box that covers the best covered gt box
box_ind = argmax_overlaps[gt_ind]
# record the iou coverage of this gt box
_gt_overlaps[j] = overlaps[box_ind, gt_ind]
assert _gt_overlaps[j] == gt_ovr
# mark the proposal box and the gt box as used
overlaps[box_ind, :] = -1
overlaps[:, gt_ind] = -1
# append recorded iou coverage level
gt_overlaps.append(_gt_overlaps)
gt_overlaps = torch.cat(gt_overlaps, dim=0)
gt_overlaps, _ = torch.sort(gt_overlaps)
if thresholds is None:
step = 0.05
thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32)
recalls = torch.zeros_like(thresholds)
# compute recall for each iou threshold
for i, t in enumerate(thresholds):
recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)
# ar = 2 * np.trapz(recalls, thresholds)
ar = recalls.mean()
return {
"ar": ar,
"recalls": recalls,
"thresholds": thresholds,
"gt_overlaps": gt_overlaps,
"num_pos": num_pos,
}
def evaluate_predictions_on_coco(
coco_gt, coco_results, json_result_file, iou_type="bbox"
):
import json
with open(json_result_file, "w") as f:
json.dump(coco_results, f)
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
# coco_dt = coco_gt.loadRes(coco_results)
coco_dt = coco_gt.loadRes(str(json_result_file)) if coco_results else COCO()
if iou_type == "depth":
coco_eval = COCORegeval(coco_gt, coco_dt, iou_type)
else:
coco_eval = COCOeval(coco_gt, coco_dt, iou_type)
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
return coco_eval
class COCOResults(object):
METRICS = {
"bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
"segm": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
"box_proposal": [
"AR@100",
"ARs@100",
"ARm@100",
"ARl@100",
"AR@1000",
"ARs@1000",
"ARm@1000",
"ARl@1000",
],
"keypoints": ["AP", "AP50", "AP75", "APm", "APl"],
"depth": ["AP", "D>1.25**2", "D>1.25", "APm", "APl"],
}
def __init__(self, *iou_types):
allowed_types = ("box_proposal", "bbox", "segm", "keypoints", "depth")
assert all(iou_type in allowed_types for iou_type in iou_types)
results = OrderedDict()
for iou_type in iou_types:
results[iou_type] = OrderedDict(
[(metric, -1) for metric in COCOResults.METRICS[iou_type]]
)
self.results = results
def update(self, coco_eval):
if coco_eval is None:
return
from pycocotools.cocoeval import COCOeval
assert isinstance(coco_eval, COCOeval)
s = coco_eval.stats
iou_type = coco_eval.params.iouType
res = self.results[iou_type]
metrics = COCOResults.METRICS[iou_type]
for idx, metric in enumerate(metrics):
res[metric] = s[idx]
def __repr__(self):
results = '\n'
for task, metrics in self.results.items():
results += 'Task: {}\n'.format(task)
metric_names = metrics.keys()
metric_vals = ['{:.4f}'.format(v) for v in metrics.values()]
results += (', '.join(metric_names) + '\n')
results += (', '.join(metric_vals) + '\n')
return results
def check_expected_results(results, expected_results, sigma_tol):
if not expected_results:
return
logger = logging.getLogger("maskrcnn_benchmark.inference")
for task, metric, (mean, std) in expected_results:
actual_val = results.results[task][metric]
lo = mean - sigma_tol * std
hi = mean + sigma_tol * std
ok = (lo < actual_val) and (actual_val < hi)
msg = (
"{} > {} sanity check (actual vs. expected): "
"{:.3f} vs. mean={:.4f}, std={:.4}, range=({:.4f}, {:.4f})"
).format(task, metric, actual_val, mean, std, lo, hi)
if not ok:
msg = "FAIL: " + msg
logger.error(msg)
else:
msg = "PASS: " + msg
logger.info(msg)
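# Hedged example (illustrative, not part of the original file): the
# expected_results entries consumed above take the form (task, metric, (mean, std)):
# expected_results = [("bbox", "AP", (0.387, 0.003))]
# check_expected_results(results, expected_results, sigma_tol=4)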
import numpy as np
import datetime
import time
from collections import defaultdict
import copy
class RegParams(Params):
def setRegParams(self):
self.imgIds = []
self.catIds = []
        # np.arange causes trouble: the last data point can come out slightly
        # larger than the true value, so use np.linspace with an integer count.
        self.iouThrs = np.linspace(.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
        self.recThrs = np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True)
self.maxDets = [20]
self.areaRng = [[0 ** 2, 1e5 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]
self.areaRngLbl = ['all', 'medium', 'large']
self.useCats = 1
self.scoreThrs = 0.7
def __init__(self, iouType='segm'):
if iouType == 'segm' or iouType == 'bbox':
self.setDetParams()
elif iouType == 'keypoints':
self.setKpParams()
elif iouType == 'depth':
self.setRegParams()
else:
raise Exception('iouType not supported')
self.iouType = iouType
# useSegm is deprecated
self.useSegm = None
class COCORegeval(COCOeval):
def __init__(self, cocoGt=None, cocoDt=None, iouType='segm'):
'''
Initialize CocoEval using coco APIs for gt and dt
:param cocoGt: coco object with ground truth annotations
:param cocoDt: coco object with detection results
:return: None
'''
if not iouType:
print('iouType not specified. use default iouType segm')
self.cocoGt = cocoGt # ground truth COCO API
self.cocoDt = cocoDt # detections COCO API
self.evalImgs = defaultdict(list) # per-image per-category evaluation results [KxAxI] elements
self.eval = {} # accumulated evaluation results
self._gts = defaultdict(list) # gt for evaluation
self._dts = defaultdict(list) # dt for evaluation
self.params = RegParams(iouType=iouType) # parameters
self._paramsEval = {} # parameters for evaluation
self.stats = [] # result summarization
self.ious = {} # ious between all gts and dts
if not cocoGt is None:
self.params.imgIds = sorted(cocoGt.getImgIds())
self.params.catIds = sorted(cocoGt.getCatIds())
def evaluate(self):
tic = time.time()
print('Running per image evaluation...')
p = self.params
# add backward compatibility if useSegm is specified in params
if not p.useSegm is None:
p.iouType = 'segm' if p.useSegm == 1 else 'bbox'
print('useSegm (deprecated) is not None. Running {} evaluation'.format(p.iouType))
print('Evaluate annotation type *{}*'.format(p.iouType))
p.imgIds = list(np.unique(p.imgIds))
if p.useCats:
p.catIds = list(np.unique(p.catIds))
p.maxDets = sorted(p.maxDets)
self.params=p
self._prepare()
# loop through images, area range, max detection number
catIds = p.catIds if p.useCats else [-1]
if p.iouType == 'segm' or p.iouType == 'bbox':
computeIoU = self.computeIoU
elif p.iouType == 'keypoints':
computeIoU = self.computeOks
elif p.iouType == 'depth':
computeIoU = self.computeRegErr
self.ious = {(imgId, catId): computeIoU(imgId, catId) \
for imgId in p.imgIds
for catId in catIds}
evaluateImg = self.evaluateImg
maxDet = p.maxDets[-1]
self.evalImgs = [evaluateImg(imgId, catId, areaRng, maxDet)
for catId in catIds
for areaRng in p.areaRng
for imgId in p.imgIds
]
# print(self.evalImgs)
self._paramsEval = copy.deepcopy(self.params)
toc = time.time()
print('DONE (t={:0.2f}s).'.format(toc-tic))
def computeRegErr(self, imgId, catId):
p = self.params
# dimention here should be Nxm
gts = self._gts[imgId, catId]
dts = self._dts[imgId, catId]
inds = np.argsort([-d['score'] for d in dts], kind='mergesort')
dts = [dts[i] for i in inds]
# dts = [d for d in dts if d['score']>p.scoreThrs]
if len(dts) > p.maxDets[-1]:
dts = dts[0:p.maxDets[-1]]
# if len(gts) == 0 and len(dts) == 0:
if len(gts) == 0 or len(dts) == 0:
return []
ious = np.zeros((len(dts), len(gts)))
# compute oks between each detection and ground truth object
total_error = 0
for j, gt in enumerate(gts):
# create bounds for ignore regions(double the gt bbox)
g = np.array(gt['disp_unity'])
# g = np.array(gt['disp_base'])
# g = np.array(gt['depth'])
# g = np.array(gt['height_rw'])
# k1 = np.count_nonzero(vg > 0)
bb = gt['bbox']
x0 = bb[0] - bb[2]; x1 = bb[0] + bb[2] * 2
y0 = bb[1] - bb[3]; y1 = bb[1] + bb[3] * 2
# depth_g = g/bb[3]
depth_g = g
for i, dt in enumerate(dts):
d = np.array(dt['depth'])
dbb = dt['bbox']
bb_height = dbb[3]
# depth_d = d/bb_height
depth_d = d
# print(depth_d,depth_g)
dx = depth_d - depth_g
# print(xd, bb_height, xg)
# e = np.abs(dx) #/ depth_g
# print(dx*2200)
total_error += np.abs(dx*2200)
ious[i, j] = min(depth_d/depth_g, depth_g/depth_d)# np.exp(-e) #/ e.shape[0]
# print(depth_g, depth_d)
        total_error /= len(dts) * len(gts)
# print(total_error)
return ious
def summarize(self):
'''
Compute and display summary metrics for evaluation results.
        Note this function can *only* be applied on the default parameter setting
'''
def _summarize( ap=1, iouThr=None, areaRng='all', maxDets=100 ):
p = self.params
iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'
titleStr = 'Average Precision' if ap == 1 else 'Average Recall'
typeStr = '(AP)' if ap==1 else '(AR)'
iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \
if iouThr is None else '{:0.2f}'.format(iouThr)
aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]
mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]
if ap == 1:
# dimension of precision: [TxRxKxAxM]
s = self.eval['precision']
# IoU
if iouThr is not None:
t = np.where(iouThr == p.iouThrs)[0]
s = s[t]
s = s[:,:,:,aind,mind]
else:
# dimension of recall: [TxKxAxM]
s = self.eval['recall']
if iouThr is not None:
t = np.where(iouThr == p.iouThrs)[0]
s = s[t]
s = s[:,:,aind,mind]
if len(s[s>-1])==0:
mean_s = -1
else:
mean_s = np.mean(s[s>-1])
print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))
return mean_s
def _summarizeDets():
stats = np.zeros((12,))
stats[0] = _summarize(1)
stats[1] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[2])
stats[2] = _summarize(1, iouThr=.75, maxDets=self.params.maxDets[2])
stats[3] = _summarize(1, areaRng='small', maxDets=self.params.maxDets[2])
stats[4] = _summarize(1, areaRng='medium', maxDets=self.params.maxDets[2])
stats[5] = _summarize(1, areaRng='large', maxDets=self.params.maxDets[2])
stats[6] = _summarize(0, maxDets=self.params.maxDets[0])
stats[7] = _summarize(0, maxDets=self.params.maxDets[1])
stats[8] = _summarize(0, maxDets=self.params.maxDets[2])
stats[9] = _summarize(0, areaRng='small', maxDets=self.params.maxDets[2])
stats[10] = _summarize(0, areaRng='medium', maxDets=self.params.maxDets[2])
stats[11] = _summarize(0, areaRng='large', maxDets=self.params.maxDets[2])
return stats
def _summarizeKps():
stats = np.zeros((10,))
stats[0] = _summarize(1, maxDets=20)
stats[1] = _summarize(1, maxDets=20, iouThr=.5)
stats[2] = _summarize(1, maxDets=20, iouThr=.75)
stats[3] = _summarize(1, maxDets=20, areaRng='medium')
stats[4] = _summarize(1, maxDets=20, areaRng='large')
stats[5] = _summarize(0, maxDets=20)
stats[6] = _summarize(0, maxDets=20, iouThr=.5)
stats[7] = _summarize(0, maxDets=20, iouThr=.75)
stats[8] = _summarize(0, maxDets=20, areaRng='medium')
stats[9] = _summarize(0, maxDets=20, areaRng='large')
return stats
def _summarizeRegs():
stats = np.zeros((10,))
stats[0] = _summarize(1, maxDets=20)
stats[1] = _summarize(1, maxDets=20, iouThr=.65)
stats[2] = _summarize(1, maxDets=20, iouThr=.8)
stats[3] = _summarize(1, maxDets=20, areaRng='medium')
stats[4] = _summarize(1, maxDets=20, areaRng='large')
stats[5] = _summarize(0, maxDets=20)
stats[6] = _summarize(0, maxDets=20, iouThr=.65)
stats[7] = _summarize(0, maxDets=20, iouThr=.8)
stats[8] = _summarize(0, maxDets=20, areaRng='medium')
stats[9] = _summarize(0, maxDets=20, areaRng='large')
return stats
if not self.eval:
raise Exception('Please run accumulate() first')
iouType = self.params.iouType
if iouType == 'segm' or iouType == 'bbox':
summarize = _summarizeDets
elif iouType == 'keypoints':
summarize = _summarizeKps
elif iouType == 'depth':
summarize = _summarizeRegs
self.stats = summarize()
|
StarcoderdataPython
|
68564
|
import datetime
from subprocess import CalledProcessError # nosec
from threading import Thread
from typing import Dict, List, Optional, Set
import boto3
import click
import pytz
import semver
from botocore.config import Config
from botocore.exceptions import ClientError
from colored import attr, fg
from opta.amplitude import amplitude_client
from opta.cleanup_files import cleanup_files
from opta.commands.local_flag import _clean_tf_folder, _handle_local_flag
from opta.constants import DEV_VERSION, TF_PLAN_PATH, UPGRADE_WARNINGS, VERSION
from opta.core.aws import AWS
from opta.core.azure import Azure
from opta.core.cloud_client import CloudClient
from opta.core.gcp import GCP
from opta.core.generator import gen, gen_opta_resource_tags
from opta.core.kubernetes import get_cluster_name, tail_module_log, tail_namespace_events
from opta.core.local import Local
from opta.core.plan_displayer import PlanDisplayer
from opta.core.terraform import Terraform, get_terraform_outputs
from opta.error_constants import USER_ERROR_TF_LOCK
from opta.exceptions import MissingState, UserErrors
from opta.layer import Layer, StructuredConfig
from opta.pre_check import pre_check
from opta.process import ApplyOptions
from opta.process import apply as apply2
from opta.utils import check_opta_file_exists, fmt_msg, logger
from opta.utils.clickoptions import local_option
from opta.utils.features import is_module_api_enabled
@click.command()
@click.option(
"-c", "--config", default="opta.yaml", help="Opta config file", show_default=True
)
@click.option(
"-e",
"--env",
default=None,
help="The env to use when loading the config file",
show_default=True,
)
@click.option(
"--refresh",
is_flag=True,
default=False,
help="Run from first block, regardless of current state",
hidden=True,
)
@click.option(
"--image-tag",
default=None,
type=str,
help="If this handles a service, it's the image tag you wanna deploy",
)
@click.option(
"--test",
is_flag=True,
default=False,
help="Run tf plan, but don't lock state file",
hidden=True,
)
@click.option(
"--auto-approve",
is_flag=True,
default=False,
help="Automatically approve terraform plan.",
)
@click.option(
"--detailed-plan",
is_flag=True,
default=False,
help="Show full terraform plan in detail, not the opta provided summary",
)
@local_option
def apply(
config: str,
env: Optional[str],
refresh: bool,
local: bool,
image_tag: Optional[str],
test: bool,
auto_approve: bool,
detailed_plan: bool,
) -> None:
"""Create or update infrastructure
Apply changes to match the Opta configuration
files in the current directory.
Examples:
opta apply --auto-approve
opta apply -c my-config.yaml --image-tag=v1
"""
config = check_opta_file_exists(config)
_apply(
config,
env,
refresh,
local,
image_tag,
test,
auto_approve,
detailed_plan=detailed_plan,
)
def _apply(
config: str,
env: Optional[str],
refresh: bool,
local: bool,
image_tag: Optional[str],
test: bool,
auto_approve: bool,
image_digest: Optional[str] = None,
stdout_logs: bool = True,
detailed_plan: bool = False,
) -> None:
if is_module_api_enabled():
opts = ApplyOptions(auto_approve=auto_approve, config_path=config)
apply2(opts)
return
pre_check()
_clean_tf_folder()
if local and not test:
config = _local_setup(config, image_tag, refresh_local_env=True)
layer = Layer.load_from_yaml(config, env)
layer.verify_cloud_credentials()
layer.validate_required_path_dependencies()
if Terraform.download_state(layer):
tf_lock_exists, _ = Terraform.tf_lock_details(layer)
if tf_lock_exists:
raise UserErrors(USER_ERROR_TF_LOCK)
_verify_parent_layer(layer, auto_approve)
event_properties: Dict = layer.get_event_properties()
amplitude_client.send_event(
amplitude_client.START_GEN_EVENT, event_properties=event_properties,
)
# We need a region with at least 3 AZs for leader election during failover.
# Also EKS historically had problems with regions that have fewer than 3 AZs.
if layer.cloud == "aws":
providers = layer.gen_providers(0)["provider"]
aws_region = providers["aws"]["region"]
azs = _fetch_availability_zones(aws_region)
if len(azs) < 3:
raise UserErrors(
fmt_msg(
f"""
Opta requires a region with at least *3* availability zones like us-east-1 or us-west-2.
~You configured {aws_region}, which only has the availability zones: {azs}.
~Please choose a different region.
"""
)
)
Terraform.create_state_storage(layer)
gen_opta_resource_tags(layer)
cloud_client: CloudClient
if layer.cloud == "aws":
cloud_client = AWS(layer)
elif layer.cloud == "google":
cloud_client = GCP(layer)
elif layer.cloud == "azurerm":
cloud_client = Azure(layer)
elif layer.cloud == "local":
if local: # boolean passed via cli
pass
cloud_client = Local(layer)
else:
raise Exception(f"Cannot handle upload config for cloud {layer.cloud}")
existing_config: Optional[StructuredConfig] = cloud_client.get_remote_config()
old_semver_string = (
""
if existing_config is None
else existing_config.get("opta_version", "").strip("v")
)
current_semver_string = VERSION.strip("v")
_verify_semver(old_semver_string, current_semver_string, layer, auto_approve)
try:
existing_modules: Set[str] = set()
first_loop = True
for module_idx, current_modules, total_block_count in gen(
layer, existing_config, image_tag, image_digest, test, True, auto_approve
):
if first_loop:
# This is set during the first iteration, since the tf file must exist.
existing_modules = Terraform.get_existing_modules(layer)
first_loop = False
configured_modules = set([x.name for x in current_modules]) - {
"runx"
} # Ignore runx module
is_last_module = module_idx == total_block_count - 1
has_new_modules = not configured_modules.issubset(existing_modules)
if not is_last_module and not has_new_modules and not refresh:
continue
if is_last_module:
untouched_modules = existing_modules - configured_modules
configured_modules = configured_modules.union(untouched_modules)
layer.pre_hook(module_idx)
if layer.cloud == "local":
if is_last_module:
targets = []
else:
targets = list(
map(lambda x: f"-target=module.{x}", sorted(configured_modules))
)
if test:
Terraform.plan("-lock=false", *targets, layer=layer)
print("Plan ran successfully, not applying since this is a test.")
else:
current_properties = event_properties.copy()
current_properties["module_idx"] = module_idx
amplitude_client.send_event(
amplitude_client.APPLY_EVENT, event_properties=current_properties,
)
logger.info("Planning your changes (might take a minute)")
try:
Terraform.plan(
"-lock=false",
"-input=false",
f"-out={TF_PLAN_PATH}",
layer=layer,
*targets,
quiet=True,
)
except CalledProcessError as e:
logger.error(e.stderr or "")
raise e
PlanDisplayer.display(detailed_plan=detailed_plan)
if not auto_approve:
click.confirm(
"The above are the planned changes for your opta run. Do you approve?",
abort=True,
)
logger.info("Applying your changes (might take a minute)")
service_modules = (
layer.get_module_by_type("k8s-service", module_idx)
if layer.cloud == "aws"
else layer.get_module_by_type("gcp-k8s-service", module_idx)
)
if (
len(service_modules) != 0
and get_cluster_name(layer.root()) is not None
and stdout_logs
):
service_module = service_modules[0]
# Tailing logs
logger.info(
f"Identified deployment for kubernetes service module {service_module.name}, tailing logs now."
)
new_thread = Thread(
target=tail_module_log,
args=(
layer,
service_module.name,
10,
datetime.datetime.utcnow().replace(tzinfo=pytz.UTC),
2,
),
daemon=True,
)
# Tailing events
new_thread.start()
new_thread = Thread(
target=tail_namespace_events,
args=(
layer,
datetime.datetime.utcnow().replace(tzinfo=pytz.UTC),
3,
),
daemon=True,
)
new_thread.start()
tf_flags: List[str] = []
if auto_approve:
tf_flags.append("-auto-approve")
try:
Terraform.apply(
layer, *tf_flags, TF_PLAN_PATH, no_init=True, quiet=False
)
except Exception as e:
layer.post_hook(module_idx, e)
raise e
else:
layer.post_hook(module_idx, None)
cloud_client.upload_opta_config()
logger.info("Opta updates complete!")
except Exception as e:
event_properties["success"] = False
event_properties["error_name"] = e.__class__.__name__
raise e
else:
event_properties["success"] = True
finally:
amplitude_client.send_event(
amplitude_client.FINISH_GEN_EVENT, event_properties=event_properties,
)
def _verify_semver(
old_semver_string: str,
current_semver_string: str,
layer: "Layer",
auto_approve: bool = False,
) -> None:
if old_semver_string in [DEV_VERSION, ""] or current_semver_string in [
DEV_VERSION,
"",
]:
return
old_semver = semver.VersionInfo.parse(old_semver_string)
current_semver = semver.VersionInfo.parse(current_semver_string)
if old_semver > current_semver:
raise UserErrors(
f"You're trying to run an older version of opta (last run with version {old_semver}). "
"Please update to the latest version and try again!"
)
present_modules = [k.aliased_type or k.type for k in layer.modules]
current_upgrade_warnings = sorted(
[
(k, v)
for k, v in UPGRADE_WARNINGS.items()
if current_semver >= k[0] > old_semver
and k[1] == layer.cloud
and k[2] in present_modules
],
key=lambda x: semver.VersionInfo.parse(x[0][0]),
)
for current_upgrade_warning in current_upgrade_warnings:
logger.info(
f"{fg('magenta')}WARNING{attr(0)}: Detecting an opta upgrade to or past version {current_upgrade_warning[0]}. "
f"Got the following warning: {current_upgrade_warning[1]}"
)
if not auto_approve and len(current_upgrade_warnings) > 0:
click.confirm(
"Are you ok with the aforementioned warnings and done all precautionary steps you wish to do?",
abort=True,
)
# Fetch the AZs of a region with boto3
def _fetch_availability_zones(aws_region: str) -> List[str]:
client = boto3.client("ec2", config=Config(region_name=aws_region))
azs: List[str] = []
resp = client.describe_availability_zones(
Filters=[{"Name": "zone-type", "Values": ["availability-zone"]}]
)
azs = list(map(lambda az: az["ZoneName"], resp["AvailabilityZones"]))
return azs
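# Hedged example (illustrative): _fetch_availability_zones("us-east-1") returns
# zone names such as ["us-east-1a", "us-east-1b", ...], subject to account access.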
# Verify whether the parent layer exists or not
def _verify_parent_layer(layer: Layer, auto_approve: bool = False) -> None:
if layer.parent is None:
return
try:
get_terraform_outputs(layer.parent)
except ClientError as e:
if e.response["Error"]["Code"] == "AccessDenied":
raise UserErrors(
f"We were unable to fetch Environment details for the Env {layer.parent.name}, on your AWS account (opta needs this to store state). "
"Usually, it means that your AWS account has insufficient permissions. "
"Please fix these issues and try again!"
)
except MissingState as e:
if not auto_approve:
click.confirm(
f"Failed to get the Environment state {e.args[0]} "
"Usually, this means that the Environment mentioned in configuration file does not exist. \n"
f"Would you like to create your environment using {layer.parent.path}?",
abort=True,
)
_apply(
layer.parent.path,
env=None,
refresh=False,
local=False,
image_tag=None,
test=False,
auto_approve=False,
)
cleanup_files()
def _local_setup(
config: str, image_tag: Optional[str] = "", refresh_local_env: bool = False
) -> str:
adjusted_config, localopta_envfile = _handle_local_flag(config, False)
if adjusted_config != config: # Only do this for service opta files
config = adjusted_config # Config for service
if refresh_local_env:
_apply(
config=localopta_envfile,
image_tag=image_tag,
auto_approve=True,
local=False,
env="",
refresh=True,
test=False,
detailed_plan=True,
)
_clean_tf_folder()
return config
|
StarcoderdataPython
|
9625243
|
<reponame>Twilighters/test_task_with_email
import logging
from selenium.webdriver.remote.webelement import WebElement
from common.constants import EmailConstants
from locators.email_page_locators import EmailPageLocators
from locators.login_page_locators import LoginPageLocators
from models.auth import AuthData
from pages.base_page import BasePage
logger = logging.getLogger("test-task")
class LoginPage(BasePage):
def click_first_login_button(self):
self.click_element(self.first_login_button())
def click_submit_button(self):
self.click_element(self.submit_button())
def input_email(self, text) -> None:
email_locator = self.find_element(LoginPageLocators.LOGIN_FIELD)
email_locator.send_keys(text)
def input_password(self, text) -> None:
password_locator = self.find_element(LoginPageLocators.PASSWORD_FIELD)
self.fill_element(password_locator, text)
def auth(self, data: AuthData):
        logger.info(f'User email is "{data.login}", user password is "{data.password}"')
self.app.login.click_first_login_button()
self.input_email(data.login)
self.click_submit_button()
self.input_password(data.password)
self.click_submit_button()
def submit_button(self) -> WebElement:
return self.find_element(LoginPageLocators.LOGIN_BUTTON)
def first_login_button(self) -> WebElement:
return self.find_element(LoginPageLocators.FIRST_LOGIN_BUTTON)
def is_auth(self):
element = self.find_element(EmailPageLocators.TO_WRITE_EMAIL_BUTTON).text
return element == EmailConstants.SEND_EMAIL_BUTTON_TEXT
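# Hedged usage sketch (illustrative, not in the original module): authenticating
# inside a test, assuming an `app` fixture that exposes this page object.
# app.login.auth(AuthData(login="user@example.com", password="secret"))
# assert app.login.is_auth()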
|
StarcoderdataPython
|
114325
|
<gh_stars>0
#!/usr/bin/python
#coding:utf-8
from mylogistic import *
import numpy as np
import sys
import argparse
def MultiClassification(train_file_x, train_file_y, test_file_x, test_file_y):
train_data = np.loadtxt(train_file_x, delimiter = ',', dtype = np.float)
train_label_data = np.loadtxt(train_file_y, delimiter = ',', dtype = np.float)
test_data = np.loadtxt(test_file_x, delimiter = ',', dtype = np.float)
test_label_data = np.loadtxt(test_file_y, delimiter = ',', dtype = np.float)
# train_data = np.column_stack((train_data, label_data))
# np.random.shuffle(train_data)
train_x = train_data
train_y = train_label_data
test_x = test_data
test_y = test_label_data
train_x = Normalizition(train_x)
test_x = Normalizition(test_x)
    # number of iterations
param = {}
    # multi-class classification
iteration = 70
param['objective'] = 'multi'
param['learning_rate'] = 15
param['num_iters'] = iteration
param['num_class'] = 10
param['lam'] = 1
model = MyLRModel(param)
model.Train(train_x, train_y)
    # predict on the training data
pre_y = model.Predict(train_x)
    print 'number of iterations: %d' % (iteration)
    print 'training set accuracy: %f' % (float(np.sum(train_y == pre_y)) / len(train_y))
pred_testy = model.Predict(test_x)
# print test_y
# print pred_testy
    print 'test set accuracy: %f' % (float(np.sum(test_y == pred_testy)) / len(test_y))
# for i in range(0, len(train_x)):
# j = np.random.permutation(len(train_x))[0]
# pred = model.Predict(train_x[j,:].reshape(1,train_x.shape[1]))
    # print 'digit predicted for this image:', pred
# ShowPicture(train_x, j)
if __name__ == '__main__':
    # argument parsing
    # parser = argparse.ArgumentParser(description = 'logistic regression')
    # parser.add_argument('--train', help='training file')
    # parser.add_argument('--label', help='label file (optional)')
#
# args = parser.parse_args()
# if args.train == None:
# parser.print_help()
# exit()
train_file_x, train_file_y, test_file_x, test_file_y = sys.argv[1:5]
MultiClassification(train_file_x, train_file_y, test_file_x, test_file_y)
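# Hedged usage note (illustrative; the script filename is hypothetical):
# python mylogistic_run.py train_x.csv train_y.csv test_x.csv test_y.csv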
|
StarcoderdataPython
|
12864461
|
import scrapy
from bs4 import BeautifulSoup
from lab3.items import Lab3Item
class QuoteSpider(scrapy.Spider):
name = 'quotes'
start_urls = ['http://quotes.toscrape.com/page/1/']
page_num = 1
    # parse the scraped page
def parse(self, response, **kwargs):
soup = BeautifulSoup(response.body, 'html.parser')
nodes = soup.find_all('div', {'class': 'quote'})
for node in nodes:
text = node.find('span', {'class': 'text'}).text
author = node.find('small', {'class': 'author'}).text
tags = node.find_all('a', {'class': 'tag'})
tags_list = []
for tag in tags:
tags_list.append(tag.text)
            # next, find the author link and crawl the details behind it
author_link = 'http://quotes.toscrape.com/' + node.find_all('span')[1].a['href']
            # hand off to author_parse for processing
yield response.follow(author_link, self.author_parse)
# print('{0:<4}:{1:<20} said:{2:<20}\n{3}'.format(self.page_num, author, text, tags_list))
item = Lab3Item(author=author, text=text, tags=tags_list)
yield item
print('=' * 80 + 'page:',self.page_num,'saved successfully!' + '=' * 80)
        # crawl the link to the next page
try:
self.page_num += 1
url = soup.find('li', {'class': 'next'}).a['href']
if url:
next_link = 'http://quotes.toscrape.com/' + url
yield scrapy.Request(next_link, callback=self.parse)
except Exception:
            print('Finished crawling all pages!')
def author_parse(self, response, **kwargs):
soup = BeautifulSoup(response.body, 'html.parser')
author_name = soup.find_all('div', {'class': 'author-details'})[0].find('h3').text
birthday = soup.find('span').text
bio = soup.find('div', {'class': 'author-description'}).text
# print('{}: {}\n{}\n{}\n'.format(self.page_num, author_name, birthday, bio))
item = Lab3Item(name=author_name, birthday=birthday, bio=bio)
yield item
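# Hedged usage note (illustrative): with the standard Scrapy project layout that
# lab3.items implies, this spider would typically be run from the project root as
# `scrapy crawl quotes -o quotes.json`.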
|
StarcoderdataPython
|
3577858
|
import os
from typing import List, Union, Optional, Any, Tuple
import asyncio
import aiohttp
import json
from configparser import ConfigParser
# import websocket
from slack.web.client import WebClient
from slack.signature.verifier import SignatureVerifier
from slack.web.slack_response import SlackResponse
from slack.errors import SlackApiError
from . import log
from . import control
from .control import CommandContext, MessageContext
from .env import Env
from .config import Config
from . import commands
from . import parser
from .state import State, StateElement
from .utils import get_optional
def assert_future(x: Union[asyncio.Future, SlackResponse]) -> asyncio.Future: # type: ignore
assert isinstance(x, asyncio.Future)
return x
logger = log.getLogger(__name__)
class StateSave(StateElement):
control: "SlackControl"
def __init__(self, control: "SlackControl") -> None:
self.control = control
async def save(self, state: State) -> None:
if state.has_section("slack"):
state["slack"] = {}
st = state["slack"]
st["channel_id"]= get_optional(self.control._channel_id, "")
class SlackControl(control.Control):
_client: WebClient
# _ws: websocket.WebSocketApp
_config: Config
_state: State
_channel_name: str
_channel_id: Optional[str]
_admin_channel_id: str
_api_token: str
_app_token: str
_ws_task: Any # async_io.Task[Any] won't work with Python..
_aiohttp_session: aiohttp.ClientSession
def __init__(self, env: Env) -> None:
super().__init__()
self._config = env.config
self._state = env.state
api_token = os.environ.get("SLACK_API_TOKEN")
if api_token is None:
api_token = self._config.get("slack", "slack_api_secret_id")
app_token = os.environ.get("SLACK_APP_TOKEN")
if app_token is None:
app_token = self._config.get("slack", "slack_app_secret_id")
admin_channel_id = os.environ.get("SLACK_ADMIN_CHANNEL_ID")
if admin_channel_id is None:
admin_channel_id = self._config.get("slack", "slack_admin_channel_id")
self._api_token = api_token
self._app_token = app_token
self._admin_channel_id = admin_channel_id
channel_name = self._config.get("slack", "channel", empty_is_none=True)
if channel_name[0] != "#":
raise control.ConfigError("Expected channel name to start with #")
self._channel_name = channel_name
self._channel_id = self._state.get("slack", "channel_id", fallback=None)
self._client = WebClient(token=api_token, run_async=True)
self._aiohttp_session = aiohttp.ClientSession()
async def setup(self) -> None:
if self._channel_id is None:
result = await assert_future(self._client.api_call(
api_method="users.conversations",
json={}
))
if result["ok"]:
logger.debug(f"result: {result}")
ids = [channel["id"] for channel in result["channels"] if f"#{channel['name']}" == self._channel_name]
if ids:
self._channel_id = ids[0]
await self._state.save()
else:
raise control.ConfigError(f"Could not find channel {self._channel_name} from the list of joined conversations")
# logger.info("Post message")
# await assert_future(self._client.api_call(
# api_method="chat.postMessage",
# json={"channel": self._channel_id,
# "text": "hello world"}
# ))
self._ws_task = asyncio.create_task(self._ws_handler())
async def run(self) -> None:
pass
async def _ws_handler(self) -> None:
num_retries = 0
def sleep_time() -> float:
return min(120, pow(1.15, num_retries) * 10)
try:
while True:
ws_url: Optional[str] = None
async with self._aiohttp_session.post("https://slack.com/api/apps.connections.open",
headers={"Authorization": f"Bearer {self._app_token}",
"Content-type": "application/x-www-form-urlencoded"}) as response:
if response.status == 200:
data = json.loads(await response.text())
if bool(data.get("ok")):
ws_url = data["url"]
if ws_url is None:
logger.error(f"Failed to acquire web socket URL; sleeping {sleep_time()} seconds and trying again")
await asyncio.sleep(sleep_time())
num_retries += 1
else:
got_messages = False
async with aiohttp.ClientSession().ws_connect(ws_url) as session:
logger.debug(f"Established websocket connection, waiting first message..")
async for message in session:
if not got_messages:
logger.info(f"Established websocket connection successfully")
if not message.data:
logger.error("Message did not contain data")
break
got_messages = True
json_message = json.loads(message.data)
logger.info(f"json_message: {json_message}")
try:
envelope_id = json_message.get("envelope_id")
except Exception as exn:
logger.error(f"exception1: {exn}")
raise exn
# ack first, handle later, so we don't end up reprocessing crashing commands..
if envelope_id is not None:
ack = {"envelope_id": envelope_id}
logger.debug(f"acking with {ack}")
await session.send_json(ack)
logger.debug(f"acked")
# Filter through bot messages and set admin rights
text = json_message.get("payload", {}).get("event", {}).get("text", None)
bot = json_message.get("payload", {}).get("event", {}).get("bot_id", None)
if text is not None and bot is None:
admin_room = json_message.get("payload", {}).get("event", {}).get("channel", None) == self._admin_channel_id
command_context = CommandContext(admin_room=admin_room,
control=self)
await self.process_message(command_context, text)
if bot is not None:
logger.debug(f"Not processing bot messages as commands")
if got_messages:
logger.error(f"Web socket session terminated: sleeping 10 seconds and reconnecting")
await asyncio.sleep(10)
num_retries = 0
else:
logger.error(f"Web socket session terminated without receiving any data: sleeping {sleep_time()} seconds and reconnecting")
await asyncio.sleep(sleep_time())
num_retries += 1
except Exception as exn:
logger.error(f"exception: {exn}")
raise exn
async def _command_ping(self, context: CommandContext, valid: Tuple[()]) -> None:
await self.send_message(context.to_message_context(), "pong")
async def send_message(self,
message_context: control.MessageContext,
message: str) -> None:
assert len(message) == 0 or message[0] != "!"
try:
response = await assert_future(self._client.api_call(
api_method="chat.postMessage",
json={"channel": self._channel_id,
"text": message}
))
except SlackApiError as exn:
assert exn.response["ok"] is False
error = exn.response["error"] # str like 'invalid_auth', 'channel_not_found'
raise control.MessageSendError(error) from exn
pass
|
StarcoderdataPython
|
6625967
|
"""
Functions for basic reading and writing of PENMAN graphs.
"""
from typing import Union, Iterable, List
from pathlib import Path
from penman.codec import PENMANCodec
from penman.model import Model
from penman.graph import Graph
from penman.types import (Variable, file_or_filename)
def decode(s: str,
model: Model = None) -> Graph:
"""
Deserialize PENMAN-serialized *s* into its Graph object
Args:
s: a string containing a single PENMAN-serialized graph
model: the model used for interpreting the graph
Returns:
the Graph object described by *s*
Example:
>>> from penman.interface import decode
>>> decode('(b / bark-01 :ARG0 (d / dog))')
<Graph object (top=b) at ...>
"""
codec = PENMANCodec(model=model)
return codec.decode(s)
def encode(g: Graph,
top: Variable = None,
model: Model = None,
indent: Union[int, bool] = -1,
compact: bool = False) -> str:
"""
Serialize the graph *g* from *top* to PENMAN notation.
Args:
g: the Graph object
top: if given, the node to use as the top in serialization
model: the model used for interpreting the graph
indent: how to indent formatted strings
compact: if ``True``, put initial attributes on the first line
Returns:
the PENMAN-serialized string of the Graph *g*
Example:
>>> from penman.interface import encode
>>> from penman.graph import Graph
>>> encode(Graph([('h', 'instance', 'hi')]))
'(h / hi)'
"""
codec = PENMANCodec(model=model)
return codec.encode(g,
top=top,
indent=indent,
compact=compact)
def load(source: file_or_filename,
model: Model = None) -> List[Graph]:
"""
Deserialize a list of PENMAN-encoded graphs from *source*.
Args:
source: a filename or file-like object to read from
model: the model used for interpreting the graph
Returns:
a list of Graph objects
"""
codec = PENMANCodec(model=model)
if isinstance(source, (str, Path)):
with open(source) as fh:
return list(codec.iterdecode(fh))
else:
assert hasattr(source, 'read')
return list(codec.iterdecode(source))
def loads(string: str,
model: Model = None) -> List[Graph]:
"""
Deserialize a list of PENMAN-encoded graphs from *string*.
Args:
string: a string containing graph data
model: the model used for interpreting the graph
Returns:
a list of Graph objects
"""
codec = PENMANCodec(model=model)
return list(codec.iterdecode(string))
def dump(graphs: Iterable[Graph],
file: file_or_filename,
model: Model = None,
indent: Union[int, bool] = -1,
compact: bool = False) -> None:
"""
Serialize each graph in *graphs* to PENMAN and write to *file*.
Args:
graphs: an iterable of Graph objects
file: a filename or file-like object to write to
model: the model used for interpreting the graph
indent: how to indent formatted strings
compact: if ``True``, put initial attributes on the first line
"""
codec = PENMANCodec(model=model)
if isinstance(file, (str, Path)):
with open(file, 'w') as fh:
_dump(fh, graphs, codec, indent, compact)
else:
assert hasattr(file, 'write')
_dump(file, graphs, codec, indent, compact)
def _dump(fh, gs, codec, indent, compact):
"""Helper method for dump() for incremental printing."""
ss = (codec.encode(g, indent=indent, compact=compact)
for g in gs)
try:
print(next(ss), file=fh)
except StopIteration:
return
for s in ss:
print(file=fh)
print(s, file=fh)
def dumps(graphs: Iterable[Graph],
model: Model = None,
indent: Union[int, bool] = -1,
compact: bool = False) -> str:
"""
Serialize each graph in *graphs* to the PENMAN format.
Args:
graphs: an iterable of Graph objects
model: the model used for interpreting the graph
indent: how to indent formatted strings
compact: if ``True``, put initial attributes on the first line
Returns:
the string of serialized graphs
"""
codec = PENMANCodec(model=model)
strings = [codec.encode(g, indent=indent, compact=compact)
for g in graphs]
return '\n\n'.join(strings)
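# Hedged round-trip sketch (illustrative, based on the docstring examples above):
if __name__ == '__main__':
    g = decode('(b / bark-01 :ARG0 (d / dog))')
    print(encode(g, indent=2))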
|
StarcoderdataPython
|
9727396
|
<reponame>denisri/soma-workflow
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
import six
import os
import inspect
import importlib
class Scheduler(object):
'''
Allow to submit, kill and get the status of jobs.
The Scheduler class is an abstract class which specifies the jobs
management API. It has several implementations, located in
``soma_workflow.schedulers.*_scheduler``.
    A scheduler implementation class can be retrieved using the global function
    :func:`get_scheduler_implementation`, or instantiated using
    :func:`build_scheduler`.
    New schedulers can be written to support computing resource types that are
    currently not supported (typically a cluster whose DRMS has no DRMAA
    implementation). The various methods of the Scheduler API have
    to be overloaded in this case.
'''
parallel_job_submission_info = None
logger = None
is_sleeping = None
def __init__(self):
self.parallel_job_submission_info = None
self.is_sleeping = False
def sleep(self):
self.is_sleeping = True
def wake(self):
self.is_sleeping = False
def clean(self):
pass
def job_submission(self, job):
'''
Submit a Soma-Workflow job
Parameters
----------
job: EngineJob
Job to be submitted
Returns
-------
job_id: string
Job id for the scheduling system (DRMAA for example, or native DRMS
identifier)
'''
raise Exception("Scheduler is an abstract class!")
def get_job_status(self, scheduler_job_id):
'''
Parameters
----------
scheduler_job_id: string
Job id for the scheduling system (DRMAA for example)
Returns
-------
status: string
Job status as defined in constants.JOB_STATUS
'''
raise Exception("Scheduler is an abstract class!")
def get_job_exit_info(self, scheduler_job_id):
'''
The exit info consists of 4 values returned in a tuple:
**exit_status**: string
one of the constants.JOB_EXIT_STATUS values
**exit_value**: int
exit code of the command (normally 0 in case of success)
**term_sig**: int
            termination signal, 0 if OK
**resource_usage**: bytes
bytes string in the shape
``b'cpupercent=60 mem=13530kb cput=00:00:12'`` etc. Items may include:
* cpupercent
* cput
* mem
* vmem
* ncpus
* walltime
Parameters
----------
scheduler_job_id: string
Job id for the scheduling system (DRMAA for example)
Returns
-------
exit_info: tuple
exit_status, exit_value, term_sig, resource_usage
'''
raise Exception("Scheduler is an abstract class!")
def kill_job(self, scheduler_job_id):
'''
Parameters
----------
scheduler_job_id: string
Job id for the scheduling system (DRMAA for example)
'''
raise Exception("Scheduler is an abstract class!")
@classmethod
def build_scheduler(cls, config):
''' Create a scheduler of the expected type, using configuration to
parameterize it.
Parameters
----------
config: Configuration
configuration object instance
'''
raise Exception("Scheduler is an abstract class!")
def get_scheduler_implementation(scheduler_type):
''' Get the scheduler class implementation corresponding to the expected
one.
Parameters
----------
scheduler_type: str
scheduler type: 'drmaa', 'drmaa2', 'local_basic', 'mpi', or other
custom scheduler
Returns
-------
scheduler_class: Scheduler subclass
'''
from . import schedulers
if scheduler_type == 'local_basic':
scheduler_type = 'local'
sched_dir = os.path.dirname(schedulers.__file__)
if os.path.exists(os.path.join(sched_dir,
'%s_scheduler.py' % scheduler_type)):
sched_mod = '%s_scheduler' % scheduler_type
# try:
module = importlib.import_module('.%s' % sched_mod,
'soma_workflow.schedulers')
sched_list = []
# if there is a __main_scheduler__, just use it
scheduler = getattr(module, '__main_scheduler__', None)
if scheduler is not None:
return scheduler
for element in six.itervalues(module.__dict__):
if element in sched_list:
continue # avoid duplicates
if inspect.isclass(element) and element is not Scheduler \
and issubclass(element, Scheduler):
sched_list.append(element)
if element.__name__.lower() == ('%sscheduler'
% scheduler_type).lower():
# fully matching
return element
if len(sched_list) == 1:
# unambiguous
return sched_list[0]
if len(sched_list) == 0:
print('Warning: module soma_workflow.schedulers.%s contains '
'no scheduler:' % sched_mod)
else:
print('Warning: module soma_workflow.schedulers.%s contains '
'several schedulers:' % sched_mod)
print([s.__name__ for s in sched_list])
# except ImportError:
raise NameError('scheduler type %s is not found' % scheduler_type)
def build_scheduler(scheduler_type, config):
''' Create a scheduler of the expected type, using configuration to
parameterize it.
Parameters
----------
scheduler_type: string
type of scheduler to be built
config: Configuration
configuration object
Returns
-------
scheduler: Scheduler
Scheduler instance
'''
scheduler_class = get_scheduler_implementation(scheduler_type)
scheduler = scheduler_class.build_scheduler(config)
return scheduler
def get_schedulers_list():
'''
List all available installed schedulers
Returns
-------
schedulers: list
schedulers list. Each item is a tuple (name, enabled)
'''
from . import schedulers
dirname = os.path.dirname(schedulers.__file__)
sched_files = os.listdir(dirname)
schedulers = []
for sched_file in sched_files:
if sched_file.endswith('_scheduler.py'):
sched_mod = sched_file[:-3]
enabled = True
try:
module = importlib.import_module('.%s' % sched_mod,
'soma_workflow.schedulers')
except NotImplementedError:
continue # skip not implemented / unfinished ones
except Exception:
enabled = False
if sched_mod == 'local_scheduler':
sched_mod = 'local_basic_scheduler'
sched = sched_mod[:-10]
schedulers.append((sched, enabled))
return schedulers
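# Minimal usage sketch (assumes a soma-workflow Configuration instance named
# `config`; which scheduler names appear depends on what is installed):
#     for name, enabled in get_schedulers_list():
#         print(name, 'available' if enabled else 'broken')
#     scheduler = build_scheduler('local_basic', config)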
|
StarcoderdataPython
|
318275
|
/home/runner/.cache/pip/pool/1f/25/29/96266bdb681f6a20eae8a895c4d0785df90bfbe7af62a169fbb690708a
|
StarcoderdataPython
|
1879686
|
"""
Created on 2018-08-04
@author: <NAME>
<EMAIL>
"""
import time
import warnings
from concurrent import futures
from typing import Dict, List, Type
import numpy as np
import torch
import torch.nn as nn
from nord.configurations.all import Configs
from nord.neural_nets import NeuralDescriptor, NeuralNet
from nord.data_curators import (get_cifar10_distributed,
get_cifar10,
get_fashion_mnist,
get_activity_recognition_data)
from nord.utils import progress_bar
try:
import horovod.torch as hvd
hvd_available = True
except Exception:
hvd_available = False
warnings.warn('Horovod not found')
try:
from mpi4py import MPI
except Exception:
warnings.warn('mpi4py not found')
class AbstractNeuralEvaluator():
"""A class to load a dataset and evaluate a network on it
by distributing the load across N workers.
"""
def __init__(self, optimizer_class: Type,
optimizer_params: Dict, verbose: bool):
(self.trainloader, self.trainsampler,
self.testloader, self.testsampler,
self.classes) = [None]*5
self.data_loaded = False
self.optimizer_class = optimizer_class
self.optimizer_params = optimizer_params
self.verbose = verbose
self.conf = Configs()
def load_data(self, data_percentage: float):
"""Check if the data is loaded and act accordingly.
This is to make sure that the distributed environment
has been initialized. Instantiate trainloader, trainsampler,
testloader, testsampler, classes
"""
raise NotImplementedError
def set_device(self):
"""Returns the device that will run the experiment,
usually cuda:dev_no or cpu
"""
raise NotImplementedError
def get_optimizer(self, net: nn.Module):
"""Returns the optimizer to train the network
"""
raise NotImplementedError
def print_status_bar(self, batch_id: int, loss: float):
"""Print status after each epoch
"""
raise NotImplementedError
def descriptor_to_net(self, descriptor: NeuralDescriptor, untrained: bool):
"""Make a net from descriptor, accounting for any distributed comms
"""
raise NotImplementedError
def process_returns(self, loss: float, metrics: dict):
"""Process loss and metrics, accounting for any distributed testing
"""
raise NotImplementedError
def train(self, net: nn.Module, epochs: int):
"""Network training.
Parameters
----------
net : torch.nn.Module
The neural network.
epochs : int
The number of epochs to train the network.
"""
device = self.set_device()
try:
print(MPI.COMM_WORLD.rank, 'Train Device: ', device)
except Exception:
print('Train Device: ', device)
net.to(device)
net.train()
optimizer = self.get_optimizer(net)
# for p in net.parameters():
# print(p)
# break
criterion_loss = self.conf.CRITERION[self.dataset]()
if self.conf.CRITERION[self.dataset] == nn.KLDivLoss:
def criterion(x, y): return criterion_loss(x.float(), y.float())
else:
def criterion(x, y): return criterion_loss(x, y)
for epoch in range(epochs): # loop over the dataset multiple times
if self.trainsampler is not None:
print('TRAINSAMPLER NOT NONE')
self.trainsampler.set_epoch(epoch)
test_loss = 0
for batch_idx, data in enumerate(self.trainloader, 0):
# get the inputs
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
test_loss += loss.item()
self.print_status_bar(batch_idx, test_loss)
# criterion.increase_threshold()
del optimizer
def test(self, net: nn.Module):
"""Distributed network evaluation.
Parameters
----------
net : torch.nn.Module
The neural network.
Returns
-------
test_metrics : float
The average metrics.
"""
device = self.set_device()
try:
print(MPI.COMM_WORLD.rank, 'Test Device: ', device)
except Exception:
print('Test Device: ', device)
net.to(device)
net.eval()
test_loss = 0
criterion_loss = self.conf.CRITERION[self.dataset]()
if self.conf.CRITERION[self.dataset] == nn.KLDivLoss:
def criterion(x, y): return criterion_loss(x.float(), y.float())
else:
def criterion(x, y): return criterion_loss(x, y)
all_predicted = []
all_targets = []
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(self.testloader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = net(inputs)
loss = criterion(outputs, targets)
test_loss += loss.item()
all_predicted.extend(outputs)
all_targets.extend(targets)
self.print_status_bar(batch_idx, test_loss)
if len(self.testloader) > 0:
test_loss /= len(self.testloader)
if self.testsampler is not None:
if len(self.testsampler) > 0:
test_loss /= len(self.testsampler)
metrics = {}
all_predicted = torch.stack(all_predicted)
all_targets = torch.stack(all_targets)
for metric in self.conf.METRIC[self.dataset]:
metrics.update(metric(all_predicted, all_targets))
return test_loss, metrics
def descriptor_evaluate(self, descriptor: NeuralDescriptor, epochs: int,
data_percentage: float = 1.0,
untrained: bool = False, dataset: str = None):
"""Distributed network evaluation, with a NeuralDescriptor input.
Parameters
----------
descriptor : NeuralDescriptor
The neural network's descriptor object.
untrained : bool (optional)
If True, skip the training.
epochs : int
Number of epochs that the net will be trained.
Returns
-------
test_metrics : dict
The average metrics.
loss: float
The value of the loss function.
total_time:
The time required to train
"""
self.load_data(data_percentage, dataset)
net = self.descriptor_to_net(descriptor, untrained)
print(net)
return self.net_evaluate(net, epochs, data_percentage,
untrained, dataset)
def net_evaluate(self, net: nn.Module, epochs: int,
data_percentage: float = 1.0, untrained: bool = False,
dataset: str = None, return_net=False):
"""Distributed network evaluation, with a NeuralDescriptor input.
Parameters
----------
descriptor : NeuralDescriptor
The neural network's descriptor object.
untrained : bool (optional)
If True, skip the training.
epochs : int
Number of epochs that the net will be trained.
Returns
-------
test_metrics : dict
The average metrics across all workers.
loss: float
The value of the loss function.
total_time:
The time required to train
"""
self.load_data(data_percentage, dataset)
start_time = time.time()
if not net.functional:
metrics = {}
for metric in self.conf.METRIC[self.dataset]:
metrics.update(
metric(torch.Tensor([[1], [0]]), torch.Tensor([[-1], [2]]))
)
return 0, metrics, 0
if not untrained:
self.train(net, epochs)
total_time = time.time() - start_time # in seconds
loss, metrics = self.test(net)
loss, metrics = self.process_returns(loss, metrics)
if not return_net:
return loss, metrics, total_time
else:
return loss, metrics, total_time, net
class LocalEvaluator(AbstractNeuralEvaluator):
def load_data(self, data_percentage: float, dataset: str):
if not self.data_loaded:
self.data_loaded = True
self.dataset = dataset
(self.trainloader,
self.testloader,
self.classes) = self.conf.DATA_LOAD[self.dataset](data_percentage)
def set_device(self):
"""Returns the device that will run the experiment,
usually cuda:dev_no or cpu
"""
if not torch.cuda.is_available():
return 'cpu'
if hvd_available:
try:
return 'cuda:%d' % hvd.local_rank() # If horovod is initialized
except ValueError as e:
print(e)
warnings.warn('Horovod not initialized')
try:
return 'cuda:%d' % MPI.COMM_WORLD.rank # If mpi is running
except Exception as ee:
print(ee)
warnings.warn(
'MPI not initialized, using one GPU per node.')
return 'cuda:0'
def get_optimizer(self, net: nn.Module):
"""Returns the optimizer to train the network
"""
return self.optimizer_class(net.parameters(), **self.optimizer_params)
def print_status_bar(self, batch_id: int, loss: float):
"""Print status after each epoch
"""
if self.verbose:
progress_bar(batch_id, len(self.trainloader), 'Loss: %.2f'
% (loss/(batch_id+1)))
def descriptor_to_net(self, descriptor: NeuralDescriptor, untrained: bool):
"""Make a net from descriptor, accounting for any distributed comms
"""
return NeuralNet(descriptor, self.conf.NUM_CLASSES[self.dataset],
self.conf.INPUT_SHAPE[self.dataset], self.conf.CHANNELS[self.dataset],
untrained=untrained,
keep_dimensions=self.conf.DIMENSION_KEEPING[self.dataset],
dense_part=self.conf.DENSE_PART[self.dataset])
def process_returns(self, loss: float, metrics: dict):
"""Process loss and metrics, accounting for any distributed testing
"""
if self.verbose:
print('Metrics of the network on the test images: ', (
metrics))
print('Loss of the network on the test images: %.4f ' % (
loss))
return loss, metrics
class LocalBatchEvaluator(LocalEvaluator):
def train_work(self, params: tuple):
optimizer, criterion, net, inputs, labels, my_id = params
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
my_loss = loss.item()
return my_id, my_loss
def train(self, nets: List[nn.Module], epochs: int):
device = self.set_device()
print('Device: ', device)
ex = futures.ThreadPoolExecutor(max_workers=4)
optimizers = []
for net in nets:
net.train()
net.to(device)
optimizer = self.get_optimizer(net)
optimizers.append(optimizer)
t = time.time()
criterion = self.conf.CRITERION[self.dataset]()
for epoch in range(epochs): # loop over the dataset multiple times
if self.trainsampler is not None:
self.trainsampler.set_epoch(epoch)
test_losses = [0 for _ in range(len(nets))]
for batch_idx, data in enumerate(self.trainloader, 0):
# get the inputs
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
args = ((optimizers[i], criterion, nets[i],
inputs, labels, i) for i in range(len(nets)))
results = ex.map(self.train_work, args)
for i, res_loss in results:
# print(res_loss)
test_loss = test_losses[i]
test_losses[i] = test_loss + res_loss
# self.print_status_bar(batch_idx, test_loss)
print(time.time()-t)
# criterion.increase_threshold()
def descriptors_evaluate(self, descriptors: List[NeuralDescriptor],
epochs: int, data_percentage: float = 1.0,
untrained: bool = False,
dataset: str = None):
"""Distributed network evaluation, with a NeuralDescriptor input.
Parameters
----------
descriptor : NeuralDescriptor
The neural network's descriptor object.
untrained : bool (optional)
If True, skip the training.
epochs : int
Number of epochs that the net will be trained.
Returns
-------
test_accuracy : float
The average accuracy across all workers.
"""
nets = []
for descriptor in descriptors:
net = self.descriptor_to_net(descriptor, untrained)
nets.append(net)
return self.nets_evaluate(nets, epochs, data_percentage,
untrained, dataset)
def nets_evaluate(self, nets: List[nn.Module], epochs: int,
data_percentage: float = 1.0, untrained: bool = False,
dataset: str = None):
"""Distributed network evaluation, with a NeuralDescriptor input.
Parameters
----------
descriptor : NeuralDescriptor
The neural network's descriptor object.
untrained : bool (optional)
If True, skip the training.
epochs : int
Number of epochs that the net will be trained.
Returns
-------
test_accuracy : float
The average accuracy across all workers.
"""
self.load_data(data_percentage, dataset)
nets_no = len(nets)
losses = np.zeros(nets_no)
accuracies = np.zeros(nets_no)
non_functionals = []
start_time = time.time()
for i in range(nets_no):
net = nets[i]
if not net.functional:
non_functionals.append(i)
if not untrained:
self.train(nets, epochs)
total_time = time.time() - start_time # in seconds
for i in range(nets_no):
if i not in non_functionals:
                loss, accuracy = self.test(nets[i])
loss, accuracy = self.process_returns(loss, accuracy)
losses[i] = loss
accuracies[i] = accuracy
return losses, accuracies, total_time
class DistributedEvaluator(AbstractNeuralEvaluator):
def load_data(self, data_percentage: float, dataset: str):
if not self.data_loaded:
self.data_loaded = True
if dataset == 'cifar10':
(self.trainloader, self.trainsampler,
self.testloader, self.testsampler,
self.classes) = get_cifar10_distributed(hvd.size(),
hvd.rank(),
data_percentage)
else:
raise NotImplementedError
def set_device(self):
"""Returns the device that will run the experiment,
usually cuda:dev_no or cpu
"""
if not torch.cuda.is_available():
return 'cpu'
return 'cuda:%d' % hvd.local_rank()
def get_optimizer(self, net: nn.Module):
"""Returns the optimizer to train the network
"""
optimizer = self.optimizer_class(
net.parameters(), **self.optimizer_params)
optimizer = hvd.DistributedOptimizer(
optimizer, named_parameters=net.named_parameters())
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
return optimizer
def print_status_bar(self, batch_id: int, loss: float):
"""Print status after each epoch
"""
if hvd.rank() == 0 and self.verbose:
progress_bar(batch_id, len(self.trainloader), 'Loss: %.2f'
% (loss/(batch_id+1)))
def descriptor_to_net(self, descriptor: NeuralDescriptor, untrained: bool):
"""Make a net from descriptor, accounting for any distributed comms
"""
MPI.COMM_WORLD.barrier()
descriptor = MPI.COMM_WORLD.bcast(descriptor)
net = NeuralNet(descriptor, self.conf.NUM_CLASSES[self.dataset],
self.conf.INPUT_SHAPE[self.dataset], self.conf.CHANNELS[self.dataset],
untrained=untrained,
keep_dimensions=self.conf.DIMENSION_KEEPING[self.dataset])
hvd.broadcast_parameters(net.state_dict(), root_rank=0)
return net
def process_returns(self, loss: float, accuracy: float):
"""Process loss and accuracy, accounting for any distributed testing
"""
loss = MPI.COMM_WORLD.allreduce(loss)/hvd.size()
accuracy = MPI.COMM_WORLD.allreduce(accuracy)/hvd.size()
if hvd.rank() == 0 and self.verbose:
print('Accuracy of the network on the test images: %.2f %%' % (
accuracy))
print('Loss of the network on the test images: %.4f ' % (
loss))
return loss, accuracy
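# Hypothetical single-machine usage sketch (the optimizer class and parameters
# are assumptions, not prescribed by nord; `descriptor` is a NeuralDescriptor
# built elsewhere):
#     import torch.optim as optim
#     evaluator = LocalEvaluator(optim.Adam, {'lr': 1e-3}, verbose=True)
#     loss, metrics, train_time = evaluator.descriptor_evaluate(
#         descriptor, epochs=5, dataset='cifar10')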
|
StarcoderdataPython
|
8164280
|
#!/usr/bin/env python3
"""
Using two template images, align images from one template space to another.
"""
import subprocess
from os import PathLike
from pathlib import Path
from typing import List
def main(from_images: List[PathLike], from_template: PathLike, to_template: PathLike, to_dir: PathLike, suffix: str = "_aligned") -> None:
"""
Using two template images, align images from one template space to another.
Args:
from_images (List[PathLike]): Paths to images you want to align.
from_template (PathLike): Template the input images are aligned to.
to_template (PathLike): What to align the images to.
to_dir (PathLike): Where you want the results to be written to.
suffix (str, optional): Suffix to append to filenames. Defaults to "_aligned".
"""
from_images = [Path(from_image).resolve() for from_image in from_images]
from_template = Path(from_template).resolve()
to_template = Path(to_template).resolve()
to_dir = Path(to_dir).resolve()
to_dir.mkdir(parents=True, exist_ok=True)
_align_using_templates(from_images, from_template, to_template, to_dir, suffix)
def _align_using_templates(from_images: List[PathLike], from_template: PathLike, to_template: PathLike, cwd: PathLike, suffix: str) -> None:
"""
Using two template images, align images from one template space to another. Requires that input paths be full paths.
Args:
from_images (List[PathLike]): Paths to images you want to align.
from_template (PathLike): Template the input images are aligned to.
to_template (PathLike): What to align the images to.
cwd (PathLike): Where you want the results to be written to.
suffix (str): Suffix to append to filenames.
"""
print(f"Aligning images to {to_template}")
command = f"""
align_epi_anat.py
-dset1 {from_template}
-dset2 {to_template}
-dset1to2
-dset1_strip None
-dset2_strip None
-suffix {suffix}
-child_dset1
""".split()
command += from_images
print(command)
subprocess.run(command, cwd=cwd)
|
StarcoderdataPython
|
6525560
|
<filename>expenseApp/urls.py<gh_stars>0
from django.urls import path
from . import views
urlpatterns = [
path('', views.userList, name='index'),
path('create/', views.create, name='create'),
path('create/<int:pk>/deposit', views.deposit, name='deposit'),
path('create/<int:pk>/expense', views.expense, name='expense'),
path('detail/<int:pk>',views.detail, name='detail'),
path('newUser/', views.newUser, name='newUser'),
path('create/<int:pk>/edit', views.edit, name='edit'),
]
|
StarcoderdataPython
|
11361500
|
<filename>qa/rpc-tests/test_framework/cashlib/__init__.py<gh_stars>100-1000
# Copyright (c) 2018 The Bitcoin Unlimited developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from .cashlib import init, bin2hex, signTxInput, signTxInputSchnorr, signHashSchnorr, randombytes, pubkey, spendscript, addrbin, txid, sha256, hash256, hash160, SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE, SIGHASH_FORKID, SIGHASH_ANYONECANPAY, signData, ScriptMachine, ScriptFlags, ScriptError, Error, BCH
|
StarcoderdataPython
|
11240668
|
<filename>src/model/rule_transformation.py
from sqlalchemy.sql.expression import null
from sqlalchemy.sql.schema import ForeignKey
from sqlalchemy.ext.hybrid import hybrid_property
from database import BaseModel
from sqlalchemy.orm import relationship
from sqlalchemy.sql.sqltypes import Integer, String
from sqlalchemy import Column
from mixins.key_value_mixin import KeyValueMixin
from model.transaction import Transaction
class RuleTransformation(BaseModel, KeyValueMixin):
__tablename__ = 'rule_transformations'
rule_id = Column(Integer, ForeignKey('rules.id'))
attribute_name = Column(String, nullable=False)
attribute_value = Column(String)
value_type = Column(String(8), default='str')
rule = relationship('Rule', back_populates='rule_transformations')
@classmethod
def build(cls, attribute_name, attribute_value, value_type):
rule_transformation = cls()
rule_transformation.attribute_name = attribute_name
rule_transformation.attribute_value = attribute_value
rule_transformation.value_type = value_type
return rule_transformation
def get_as_string(self, indentation_size=2, indentation_start=4) -> str:
indentation_lvl_1 = ' ' * indentation_start
        return f'{indentation_lvl_1}Attribute Name: {self.attribute_name} - Attribute value: {self.attribute_value} ({self.value_type})'
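# Hypothetical usage sketch (attribute values are illustrative only):
#     rt = RuleTransformation.build('category', 'groceries', 'str')
#     print(rt.get_as_string())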
|
StarcoderdataPython
|
6478190
|
import peewee
from awsanalysis.a_loader import ALoader
import boto3
class SgLoader(ALoader):
def dep(self):
return set()
def setup(self):
db = self._dbMgr.getDB()
class SgTable(peewee.Model):
id = peewee.CharField(primary_key=True)
name = peewee.CharField()
class Meta:
database = db
"""
        type: 1: ingress ip, 2: ingress sg, 6: egress ip, 7: egress sg
"""
class SgRuleTable(peewee.Model):
id = peewee.IntegerField(primary_key=True)
sgid = peewee.CharField()
ruleType = peewee.IntegerField()
fromPort = peewee.CharField()
toPort = peewee.CharField()
source = peewee.CharField()
protocol = peewee.CharField()
class Meta:
database = db
self._dbMgr.addModel("SgTable", SgTable)
self._dbMgr.addModel("SgRuleTable", SgRuleTable)
SgTable.create_table()
SgRuleTable.create_table()
def load(self):
filters = []
if self._conf.get("vpcid", False):
filters = [{
'Name': 'vpc-id',
'Values': [self._conf.get("vpcid")]
}]
ec2 = boto3.client('ec2')
response = ec2.describe_security_groups(Filters=filters)
sgArr = []
sgRuleArr = []
for sg in response["SecurityGroups"]:
tmpSgArr, tmpSgRuleArr = self.insertSg(sg)
sgArr = sgArr + tmpSgArr
sgRuleArr = sgRuleArr + tmpSgRuleArr
        if sgArr:
            self._dbMgr.getModel("SgTable").insert_many(sgArr).execute()
        if sgRuleArr:
            self._dbMgr.getModel("SgRuleTable").insert_many(sgRuleArr).execute()
def insertSg(self, sg):
sgData = {
"id": sg["GroupId"],
"name": sg.get("GroupName", "")
}
sgRuleArr = []
marked = False
for permission in sg.get("IpPermissions", []):
for ip in permission["IpRanges"]:
sgRuleArr = sgRuleArr + self.insertRule(sg, 1, permission, ip)
marked = True
for usg in permission["UserIdGroupPairs"]:
sgRuleArr = sgRuleArr + self.insertRule(sg, 2, permission, usg)
marked = True
for permission in sg.get("IpPermissionsEgress", []):
for ip in permission["IpRanges"]:
sgRuleArr = sgRuleArr + self.insertRule(sg, 6, permission, ip)
marked = True
for usg in permission["UserIdGroupPairs"]:
sgRuleArr = sgRuleArr + self.insertRule(sg, 7, permission, usg)
marked = True
if not marked :
sgRuleArr = sgRuleArr + self.insertRule(sg, 11, {}, sg)
return [sgData], sgRuleArr
def insertRule(self, sg, ruleType, permission, rule):
source = ""
if ruleType == 1 or ruleType == 6 :
source = rule.get("CidrIp", "")
elif ruleType == 2 or ruleType == 7 :
source = rule.get("GroupId", "")
elif ruleType == 11 :
source = str(rule) # unknown data type?
sgRuleData = {
"sgid": sg["GroupId"],
"ruleType": ruleType,
"fromPort": permission.get("FromPort", "??"),
"toPort": permission.get("ToPort", "??"),
"source": source,
"protocol": permission.get("IpProtocol", "??")
}
return [sgRuleData]
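# The Filters argument assembled in load() follows the standard boto3 shape;
# e.g. restricting to a single VPC (the VPC id below is illustrative):
#     filters = [{'Name': 'vpc-id', 'Values': ['vpc-0123456789abcdef0']}]
#     response = boto3.client('ec2').describe_security_groups(Filters=filters)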
|
StarcoderdataPython
|
6534942
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Threshold
# Orthographic projection......
# Coordinates * extrinsics: transform the coordinates into the camera frame, then use
# orthographic projection + bilinear interpolation to gather each point's features
# (how is occlusion handled??? There is no depth test here, so occlusion is not considered)
class GraphProjection(nn.Module):
"""Graph Projection layer, which pool 2D features to mesh
The layer projects a vertex of the mesh to the 2D image and use
bilinear interpolation to get the corresponding feature.
"""
def __init__(self):
super(GraphProjection, self).__init__()
def forward(self, img_features, input):
self.img_feats = img_features
        # Compute image-plane coordinates (height/width):
        # h = 248 * x/z + 111.5
        # w = 248 * y/z + 111.5
        # Where do 248 and 111.5 come from?? Empirical values?
        h = 248 * torch.div(input[:, 1], input[:, 2]) + 111.5
        w = 248 * torch.div(input[:, 0], -input[:, 2]) + 111.5
        # Clamp to the image bounds, max value 223 (i.e. image size <= 224)
h = torch.clamp(h, min = 0, max = 223)
w = torch.clamp(w, min = 0, max = 223)
        # Feature-map sizes
img_sizes = [56, 28, 14, 7]
out_dims = [64, 128, 256, 512]
feats = [input]
        # Four projection levels
for i in range(4):
out = self.project(i, h, w, img_sizes[i], out_dims[i])
feats.append(out)
        # Concatenate the input and the features from the four projections
output = torch.cat(feats, 1)
return output
def project(self, index, h, w, img_size, out_dim):
        # Projection level `index`; point image coords h, w; feature-map size img_size (same in x and y)
        # Fetch this level's feature map
        img_feat = self.img_feats[index]
        # Rescale coordinates from the 224x224 original image to the current feature-map size
x = h / (224. / img_size)
y = w / (224. / img_size)
        # torch.floor(x): the largest integer <= x
        # torch.ceil(x): the smallest integer >= x
x1, x2 = torch.floor(x).long(), torch.ceil(x).long()
y1, y2 = torch.floor(y).long(), torch.ceil(y).long()
        # Clamp the max values to the feature-map bounds
x2 = torch.clamp(x2, max = img_size - 1)
y2 = torch.clamp(y2, max = img_size - 1)
#Q11 = torch.index_select(torch.index_select(img_feat, 1, x1), 1, y1)
#Q12 = torch.index_select(torch.index_select(img_feat, 1, x1), 1, y2)
#Q21 = torch.index_select(torch.index_select(img_feat, 1, x2), 1, y1)
#Q22 = torch.index_select(torch.index_select(img_feat, 1, x2), 1, y2)
        # Q11, Q12, Q21, Q22: features at the four surrounding integer corners
Q11 = img_feat[:, x1, y1].clone()
Q12 = img_feat[:, x1, y2].clone()
Q21 = img_feat[:, x2, y1].clone()
Q22 = img_feat[:, x2, y2].clone()
x, y = x.long(), y.long()
        # Bilinear interpolation
weights = torch.mul(x2 - x, y2 - y)
Q11 = torch.mul(weights.float().view(-1, 1), torch.transpose(Q11, 0, 1))
weights = torch.mul(x2 - x, y - y1)
Q12 = torch.mul(weights.float().view(-1, 1), torch.transpose(Q12, 0 ,1))
weights = torch.mul(x - x1, y2 - y)
Q21 = torch.mul(weights.float().view(-1, 1), torch.transpose(Q21, 0, 1))
weights = torch.mul(x - x1, y - y1)
Q22 = torch.mul(weights.float().view(-1, 1), torch.transpose(Q22, 0, 1))
output = Q11 + Q21 + Q12 + Q22
return output
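# Bilinear interpolation recap for the weights used in project(): for a point
# (x, y) inside the cell with integer corners (x1, y1) and (x2, y2),
#   f(x, y) ~= (x2-x)(y2-y)*Q11 + (x2-x)(y-y1)*Q12
#            + (x-x1)(y2-y)*Q21 + (x-x1)(y-y1)*Q22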
gp = GraphProjection()
bs = 4
channels = 16
h = [56, 28, 14, 7]
w = [56, 28, 14, 7]
img_features = []
for i in range(4):
img_features.append (torch.rand((bs,h[i],w[i])))
N = 500
dim = 3
input = torch.rand((N,dim))
gp(img_features,input)
|
StarcoderdataPython
|
6577227
|
"""Parse info about Perecrestok supremarket: `https://www.perekrestok.ru`."""
from time import sleep
from typing import Dict, Optional, NamedTuple, Set, NoReturn
import requests as req
from selenium import webdriver # type: ignore
from bs4 import BeautifulSoup # type: ignore
import pandas as pd # type: ignore
Category = NamedTuple("Category", [("name", str), ("url", str)])
class Perecrestok:
"""Allow to parse all data about products from the Perecrestok."""
def __init__(self):
self.main_url = "https://www.perekrestok.ru"
self.url_catalog = "https://www.perekrestok.ru/catalog"
self.columns = [
"Название",
"Категория",
"Производитель",
"Торговая марка",
"Вес",
"Жирность",
]
self.result = pd.DataFrame(columns=self.columns)
@staticmethod
def __get_html(url: str) -> str:
"""Scroll down a HTML page and return the HTML-code."""
driver = webdriver.Chrome()
driver.get(url)
for i in range(0, 50000, 1080):
driver.execute_script(f"window.scrollTo({i}, {i+1080})")
sleep(3)
page: str = driver.page_source
driver.close()
return page
@staticmethod
def __get_product_name(soup: BeautifulSoup) -> Optional[str]:
"""Return the porduct name."""
name = soup.find(class_="js-product__title xf-product-card__title")
if name:
name = name.text.split("\n")[0]
return name
@staticmethod
def __raise_error(function_name: str) -> NoReturn:
"""Raise error if status code not equal 200."""
raise ValueError(f"Проблема с подключением к сети в функции {function_name}.")
def get_catalog(self) -> Set[Category]:
"""Return set of namedtuples about all categories in the catalog."""
result: Set[Category] = set()
resp = req.get(self.url_catalog)
if resp.status_code != 200:
self.__raise_error(self.get_catalog.__name__)
soup = BeautifulSoup(resp.text, "lxml")
for cat in soup.find_all(class_="xf-catalog-categories__item"):
href = cat.find(class_="xf-catalog-categories__link").get("href")
name = cat.text.strip()
result.add(Category(name, self.main_url + href))
return result
def __parse_good(self, url: str) -> Dict[str, Optional[str]]:
"""Parse information about the product."""
product: Dict[str, Optional[str]] = {}
_ = [product.setdefault(key, None) for key in self.columns]
resp = req.get(url)
if resp.status_code != 200:
self.__raise_error(self.__parse_good.__name__)
soup = BeautifulSoup(resp.text, "lxml")
product["Название"] = self.__get_product_name(soup)
table = soup.find(
"table", attrs={"class": "xf-product-info__table xf-product-table"}
)
if table:
rows = table.find_all("tr")
for row in rows:
key = row.find_all(class_="xf-product-table__col-header")[
0
].text.strip()
value = row.find_all("td")[0].text.strip()
if key == "Объём":
key = "Вес"
if key in self.columns:
product[key] = value
return product
def __parse_category(self, category: Category) -> pd.DataFrame:
"""Parse all products in the categoty."""
print(f"Start parsing {category.name}.")
page = self.__get_html(category.url)
soup = BeautifulSoup(page, "lxml")
goods = soup.find_all(class_="js-catalog-product _additionals xf-catalog__item")
for good in goods:
url = good.find(class_="xf-product-picture__link js-product__image").get(
"href"
)
url = self.main_url + url
product = self.__parse_good(url)
product["Категория"] = category.name
self.result = pd.concat(
[self.result, pd.DataFrame.from_dict(product, orient="index").T]
)
self.result = self.result.dropna(subset=["Название"]).drop_duplicates(
subset=["Название"]
)
return self.result
def parse(self) -> pd.DataFrame:
"""Parse all products descriptions from `https://www.perekrestok.ru`."""
for category in self.get_catalog():
self.__parse_category(category)
return self.result
if __name__ == "__main__":
parser = Perecrestok()
data = parser.parse()
data.to_csv("perecrestok_goods.csv")
|
StarcoderdataPython
|
6649264
|
<gh_stars>1-10
"""
This example exhibits some of the functionality of a peripheral BLE device,
such as reading, writing and notifying characteristics.
This peripheral can be used with one of the central examples running on a separate nordic device,
or can be run with the nRF Connect app to explore the contents of the service
"""
import atexit
import struct
import threading
import time
from blatann import BleDevice
from blatann.examples import example_utils, constants
from blatann.gap import advertising, smp, IoCapabilities
from blatann.waitables import GenericWaitable
logger = example_utils.setup_logger(level="DEBUG")
def on_connect(peer, event_args):
"""
Event callback for when a central device connects to us
:param peer: The peer that connected to us
:type peer: blatann.peer.Client
:param event_args: None
"""
if peer:
logger.info("Connected to peer")
else:
logger.warning("Connection timed out")
def on_disconnect(peer, event_args):
"""
Event callback for when the client disconnects from us (or when we disconnect from the client)
:param peer: The peer that disconnected
:type peer: blatann.peer.Client
:param event_args: The event args
:type event_args: blatann.event_args.DisconnectionEventArgs
"""
logger.info("Disconnected from peer, reason: {}".format(event_args.reason))
def on_hex_conversion_characteristic_write(characteristic, event_args):
"""
Event callback for when the client writes to the hex conversion characteristic.
This takes the data written, converts it to the hex representation, and updates the characteristic
with this new value. If the client is subscribed to the characteristic, the client will be notified.
:param characteristic: The hex conversion characteristic
:type characteristic: blatann.gatt.gatts.GattsCharacteristic
:param event_args: the event arguments
:type event_args: blatann.event_args.WriteEventArgs
"""
logger.info("Got characteristic write - characteristic: {}, data: 0x{}".format(characteristic.uuid,
str(event_args.value).encode("hex")))
new_value = "{}".format(str(event_args.value).encode("hex"))
characteristic.set_value(new_value[:characteristic.max_length], notify_client=True)
def on_gatts_subscription_state_changed(characteristic, event_args):
"""
Event callback for when a client subscribes or unsubscribes from a characteristic. This
is the equivalent to when a client writes to a CCCD descriptor on a characteristic.
:type characteristic: blatann.gatt.gatts.GattsCharacteristic
:type event_args: blatann.event_args.SubscriptionStateChangeEventArgs
"""
logger.info("Subscription state changed - characteristic: {}, state: {}".format(characteristic.uuid, event_args.subscription_state))
def on_time_char_read(characteristic, event_args):
"""
Event callback for when the client reads our time characteristic. Gets the current time and updates the characteristic.
This demonstrates "lazy evaluation" of characteristics--instead of having to constantly update this characteristic,
it is only updated when read/observed by an outside actor.
:param characteristic: the time characteristic
:type characteristic: blatann.gatt.gatts.GattsCharacteristic
:param event_args: None
"""
t = time.time()
ms = int((t * 1000) % 1000)
msg = "Time: {}.{:03}".format(time.strftime("%H:%M:%S", time.localtime(t)), ms)
characteristic.set_value(msg)
def on_client_pairing_complete(peer, event_args):
"""
Event callback for when the pairing process completes with the client
:param peer: the peer that completed pairing
:type peer: blatann.peer.Client
:param event_args: the event arguments
:type event_args: blatann.event_args.PairingCompleteEventArgs
"""
logger.info("Client Pairing complete, status: {}".format(event_args.status))
def on_passkey_display(peer, event_args):
"""
Event callback that is called when a passkey is required to be displayed to a user
for the pairing process.
:param peer: The peer the passkey is for
:type peer: blatann.peer.Client
:param event_args: The event args
:type event_args: blatann.event_args.PasskeyDisplayEventArgs
"""
logger.info("Passkey display: {}, match: {}".format(event_args.passkey, event_args.match_request))
class CountingCharacteristicThread(object):
"""
Thread which updates the counting characteristic and notifies
the client each time its updated.
This also demonstrates the notification queuing functionality--if a notification/indication
is already in progress, future notifications will be queued and sent out when the previous ones complete.
"""
def __init__(self, characteristic):
"""
:param characteristic: the counting characteristic
:type characteristic: blatann.gatt.gatts.GattsCharacteristic
"""
self.current_value = 0
self._stop_event = threading.Event()
self._stopped = threading.Event()
self.characteristic = characteristic
self.characteristic.on_notify_complete.register(self._on_notify_complete)
self.thread = threading.Thread(target=self.run)
atexit.register(self.join)
self.thread.daemon = True
self.thread.start()
def join(self):
"""
Used to stop and join the thread
"""
self._stop_event.set()
self._stopped.wait(3)
def _on_notify_complete(self, characteristic, event_args):
"""
Event callback that is triggered when the notification finishes sending
:param characteristic: The characteristic the notification was on
:type characteristic: blatann.gatt.gatts.GattsCharacteristic
:param event_args: The event arguments
:type event_args: blatann.event_args.NotificationCompleteEventArgs
"""
logger.info("Notification Complete, id: {}, reason: {}".format(event_args.id, event_args.reason))
def run(self):
while not self._stop_event.is_set():
try:
if not self.characteristic.client_subscribed: # Do nothing until a client is subscribed
time.sleep(1)
continue
# Increment the value and pack it
self.current_value += 1
value = struct.pack("<I", self.current_value)
# Send out a notification of this new value
waitable = self.characteristic.notify(value)
# Send a burst of 16, then wait for them all to send before trying to send more
if self.current_value % 16 == 0:
waitable.wait()
time.sleep(1) # Wait a second before sending out the next burst
except Exception as e:
logger.exception(e)
self._stopped.set()
def main(serial_port):
# Create and open the device
ble_device = BleDevice(serial_port)
ble_device.open()
# Set up desired security parameters
ble_device.client.security.set_security_params(passcode_pairing=False, bond=False,
io_capabilities=IoCapabilities.DISPLAY_ONLY, out_of_band=False)
ble_device.client.security.on_pairing_complete.register(on_client_pairing_complete)
ble_device.client.security.on_passkey_display_required.register(on_passkey_display)
# Create and add the math service
service = ble_device.database.add_service(constants.MATH_SERVICE_UUID)
# Create and add the hex conversion characteristic to the service
hex_conv_char = service.add_characteristic(constants.HEX_CONVERT_CHAR_UUID,
constants.HEX_CONVERT_CHAR_PROPERTIES, "Test Data")
# Register the callback for when a write occurs and subscription state changes
hex_conv_char.on_write.register(on_hex_conversion_characteristic_write)
hex_conv_char.on_subscription_change.register(on_gatts_subscription_state_changed)
# Create and add the counting characteristic, initializing the data to [0, 0, 0, 0]
counting_char = service.add_characteristic(constants.COUNTING_CHAR_UUID, constants.COUNTING_CHAR_PROPERTIES, [0]*4)
counting_char.on_subscription_change.register(on_gatts_subscription_state_changed)
# Create the thread for the counting characteristic
counting_char_thread = CountingCharacteristicThread(counting_char)
# Create and add the time service
time_service = ble_device.database.add_service(constants.TIME_SERVICE_UUID)
# Add the time characteristic and register the callback for when its read
time_char = time_service.add_characteristic(constants.TIME_CHAR_UUID, constants.TIME_CHAR_PROPERTIES, "Time")
time_char.on_read.register(on_time_char_read)
# Initialize the advertising and scan response data
adv_data = advertising.AdvertisingData(local_name=constants.PERIPHERAL_NAME, flags=0x06)
scan_data = advertising.AdvertisingData(service_uuid128s=constants.TIME_SERVICE_UUID, has_more_uuid128_services=True)
ble_device.advertiser.set_advertise_data(adv_data, scan_data)
# Start advertising
logger.info("Advertising")
ble_device.client.on_connect.register(on_connect)
ble_device.client.on_disconnect.register(on_disconnect)
ble_device.advertiser.start(timeout_sec=0, auto_restart=True)
# Create a waitable that will never fire, and wait for some time
w = GenericWaitable()
w.wait(60*30, exception_on_timeout=False) # Keep device active for 30 mins
# Cleanup
counting_char_thread.join()
logger.info("Done")
ble_device.close()
if __name__ == '__main__':
main("COM49")
|
StarcoderdataPython
|
8002073
|
"""
Render setup overrides and collections can be enabled and disabled.
Disabling an override removes its effect, but keeps the override itself.
Disabling a collection disables all the overrides in its list, as well
as disabling any child (nested) collection it may have.
To implement this behavior, overrides and collections have three
attributes:
1) An enabled attribute. This attribute is readable only (output), and
is (trivially) computed from the two following attributes.
2) A self enabled attribute. This writable attribute determines whether
the override or collection itself is enabled.
3) A parent enabled attribute. This writable attribute is connected to
its parent's enabled output attribute, unless it is a collection
immediately under a render layer.
The enabled output boolean value is the logical and of the self enabled
attribute and the parent enabled attribute.
"""
def addChangeCallbacks(node):
"""
Add callbacks to indicate the argument node's enabled attribute changed.
A list of callback IDs is returned.
"""
pass
def createIntAttribute(longName, shortName, defaultValue):
"""
Helper method to create an input (writable) int attribute
"""
pass
def computeWithIsolateSelected(node, plug, dataBlock):
pass
def initializeAttributes(cls):
pass
def compute(node, plug, dataBlock):
pass
def createBoolAttribute(longName, shortName, defaultValue):
"""
Helper method to create an input (writable) boolean attribute
"""
pass
def createBoolOutputAttribute(longName, shortName, defaultValue):
"""
Helper method to create an output (readable) boolean attribute
"""
pass
def _compute(node, plug, dataBlock):
pass
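def _enabled_value(self_enabled, parent_enabled):
    """Illustrative sketch only (this is not the actual Maya implementation):
    the enabled output is the logical and of the self enabled value and the
    parent enabled value, as described in the module docstring."""
    return self_enabled and parent_enabled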
|
StarcoderdataPython
|
1709316
|
<filename>app/main.py
# encoding=utf-8
import json
import logging
import webapp2
import appengine_config
from google.appengine.api import modules
from google.appengine.api import app_identity
import telegram
import telegram_token
from service import WordCountService
# Creating the bot and getting basic info of it.
BOT = telegram.Bot(token=telegram_token.TOKEN)
BOT_ME = BOT.getMe()
if not appengine_config.DEBUG:
# Setting the webhook (callback).
VERSION = modules.get_current_version_name()
HOST_NAME = app_identity.get_default_version_hostname()
HOST_URL = 'https://{}-dot-{}/{}'.format(VERSION, HOST_NAME, telegram_token.TOKEN)
BOT.setWebhook(HOST_URL)
# Text to send back after substitution.
RESPONSE_TEXT = """Letters: %(letters)s
Words: %(words)s
Lines: %(lines)s
"""
RESPONSE_NO_TEXT = "Please, sent me some text. " + telegram.Emoji.PAGE_FACING_UP
class MainPage(webapp2.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'text/plain'
self.response.write(BOT_ME)
self.response.write('\n')
self.response.write(HOST_URL)
def post(self):
json_str = self.request.body
logging.info("POST Body: " + json_str)
try:
json_obj = json.loads(json_str)
update = telegram.Update.de_json(json_obj)
chat_id = update.message.chat.id
text = update.message.text.encode('utf-8')
if text != '':
logging.info("Message Text: " + text)
BOT.sendMessage(chat_id=chat_id,
text=self.get_response(text))
else:
logging.info("No Message Text.")
BOT.sendMessage(chat_id=chat_id,
text=self.get_response())
except ValueError:
logging.error('No body or bad JSON body.')
except AttributeError:
logging.error('No correct attributes in JSON body.')
def get_response(self, text=None):
if text:
return RESPONSE_TEXT % WordCountService.count(text)
return RESPONSE_NO_TEXT
app = webapp2.WSGIApplication([
('/' + telegram_token.TOKEN, MainPage),
], debug=appengine_config.DEBUG)
|
StarcoderdataPython
|
6505000
|
N = int(input())
s = set(input() for i in range(N))
print(len(s))
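# Worked example (illustrative input): for
#     3
#     apple
#     banana
#     apple
# the set holds {'apple', 'banana'}, so the program prints 2.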
|
StarcoderdataPython
|
3553723
|
#!/usr/bin/env python
# Copyright (c) 2018 Oracle and/or its affiliates. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from vol_provisioner_system_test import VolumeProvisionerSystemTestInterface
import oci
import utils
import atexit
from yaml_utils import PopulateYaml
class BackupVolumeSystemTest(VolumeProvisionerSystemTestInterface):
KUBERNETES_RESOURCES = ["../../dist/storage-class.yaml", "../../dist/storage-class-ext3.yaml",
"../../dist/oci-volume-provisioner.yaml",
"../../dist/oci-volume-provisioner-rbac.yaml"]
BACKUP_CLAIM_TEMPLATE = "templates/example-claim-from-backup.template"
CM_VOLUME_FROM_BACKUP = "volume_provisioner_volume_from_backup"
def __init__(self, test_id=None, setup=False, check_oci=False, canaryMetrics=None):
super(BackupVolumeSystemTest, self).__init__(test_id=test_id, setup=setup, check_oci=check_oci,
k8Resources=self.KUBERNETES_RESOURCES, canaryMetrics=canaryMetrics)
def run(self):
super(BackupVolumeSystemTest, self).run()
if self._check_oci: # Do not run tests in the validate-test-image stage (oci_config not propagated to image)
utils.log("Running system test: Create volume from backup", as_banner=True)
_backup_ocid, _availability_domain = self._setup_create_volume_from_backup()
_claim_target = PopulateYaml(self.BACKUP_CLAIM_TEMPLATE, self._test_id,
region=_availability_domain.split(':')[1], backup_id=_backup_ocid).generateFile()
_res = self._test_create_volume(_claim_target, "demooci-from-backup-" + self._test_id,
availability_domain=_availability_domain,
verify_func=self._volume_from_backup_check, canaryMetricName=self.CM_VOLUME_FROM_BACKUP)
self._tear_down_create_volume_from_backup(_backup_ocid)
self._checkTestSuccess()
def _create_backup(self, volume_ocid):
'''Create volume backup on OCI from existing volume
@param volume_ocid: Ocid of course volume
@type volume_ocid: C{Str}
@return: Tuple containing the backup id, compartment id and display name
@rtype: C{Tuple}'''
client = oci.core.blockstorage_client.BlockstorageClient(self._oci_config)
_backup_details = oci.core.models.CreateVolumeBackupDetails(volume_id=volume_ocid,
display_name="backup_volume_system_test" + self._test_id)
_response = client.create_volume_backup(_backup_details)
utils.log("Response for creating backup for volume %s: %s" % (volume_ocid, _response.data))
_res = self._get_json_doc(str(_response.data))
return _res['id'], _res['compartment_id'], _res['display_name']
def _delete_backup(self, backup_ocid):
'''Delete volume backup from OCI
@param backup_ocid: Ocid of backup volume to delete
@type backup_ocid: C{Str}'''
client = oci.core.blockstorage_client.BlockstorageClient(self._oci_config)
_response = client.delete_volume_backup(backup_ocid)
utils.log("Response for deleting volume backup %s: %s" % (backup_ocid, _response.data))
def _create_volume_from_backup(self, backup_ocid, test_id, availability_domain, compartment_id):
client = oci.core.blockstorage_client.BlockstorageClient(self._oci_config)
_volume_details = oci.core.models.CreateVolumeDetails(volume_backup_id=backup_ocid,
display_name="restored_volume_system_test" + test_id,
availability_domain=availability_domain,
compartment_id=compartment_id)
try:
_response = client.create_volume(_volume_details)
utils.log("Response for creating volume from backup %s: %s %s" % (_response.data, self._get_json_doc(str(_response.data))['id'], compartment_id))
return self._get_json_doc(str(_response.data))['id']
except Exception as exc:
utils.log("Failed to create volume from backup %s" % exc)
def _setup_create_volume_from_backup(self, storageType=VolumeProvisionerSystemTestInterface.BLOCK_STORAGE, availability_domain=None):
'''Setup environment for creating a volume from a backup device
@return: OCID of generated backup
@rtype: C{Str}'''
utils.log("Creating test volume (using terraform)", as_banner=True)
self._terraform("init", self.TERRAFORM_DIR)
self._terraform("apply", self.TERRAFORM_DIR)
_availability_domain = self._get_terraform_output_var(self.TERRAFORM_AVAILABILITY_DOMAIN)
utils.log(self._terraform("output -json", self.TERRAFORM_DIR))
# Create replication controller and write data to the generated volume
_rc_name, _rc_config = self._create_rc_or_pod("templates/example-replication-controller-with-volume-claim.template",
_availability_domain, volume_name=self._get_volume_name())
self._create_file_via_replication_controller(_rc_name)
self._verify_file_existance_via_replication_controller(_rc_name)
# Create backup from generated volume
_backup_ocid, compartment_id, _volume_name = self._create_backup(self._get_terraform_output_var(self.TERRAFORM_VOLUME_OCID))
if not self._wait_for_volume_to_create(_backup_ocid, compartment_id=compartment_id, backup=True, storageType=storageType,
availability_domain=availability_domain):
utils.log("Failed to find backup with name: " + _volume_name)
return _backup_ocid, _availability_domain
def _tear_down_create_volume_from_backup(self, backup_ocid):
'''Tear down create volume from backup
@param test_id: Test id used to append to component names
@type test_id: C{Str}
@param backup_ocid: OCID of backup from which the test volume was created
@type backup_ocid: C{Str}'''
def _destroy_test_volume_atexit():
utils.log("Destroying test volume (using terraform)", as_banner=True)
self._terraform("destroy -force", self.TERRAFORM_DIR)
atexit.register(_destroy_test_volume_atexit)
self._delete_backup(backup_ocid)
def _volume_from_backup_check(self, test_id, availability_domain, volume, file_name='hello.txt',):
'''Verify whether the volume created from the backup is in a healthy state
@param test_id: Test id to use for creating components
@type test_id: C{Str}
@param availability_domain: Availability domain to create resource in
@type availability_domain: C{Str}
@param volume: Name of volume to verify
@type volume: C{Str}
@param file_name: Name of file to do checks for
@type file_name: C{Str}'''
_ocid = volume.split('.')
_ocid = _ocid[-1]
_rc_name, _rc_config = self._create_rc_or_pod("templates/example-replication-controller.template", availability_domain, _ocid)
utils.log("Does the file from the previous backup exist?")
stdout = utils.kubectl("exec " + _rc_name + " -- ls /usr/share/nginx/html")
if file_name not in stdout.split("\n"):
utils.log("Error: Failed to find file %s in mounted volume" % file_name)
utils.log("Deleting the replication controller (deletes the single nginx pod).")
utils.kubectl("delete -f " + _rc_config)
|
StarcoderdataPython
|
5053001
|
#!/usr/bin/python
#FOG - update IP
# Adjust /opt/fog/.fogsettings
# Change Mysql values and update password from environment
# - DB - fog
# - globalSettings
# - update globalSettings set settingValue='??' where settingKey='FOG_TFTP_HOST';
# - update globalSettings set settingValue='??' where settingKey='FOG_WEB_HOST';
# - nfsGroupMembers
# - update nfsGroupMembers set ngmHostname='??' where ngmMemberName='DefaultMember';
# requires these packages and the initial fog setup to be done
# Run this on startup in a docker machine
# apt-get install python-pip python-dev libmysqlclient-dev
# pip install MySQL-python
# Use python3 style print
from __future__ import print_function
import os
import socket
import MySQLdb
import pdb
if os.name != "nt":
import fcntl
import struct
def get_interface_ip(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, struct.pack('256s',
ifname[:15]))[20:24])
def get_lan_ip():
ip = socket.gethostbyname(socket.gethostname())
if ip.startswith("127.") and os.name != "nt":
interfaces = [
"eth0",
"eth1",
"eth2",
"wlan0",
"wlan1",
"wifi0",
"ath0",
"ath1",
"ppp0",
]
for ifname in interfaces:
try:
ip = get_interface_ip(ifname)
break
except IOError:
pass
return ip
# Pull password
it_pw = os.getenv("IT_PW")
print("Detecting ip...")
# Check environment variable if it exists
ip = os.getenv("PUBLIC_IP")
if ip is None:
# No environment variable set, try and figure out the current IP
ip = get_lan_ip()
print("Found IP: {0}".format(ip))
print("Updating fog settings...")
fog_settings_path = "/opt/fog/.fogsettings"
# Read in the old file
filedata = ""
with open(fog_settings_path, 'r') as file:
filedata = file.readlines()
# While we write it back out, we will grab mysql values for later
# Set them to defaults
mysql_host="localhost"
mysql_user="root"
mysql_pass=""
mysql_db="fog"
# Now write data back out
f = open(fog_settings_path, "w")
for line in filedata:
if line.startswith("ipaddress="):
f.write("ipaddress='" + ip + "'\n")
if line.startswith("password="):
f.write("password='" + <PASSWORD> + "'\n")
else:
f.write(line)
# Check for mysql values
if line.startswith("snmysqluser="):
mysql_user = line.split('=')[-1].strip().strip("'")
if line.startswith("snmysqlpass="):
mysql_pass = line.split('=')[-1].strip().strip("'")
if line.startswith("snmysqlhost="):
mysql_host = line.split('=')[-1].strip().strip("'")
if line.startswith("snmysqldb="):
# NOTE - db is fog, cant set it in .fogsettings?
mysql_db = line.split('=')[-1].strip().strip("'")
f.close()
#pdb.set_trace()
# Update the mysql server - put new IP in place
db = MySQLdb.connect(host=mysql_host, user=mysql_user,
passwd=<PASSWORD>, db=mysql_db, port=3306)
# update TFTP host
cur = db.cursor()
cur.execute("update globalSettings set settingValue='" + ip + "' where settingKey='FOG_TFTP_HOST'")
# Update Web Host
cur = db.cursor()
cur.execute("update globalSettings set settingValue='" + ip + "' where settingKey='FOG_WEB_HOST'")
# Update WOL Host
cur = db.cursor()
cur.execute("update globalSettings set settingValue='" + ip + "' where settingKey='FOG_WOL_HOST'")
# Update storage IP
cur = db.cursor()
cur.execute("update nfsGroupMembers set ngmHostname='" + ip + "' where ngmMemberName='DefaultMember'")
db.close()
# Fix the ip in default.ipxe file
os.system("/bin/sed -i \"s|http://\([^/]\+\)/|http://" + ip + "/|\" /tftpboot/default.ipxe")
os.system("/bin/sed -i \"s|http:///|http://" + ip + "/|\" /tftpboot/default.ipxe")
# Fix the ip in the config.class.php file
os.system("/bin/sed -i \"s|\\\".*\\..*\\..*\\..*\\\"|\\\"" + ip + "\\\"|\" /var/www/fog/lib/fog/config.class.php")
# Run fog installer
#installer_path = "~/trunk/bin"
#installer_file = "installfog.sh"
#print("Running fog installer...")
# Don't run installer
#cmd = "cd " + installer_path + "; ./" + installer_file + " -y"
#print("CMD: " + cmd)
#os.system(cmd)
# Finished
print("Finished updating fog server ip.")
|
StarcoderdataPython
|
3201478
|
from dash import html
from dash.dependencies import Input, Output, State
def get_postal_code(click_data: dict) -> str:
"""
Helper function for the callbacks
Gets postal code from map click_data.
---
Args:
click_data (dict): user click information
Returns:
postal_code (str): Area postal code. '00180' by default.
"""
# try to find the area by postcode
    try:
        postal_code = str(click_data["points"][0]['location'])
    except (KeyError, IndexError, TypeError):
        postal_code = '00180'  # Kamppi postal code
return postal_code
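# Illustrative click_data shape (assumed from a Plotly choropleth click event):
#     {'points': [{'location': '00100'}]}  ->  returns '00100'
#     anything malformed                   ->  falls back to '00180'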
def privacy_check(postal_code: str) -> bool:
"""
Helper function for the callbacks.
Checks if postal code is private.
---
Args:
postal_code (str): Area postal code.
Returns:
True (bool): When postal code is in the private_list
False (bool): When postal code is not found in the private_list
"""
# List of private area postcodes
private_list = ['00230', '02290', '01770']
if postal_code in private_list:
return True
else:
return False
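# e.g. privacy_check('00230') -> True, privacy_check('00180') -> False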
def privacy_notice(section_title: str, neighborhood: str) -> list:
"""
Helper function for the callbacks.
---
Args:
section_title (str): Title of a sections
neighborhood (str): name of the neighborhood
Returns:
children (list): List of html components to be displayed.
"""
privacy_notice_text = f"""
The statistical data about postal areas where less than 30 citizens live is
private due to possible privacy violations. Additionally the data representing
such a small population will not yield statistically significant insights.
    For these reasons the data available for the {neighborhood} neighborhood will not be displayed.
"""
children = [
html.H5(section_title),
html.P(privacy_notice_text),
html.P("Check out the other postal areas!")
]
return children
def init_modal_popup(app):
"""
"""
@app.callback(
Output("help-modal-centered", "is_open"),
[Input("help-open-centered", "n_clicks"), Input("help-close-centered", "n_clicks")],
[State("help-modal-centered", "is_open")],
)
    def toggle_modal(n1: int, n2: int, is_open: bool):
"""
Toggle modal pop-ups.
---
Args:
            n1 (int): click count of the open button
            n2 (int): click count of the close button
is_open (bool): Current state of the modal. True for Open, False otherwise.
Returns:
is_open (bool): True for Open, False otherwise. Default value is False.
"""
if n1 or n2:
return not is_open
return is_open
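# A minimal usage sketch (assumes components with ids "help-open-centered",
# "help-close-centered" and "help-modal-centered" exist in the app layout):
#   import dash
#   app = dash.Dash(__name__)
#   init_modal_popup(app)  # registers the toggle callback on this app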
|
StarcoderdataPython
|
8064112
|
<filename>config.py
# bot token
TOKEN = ''
# admin -@Flaiers
admin_id = _
# telegram api id
api_id = _
# telegram api hash
api_hash = ''
# telegram name session
name = 'session'
# phrases: canned bot replies (in Russian) for unrecognized messages
unknown = ['Я тебя не понимаю 😐', 'Мне непонятно 🙃',
'Твоё сообщение мне непонятно 😕', 'Я не могу понять 🙃',
'Пожалуйста, перефразируйте вопрос 😉', 'Не могу разобрать 😕',
'Некорректный вопрос 🧐', 'Такое я ещё не понимаю 🙁']
|
StarcoderdataPython
|
11352012
|
<reponame>fluiddyn/fluiddyn<gh_stars>10-100
"""
Utilities for creating figures (:mod:`fluiddyn.output.figs`)
=============================================================
.. currentmodule:: fluiddyn.output.figs
Provides
.. autoclass:: Figure
:members:
.. autoclass:: Figures
:members:
"""
import os
import sys
import matplotlib
import matplotlib.pyplot as plt
from ..util import is_run_from_ipython
from .rcparams import set_rcparams
class Figures:
"""Represent a set of figures.
Utilities to plot and save figures with matplotlib.
Parameters
----------
path_save : str
Related to the path where to save.
hastosave : bool
If True, the function `Figure.saveifhasto` save the figure.
for_beamer : bool
If True, use beamer layout.
for_article : bool
If True, use article layout.
fontsize : {18, int}
Font size of the text in the figures.
fontsize_pad : {9, int}
Font size of the pad in the figures.
"""
def __init__(
self,
path_save=None,
hastosave=False,
for_beamer=False,
for_article=False,
fontsize=18,
fontsize_pad=9,
):
self.hastosave = hastosave
if path_save is None:
self.path_save = os.getcwd()
elif os.path.isabs(path_save):
self.path_save = path_save
else:
self.path_save = os.path.join(os.getcwd(), path_save)
        set_rcparams(fontsize, for_article, for_beamer, fontsize_pad=fontsize_pad)
def new_figure(
self,
name_file=None,
num=None,
fig_width_mm=200,
fig_height_mm=150,
size_axe=None,
):
"""Create a new Figure object and return it.
Parameters
----------
num : int, optional
Number.
fig_width_mm : {200, number}, optional
Width (in mm)
fig_height_mm : {150, number}, optional
Height (in mm)
size_axe : list, optional
Size of the axe.
name_file : str, optional
Name of the file.
"""
one_inch_in_mm = 25.4
fig_width_inches = float(fig_width_mm) / one_inch_in_mm
fig_height_inches = float(fig_height_mm) / one_inch_in_mm
figsize = [fig_width_inches, fig_height_inches]
dpi_latex = 72.27
fig = Figure(
num=num,
figsize=figsize,
dpi=dpi_latex,
size_axe=size_axe,
name_file=name_file,
figures=self,
)
return fig
def show(block=None):
"""Show slightly more cleaver than old version of plt.show."""
if block is None:
if is_run_from_ipython():
block = False
plt.ion()
else:
block = True
plt.ioff()
if sys.platform.startswith("win") and not block:
print("Warning: bug with anaconda and non-blocking show (?)")
try:
plt.show(block=block)
except TypeError:
plt.show()
class Figure(matplotlib.figure.Figure):
"""One figure.
Improvement (?) of `matplotlib.figure.Figure`
Parameters
----------
(for the __init__ method)
size_axe : list, optional
Size of the axe.
name_file : str, optional
Name of the file.
figures : :class:`fluiddyn.output.figs.Figures`
Set of figures.
kwargs : keyword arguments
Given when create the figure.
"""
def __init__(
self, size_axe=None, name_file=None, figures=None, **kwargs # *args,
):
# Ugly workaround to be able to use the function plt.figure
fig = plt.figure(**kwargs)
for k, v in fig.__dict__.items():
self.__dict__[k] = v
if name_file is not None:
self.name_file = name_file
if figures is None:
self.hastosave = True
self.path_save = os.getcwd()
else:
self.hastosave = figures.hastosave
self.path_save = figures.path_save
self.figures = figures
title = "Fig " + str(self.number)
if name_file is not None:
fig.name_file = name_file
title += " " + name_file
self.clf()
self.canvas.manager.set_window_title(title)
if size_axe is not None:
self.add_axes(size_axe)
def saveifhasto(self, name_file=None, hastosave=None, verbose=True):
"""Save the figure if `hastosave` is True.
Parameters
----------
name_file : str, optional
Name of the file (the extension has to indicate the format).
hastosave : bool, optional
If True, save the figure.
verbose : {True, bool}, optional
Print nothing if False.
"""
if hastosave is None:
hastosave = self.hastosave
if hastosave:
if name_file is None:
try:
name_file = self.name_file
except AttributeError:
raise ValueError("No name given...")
if not os.path.exists(self.path_save):
os.mkdir(self.path_save)
path = os.path.join(self.path_save, name_file)
if verbose:
print("Save figure in file\n" + path)
super().savefig(path)
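# A minimal usage sketch (hypothetical file/directory names):
#   figs = Figures(path_save="Figs", hastosave=True)
#   fig = figs.new_figure(name_file="example.png", fig_width_mm=120, fig_height_mm=90)
#   ax = fig.gca()
#   ax.plot([0, 1], [0, 1])
#   fig.saveifhasto()
#   show()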
|
StarcoderdataPython
|
3279630
|
<gh_stars>1-10
""" game class """ # noqa
# We want only one implementation of the Game Class.
# To do this, create a single instance of the Game class on import. Then
# each time we try to instantiate Game(), we just return a reference to
# the game class.
import cmd
from datetime import datetime
import pprint
import random
import re
from combat import Combat
from common.ipc import Ipc
from common.general import isIntStr, dateStr, logger, dLog
from common.general import splitTargets, targetSearch, itemSort
from common.general import getRandomItemFromList, secsSinceDate, getNeverDate
from common.globals import maxCreaturesInRoom
from common.help import enterHelp
from creature import Creature
from magic import Spell, SpellList, spellCanTargetSelf
from object import ObjectFactory, isObjectFactoryType
from room import RoomFactory, isRoomFactoryType, getRoomTypeFromFile
class _Game(cmd.Cmd, Combat, Ipc):
""" Single instance of the Game class, shared by all users
        (see instantiation magic at the bottom of the file)"""
_instanceDebug = False
def __init__(self):
""" game-wide attributes """
self.instance = "Instance at %d" % self.__hash__()
self._activeRooms = []
self._activePlayers = []
self._startdate = datetime.now()
self._asyncThread = None
self._instanceDebug = _Game._instanceDebug
return None
def debug(self):
return pprint.pformat(vars(self))
def toggleInstanceDebug(self):
self._instanceDebug = not self._instanceDebug
def getInstanceDebug(self):
return self._instanceDebug
def getId(self):
return self.instance
def isValid(self):
if self.getId() != "" and self._startdate < datetime.now():
return True
return False
def asyncTasks(self):
""" Tasks that run in a separate thread with ~1 sec intervals """
self.asyncNonPlayerActions()
self.asyncCharacterActions()
    def processDeadClients(self):
        pass
def joinGame(self, client):
""" Perform required actions related to joining the game """
charObj = client.charObj
if not charObj:
logger.warn("Game: Character not defined - returning False")
return False
gameCmd = GameCmd(client) # each user gets their own cmd shell
self.addToActivePlayerList(charObj)
# in-game broadcast announcing game entry
msg = self.txtBanner(
"{} has entered the game at {}".format(charObj.getName(),
dateStr("now")), bChar="=")
self.gameMsg(msg + "\n")
logger.info("JOINED GAME " + charObj.getId())
# add room to charObj and then display the room
if self.joinRoom(1, charObj):
self.charMsg(charObj, charObj.getRoom().display(charObj))
try:
gameCmd.cmdloop() # start the game cmdloop
finally:
if client.charObj:
self.leaveGame(client.charObj)
return False
def leaveGame(self, charObj, saveChar=True):
""" Handle details of leaving a game """
self.leaveRoom(charObj)
# remove character from game character list
self.removeFromActivePlayerList(charObj)
# final character save before throwing away charObj
if saveChar:
# saveChar is False when it's a suicide
try:
charObj.save(logStr=__class__.__name__)
except AttributeError:
logger.warning("Could not save character")
# notification and logging
msg = self.txtBanner(
"{} has left the game at {}".format(charObj.getName(), dateStr("now")),
bChar="=")
self.gameMsg(msg + "\n")
if charObj.client:
charObj.client.spoolOut(msg + "\n")
logger.info("LEFT GAME " + charObj.getId())
# Discard charObj
if charObj.client:
charObj.client.charObj = None
charObj = None
return True
def getCharacterList(self):
return self._activePlayers
def addToActivePlayerList(self, charObj):
""" add character to list of characters in game """
if charObj not in self.getCharacterList():
self._activePlayers.append(charObj)
def removeFromActivePlayerList(self, charObj):
""" remove character from list of characters in game """
if charObj in self.getCharacterList():
self._activePlayers.remove(charObj)
def getActiveRoomList(self):
return self._activeRooms
def addToActiveRooms(self, roomObj):
""" Add room to active room list """
if roomObj not in self.getActiveRoomList():
self._activeRooms.append(roomObj)
return True
def removeFromActiveRooms(self, roomObj):
""" Remove room from active room list """
if self.isActiveRoom(roomObj):
self._activeRooms.remove(roomObj)
return True
def isActiveRoom(self, roomObj):
""" Return true if room is in active room list """
if roomObj in self.getActiveRoomList():
return True
return False
def getActiveRoom(self, num):
""" Return the roomObj for an active room, given the room number """
for roomObj in self.getActiveRoomList():
if roomObj.getId() == num:
return roomObj
return None
def activeRoomInfo(self):
msg = "Active rooms: " + ", ".join(
[x.getItemId() + "(" + str(x) + ")" for x in self.getActiveRoomList()]
)
return msg
def deActivateEmptyRoom(self, roomObj):
""" deactiveates room if empty. Returns true if deactiveated """
if len(roomObj.getCharacterList()) == 0:
self.removeFromActiveRooms(roomObj)
return True
return False
def asyncCharacterActions(self):
""" asyncronous actions that occur to players. """
for charObj in self.getCharacterList():
self.timeoutInactivePlayer(charObj)
charObj.processPoisonAndRegen()
def timeoutInactivePlayer(self, charObj, timeoutInSecs=300):
""" kick character out of game if they have been inactive """
removeCharFromGame = False
timeOutTxt = "You have timed out due to inactivity\n"
if charObj.getInputDate() == getNeverDate():
# Ignore the timeout check if the input date has not been set yet
# This is a timing issue in that the first run of the async loop
# runs before the character is fully initialized with an input date.
return(False)
if secsSinceDate(charObj.getInputDate()) > timeoutInSecs:
removeCharFromGame = True
if not charObj.client.is_alive():
removeCharFromGame = True
if removeCharFromGame:
self.charMsg(charObj, timeOutTxt)
logger.info("GAME TIMEOUT {}".format(charObj.getId()))
self.leaveGame(charObj, saveChar=True)
return(True)
return(False)
def asyncNonPlayerActions(self):
""" asyncronous actions that are not tied to a player. """
for roomObj in self.getActiveRoomList():
if self.deActivateEmptyRoom(roomObj):
continue
self.creatureEncounter(roomObj)
self.creaturesAttack(roomObj)
return None
def roomLoader(self, roomStr):
""" returns a roomObj, given a roomStr """
logPrefix = "game.roomLoader (" + str(roomStr) + ") "
roomObj = None
roomType = "room"
roomNum = 0
roomStr = str(roomStr)
if isIntStr(roomStr):
roomNum = int(roomStr)
roomType = getRoomTypeFromFile(roomNum)
elif "/" in roomStr:
# if it's not a number, assume it's in the form: Room/35
roomType, roomNum = roomStr.split("/")
if isIntStr(roomNum):
roomNum = int(roomNum)
if roomNum == 0:
logger.error(logPrefix + "Room number is 0")
return None
else:
logger.error(logPrefix + "Room number is invalid")
return None
# See if room is already active
for oneroom in self.getActiveRoomList():
            if oneroom.getRoomNum() == roomNum:  # if the room already exists
roomObj = oneroom # use existing roomObj
if not roomObj:
            roomObj = RoomFactory(roomType, roomNum)  # instantiate room object
roomObj.load(logStr=__class__.__name__) # load room from disk
if roomObj is None:
logger.error(logPrefix + "Room object is None")
return roomObj
# end roomLoader
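    # e.g. roomLoader(35) loads room 35 with its type read from file, while
    # roomLoader("Shop/35") loads room 35 explicitly as a Shop; an already
    # active in-memory room is reused instead of re-loading from disk.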
def joinRoom(self, roomThing, charObj):
""" insert player into a room
* can accept room number or roomObj
* create or join room instance
* add character to room instance
* add room to character instance
* add room to active rooms list
* close spring loaded doors if room is empty
# roomStr can be a room number or can be in the form Shop/35
"""
roomObj = None
if isinstance(roomThing, int) or isinstance(roomThing, str):
roomObj = self.roomLoader(roomThing)
elif isRoomFactoryType(roomThing.getType()):
roomObj = roomThing
if not roomObj:
logger.error("joinRoom: Could not get roomObj")
return False
existingRoom = charObj.getRoom()
if existingRoom:
if existingRoom == roomObj: # if already in desired room
return True # do nothing
else:
self.leaveRoom(charObj) # leave the previous room
charObj.setRoom(roomObj) # Add room to character
roomObj.addCharacter(charObj) # Add character to room
self.addToActiveRooms(roomObj) # Add room to active room list
return True
def leaveRoom(self, charObj):
""" Handle details of leaving a room
* Remove room from active rooms list if it's empty
* remove character from room instance
* remove room from character instance
* toDo - check if other players/creatures follow
* toDo - notify others that character has left the room
* toDo - stop/reassign attackers
"""
if not charObj:
return False
if not charObj.getRoom(): # There is no previous room, so just return
return True
if charObj.getRoom().getId() == 0: # Not a real room - just loaded?
return True
charObj.getRoom().removeCharacter(charObj) # remove charact from room
# if room's character list is empty, remove room from activeRoomList
if len(charObj.getRoom().getCharacterList()) == 0:
self.removeFromActiveRooms(charObj.getRoom())
charObj.getRoom().removeNonPermanents(removeTmpPermFlag=False)
charObj.getRoom().save()
charObj.removeRoom() # Remove room from character
return True
def calculateObjectPrice(self, charObj, obj):
""" return adjusted price for an object based on many factors """
if obj.isCursed():
return 1
price = obj.getValue()
price = obj.adjustPrice(price) # object adjustment
price = charObj.getRoom().adjustPrice(price) # room adjustment
price = charObj.adjustPrice(price) # char adjust
return price
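    # e.g. a 100-shilling item sells for 1 if cursed; otherwise its base value
    # is scaled in turn by the object's, room's, and character's adjustPrice().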
def getCorrespondingRoomObj(self, doorObj, activeOnly=False):
""" Get the room object that correcponds to a door """
roomObj = self.getActiveRoom(doorObj.getToWhere())
if not roomObj: # If active room doesn't exist
if not activeOnly:
# Load room from disk into separate instance
roomObj = self.roomLoader(doorObj.getToWhere())
else:
roomObj = None
return roomObj
def modifyCorrespondingDoor(self, doorObj, charObj):
""" When a door is opened/closed on one side, the corresponing door
needs to be updated """
roomObj = self.getCorrespondingRoomObj(doorObj)
if roomObj:
for obj in roomObj.getInventory():
if obj.getId() == doorObj.getCorresspondingDoorId():
if doorObj.isClosed():
obj.close(charObj)
else:
obj.open(charObj)
if doorObj.isLocked():
obj.lock()
else:
obj.unlock()
roomObj.save()
return True
return True
def buyTransaction(
self, charObj, obj, price, prompt, successTxt="Ok.", abortTxt="Ok."
):
""" buy an item """
roomObj = charObj.getRoom()
if charObj.client.promptForYN(prompt):
charObj.subtractCoins(price) # tax included
charObj.addToInventory(obj) # add item
if roomObj.getType() == "Shop":
roomObj.recordTransaction(obj) # update stats
roomObj.recordTransaction("sale/" + str(price))
charObj.recordTax(roomObj.getTaxAmount(price))
self.charMsg(charObj, successTxt)
logger.info(
"PURCHASE "
+ charObj.getId()
+ " bought "
+ obj.describe()
+ " for "
+ str(price)
)
return True
else:
self.charMsg(charObj, abortTxt)
return False
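    # e.g. buyTransaction(char, sword, 50, "Buy the sword for 50?") prompts the
    # player, then deducts 50 coins and adds the sword on a "yes".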
def sellTransaction(
self, charObj, obj, price, prompt, successTxt="Ok.", abortTxt="Ok."
):
""" sell an item """
roomObj = charObj.getRoom()
if charObj.client.promptForYN(prompt):
charObj.removeFromInventory(obj) # remove item
charObj.addCoins(price) # tax included
if roomObj.getType() == "Shop":
roomObj.recordTransaction(obj) # update stats
roomObj.recordTransaction("purchase/" + str(price))
charObj.recordTax(roomObj.getTaxAmount(price))
self.charMsg(charObj, successTxt)
logger.info(
"SALE "
+ charObj.getId()
+ " sold "
+ obj.describe()
+ " for "
+ str(price)
)
return True
else:
self.charMsg(charObj, abortTxt)
return False
def populateRoomCreatureCache(self, roomObj):
""" Create a creature cache, so that we don't have to load the
creatures every time we check for encounters. These creatures are
never actually encountered. They just exist for reference
"""
debugPrefix = "game.populateRoomCreatureCache (" + str(roomObj.getId()) + "): "
if len(roomObj.getCreatureCache()) == 0:
dLog(debugPrefix + "Populating room creature cache", self._instanceDebug)
# loop through all possible creatures for room and fill cache
for ccNum in roomObj.getEncounterList():
ccObj = Creature(ccNum)
ccObj.load()
roomObj.creatureCachePush(ccObj)
dLog(debugPrefix + "Cached " + ccObj.describe(), self._instanceDebug)
def getEligibleCreatureList(self, roomObj):
""" Determine which creatures, from the cache, can be encountered, by
comparing their frequency attribute to a random roll. Fill a
eligibleCreatureList with possible creatures for encounter. """
debugPrefix = "game.getEligibleCreatureList (" + str(roomObj.getId()) + "): "
eligibleCreatureList = []
for ccObj in roomObj.getCreatureCache():
if ccObj.getFrequency() >= random.randint(1, 100):
# Load creature to be encountered
cObj = Creature(ccObj.getId())
cObj.load()
eligibleCreatureList.append(cObj)
dLog(
debugPrefix + cObj.describe() + " is eligible", self._instanceDebug
)
return eligibleCreatureList
def creatureEncounter(self, roomObj):
""" As an encounter, add creature to room
Chance based on
* room encounter rates and encounter list
* creature frequency
"""
debugPrefix = "Game creatureEncounter (" + str(roomObj.getId()) + "): "
if not roomObj.readyForEncounter():
# dLog(debugPrefix + 'Room not ready for encounter',
# self._instanceDebug)
return False
if len(roomObj.getInventoryByType("Creature")) >= maxCreaturesInRoom:
self.roomMsg(
roomObj, "Others arrive, but wander off.\n", allowDupMsgs=False
)
return False
self.populateRoomCreatureCache(roomObj)
eligibleCreatureList = self.getEligibleCreatureList(roomObj)
creatureObj = getRandomItemFromList(eligibleCreatureList)
if creatureObj:
roomObj.addToInventory(creatureObj)
dLog(
debugPrefix + str(creatureObj.describe()) + " added to room",
self._instanceDebug,
)
self.roomMsg(roomObj, creatureObj.describe() + " has arrived\n")
creatureObj.setEnterRoomTime()
roomObj.setLastEncounter()
return None
def removeFromPlayerInventory(self, charObj, item, msg=""):
""" display message and remove item from player's inventory
* Has some canned responses, such as "disintegrate" """
if msg == "disint":
msg = item.describe(article="The") + " disintegrates"
if msg != "":
self.charMsg(charObj, msg + "\n")
# Remove item from player's inventory
charObj.removeFromInventory(item)
return None
def txtBanner(self, msg, bChar="-"):
""" return a string containing a banner.
Default is like this:
----- mymessage -----
"""
return "{0} {1} {0}".format(self.txtLine(lineChar=bChar, lineSize=5), msg)
def txtLine(self, lineChar="-", lineSize=80):
""" return a string containing a line
line size and character are customizable
Default is like this:
----------------------------------------------------------------
"""
return lineChar * lineSize
class GameCmd(cmd.Cmd):
""" Game loop - separate one for each player
* Uses cmd loop with do_<action> methods
* if do_ methods return True, then loop exits
"""
def __init__(self, client=None):
self.client = client
if client:
self.acctObj = client.acctObj
self.gameObj = client.gameObj
self.charObj = client.charObj
else:
self.acctObj = None
self.gameObj = None
self.charObj = None
self._lastinput = ""
self._instanceDebug = False
def toggleInstanceDebug(self):
self._instanceDebug = not self._instanceDebug
def setInstanceDebug(self, val):
self._instanceDebug = bool(val)
def getInstanceDebug(self):
return self._instanceDebug
def getCmdPrompt(self):
sp = "<"
ep = ">"
if self.charObj:
promptsize = self.charObj.getPromptSize()
else:
promptsize = "full"
if promptsize == "brief":
promptStr = ep + " "
else:
promptStr = sp + "game" + ep + " "
return promptStr
def cmdloop(self):
""" cmd method override - Game loop
requires player to have character loaded """
stop = False
line = ""
self.preloop()
while not stop:
if self.client.promptForCommand(self.getCmdPrompt()): # send/recv
line = self.client.getInputStr()
stop = self.runcmd(line)
else:
stop = True
self.postloop()
def runcmd(self, cmd):
""" workhorse of cmdloop
* runcmd extracted from cmdloop so that tests can call it without
prompting for input
"""
self._lastinput = cmd
dLog("GAME cmd = " + cmd, self._instanceDebug)
if self.precmd() == "stop":
return True
stop = self.onecmd(cmd)
if self.postcmd(cmd) == "stop":
return True
return stop
def preloop(self):
""" functionality that get run once before the input loop begins """
# Set the input date when first entering the game. Required for timeout
# to work properly on characters that never input a command.
self.charObj.setInputDate()
def precmd(self):
""" cmd method override """
# If charater has timed out or been booted from the game
# terminate the command loop.
if self.charObj not in self.gameObj.getCharacterList():
return("stop")
self.charObj.setInputDate()
if self.lastcmd != "":
self.charObj.setLastCmd(self.lastcmd)
return(False)
def postcmd(self, line):
""" cmd method override """
if self.charObj: # doesn't exist if there is a suicide
self.charObj.save(logStr=__class__.__name__)
return(False)
def emptyline(self):
""" cmd method override """
return False
def default(self, line):
""" cmd method override """
logger.warn("*** Invalid game command: %s\n" % line)
self.charObj.client.spoolOut("Invalid Command\n")
def getLastCmd(self):
""" Returns the first part of the last command """
return self.lastcmd.split(" ", 1)[0]
def missingArgFailure(self):
""" Print missing arg message and return False """
self.selfMsg(self.getLastCmd() + " what?\n")
return False
def getObjFromCmd(self, itemList, cmdline):
""" Returns a list of target Items, given the full cmdargs """
targetItems = []
for target in splitTargets(cmdline):
obj = targetSearch(itemList, target)
if obj:
targetItems.append(obj)
targetItems += [None] * 2 # Add two None items to the list
return targetItems
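    # e.g. getObjFromCmd(inv, "sword shield") may return [swordObj, shieldObj,
    # None, None]; the two trailing Nones let callers unpack targets 0 and 1
    # without separate length checks.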
def getCombatTarget(self, line):
""" All combat commands need to determine the target """
charObj = self.charObj
roomObj = charObj.getRoom()
creatureList = roomObj.getInventoryByType("Creature")
targetList = self.getObjFromCmd(creatureList, line)
target = targetList[0]
if not target:
# Re-use old target if it still exists
lastTarget = charObj.getCurrentlyAttacking()
if lastTarget:
if lastTarget in creatureList:
target = lastTarget
if not target:
if line == "":
self.selfMsg("No target.\n")
else:
self.selfMsg(line + " is not a valid target.\n")
return None
return target
def parseSpellArgs(self, line):
charObj = self.charObj
roomObj = charObj.getRoom()
charObjList = charObj.getInventory()
roomInv = roomObj.getCharsAndInventory()
targetList = self.getObjFromCmd(charObjList + roomInv, line)
spellItem = None
spellName = ""
targetObj = None
if self.getLastCmd() == "cast":
# When casting a spell, there is no spellItem, so the first item
# in the list is the target
if len(targetList) >= 1:
targetObj = targetList[0]
spellName = line.split(" ", 1)[0]
else:
# When using a magic item, the first magic item encountered is the
# spellItem and the next, if any, is the target
for target in targetList:
if not target:
continue
if not target.isMagicItem():
continue
if not spellItem:
spellItem = target
if not targetObj:
targetObj = target
break
if spellItem:
spellName = spellItem.getSpellName()
if spellName != "":
if not targetObj and spellCanTargetSelf(spellName):
targetObj = charObj
return (spellItem, spellName, targetObj)
def getFollowerList(self, charObj, roomObj=None):
""" Returns list of characters from room that are following character """
followerList = []
if not roomObj:
roomObj = charObj.getRoom()
for onechar in roomObj.getCharacterList():
if onechar is charObj: # ignore self
continue
if onechar.getFollow() is charObj:
followerList.append(onechar)
return followerList
def selfMsg(self, msg):
""" send message using Game communnication. This simply allows us
to call it without passing the extra arg) """
return self.gameObj.charMsg(self.charObj, msg)
def othersMsg(self, roomObj, msg, ignore):
""" send message using Game communnication. This simply allows us
to call it without passing the extra arg) """
return self.gameObj.othersInRoomMsg(self.charObj, roomObj, msg, ignore)
def moveDirection(self, charObj, direction):
""" move subcommand - move in one of the the basic directions """
dLog("GAME move dir = " + direction, self._instanceDebug)
exitDict = charObj.getRoom().getExits()
if direction not in exitDict.keys():
self.selfMsg("You can't move in that direction!\n")
return False
destRoomNum = exitDict[direction]
roomObj = self.gameObj.roomLoader(destRoomNum)
if not roomObj:
logger.error("Could not create roomObj " + str(destRoomNum) + ".")
return False
if not roomObj.canBeJoined(charObj):
logger.error(roomObj.getId() + " can not be joined.")
return False
if self.gameObj.joinRoom(roomObj, charObj):
return True
else:
logger.error("joinRoom Failed\n")
return False
return False
def moveThroughPortalOrDoor(self, charObj, itemObj):
""" move subcommand - move through door or portal """
if not itemObj: # no object - take no action
self.selfMsg("That is not somewhere you can go!\n")
return False
if not itemObj.canBeEntered(charObj):
self.selfMsg("You can't go there!\n")
return False
if itemObj.hasToll():
toll = itemObj.getToll()
if charObj.canAffordAmount(toll):
charObj.subtractCoins(toll)
self.selfMsg("You paid a toll of {} coins.".format(toll))
else:
self.selfMsg("Opening this item requires more coins than you have\n")
return False
dLog(
"GAME move through obj = {}".format(itemObj.describe()), self._instanceDebug
)
roomnum = itemObj.getToWhere()
roomObj = self.gameObj.roomLoader(roomnum)
if roomObj:
if roomObj.canBeJoined(charObj):
if self.gameObj.joinRoom(roomnum, charObj):
return True
else:
logger.error("joinRoom Failed\n")
else:
logger.error(roomnum + " can not be joined")
else:
logger.error("Could not create roomObj " + roomnum)
return False
def move(self, line):
""" move character from one room to another """
cmdargs = line.split(" ")
charObj = self.charObj
moved = False
currentRoom = charObj.getRoom()
oldRoom = charObj.getRoom()
if currentRoom.isDirection(cmdargs[0]): # if command is a direction
            moved = self.moveDirection(charObj, cmdargs[0])
            if moved:
                # Folks in the old room should see the player leave, unless hidden
                msg = "{} went {}\n".format(
                    charObj.getName(), currentRoom.directionNameDict[cmdargs[0]])
                self.othersMsg(oldRoom, msg, charObj.isHidden())
else:
# handle doors and Portals
itemList = self.getObjFromCmd(currentRoom.getInventory(), line)
moved = self.moveThroughPortalOrDoor(charObj, itemList[0])
currentRoom = charObj.getRoom()
arrivedMsg = "{} has arrived\n"
if moved:
# creatures in old room should stop attacking player
self.gameObj.unAttack(oldRoom, charObj)
# character possibly loses hidden
charObj.possibilyLoseHiddenWhenMoving()
self.selfMsg(charObj.getRoom().display(charObj))
# Folks in the new room should see the player arrive, unless hidden
msg = arrivedMsg.format(charObj.getName())
self.othersMsg(currentRoom, msg, charObj.isHidden())
# Handle followers that are moving along with primary character
for onechar in self.getFollowerList(charObj, oldRoom):
if onechar.continuesToFollow(charObj):
self.gameObj.joinRoom(currentRoom, onechar)
self.gameObj.charMsg(onechar, onechar.getRoom().display(onechar))
msg = arrivedMsg.format(onechar.getName())
self.gameObj.othersInRoomMsg(
onechar, currentRoom, msg, charObj.isHidden())
else:
# Follower loses sight of leader and is no longer following
onechar.setFollow()
return True
else:
self.selfMsg("You can not go there!\n")
return False
def useObject(self, obj, line):
""" Call method for using object, based on it's type/attributes """
if not obj:
logger.error("game.useObject: Could not use a non-existent obj")
return False
if not isObjectFactoryType(obj.getType()):
logger.error("game.useObject: Could not use a non-obj obj")
return False
        if not obj.isUsable():
            self.selfMsg(obj.describe(article="The") + " is not usable\n")
            return False
if obj.isEquippable():
if self.charObj.equip(obj):
self.selfMsg("Ok\n")
else:
self.selfMsg("You can't equip that\n")
elif obj.isMagicItem():
self.useMagicItem(line)
def useMagicItem(self, line):
if line == "":
return self.missingArgFailure()
(spellItem, spellName, targetObj) = self.parseSpellArgs(line)
if not spellItem:
return self.missingArgFailure()
if not targetObj:
self.selfMsg("Invalid target for spell." + spellName + "\n")
return False
if spellItem.getType().lower() == "scroll":
spellItem.readScroll(self.charObj, targetObj)
            # Note: A read scroll will already display the disintegrates
# message via the item's cast method. Don't add it here.
self.gameObj.removeFromPlayerInventory(self.charObj, spellItem)
else:
spellItem.cast(self.charObj, targetObj)
return None
def parseIpc(self, line):
roomObj = self.charObj.getRoom()
lastCmd = self.getLastCmd()
target = None
msg = ""
# Get recipient, if any
possibleRecipients = []
if lastCmd == "whisper":
possibleRecipients = roomObj.getCharacterList()
elif lastCmd == "send":
possibleRecipients = self.gameObj.getCharacterList()
# elif lastCmd in ['say', 'yell', 'shout', 'broadcast']:
# target = None
if len(possibleRecipients) > 0:
targetList = self.getObjFromCmd(possibleRecipients, line)
if targetList[0]:
target = targetList[0]
if re.search("[^ ]+ [^ ]+", line):
# todo: fix this if target is more than one word.
# i.e. Player #1.
junk, msg = line.split(" ", 1)
if msg == "":
msg = self.client.promptForInput(lastCmd + " what? ")
return (target, msg)
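    # e.g. with lastCmd "whisper" and line "gandalf hello there", the target is
    # the matching character in the room and msg becomes "hello there".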
def do_accept(self, line):
""" transaction - accept an offer """
self.selfMsg(line + " not implemented yet\n")
def do_action(self, line):
""" communication - fun in-room communication """
charObj = self.charObj
roomObj = charObj.getRoom()
if line == "":
self.selfMsg("Usage: action <txt>\n")
return False
msg = charObj.getName() + " " + line
self.gameObj.roomMsg(roomObj, msg + "\n")
logger.info(msg)
charObj.setHidden(False)
def do_appeal(self, line):
""" ask DMs for help """
self.selfMsg(line + " not implemented yet\n")
def do_att(self, line):
""" combat - alias """
return self.do_attack(line)
def do_attack(self, line):
""" combat """
target = self.getCombatTarget(line)
if not target:
self.selfMsg("Attack what?\n")
return False
self.gameObj.attackCreature(self.charObj, target, self.getLastCmd())
return False
def do_auction(self, line):
""" alias - sell """
        return self.do_sell(line)
def do_backstab(self, line):
""" combat """
# monster gets double damage on next attack
target = self.getCombatTarget(line)
if not target:
self.selfMsg("Backstab what?\n")
return False
self.gameObj.attackCreature(self.charObj, target, self.getLastCmd())
return False
def do_balance(self, line):
""" info - view bank balance when in a bank """
charObj = self.charObj
roomObj = charObj.getRoom()
if not roomObj.getType() == "Shop":
self.selfMsg("You can't do that here. Find a bank\n")
return False
if not roomObj.isBank():
self.selfMsg("You can't do that here. Find a bank.\n")
return False
amount = charObj.getBankBalance()
self.selfMsg("Your account balance is " + str(amount) + " shillings.\n")
def do_block(self, line):
""" combat """
target = self.getCombatTarget(line)
if not target:
self.selfMsg("Block what?\n")
return False
self.gameObj.attackCreature(self.charObj, target, self.getLastCmd())
return False
def do_break(self, line):
""" alias - smash """
return self.do_smash(line)
def do_bribe(self, line):
""" transaction - bribe a creature to vanish """
cmdargs = line.split(" ")
charObj = self.charObj
roomObj = charObj.getRoom()
if len(cmdargs) < 2:
self.selfMsg("Try 'bribe <creature> <amount>'\n")
return False
if not isIntStr(cmdargs[1]):
self.selfMsg("How many shillings are you trying to bribe with?'\n")
return False
creatureName = cmdargs[0]
coins = int(cmdargs[1])
roomCreatureList = roomObj.getCreatureList()
itemList = self.getObjFromCmd(roomCreatureList, creatureName)
if not itemList[0]:
self.selfMsg("Who are you trying to bribe?\n")
return False
creatureObj = itemList[0]
if creatureObj:
if creatureObj.acceptsBribe(charObj, coins):
# Bribe succeeds - money is already subtracted
self.selfMsg(
creatureObj.describe(article="The")
+ " accepts your offer and leaves\n"
)
roomObj.removeFromInventory(creatureObj)
return False
else:
# Bribe failed - contextual response already provided
charObj.setHidden(False)
return False
def do_brief(self, line):
""" set the prompt and room description to least verbosity """
self.charObj.setPromptSize("brief")
def do_broadcast(self, line):
""" communication - send to everyone in the game
* players are limited to X broadcasts per day (currently 5)
* log broadcasted messages, in case of abuse. """
if not self.charObj.getLimitedBroadcastCount():
self.selfMsg("You have used all of your broadcasts for today\n")
return False
if line == "":
msg = self.client.promptForInput("Enter Input: ")
else:
msg = line
if msg != "":
fullmsg = self.charObj.getName() + " broadcasted, '" + msg + "'"
if self.gameObj.gameMsg(fullmsg + "\n"):
logger.info(fullmsg)
self.charObj.reduceLimitedBroadcastCount()
else:
self.selfMsg("Message not received\n")
def do_buy(self, line):
""" transaction - buy something from a vendor """
cmdargs = line.split(" ")
charObj = self.charObj
roomObj = charObj.getRoom()
if not roomObj.getType() == "Shop":
self.selfMsg("You can't do that here. Find a vendor\n")
return False
if not roomObj.isVendor():
self.selfMsg("You can't do that here. Find a vendor\n")
return False
if len(cmdargs) < 1 or not isIntStr(cmdargs[0]):
self.selfMsg("usage: buy <item> [#]\n")
return False
catList = roomObj.getCatalog()
if int(cmdargs[0]) < 0 or int(cmdargs[0]) > (len(catList)) - 1:
self.selfMsg("Bad item number. Aborted\n")
return False
catItem = catList[int(cmdargs[0])]
oType, oNum = catItem.split("/")
itemObj = ObjectFactory(oType, oNum)
itemObj.load()
price = self.gameObj.calculateObjectPrice(charObj, itemObj)
# check if player has the funds
if not charObj.canAffordAmount(price):
self.selfMsg(roomObj.getCantAffordTxt())
return False
# check if player can carry the Weight
weight = itemObj.getWeight()
if not charObj.canCarryAdditionalWeight(weight):
self.selfMsg(roomObj.getCantCarryTxt(weight))
return False
# prompt player for confirmation
prompt = (
"You are about to spend "
+ str(price)
+ " shillings for "
+ itemObj.getArticle()
+ " "
+ itemObj.getName()
+ ". Proceed?"
)
successTxt = roomObj.getSuccessTxt()
abortTxt = roomObj.getAbortedTxt()
self.gameObj.buyTransaction(
charObj, itemObj, price, prompt, successTxt, abortTxt
)
def do_cast(self, line):
""" magic """
cmdargs = line.split(" ")
charObj = self.charObj
roomObj = charObj.getRoom()
        if len(cmdargs) < 1 or cmdargs[0] == "":
            self.selfMsg("Cast what spell?\n")
            return False
        spellName = cmdargs[0]
        line = line[len(spellName):].lstrip()  # strip the spell-name prefix, not its characters
if spellName not in SpellList:
self.selfMsg("That's not a valid spell.\n")
return False
if not charObj.knowsSpell(spellName):
self.selfMsg("You haven't learned that spell.\n")
return False
if len(cmdargs) > 1:
possibleTargets = charObj.getInventory() + roomObj.getCharsAndInventory()
targetList = self.getObjFromCmd(possibleTargets, line)
if targetList[0]:
targetObj = targetList[0]
else:
self.selfMsg("Could not determine target for spell.\n")
return False
else:
targetObj = self.charObj
spellObj = Spell(charObj, targetObj, spellName)
# Apply effects of spell
spellObj.cast(roomObj)
def do_catalog(self, line):
""" info - get the catalog of items from a vendor """
charObj = self.charObj
roomObj = charObj.getRoom()
if not roomObj.getType() == "Shop":
self.selfMsg("You can't do that here. Find a vendor\n")
return False
if not roomObj.isVendor():
self.selfMsg("You can't do that here. Find a vendor\n")
return False
# display # list by iterating, loading, & displaying objs
itemBuf = ""
for num, oneitem in enumerate(roomObj.getCatalog()):
oType, oNum = oneitem.split("/")
itemObj = ObjectFactory(oType, oNum)
itemObj.load()
# calculate price
price = self.gameObj.calculateObjectPrice(charObj, itemObj)
ROW_FORMAT = " ({0:2}) {1:<7} {2:<60}\n"
itemBuf += ROW_FORMAT.format(num, price, itemObj.describe())
if itemBuf != "":
self.selfMsg(
"Catalog of items for sale\n"
+ ROW_FORMAT.format("#", "Price", "Description")
+ itemBuf
)
def do_circle(self, line):
""" combat - If creature is not attacking, then delay their first
attack by X seconds """
target = self.getCombatTarget(line)
if not target:
self.selfMsg("Circle what?\n")
return False
if target.isAttacking():
self.selfMsg("You can't circle an attacking creature\n")
return False
self.gameObj.circle(self.charObj, target, self.getLastCmd())
self.selfMsg("Ok.\n")
return False
def do_climb(self, line):
""" alias - go """
return self.do_go(line)
def do_clock(self, line):
""" info - time """
self.selfMsg(dateStr("now") + "\n")
def do_close(self, line):
""" close a door or container """
charObj = self.charObj
roomObj = charObj.getRoom()
itemList = self.getObjFromCmd(roomObj.getInventory(), line)
if not itemList[0]:
self.selfMsg("usage: close <item> [number]\n")
return False
targetObj = itemList[0]
if not targetObj.isClosable(charObj):
if targetObj.isClosed():
self.selfMsg("It's already closed.\n")
else:
self.selfMsg("You can not close that!\n")
return False
if targetObj.close(charObj):
self.selfMsg("Ok\n")
if targetObj.getType() == "Door":
self.gameObj.modifyCorrespondingDoor(targetObj, charObj)
return False
else:
self.selfMsg(
"You can not close " + targetObj.describe(article="the") + "\n"
)
return False
def do_d(self, line):
""" navigation """
self.move(self._lastinput[0]) # pass first letter
def do_debug(self, line):
""" dm - show raw debug info abot an item/room/character/etc """
cmdargs = line.split(" ")
charObj = self.charObj
roomObj = charObj.getRoom()
if not charObj.isDm():
self.selfMsg("Unknown Command\n")
return False
if len(cmdargs) == 0:
self.selfMsg("usage: debug <room | self | object>")
return False
buf = ""
if cmdargs[0].lower() == "room":
buf += "=== Debug Info for Room " + str(roomObj.getId()) + " ===\n"
buf += roomObj.debug() + "\n"
elif cmdargs[0].lower() == "game":
buf += "=== Debug Info for game ===\n"
buf += self.gameObj.debug() + "\n"
elif cmdargs[0].lower() == "self":
buf += "=== Debug Info for Self " + str(charObj.getId()) + " ===\n"
buf += charObj.debug() + "\n"
else:
itemList = self.getObjFromCmd(
roomObj.getCharsAndInventory() + charObj.getInventory(), line
)
if itemList[0]:
buf += (
"=== Debug Info for Object " + str(itemList[0].getId()) + " ===\n"
)
buf += itemList[0].debug() + "\n"
self.selfMsg(buf)
return None
def do_deposit(self, line):
""" transaction - make a deposit in the bank """
cmdargs = line.split(" ")
charObj = self.charObj
roomObj = charObj.getRoom()
if not roomObj.getType() == "Shop":
self.selfMsg("You can't do that here. Find a bank\n")
return False
if not roomObj.isBank():
self.selfMsg("You can't do that here. Find a bank\n")
return False
if len(cmdargs) < 1 or not isIntStr(cmdargs[0]):
self.selfMsg("usage: deposit <amount>\n")
return False
# check if player has the funds
amount = int(cmdargs[0])
if not charObj.canAffordAmount(amount):
self.selfMsg(roomObj.getCantAffordTxt(amount))
return False
taxRate = roomObj.getTaxRate()
bankfee, dAmount = charObj.calculateBankFees(amount, taxRate)
prompt = (
"You are about to deposit " + str(amount) + " shillings into the bank.\n"
)
if taxRate != 0:
prompt += (
"The bank charges "
+ "a "
+ str(taxRate)
+ "% deposit fee which comes to a "
+ str(bankfee)
+ " shilling charge.\n"
+ "Your account will increase by "
+ str(dAmount)
+ " shillings.\n"
)
prompt += "Continue?"
if self.client.promptForYN(prompt):
charObj.bankDeposit(amount, taxRate)
roomObj.recordTransaction("deposit/" + str(dAmount))
roomObj.recordTransaction("fees/" + str(bankfee))
self.selfMsg(roomObj.getSuccessTxt())
return False
else:
self.selfMsg(roomObj.getAbortedTxt())
return False
def do_destroy(self, line):
""" dm - destroy an object or creature """
if not self.charObj.isDm():
self.selfMsg("Unknown Command\n")
return False
charObj = self.charObj
roomObj = charObj.getRoom()
roomObjList = self.getObjFromCmd(roomObj.getInventory(), line)
if roomObjList[0]:
roomObj.removeObject(roomObjList[0])
roomObj.save()
self.selfMsg("ok\n")
return False
charObjList = self.getObjFromCmd(charObj.getInventory(), line)
if charObjList[0]:
roomObj.removeFromInventory(charObjList[0])
self.selfMsg("ok\n")
return False
def do_dminfo(self, line):
""" dm - show char info that isn't directly avaliable to players """
if not self.charObj.isDm():
return False
self.selfMsg(self.charObj.dmInfo())
def do_dm_on(self, line):
""" admin - Turn DM mode on """
if self.acctObj.isAdmin():
self.charObj.setDm()
self.selfMsg("ok\n")
logger.info("{} just became a DM".format(self.charObj.getName()))
def do_dm_off(self, line):
""" dm - turn dm mode off """
if self.charObj.isDm():
self.charObj.removeDm()
self.selfMsg("ok\n")
else:
self.selfMsg("Unknown Command\n")
def do_down(self, line):
""" navigation """
self.move(self._lastinput[0]) # pass first letter
def do_draw(self, line):
""" alias - use """
return self.do_use(line)
def do_drink(self, line):
""" alias - use """
return self.do_use(line)
def do_drop(self, line):
""" drop an item """
charObj = self.charObj
roomObj = charObj.getRoom()
charObjList = charObj.getInventory()
targetList = self.getObjFromCmd(charObjList, line)
if not targetList[0]:
self.selfMsg("What are you trying to drop?\n")
return False
if charObj.removeFromInventory(targetList[0]):
charObj.unEquip(targetList[0])
roomObj.addObject(targetList[0])
self.selfMsg("Ok\n")
else:
self.selfMsg("Didn't work\n")
def do_e(self, line):
""" navigation """
self.move(self._lastinput[0]) # pass first letter
def do_east(self, line):
""" navigation """
self.move(self._lastinput[0]) # pass first letter
def do_echo(self, line):
self.selfMsg(line + " not implemented yet\n")
def do_enter(self, line):
""" alias - go """
if line == "":
return self.missingArgFailure()
self.move(line)
def do_equip(self, line):
""" alias - use """
return self.do_use(line)
def do_examine(self, line):
""" alias - look """
return self.do_look(line)
def do_exit(self, line):
""" exit game - returns True to exit command loop """
return True
def do_exp(self, line):
self.selfMsg(self.charObj.expInfo())
def do_experience(self, line):
""" info - show character's exp info """
self.selfMsg(self.charObj.expInfo())
def do_feint(self, line):
""" combat """
target = self.getCombatTarget(line)
if not target:
self.selfMsg("Feint at what?\n")
return False
self.gameObj.attackCreature(self.charObj, target, self.getLastCmd())
return False
def do_file(self, line):
""" info - show characters attached to account """
self.selfMsg(self.acctObj.showCharacterList())
def do_follow(self, line):
""" follow another player - follower is moved when they move """
charObj = self.charObj
roomObj = charObj.getRoom()
charList = self.getObjFromCmd(roomObj.getCharacterList(), line)
targetCharObj = charList[0]
if not targetCharObj or not targetCharObj.getType() == "Character":
self.selfMsg("You can't follow that\n")
charObj.setFollow() # Unset follow attribute
return False
if targetCharObj is charObj:
self.selfMsg("You can't follow yourself\n")
charObj.setFollow() # Unset follow attribute
return False
charObj.setFollow(targetCharObj)
self.selfMsg("ok\n")
if not charObj.isHidden():
self.gameObj.charMsg(targetCharObj,
"{} follows you\n".format(charObj.getName()))
return(False)
def do_full(self, line):
""" set the prompt and room descriptions to maximum verbosity """
self.charObj.setPromptSize("full")
def do_get(self, line): # noqa: C901
""" pick up an item """
charObj = self.charObj
roomObj = charObj.getRoom()
itemList = self.getObjFromCmd(roomObj.getInventory(), line)
itemObj = itemList[0]
containerObj = itemList[1]
if not itemObj:
return self.missingArgFailure()
if itemObj.getType() == "Container":
if containerObj:
# Player is trying to put a container from the room into a
# container in the room. Let's just say no to that
self.selfMsg("You can't put a container in a container\n")
return False
else:
# The 1st item was not found, so the container is the 1st item
containerObj = itemObj
if containerObj:
if not containerObj.getType() == "Container":
self.selfMsg("That's not a container?\n")
return False
# Find target item in the container
cList = self.getObjFromCmd(containerObj.getInventory(), line)
itemObj = cList[0]
if not itemObj:
self.selfMsg("Put what in there?\n")
return False
if not itemObj.isCarryable():
self.selfMsg(itemObj.describe() + " can not be carried.\n")
return False
if not charObj.canCarryAdditionalWeight(itemObj.getWeight()):
self.selfMsg("You are not strong enough.\n")
return False
guardingCreatureObj = roomObj.getGuardingCreature()
if guardingCreatureObj:
self.selfMsg(
guardingCreatureObj.describe() + " blocks you from taking that.\n"
)
return False
if containerObj:
if containerObj.withdraw(charObj, itemObj):
self.selfMsg("ok\n")
else:
# Get item from room
roomObj.removeObject(itemObj)
if itemObj.getType() == "Coins":
charObj.addCoins(itemObj.getValue())
else:
charObj.addToInventory(itemObj)
self.selfMsg("Ok\n")
def do_go(self, line):
""" go through a door or portal """
if line == "":
self.selfMsg("Go where?\n")
self.move(line)
def do_goto(self, line):
""" dm - teleport directly to a room """
cmdargs = line.split(" ")
charObj = self.charObj
if not self.charObj.isDm():
self.selfMsg("Unknown Command\n")
return False
if len(cmdargs) == 0:
self.selfMsg("usage: goto <room>\n")
return False
self.gameObj.joinRoom(cmdargs[0], charObj)
self.selfMsg(charObj.getRoom().display(charObj))
def do_h(self, line):
""" alias - health """
charObj = self.charObj
self.selfMsg(charObj.healthInfo())
def do_hea(self, line):
""" alias - health """
charObj = self.charObj
self.selfMsg(charObj.healthInfo())
def do_health(self, line):
""" info - show character's health """
charObj = self.charObj
self.selfMsg(charObj.healthInfo())
def do_help(self, line):
""" info - enter the help system """
enterHelp(self.client)
def do_hide(self, line):
""" attempt to hide player or item
* hidden players aren't attacked by creatures and don't show
up in room listings unless they are searched for.
* hidden items don't show up in room listings. """
# cmdargs = line.split(' ')
charObj = self.charObj
if line == "":
canhide = True
# can't hide if there are engaged creatures in the room, even if
# they are attacking someone else.
for creatObj in charObj.getRoom().getCreatureList():
if creatObj.isAttacking():
canhide = False
if canhide:
charObj.attemptToHide()
msg = "You hide in the shadows"
else:
msg = "You are noticed as you hide in the shadows"
charObj.setHidden(False)
if charObj.isDm():
msg += "(" + str(charObj.isHidden()) + ")"
self.selfMsg(msg + "\n")
else:
self.selfMsg(line + " not implemented yet\n")
def do_hint(self, line):
self.selfMsg(line + " not implemented yet\n")
def do_hit(self, line):
""" combat """
target = self.getCombatTarget(line)
if not target:
self.selfMsg("Hit what?\n")
return False
self.gameObj.attackCreature(self.charObj, target, self.getLastCmd())
return False
def do_hold(self, line):
""" alias - use """
return self.do_use(line)
def do_identify(self, line):
""" info - Show detailed information about a item or character
* this is considered a limited use spell """
self.selfMsg(line + " not implemented yet\n")
def do_info(self, line):
""" alias - information """
self.selfMsg(self.charObj.getInfo())
def do_information(self, line):
""" info - show all information about a character to that character """
self.selfMsg(self.charObj.getInfo())
def do_inv(self, line):
""" alias - inventory """
self.selfMsg(self.charObj.inventoryInfo())
def do_inventory(self, line):
""" info - show items that character is carrying """
self.selfMsg(self.charObj.inventoryInfo())
def do_kill(self, line):
""" combat """
target = self.getCombatTarget(line)
if not target:
self.selfMsg("Kill what?\n")
return False
self.gameObj.attackCreature(self.charObj, target, self.getLastCmd())
return False
def do_laugh(self, line):
""" communication - reaction """
charObj = self.charObj
roomObj = charObj.getRoom()
extramsg = ""
if line != "":
extramsg = " " + line
self.gameObj.roomMsg(roomObj, charObj.getName() + " laughs" + extramsg + "\n")
charObj.setHidden(False)
def do_list(self, line):
""" alias - file """
return self.do_catalog(line)
def do_lock(self, line):
""" lock an object with a key """
charObj = self.charObj
roomObj = charObj.getRoom()
roomObjList = roomObj.getInventory()
fullObjList = charObj.getInventory() + roomObjList
itemList = self.getObjFromCmd(fullObjList, line)
itemObj = itemList[0]
keyObj = itemList[1]
if not itemList[0]:
return self.missingArgFailure()
if not keyObj:
self.selfMsg("You can't lock anything without a key\n")
return False
if not itemObj.isLockable():
if itemObj.isLocked():
self.selfMsg("It's already locked!\n")
elif itemObj.isOpen():
self.selfMsg("You can't lock it when it's open!\n")
else:
self.selfMsg("This is not lockable!\n")
return False
if keyObj.getLockId() != itemObj.getLockId():
self.selfMsg("The key doesn't fit the lock\n")
return False
itemObj.lock()
if itemObj.getType() == "Door":
self.gameObj.modifyCorrespondingDoor(itemObj, charObj)
self.selfMsg("Ok\n")
return False
def do_look(self, line):
""" examine a creature, object, or player
* includes items in both the room and in the character inventory
"""
roomObj = self.charObj.getRoom()
# Experimenting with sorting. Not sure if we want this, so we have a
# Flag for now
sortList = False
if sortList:
allItems = itemSort(roomObj.getCharsAndInventory()) + itemSort(
self.charObj.getInventory()
)
else:
allItems = roomObj.getCharsAndInventory() + self.charObj.getInventory()
itemList = self.getObjFromCmd(allItems, line)
if line == "": # display the room
msg = roomObj.display(self.charObj)
if not re.search("\n$", msg):
msg += "\n"
self.selfMsg(msg)
return False
if not itemList[0]:
self.selfMsg("You must be blind because you " + "don't see that here\n")
return False
msg = itemList[0].examine()
if not re.search("\n$", msg):
msg += "\n" # append newline if needed
self.selfMsg(msg) # display the object
return False
def do_lose(self, line):
""" attempt to ditch someone that is following you """
roomObj = self.charObj.getRoom()
charList = self.getObjFromCmd(roomObj.getCharacterList(), line)
targetCharObj = charList[0]
if not targetCharObj:
self.selfMsg("You can't lose that\n")
return False
# Need to determine if lose succeeds, based on odds
targetCharObj.setFollow(None)
self.selfMsg("ok\n")
# Notify target that they have been lost
self.gameObj.charMsg(targetCharObj,
"{} loses you".format(self.charObj.getName()))
return False
def do_lunge(self, line):
""" combat """
target = self.getCombatTarget(line)
if not target:
self.selfMsg("Lunge at what?\n")
return False
self.gameObj.attackCreature(self.charObj, target, self.getLastCmd())
return False
def do_n(self, line):
""" navigation """
self.move(self._lastinput[0]) # pass first letter
def do_north(self, line):
""" navigation """
self.move(self._lastinput[0]) # pass first letter
def do_now(self, line):
""" alias - clock """
        return self.do_clock(line)
def do_o(self, line):
""" navigation """
self.move(self._lastinput[0]) # pass first letter
def do_offer(self, line):
""" transaction - offer player money/items [in return for $/items] """
self.selfMsg(self.getLastCmd() + " not implemented yet\n")
def do_open(self, line):
""" Open a door or a chest """
charObj = self.charObj
roomObj = charObj.getRoom()
itemList = self.getObjFromCmd(roomObj.getInventory(), line)
if not itemList[0]:
return self.missingArgFailure()
itemObj = itemList[0]
if not itemObj.isOpenable(charObj):
if itemObj.isOpen():
self.selfMsg("It's already open.\n")
elif itemObj.isLocked():
self.selfMsg("You can't. It's locked.\n")
else:
self.selfMsg("You can't open that.\n")
return False
if itemObj.getType() == "Container":
if itemObj.hasToll():
toll = itemObj.getToll()
if charObj.canAffordAmount(toll):
charObj.subtractCoins(toll)
self.selfMsg("You paid a toll of {} coins.".format(toll))
else:
self.selfMsg(
"Opening this item requires more coins than you have\n"
)
return False
if itemObj.open(charObj):
self.selfMsg("You open it.\n")
self.othersMsg(
roomObj,
charObj.getName() + " opens the " + itemObj.getSingular() + "\n",
charObj.isHidden(),
)
if itemObj.getType() == "Door":
self.gameObj.modifyCorrespondingDoor(itemObj, charObj)
return False
else:
self.selfMsg("You fail to open the door.\n")
return False
def do_out(self, line):
""" navigation """
self.move(self._lastinput[0]) # pass first letter
def do_panic(self, line):
""" alias - run """
self.selfMsg(line + " not implemented yet\n")
def do_parley(self, line):
""" communication - talk to a npc """
charObj = self.charObj
roomObj = charObj.getRoom()
roomCreatureList = roomObj.getCreatureList()
itemList = self.getObjFromCmd(roomCreatureList, line)
if not itemList[0]:
self.selfMsg(self.getLastCmd() + " with whom?\n")
return False
creat1 = itemList[0]
msg = creat1.getParleyTxt() + "\n"
if creat1.getParleyAction().lower() == "teleport":
self.selfMsg(msg)
self.gameObj.joinRoom(creat1.getParleyTeleportRoomNum(), charObj)
elif creat1.getParleyAction().lower() == "sell":
saleItem = creat1.getParleySaleItem()
if saleItem:
price = int(saleItem.getValue() * 0.9)
prompt = (
msg
+ " Would you like to buy "
+ saleItem.describe()
+ " for "
                    + str(price)
+ "?"
)
successTxt = (
"It's all yours. Don't tell anyone " + "that you got it from me"
)
abortTxt = "Another time, perhaps."
self.gameObj.buyTransaction(
charObj, saleItem, price, prompt, successTxt, abortTxt
)
else:
self.selfMsg("I have nothing to sell.\n")
else:
self.selfMsg(msg)
charObj.setHidden(False)
def do_parry(self, line):
""" combat """
target = self.getCombatTarget(line)
if not target:
self.selfMsg("Parry at what?\n")
return False
self.gameObj.attackCreature(self.charObj, target, self.getLastCmd())
return False
def do_pawn(self, line):
""" alias - sell """
        return self.do_sell(line)
def do_picklock(self, line):
""" attempt to pick the lock on a door or container and open it """
charObj = self.charObj
roomObj = charObj.getRoom()
itemList = self.getObjFromCmd(roomObj.getInventory(), line)
if not itemList[0]:
self.selfMsg("pick which item with a lock?\n")
return False
itemObj = itemList[0]
if not itemObj.isPickable():
self.selfMsg("You can't pick that.\n")
return False
if itemObj.pick(charObj):
self.selfMsg("You pick the lock.\n")
self.othersMsg(
roomObj,
charObj.getName()
+ " picks the "
+ "lock on the "
+ itemObj.getSingular()
+ "\n",
charObj.isHidden(),
)
return False
else:
self.selfMsg("You fail to pick the lock.\n")
self.othersMsg(
roomObj,
charObj.getName()
+ " fails to pick the lock on the "
+ itemObj.getSingular()
+ "\n",
charObj.isHidden(),
)
return False
def do_prompt(self, line):
""" set verbosity """
self.charObj.setPromptSize("")
def do_purse(self, line):
""" info - display money """
charObj = self.charObj
self.selfMsg(charObj.financialInfo())
def do_put(self, line):
""" place an item in a container """
charObj = self.charObj
roomObj = charObj.getRoom()
charObjList = charObj.getInventory()
roomObjList = roomObj.getInventory()
targetList = self.getObjFromCmd(charObjList + roomObjList, line)
if not targetList[0]:
return self.missingArgFailure()
itemObj = targetList[0]
containerObj = targetList[1]
if not itemObj:
self.selfMsg("What are you trying to put?\n")
return False
if not containerObj:
self.selfMsg("What are you trying to put where?\n")
return False
if containerObj.getType() != "Container":
self.selfMsg("You can't put anything in that!\n")
return False
if containerObj.deposit(charObj, itemObj):
charObj.unEquip(itemObj)
self.selfMsg("ok\n")
return False
self.selfMsg("Didn't work!\n")
return False
def do_quit(self, line):
""" quit the game """
return self.do_exit(line)
def do_read(self, line):
""" magic - read a scroll to use the spell """
if line == "":
return self.missingArgFailure()
self.useMagicItem(line)
return False
def do_reloadperm(self, line):
''' dm - reload permanents from disk (i.e. after modification) '''
roomObj = self.charObj.getRoom()
if not self.charObj.isDm():
self.selfMsg("Unknown Command\n")
return False
itemList = self.getObjFromCmd(roomObj.getInventory(), line)
if not itemList[0]:
self.selfMsg("usage: reloadperm <objectname>\n")
return False
roomObj.reloadPermanent(itemList[0].getId())
self.selfMsg("Ok\n")
return False
def do_remove(self, line):
""" unequip an item that you have equipped """
return self.do_unequip(line)
def do_repair(self, line):
""" transaction - repair character's item in a repair shop """
cmdargs = line.split(" ")
charObj = self.charObj
roomObj = charObj.getRoom()
if not roomObj.getType() == "Shop":
self.selfMsg("You can't do that here. Find a wright\n")
return False
if not roomObj.isRepairShop():
self.selfMsg("You can't do that here. Find a wright\n")
return False
        if line == "":
            self.selfMsg("usage: repair <item> [#]\n")
            return False
playerInventory = charObj.getInventory()
itemList = self.getObjFromCmd(playerInventory, line)
if not itemList[0]:
return self.missingArgFailure()
itemObj = itemList[0]
if not itemObj.canBeRepaired():
self.selfMsg("This can't be repaired\n")
return False
price = self.gameObj.calculateObjectPrice(charObj, itemObj) * 100
prompt = (
"You are about to repair "
+ itemObj.getArticle()
+ " "
+ itemObj.getName()
+ " for "
+ str(price)
+ " shillings. Proceed?"
)
if self.client.promptForYN(prompt):
itemObj.repair()
roomObj.recordTransaction(itemObj)
roomObj.recordTransaction("repair/" + str(price))
charObj.recordTax(roomObj.getTaxAmount(price))
self.selfMsg(roomObj.getSuccessTxt())
return False
else:
self.selfMsg(roomObj.getAbortedTxt())
return False
def do_return(self, line):
""" alias - unequip """
        return self.do_unequip(line)
def do_roominfo(self, line):
"""' dm - show room info """
if not self.charObj.isDm():
self.selfMsg("Unknown Command\n")
return False
self.selfMsg(self.charObj.getRoom().getInfo())
def do_run(self, line):
""" drop weapon and escape room in random direction """
self.gameObj.run(self.charObj)
def do_s(self, line):
""" navigation """
self.move(self._lastinput[0]) # pass first letter
def do_save(self, line):
""" save character """
if self.client.charObj.save():
self.selfMsg("Saved\n")
else:
self.selfMsg("Could not save\n")
def do_say(self, line):
""" communication within room """
if line == "":
msg = self.client.promptForInput("Say what? ")
else:
msg = line
if msg != "":
fullmsg = self.charObj.getName() + " said, '" + msg + "'"
if self.gameObj.roomMsg(self.charObj.getRoom(), fullmsg + "\n"):
self.charObj.setHidden(False)
logger.info(fullmsg)
else:
self.selfMsg("Message not received\n")
def do_search(self, line):
""" attempt to find items, players, or creatures that are hidden """
charObj = self.charObj
roomObj = charObj.getRoom()
foundSomething = False
for obj in roomObj.getInventory():
if obj.isHidden():
if charObj.searchSucceeds(obj):
self.selfMsg("You find " + obj.describe() + "\n")
foundSomething = True
if not foundSomething:
self.selfMsg("Your search turns up nothing\n")
def do_sell(self, line):
""" transaction - Sell an item to a pawnshop """
charObj = self.charObj
roomObj = charObj.getRoom()
if not roomObj.getType() == "Shop":
self.selfMsg("You can't do that here. Find a buyer\n")
return False
if not roomObj.isPawnShop():
self.selfMsg("You can't do that here. Find a buyer.\n")
return False
itemList = self.getObjFromCmd(charObj.getInventory(), line)
if not itemList[0]:
return self.missingArgFailure()
itemObj = itemList[0]
price = int(self.gameObj.calculateObjectPrice(charObj, itemObj) * 0.8)
# prompt player for confirmation
prompt = (
"You are about to pawn "
+ itemObj.getArticle()
+ " "
+ itemObj.getName()
+ " for "
+ str(price)
+ " shillings. Proceed?"
)
self.gameObj.sellTransaction(
charObj,
itemObj,
price,
prompt,
roomObj.getSuccessTxt(),
roomObj.getAbortedTxt(),
)
def do_send(self, line):
""" communication - direct message to another player """
if line == "":
self.selfMsg("usage: send <playerName> [msg]\n")
return False
target, msg = self.parseIpc(line)
if msg != "":
fullmsg = self.charObj.getName() + " sent, '" + msg + "'"
if self.gameObj.directMsg(target, fullmsg + "\n"):
self.charObj.setHidden(False)
logger.info("To " + target.getName() + ", " + fullmsg)
else:
self.selfMsg("Message not received\n")
return False
def do_shout(self, line):
""" communication - alias for yell """
return self.do_yell(line)
def do_skills(self, line):
""" info - show character's skills """
self.selfMsg(self.charObj.SkillsInfo())
def do_slay(self, line):
""" dm - combat - do max damage to creature, effectively killing it """
target = self.getCombatTarget(line)
if not target:
self.selfMsg("Slay what?\n")
return False
if self.charObj.isDm():
atkcmd = "slay"
else:
            atkcmd = "attack"  # if you're not a dm, this is a standard attack
self.gameObj.attackCreature(self.charObj, target, atkcmd)
return False
def do_smash(self, line):
""" attempt to open a door/chest with brute force """
charObj = self.charObj
roomObj = charObj.getRoom()
itemList = self.getObjFromCmd(roomObj.getInventory(), line)
if not itemList[0]:
return self.missingArgFailure()
itemObj = itemList[0]
if not itemObj.isSmashable():
self.selfMsg("This is not smashable!\n")
return False
if itemObj.smash(charObj):
self.othersMsg(
roomObj,
charObj.getName()
+ " smashes the "
+ itemObj.getSingular()
+ " open.\n",
)
self.selfMsg("You smash it open!\n")
otherRoom = self.gameObj.getCorrespondingRoomObj(itemObj)
if otherRoom:
self.gameObj.roomMsg(
otherRoom, itemObj.getSingular() + " smashes open\n"
)
if itemObj.getType() == "Door":
self.gameObj.modifyCorrespondingDoor(itemObj, charObj)
return False
else:
self.othersMsg(
roomObj,
charObj.getName()
+ " fails to smash "
+ itemObj.describe()
+ " open.\n",
)
self.selfMsg("Bang! You fail to smash it open!\n")
otherRoom = self.gameObj.getCorrespondingRoomObj(itemObj)
if otherRoom:
self.gameObj.roomMsg(
otherRoom,
"You hear a noise on the "
+ "other side of the "
+ itemObj.getSingular()
+ "\n",
)
return False
def do_south(self, line):
""" navigation """
self.move(self._lastinput[0]) # pass first letter
def do_stats(self, line):
""" info - show character's stats """
self.selfMsg(self.charObj.StatsInfo())
def do_status(self, line):
""" alias - health """
        return self.do_health(line)
def do_steal(self, line):
""" transaction - attempt to steal from another player """
self.selfMsg(line + " not implemented yet\n")
def do_stopasync(self, line):
""" dm - stop async thread (which should trigger an automatic restart) """
if not self.charObj.isDm():
self.selfMsg("Unknown Command\n")
return False
self.gameObj._asyncThread.halt()
self.selfMsg("ok\n")
def do_strike(self, line):
""" combat """
target = self.getCombatTarget(line)
if not target:
self.selfMsg("Strike what?\n")
return False
self.gameObj.attackCreature(self.charObj, target, self.getLastCmd())
return False
def do_study(self, line):
""" magic - study a scroll to learn the chant """
charObj = self.charObj
if line == "":
return self.missingArgFailure()
(spellItem, spellName, targetObj) = self.parseSpellArgs(line)
if not spellItem:
self.selfMsg("Study what?\n")
return False
if not spellItem.getType().lower() == "scroll":
self.selfMsg("You can't study that.\n")
return False
# Learn the spell and display the chant
msg = spellItem.study(charObj)
self.selfMsg(msg)
# Remove item from player's inventory
self.gameObj.removeFromPlayerInventory(charObj, spellItem, "disint")
return False
    def do_suicide(self, line):
        """ permanently delete your character """
if not self.client.promptForYN(
"DANGER: This will permanently "
+ "delete your character."
+ " Are you sure?"
):
return False
charObj = self.charObj
charName = charObj.getName()
self.gameObj.leaveGame(self.client.charObj, saveChar=False)
msg = self.gameObj.txtBanner(
charName + " has shuffled off this mortal coil", bChar="="
)
charObj.delete()
charObj = None
self.charObj = None
self.acctObj.removeCharacterFromAccount(charName)
self.gameObj.gameMsg(msg)
logger.info("Character deleted: " + charName)
return True
def do_take(self, line):
""" alias - get """
return self.do_get(line)
def do_talk(self, line):
""" alias - parley """
return self.do_parley(line)
def do_teach(self, line):
""" teach another player a spell """
self.selfMsg(line + " not implemented yet\n")
def do_toggle(self, line):
""" dm command to set flags """
if self.charObj.isDm():
if (
line.lower() == "character"
or line.lower() == "char"
or line.lower() == "self"
):
obj = self.charObj
elif line.lower() == "room":
obj = self.charObj.getRoom()
elif line.lower() == "game":
obj = self.gameObj
elif line.lower() == "gamecmd":
obj = self
elif line.lower() == "client":
obj = self.client
else:
roomObj = self.charObj.getRoom()
itemList = self.getObjFromCmd(roomObj.getCharsAndInventory(), line)
if itemList[0]:
obj = itemList[0]
else:
self.selfMsg("Can't toggle " + line + "\n")
self.selfMsg(
"Fixed toggles:\n" + " self, room, game, gamecmd, client\n"
)
return False
else:
self.selfMsg("Unknown Command\n")
return False
obj.toggleInstanceDebug()
self.selfMsg(
"Toggled " + line + ": debug=" + str(obj.getInstanceDebug()) + "\n"
)
return False
def do_thrust(self, line):
""" combat """
target = self.getCombatTarget(line)
if not target:
self.selfMsg("Thrust at what?\n")
return False
self.gameObj.attackCreature(self.charObj, target, self.getLastCmd())
return False
def do_track(self, line):
""" show direction last player traveled """
self.selfMsg(line + " not implemented yet\n")
def do_train(self, line):
""" increase level if exp and location allow """
charObj = self.charObj
roomObj = charObj.getRoom()
roomObj.train(charObj)
def do_turn(self, line):
""" magic - chance for clerics/paladins to destroy creatures """
self.selfMsg(line + " not implemented yet\n")
def do_u(self, line):
""" navigation """
self.move(self._lastinput[0]) # pass first letter
def do_unequip(self, line):
""" stop using a piece of equiptment """
charObj = self.charObj
targetList = self.getObjFromCmd(charObj.getInventory(), line)
        if not targetList[0]:
            return self.missingArgFailure()
        itemObj = targetList[0]
if charObj.unEquip(itemObj):
self.selfMsg("Ok\n")
else:
self.selfMsg("You can't do that\n")
def do_unfollow(self, line):
""" unfollow - stop following """
self.charObj.setFollow() # Unset follow attribute
self.selfMsg("ok\n")
return False
def do_unlock(self, line):
""" unlock a door/chest with a key """
charObj = self.charObj
roomObj = charObj.getRoom()
roomObjList = roomObj.getInventory()
fullObjList = charObj.getInventory() + roomObjList
itemList = self.getObjFromCmd(fullObjList, line)
if not itemList[0]:
return self.missingArgFailure()
itemObj = itemList[0]
keyObj = itemList[1]
if not keyObj:
self.selfMsg("You can't lock anything without a key\n")
return False
if not itemObj.isUnlockable():
if itemObj.isUnlocked():
self.selfMsg("It's already unlocked!\n")
elif itemObj.isOpen():
self.selfMsg("You can't unlock it when it's open!\n")
else:
self.selfMsg("This is not unlockable!\n")
return False
if keyObj.getLockId() != itemObj.getLockId():
self.selfMsg("The key doesn't fit the lock\n")
return False
if itemObj.unlock(keyObj):
if itemObj.getType() == "Door":
self.gameObj.modifyCorrespondingDoor(itemObj, charObj)
self.selfMsg("You unlock the lock.\n")
self.othersMsg(
roomObj,
charObj.getName()
+ " unlocks the "
+ "lock on the "
+ itemObj.getSingular()
+ "\n",
charObj.isHidden(),
)
return False
else:
self.selfMsg("You fail to unlock the lock.\n")
self.othersMsg(
roomObj,
charObj.getName()
+ " fails to "
+ "unlock the lock on the "
+ itemObj.getSingular()
+ "\n",
charObj.isHidden(),
)
return False
return False
def do_up(self, line):
""" navigation """
self.move(self._lastinput[0]) # pass first letter
def do_use(self, line):
""" equip an item or use a scroll or magic item """
if line == "":
return self.missingArgFailure()
charObj = self.charObj
roomObj = charObj.getRoom()
objList = charObj.getInventory() + roomObj.getCharsAndInventory()
targetList = self.getObjFromCmd(objList, line)
itemObj = None
# Require at least one arg after command
for target in targetList:
if not target:
continue
if not target.isUsable():
continue
if not itemObj:
itemObj = target
if not itemObj:
return self.missingArgFailure()
type = itemObj.getType()
if type == "Character" or type == "Creature":
return self.missingArgFailure()
if isObjectFactoryType(type):
self.useObject(itemObj, line)
return False
        logger.warning(
"game.do_use: Attempt to use: "
+ itemObj.describe()
+ " - with type "
+ type
)
if roomObj: # tmp - remove later if room object is not needed here
pass # but there may be spells/items that affect the room.
def do_w(self, line):
""" navigation """
self.move(self._lastinput[0]) # pass first letter
def do_wear(self, line):
""" alias - use """
return self.do_use(line)
def do_west(self, line):
""" navigation """
self.move(self._lastinput[0]) # pass first letter
def do_where(self, line):
""" alias - look """
return self.do_look(line)
def do_whisper(self, line):
""" communication - char to char, with chance of being overheard """
if line == "":
self.selfMsg("usage: whisper <playerName> [txt]\n")
return False
target, msg = self.parseIpc(line)
received = False
charName = self.charObj.getName()
for oneChar in self.charObj.getRoom().getCharacterList():
if target == oneChar: # if is recipient
oneChar.client.spoolOut(
charName + " whispers, '" + msg + "'\n" # notify
)
received = True
else:
if not oneChar.hearsWhispers():
continue
oneChar.client.spoolOut(
"You overhear " + charName + " whisper " + msg + "\n"
)
self.charObj.setHidden(False)
if received:
self.selfMsg("ok\n")
else:
self.selfMsg("Message not received\n")
return False
def do_who(self, line):
""" info - show who is playing the game """
charTxt = ""
charObj = self.charObj
charFormat = " {:20} - {:16} - {:20}\n"
header = " Characters currently playing:\n"
header += charFormat.format("Character Name", "Login Date", "Account")
header += charFormat.format("-" * 20, "-" * 16, "-" * 20)
for onechar in charObj.getRoom().getCharacterList():
            charTxt += charFormat.format(onechar.getName(),
                                         dateStr(onechar.getLastLoginDate()),
                                         onechar.client.acctObj.getDisplayName())
self.selfMsg(header + charTxt)
return None
def do_wield(self, line):
""" alias - use """
return self.do_use(line)
def do_withdraw(self, line):
""" transaction - take money out of the bank """
cmdargs = line.split(" ")
charObj = self.charObj
roomObj = charObj.getRoom()
if not roomObj.getType() == "Shop":
self.selfMsg("You can't do that here. Find a bank\n")
return False
if not roomObj.isBank():
self.selfMsg("You can't do that here. Find a bank\n")
return False
if len(cmdargs) < 1 or not isIntStr(cmdargs[0]):
self.selfMsg("usage: withdraw <amount>\n")
return False
amount = int(cmdargs[0])
if not charObj.canWithdraw(amount):
self.selfMsg(roomObj.getCantAffordTxt(amount))
return False
taxRate = roomObj.getTaxRate()
bankfee, wAmount = charObj.calculateBankFees(amount, taxRate)
prompt = (
"You are about to withdraw " + str(amount) + " shillings from the bank.\n"
)
if taxRate != 0:
prompt += (
"The bank charges a "
+ str(taxRate)
+ "% withdrawl fee which comes to a charge of "
+ str(bankfee)
+ "shillings.\n"
+ "As a result, you will receive "
+ str(wAmount)
+ " shillings.\n"
)
prompt += "Continue?"
if self.client.promptForYN(prompt):
charObj.bankWithdraw(amount, taxRate)
roomObj.recordTransaction("withdrawl/" + str(wAmount))
roomObj.recordTransaction("fees/" + str(bankfee))
self.selfMsg(roomObj.getSuccessTxt())
return False
else:
self.selfMsg(roomObj.getAbortedTxt())
return False
def do_yell(self, line):
""" communication - all in room and adjoining rooms """
if line == "":
msg = self.client.promptForInput(self.getLastCmd() + " what? ")
else:
msg = line
if msg != "":
fullmsg = self.charObj.getName() + " yelled, '" + msg + "'"
if self.gameObj.yellMsg(self.charObj.getRoom(), fullmsg + "\n"):
logger.info(fullmsg)
self.charObj.setHidden(False)
else:
self.selfMsg("Message not received\n")
# instantiate the _Game class
_game = _Game()
def Game():
""" return a reference to the single, existing _game instance
    Thus, when we try to instantiate Game, we are just returning
a ref to the existing Game """
return _game
|
StarcoderdataPython
|
88627
|
>>> print ( '\n'.join(''.join(x) for x in zip('abc', 'ABC', '123')) )
aA1
bB2
cC3
>>>
|
StarcoderdataPython
|
4971428
|
<filename>python/reactive_planners/dcm_reactive_stepper.py
#!/usr/bin/env python
""" @namespace Controller using the dcm_vrp_planner.
@file
@copyright Copyright (c) 2017-2019,
New York University and Max Planck Gesellschaft,
License BSD-3-Clause
"""
import numpy as np
from reactive_planners_cpp import (
StepperHead,
DcmVrpPlanner,
EndEffectorTrajectory3D,
)
class DcmReactiveStepper(object):
def __init__(
self,
is_left_leg_in_contact,
l_min,
l_max,
w_min,
w_max,
t_min,
t_max,
l_p,
com_height,
weight,
mid_air_foot_height,
control_period,
previous_support_foot=None,
current_support_foot=None,
):
if previous_support_foot is None:
previous_support_foot = np.zeros((3, 1))
previous_support_foot[:] = [[0.0], [0.0], [0.0]]
if current_support_foot is None:
current_support_foot = np.zeros((3, 1))
current_support_foot[:] = [[0.0], [0.0], [0.0]]
self.control_period = control_period
# Create the stepper head.
self.stepper_head = StepperHead()
self.previous_support_foot = np.zeros((3, 1))
self.current_support_foot = np.zeros((3, 1))
self.previous_support_foot[:] = previous_support_foot
self.current_support_foot[:] = current_support_foot
self.stepper_head.set_support_feet_pos(
self.previous_support_foot, self.current_support_foot
)
# Create the dcm vrp planner and initialize it.
self.dcm_vrp_planner = DcmVrpPlanner()
self.dcm_vrp_planner.initialize(
l_min, l_max, w_min, w_max, t_min, t_max, l_p, com_height, weight
)
# Create the end-effector trajecotry generator.
self.end_eff_traj3d = EndEffectorTrajectory3D()
self.end_eff_traj3d.set_mid_air_height(mid_air_foot_height)
# Parameters
self.is_left_leg_in_contact = is_left_leg_in_contact
self.duration_before_step_landing = 0.0
self.time_from_last_step_touchdown = 0.0
self.des_com_vel = np.zeros((3, 1))
self.right_foot_position = np.zeros((3, 1))
self.right_foot_position[:] = previous_support_foot
self.right_foot_velocity = np.zeros((3, 1))
self.right_foot_acceleration = np.zeros((3, 1))
self.flying_foot_position = np.zeros((3, 1))
self.flying_foot_position[:] = previous_support_foot
self.left_foot_position = np.zeros((3, 1))
self.left_foot_position[:] = current_support_foot
self.left_foot_velocity = np.zeros((3, 1))
self.left_foot_acceleration = np.zeros((3, 1))
self.feasible_velocity = np.zeros((3, 1))
def set_end_eff_traj_costs(
self, cost_x, cost_y, cost_z, hess_regularization
):
self.end_eff_traj3d.set_costs(
cost_x, cost_y, cost_z, hess_regularization
)
def set_des_com_vel(self, des_com_vel):
self.des_com_vel = des_com_vel
def run(
self,
time,
current_flying_foot_position,
current_support_foot_position,
com_position,
com_velocity,
base_yaw,
contact,
):
if current_support_foot_position is not None:
self.stepper_head.set_support_feet_pos(
self.stepper_head.get_previous_support_location(),
current_support_foot_position,
)
if not contact[0] and not contact[1]:
self.stepper_head.run(
self.duration_before_step_landing,
current_flying_foot_position,
time,
)
elif self.is_left_leg_in_contact:
self.stepper_head.run(
self.duration_before_step_landing,
current_flying_foot_position,
time,
contact[1],
)
else:
self.stepper_head.run(
self.duration_before_step_landing,
current_flying_foot_position,
time,
contact[0],
)
self.time_from_last_step_touchdown = (
self.stepper_head.get_time_from_last_step_touchdown()
)
self.current_support_foot[
:
] = self.stepper_head.get_current_support_location().reshape((3, 1))
self.previous_support_foot[
:
] = self.stepper_head.get_previous_support_location().reshape((3, 1))
self.dcm_vrp_planner.update(
self.stepper_head.get_current_support_location(),
self.stepper_head.get_time_from_last_step_touchdown(),
self.stepper_head.get_is_left_leg_in_contact(),
self.des_com_vel,
com_position,
com_velocity,
base_yaw,
)
assert self.dcm_vrp_planner.solve()
self.duration_before_step_landing = (
self.dcm_vrp_planner.get_duration_before_step_landing()
)
start_time = 0.0
current_time = self.stepper_head.get_time_from_last_step_touchdown()
end_time = self.dcm_vrp_planner.get_duration_before_step_landing()
self.is_left_leg_in_contact = (
self.stepper_head.get_is_left_leg_in_contact()
)
# check which foot is in contact
if self.stepper_head.get_is_left_leg_in_contact():
# flying foot is the right foot
self.end_eff_traj3d.compute(
self.stepper_head.get_previous_support_location(),
self.right_foot_position,
self.right_foot_velocity,
self.right_foot_acceleration,
self.dcm_vrp_planner.get_next_step_location(),
start_time,
current_time,
end_time,
)
self.end_eff_traj3d.get_next_state(
current_time + self.control_period,
self.right_foot_position,
self.right_foot_velocity,
self.right_foot_acceleration,
)
self.flying_foot_position = self.right_foot_position
# The current support foot does not move
self.left_foot_position[:] = (
self.stepper_head.get_current_support_location()
).reshape((3, 1))
self.left_foot_velocity = np.zeros((3, 1))
self.left_foot_acceleration = np.zeros((3, 1))
else:
# flying foot is the left foot
self.end_eff_traj3d.compute(
self.stepper_head.get_previous_support_location(),
self.left_foot_position,
self.left_foot_velocity,
self.left_foot_acceleration,
self.dcm_vrp_planner.get_next_step_location(),
start_time,
current_time,
end_time,
)
self.end_eff_traj3d.get_next_state(
current_time + self.control_period,
self.left_foot_position,
self.left_foot_velocity,
self.left_foot_acceleration,
)
self.flying_foot_position = self.left_foot_position
# The current support foot does not move
self.right_foot_position[:] = (
self.stepper_head.get_current_support_location()
).reshape((3, 1))
self.right_foot_velocity = np.zeros((3, 1))
self.right_foot_acceleration = np.zeros((3, 1))
# Compute the feasible velocity.
self.feasible_velocity = (
self.dcm_vrp_planner.get_next_step_location()
- self.stepper_head.get_previous_support_location()
)
self.feasible_velocity[2] = 0.0
return
if __name__ == "__main__":
is_left_leg_in_contact = True
l_min = -0.5
l_max = 0.5
w_min = -0.5
w_max = 0.5
t_min = 0.1
t_max = 0.2
l_p = 0.1235 * 2
com_height = 0.26487417
weight = [1, 1, 5, 100, 100, 100, 100, 100, 100]
mid_air_foot_height = 0.05
control_period = 0.001
dcm_reactive_stepper = DcmReactiveStepper(
is_left_leg_in_contact,
l_min,
l_max,
w_min,
w_max,
t_min,
t_max,
l_p,
com_height,
weight,
mid_air_foot_height,
control_period,
)
time = 0.0
    current_flying_foot = np.array([0, 0, 0])
    current_support_foot = np.array([0, 0, 0])
    com_position = np.array([0, 0, com_height])
    com_velocity = np.array([0, 0, 0])
    base_yaw = 0.0
    # run() also expects the flying-foot position and a per-foot contact
    # list; both feet are assumed to start on the ground here.
    contact = [True, True]
    dcm_reactive_stepper.run(
        time,
        current_flying_foot,
        current_support_foot,
        com_position,
        com_velocity,
        base_yaw,
        contact,
    )
dcm_reactive_stepper.right_foot_position
dcm_reactive_stepper.right_foot_velocity
dcm_reactive_stepper.right_foot_acceleration
dcm_reactive_stepper.left_foot_position
dcm_reactive_stepper.left_foot_velocity
dcm_reactive_stepper.left_foot_acceleration
|
StarcoderdataPython
|
1713330
|
from django import forms
from .models import DailyNote, WeeklyNote
class DailyNoteEdit(forms.ModelForm):
class Meta:
model = DailyNote
exclude = [
'created_by',
'updated_by',
'valid_date'
]
class WeeklyNoteEdit(forms.ModelForm):
class Meta:
model = WeeklyNote
exclude = [
'created_by',
'updated_by',
'start_date'
]
|
StarcoderdataPython
|
226859
|
<gh_stars>0
import csv
import os
directory = "/Volumes/Disk2/Dropbox/HPC/results/Office/learn/"
all = []
for file in os.listdir(directory):
if file.endswith(".csv"):
with open(directory+file, 'rb') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
count = 0
Learn_state1 = -1
Learn_previous_state1 = -1
Learn_state0 = -1
Learn_action0 = -1
Learn_reward1 = -1
Learn_reward0 = -1
Learn_previous_state0 = -1
Learn_action1 = -1
for row in spamreader:
if count == 0:
count = count + 1
for x,y in enumerate(row):
if y == "Learn_state1":
Learn_state1 = x
if y == "Learn_previous_state1":
Learn_previous_state1 = x
if y == "Learn_state0":
Learn_state0 = x
if y == "Learn_action0":
Learn_action0 = x
if y == "Learn_reward1":
Learn_reward1 = x
if y == "Learn_reward0":
Learn_reward0 = x
if y == "Learn_previous_state0":
Learn_previous_state0 = x
if y == "Learn_action1":
Learn_action1 = x
continue
r1 = row[Learn_reward0]
r2 = row[Learn_reward1]
                    # 1.9735e-316 looks like an uninitialised placeholder value; treat it as zero
                    if r1 == '1.9735e-316':
r1 = '0'
if r2 == '1.9735e-316':
r2 = '0'
if not r1 == "":
a1 = [row[Learn_previous_state0],row[Learn_state0],row[Learn_action0],r1]
all.append(a1)
if not r2 == "":
a2 = [row[Learn_previous_state1],row[Learn_state1],row[Learn_action1],r2]
all.append(a2)
#print "action: " + str(row[2]) + " reward: " + str(r1)
with open('test.csv', 'w') as fp:
a = csv.writer(fp, delimiter=',')
a.writerows(all)
|
StarcoderdataPython
|
5123408
|
from pygments.style import Style
from pygments.styles.default import DefaultStyle
from pygments.token import Error
class SimpleStyle(Style):
styles = dict(DefaultStyle.styles.items())
SimpleStyle.styles[Error] = "border:"
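
# A hedged usage sketch (assuming pygments is installed): formatters accept
# a Style class directly, so the custom style can be applied like this.
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

html = highlight("print('hi')", PythonLexer(), HtmlFormatter(style=SimpleStyle))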
|
StarcoderdataPython
|
1787427
|
influencer = {
'mahathir': [
'tun mahathir',
'madey',
'dr mahathir',
'tun m',
'mahathir',
'madir',
'dr m',
'mahathir muhamad',
],
'anwar ibrahim': ['anwar ibrahim', 'anwar'],
'najib razak': [
'najib razak',
'ajib',
'pakjib',
'pok jib',
'pak jib',
'najib',
'tan sri najib',
],
'pakatan harapan': ['harapan', 'pakatan harapan', 'pakatan'],
'syed saddiq': ['syed saddiq', 'syedsaddiq', 'syed', 'saddiq'],
'parti keadilan rakyat': [
'parti keadilan rakyat',
'people justice party',
'pkr',
],
'umno': ['united malays', 'umno', 'united malays national organization'],
'barisan nasional': ['bn', 'barisan nasional'],
'parti islam semalaysia': [
'parti islam',
'malaysian islamic party',
'parti islam semalaysia',
'parti islam malaysia',
],
'nurul izzah': ['nurul izzah', 'izzah'],
'Tunku Ismail Idris': ['tunku ismail', 'tunku ismail idris', 'tmj'],
'mca': ['malaysian chinese association', 'mca'],
'democratic action party': ['democratic action party'],
'parti amanah': [
'national trust party',
'amanah',
'parti amanah',
'parti amanah negara',
],
'ppbm': [
'parti pribumi bersatu malaysia',
'ppbm',
'malaysian united',
'parti pribumi',
'malaysian united indigenous party',
],
'mic': [
'malaysian indian',
'malaysian indian congress',
'indian congress',
'mic',
],
'Tun Daim Zainuddin': [
'daim zainuddin',
'daim',
'tun daim',
'tun daim zainuddin',
],
'datuk seri abdul hadi awang': [
'hadi',
'datuk seri abdul hadi awang',
'hadi awang',
],
'majlis pakatan harapan': ['majlis pakatan harapan'],
'wan azizah': ['kak wan', 'azizah', 'wan azizah'],
'Parti Pribumi Bersatu Malaysia': [
'parti pribumi bersatu malaysia',
'ppbm',
'parti bersatu',
'parti pribumi',
],
'Datuk Seri Azmin Ali': ['azmin ali', 'datuk seri azmin ali', 'azmin'],
'Datuk Johari Abdul': ['johari', 'johari abdul', 'datuk johari abdul'],
'Tengku <NAME>': ['tengku razaleigh hamzah', 'razaleigh'],
'Tan Sri Dr Rais Yatim': ['rais yatim', 'tan sri dr rais yatim'],
'rafizi ramli': ['rafizi', 'rafizi ramli'],
'bersatu': ['bersatu'],
'bernama': ['bernama'],
'donald trump': ['trump', 'donald', 'donald trump'],
'perkasa': ['pertubuhan pribumi perkasa', 'pertubuhan pribumi', 'perkasa'],
'Tan Sri Mokhzani Mahathir': [
'tan sri mokhzani mahathir',
'mokhzani',
'mokhzani mahathir',
],
'Rais Yatim': ['rais yatim', 'rais'],
'<NAME>': ['<NAME>', 'loke siew'],
'<NAME>': ['rosmah', '<NAME>ur'],
'arul kanda': ['arul kanda', 'kanda'],
'manchester united': ['manchester', 'manchester united'],
'arsenal fc': ['arsenal fc', 'arsenal'],
'liverpool fc': ['liverpool fc', 'liverpool'],
'chelsea fc': ['chelsea', 'chelsea fc'],
'manchester city': ['manchester city'],
'fc barcelona': ['barcelona', 'fc barcelona'],
'real madrid cf': ['madrid', 'real madrid', 'real madrid cf'],
'fc bayern munich': ['bayern munich', 'fc bayern munich'],
'juventus fc': ['juventus fc', 'juventus'],
'zeti aziz': ['zeti aziz'],
'<NAME>': ['<NAME>'],
'<NAME>': ['<NAME>'],
'ks jomo': ['ks jomo'],
'jho low': ['jho low'],
'kadir jasin': ['kadir jasin'],
'zakir naik': ['zakir naik'],
'bung mokhtar': ['bung mokhtar'],
'shafie apdal': ['shafie apdal'],
'ariff md yusof': ['ariff yusof', 'datuk ariff md yusof', 'ariff md yusof'],
'felda': ['felda'],
'dato vida': ['vida', 'dato vida'],
'Jabatan Perancangan Bandar dan Desa': [
'jabatan perancangan bandar dan desa'
],
}
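
# A hedged usage sketch; match_influencers below is a hypothetical helper
# (not part of the original file) showing how these synonym lists are
# typically used for keyword-to-entity matching. Note that plain substring
# matching like this can over-match very short synonyms.
def match_influencers(text, mapping=influencer):
    lowered = text.lower()
    return [
        name
        for name, synonyms in mapping.items()
        if any(syn in lowered for syn in synonyms)
    ]

if __name__ == '__main__':
    # Example: matches 'mahathir' (via 'tun m') and 'pakatan harapan'.
    print(match_influencers('Tun M meets Pakatan leaders today'))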
|
StarcoderdataPython
|
369956
|
from django.contrib.auth.models import Group
from django.test import TestCase
from apps.bot.classes.Command import Command
from apps.bot.classes.bots.tg.TgBot import TgBot
from apps.bot.classes.consts.Consts import Platform
from apps.bot.classes.consts.Exceptions import PWarning, PError
from apps.bot.classes.events.TgEvent import TgEvent
from apps.bot.models import User, Profile, Chat
from apps.service.management.commands.initial import Command as InitCommand
from apps.service.models import City, TimeZone
class BotInitializer(TestCase):
Command = Command
@classmethod
def setUpTestData(cls):
initial_command = InitCommand()
initial_command.init_groups()
tz, _ = TimeZone.objects.get_or_create(name='Europe/Samara')
city = {
'name': 'Самара',
'synonyms': 'самара смр',
'lat': 53.195538,
'lon': 50.101783,
'timezone': tz
}
city, _ = City.objects.update_or_create(name=city['name'], defaults=city)
all_groups = Group.objects.exclude(name="BANNED")
        # First account: an admin
profile = Profile.objects.create(
name="Вася",
surname="Пупкин",
nickname_real="Васёк",
gender='2',
city=city,
)
profile.groups.set(all_groups)
chat = Chat.objects.create(
name="<NAME>",
chat_id=2,
admin=profile
)
profile.chats.add(chat)
User.objects.create(
user_id=1,
profile=profile,
platform=Platform.TG.name
)
        # Second account: a regular user
profile2 = Profile.objects.create(
name="Иван",
surname="Иванов",
nickname_real="Ванёк",
gender='2',
city=city,
)
group_user = Group.objects.get(name="USER")
profile2.groups.add(group_user)
profile2.chats.add(chat)
User.objects.create(
user_id=2,
profile=profile2,
platform=Platform.TG.name
)
def setUp(self):
self.bot = TgBot()
self.event = TgEvent(bot=self.bot)
self.cmd = self.Command(self.bot, self.event)
self.setup_event()
# def tearDown(self):
# self.event.message = None
def setup_event(self):
self.event.is_from_user = True
self.event.user = User.objects.get(user_id=1)
self.event.sender = self.event.user.profile
self.event.set_message(self.cmd.name)
def check_correct_answer(self):
return self.cmd.check_and_start(self.bot, self.event)
def check_correct_pwarning(self):
try:
self.cmd.check_and_start(self.bot, self.event)
except PWarning:
return
        self.fail("Command did not raise PWarning")
def check_correct_perror(self):
try:
self.cmd.check_and_start(self.bot, self.event)
except PError:
return
        self.fail("Command did not raise PError")
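
# A hedged example of the intended usage: concrete test cases subclass
# BotInitializer and point Command at the command under test. The smoke
# test below only exercises the generic base Command wired up in setUp().
class SmokeTest(BotInitializer):
    def test_base_command_runs(self):
        # Depending on the command, check_and_start may answer normally or
        # raise a user-facing PWarning; both count as reachable here.
        try:
            self.check_correct_answer()
        except PWarning:
            pass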
|
StarcoderdataPython
|
4864729
|
# -*- coding: utf-8 -*-
from yapsy.IPlugin import IPlugin
class Wklej(IPlugin):
def execute(self, channel, username, command):
if not command:
            yield channel, ("Don't be a noob, paste it at: "
                            "https://gist.github.com/")
|
StarcoderdataPython
|
3442133
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# PYTHON_ARGCOMPLETE_OK
"""Command Line Interface (CLI) for the geomodels package."""
import sys
from . import cli
sys.exit(cli.main())
|
StarcoderdataPython
|
8106353
|
<reponame>COLAB2/midca
from datetime import datetime
import os, sys, copy
import platform
class Logger:
logFolderOptions = ["log", "_log"]
def __init__(self, keys = [], filesStayOpen = False, verbose=2):
'''
creates a new logger for a MIDCA run. The folder where the individual log files will be stored will be named based on the current date/time. It will be placed in ./log/, which will be created if it does not exist.
Keys are both the filenames of actual log files and keys that will be passed to the logger to tell it where to log things. If no keys are passed in the default key will be "log"
'''
self.keys = keys
self.filesStayOpen = filesStayOpen
self.verbose = verbose
self.files = None
def start(self):
'''
Creates the folder where log files will be stored and creates file(s)
'''
this_os = platform.platform()
self.events = []
self.defaultKey = "log"
self.working = False
self.startTime = datetime.now()
self.timeStr = str(self.startTime)
if self.startTime.microsecond > 0:
self.timeStr = self.timeStr[:-7].replace(":", "_")
if "window" in this_os.lower():
            self.timeStr = self.timeStr.replace(':', '_').replace("-", "_")
#create log dir if it does not exist. If there is a file at ./log, try ./_log. If neither works, fail and print an error message.
folderFound = False
self.cwd = os.getcwd()
for logFolder in self.logFolderOptions:
if folderFound:
break
self.logDir = os.path.join(self.cwd, logFolder)
try:
if os.path.isdir(self.logDir):
folderFound = True
else:
if os.path.exists(self.logDir):
if self.verbose > 0: print("Logger: file exists at " + self.logDir + ". Trying next option.", file = sys.stderr)
else:
os.mkdir(self.logDir)
folderFound = True
except OSError as e:
if self.verbose > 0: print("Logger: error creating/accessing log directory: " + str(e), file = sys.stderr)
if not folderFound:
if self.verbose > 0: print("Logger: unable to create or find a log directory at any of: " + str([os.path.join(self.cwd, option) for option in self.logFolderOptions]) + ". Logging will be disabled.", file = sys.stderr)
else:
#now create the directory for this run
self.thisRunDir = os.path.join(self.logDir, self.timeStr)
try:
os.mkdir(self.thisRunDir)
except OSError as e:
if self.verbose > 0: print("Logger: error creating log directory: " + str(e), file = sys.stderr)
#now create the individual log file(s)
self.files = {key: None for key in self.keys}
self.working = True
for key in self.keys:
try:
f = self.openFile(key)
f.write("Log file for run starting at " + self.timeStr + "\n")
except IOError as e:
self.writeError(e, filename = os.path.join(self.thisRunDir, key), txt = "Log file for run starting at " + self.timeStr)
self.working = False
if not self.working:
if self.verbose > 0: print ("Logger disabled")
else:
if self.verbose > 0: print("Logger: logging this run in " + self.thisRunDir, file = sys.stderr)
if not self.filesStayOpen:
for file in list(self.files.values()):
file.close()
def openFile(self, key):
f = open(os.path.join(self.thisRunDir, key), 'a')
self.files[key] = f
return f
def _user_log(self, txt, keys = []):
event = UserLogEvent(txt, keys)
self.logEvent(event)
def logEvent(self, event):
if not self.working:
return
event.time = datetime.now()
if event.loggable:
if not hasattr(event, 'keys') or not event.keys:
keys = [self.defaultKey]
elif event.keys == 'all':
if self.files:
keys = list(self.files.keys())
else:
keys = [self.defaultKey]
else:
keys = event.keys
deltaTStr = str(event.time - self.startTime)
deltaTStr = deltaTStr.lstrip(":0") + " - "
for key in keys:
self._write(deltaTStr + str(event), key)
self.events.append(event)
def log(self, val, keys = []):
if isinstance(val, str):
self._user_log(val, keys)
elif isinstance(val, Event):
self.logEvent(val)
else:
raise ValueError("log must get a string or an Event object")
def _write(self, txt, key):
if not self.working:
return
if key not in self.files or not self.files[key] or self.files[key].closed:
try:
self.openFile(key)
except IOError as e:
self.writeError(e, filename = os.path.join(self.thisRunDir, key), txt = txt)
return
f = self.files[key]
if f:
f.write(txt + "\n")
if not self.filesStayOpen:
f.close()
def writeError(self, e, filename = "", txt = ""):
if self.verbose > 0: print("Logger: trying to write " + txt + " to file " + filename + "; got error " + str(e), file = sys.stderr)
def close(self):
for f in list(self.files.values()):
f.close()
def logOutput(self):
StdoutDirector(self)
class StdoutDirector:
def __init__(self, logger):
self.logger = logger
self.stdout = sys.stdout
sys.stdout = self
self.current = ""
#removes some color codes that mess up logging output
def fixForMidca(self, s):
return s.replace("[94m", "").replace("[0m ", "")
def write(self,s):
self.current += self.fixForMidca(s)
if self.current.endswith("\n"):
if len(self.current) > 1:
event = MidcaOutputEvent(self.current, ["log", "MIDCA output"])
self.logger.log(event)
self.current = ""
self.stdout.write(s)
self.stdout.flush()
def flush(self):
self.stdout.flush()
class Event:
def __init__(self, loggable = True, keys = []):
self.loggable = loggable
self.keys = keys
def __str__(self):
raise NotImplementedError("Event subclasses must implement __str__ or set loggable to False")
class MidcaOutputEvent(Event):
def __init__(self, txt, keys = []):
self.txt = txt
self.loggable = True
self.keys = keys
def __str__(self):
return self.txt
class UserLogEvent(Event):
def __init__(self, txt, keys = []):
self.txt = txt
self.loggable = True
self.keys = keys
def __str__(self):
return self.txt
class CycleStartEvent(Event):
def __init__(self, cycle):
self.cycle = cycle
self.keys = 'all'
self.loggable = True
def __str__(self):
s = "Starting cycle " + str(self.cycle) + "\n"
return s
class CycleEndEvent(Event):
def __init__(self, cycle):
self.cycle = cycle
self.keys = []
self.loggable = False
class PhaseStartEvent(Event):
def __init__(self, phase):
self.module = phase
self.keys = "all"
self.loggable = True
def __str__(self):
return "****** Starting " + str(self.module) + " Phase ******\n"
class PhaseEndEvent(Event):
def __init__(self, phase):
self.module = phase
self.keys = []
self.loggable = False
class ModuleStartEvent(Event):
def __init__(self, module):
self.module = module
self.keys = ['log']
self.loggable = True
def __str__(self):
s = "Running module " + str(self.module) + "\n"
if "instance" in s:
return s[:s.rindex("instance") - 1]
else:
return s
class ModuleEndEvent(Event):
def __init__(self, module):
self.module = module
self.keys = []
self.loggable = False
def test():
    l = Logger(["f1", "f2"])
    l.start()
    l._write("hello!", "f1")
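
# A hedged usage sketch of the key-based routing described in __init__:
# plain strings go to the default "log" file, while events can target
# specific files or all of them.
if __name__ == '__main__':
    logger = Logger(["log", "MIDCA output"])
    logger.start()                        # creates ./log/<timestamp>/ files
    logger.log("plain text goes to the default 'log' file")
    logger.log(CycleStartEvent(1))        # keys='all' -> written to every file
    logger.log(PhaseStartEvent("Perceive"))
    logger.close()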
|
StarcoderdataPython
|
9641439
|
import numpy as np

def format_int_kind(x):
    return 'int({0})'.format(x)

# np.printoptions (NumPy >= 1.15) is a context manager that temporarily
# overrides array print formatting.
with np.printoptions(formatter={'int_kind': format_int_kind}):
    print(np.random.randint(-100, 100, 10))
|
StarcoderdataPython
|
1612644
|
from pysensationcore import *
import sensation_helpers as sh
import Scan
# Inner blocks
scan = createInstance("Scan", "scan")
comparator = createInstance("Comparator", "ComparatorInstance")
# Inner block connections
connect(Constant((0, 0, 0)), comparator.returnValueIfAGreaterThanB)
connect(Constant((1, 0, 0)), comparator.returnValueIfAEqualsB)
connect(Constant((1, 0, 0)), comparator.returnValueIfALessThanB)
handScan = sh.createSensationFromPath("Hand Scan",
{
("t", scan.t) : (0, 0, 0),
("duration", scan.duration) : (2, 0, 0),
("barLength", scan.barLength) : (0.1, 0, 0),
("virtualObjectXInVirtualSpace", scan.barDirection) : (1, 0, 0),
("wrist_position", scan.animationPathStart) : (0, 0.2, -0.06),
("middleFinger_distal_position", scan.animationPathEnd) : (0, 0.2, 0.06),
("t", comparator.a) : (0, 0, 0),
("duration", comparator.b) : (2, 0, 0)
},
output = scan.out,
intensity = comparator.out,
definedInVirtualSpace = True
)
setMetaData(handScan.virtualObjectXInVirtualSpace, "Input-Visibility", False)
setMetaData(handScan.wrist_position, "Input-Visibility", False)
setMetaData(handScan.middleFinger_distal_position, "Input-Visibility", False)
setMetaData(handScan.duration, "Type", "Scalar")
setMetaData(handScan.barLength, "Type", "Scalar")
setMetaData(handScan, "IsFinite", True)
|
StarcoderdataPython
|
9689744
|
#
# Visual Cryptography Cookbook
# for UVA GenCyber 2018
#
# <NAME>
# 14 June 2018
#
### python3 visualcrypto.py --seed 1629 --xsize 80 --image message.bmp
### convert message-share.svg message-share.pdf
### print scale to 64%
### This requires the svgwrite and PIL modules are installed.
### If they are not already installed, you should be able to
### install them by running:
### pip install svgwrite
### pip install pillow
import svgwrite # for generating graphics
from PIL import Image # for reading a bitmap image
# We'll use standard Python random, even though it is not cryptographically secure!
# If you were planning to use any of these ciphers to protect nuclear secrets (please
# don't!), you should replace this with a cryptographic random nubmer generator,
# like the one provided by PyCrypto. After version 3.6, Python will provide the
# secrets module, with a cryptographic random number generator.
import random
def random_sequence(n):
"""
Returns a random sequence of bits (0 or 1) of length n.
"""
return [random.choice([0, 1]) for i in range(n)]
def xor(a, b):
"""
Returns the exclusive or (XOR) of the two input bits.
"""
assert a in [0, 1]
assert b in [0, 1]
return (a + b) % 2
def otp(m, k):
"""
Encrypts m using key k as a one-time pad.
This simple returns the xor of each corresponding message and key bit.
"""
assert len(m) == len(k)
return [xor(mm, kk) for mm, kk in zip(m, k)]
def draw_block(svgdoc, xpos, ypos, blockx, blocky, color = "black"):
"""
Draws a block at position (xpos, ypos) that is filled with the solid color.
"""
svgdoc.add(svgdoc.rect(insert = (xpos * blockx, ypos * blocky),
size = (str(blockx) + "px",
str(blocky) + "px"),
stroke_width = "1",
stroke = "black",
fill = color))
def triangle_encoding(svgdoc, xpos, ypos, blockx, blocky, code):
"""
Fills the block at (xpos, ypos) with a triangle encoding according to the code.
code = 0 colors the traingle from the top-right to bottom-left corner;
code = 1 colors the triangle from the top-left to bottom-right corner.
"""
svgdoc.add(svgdoc.polygon(points=[(xpos * blockx, ypos * blocky),
(xpos * blockx + blockx, ypos * blocky + blocky),
(xpos * blockx, ypos * blocky + blocky) if code
else (xpos * blockx + blockx, ypos * blocky),
(xpos * blockx, ypos * blocky)],
fill='black'))
def draw_encoding(svgdoc, xpos, ypos, blockx, blocky, code):
if code:
svgdoc.add(svgdoc.rect(insert = (xpos * blockx,
ypos * blocky),
size = (str(blockx // 2) + "px",
str(blocky) + "px"),
stroke_width = "0",
stroke = "black",
fill = "rgb(0,0,0)"))
else:
svgdoc.add(svgdoc.rect(insert = (xpos * blockx + blockx // 2,
ypos * blocky),
size = (str(blockx // 2) + "px",
str(blocky) + "px"),
stroke_width = "0",
stroke = "black",
fill = "rgb(0,0,0)"))
def draw_matrix(svgdoc, m, width, height, plain=False, encoding=draw_encoding):
columns = len(m[0])
rows = len(m)
xblock = width // columns
yblock = xblock # keep the blocks square (don't use full height if necessary)
# print("xblock = " + str(xblock) + " columns = " + str(columns))
# print("size: " + str(xblock * columns) + " / " + str(width))
# print("size: " + str(yblock * rows) + " / " + str(height))
for rindex in range(rows):
for cindex in range(columns):
if plain:
draw_block(svgdoc, cindex, rindex, xblock, yblock,
color = "rgb(0,0,0)" if m[rindex][cindex] else "rgb(255,255,255)")
else:
encoding(svgdoc, cindex, rindex, xblock, yblock, m[rindex][cindex])
def draw_both(svgdoc, m1, m2, width, height, plain=False, encoding=draw_encoding):
columns = len(m1[0])
assert len(m2[0]) == columns
rows = len(m1)
assert len(m2) == rows
xblock = width // columns
yblock = xblock # ysize // yrange
for rindex in range(len(m1)):
for cindex in range(len(m1[rindex])):
encoding(svgdoc, cindex, rindex, xblock, yblock, m1[rindex][cindex])
encoding(svgdoc, cindex, rindex, xblock, yblock, m2[rindex][cindex])
##
## encodings
## img key 0 key 1
## A B A B
## 0 = 1 1 / 0 0
## 1 = 0 1 / 1 0
##
## 1 = 1/0
## 0 = 0/1
## A = img
## img = a xor b
## b = key xor img
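
# A quick numeric sanity check of the scheme sketched above, using the
# helpers defined earlier: share A is the random key, share B is the key
# XORed with the image, and overlaying (XORing) the shares recovers it.
def _shares_sanity_check():
    msg = [1, 0, 1, 1, 0, 0, 1, 0]       # toy 8-pixel "image"
    key = random_sequence(len(msg))      # share A: pure randomness
    share_b = otp(msg, key)              # share B: key XOR image
    assert otp(key, share_b) == msg      # overlay recovers the image

_shares_sanity_check()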
ASPECT = (8.6 / 6.5)
xsize = 880
ysize = xsize * ASPECT
def root_name(fname, outputdir="./"):
    """Strip the .bmp extension and any directory prefix from fname,
    then prepend outputdir."""
    ext = fname.find('.bmp')
    assert ext > 1
    ifname = fname[:ext]
    ifdir = ifname.rfind('/')
    if ifdir >= 0:
        ifname = ifname[ifdir + 1:]
    return outputdir + ifname
def generate_key(width, height):
key = random_sequence(width * height)
keymat = [[key[(r * width + c)] for c in range(width)] for r in range(height)]
return keymat
def generate_image(keymat, image, width, height, outputdir="./", colored=False):
# print("Image size: " + str(width) + ", " + str(height))
print ("Processing image: " + image.name + "...")
imgname = image.name
image = Image.open(imgname).convert('1')
iwidth, iheight = image.size
if iwidth > iheight:
print("Rotating image (" + str(iwidth) + ", " + str(iheight) + ")")
image = image.rotate(90, expand=True) # make landscape orientation
image = image.resize((width, height)) # , resample=0)
ext = imgname.find('.bmp')
assert ext > 1
ifname = imgname[:ext]
iwidth, iheight = image.size
assert iheight >= iwidth
# print ("iwidth, iheight: " + str(iwidth) + "/" + str(width) + ", " + str(iheight) + "/" + str(height))
assert iwidth <= width
assert iheight <= height
imgmat = [[0 if c < iwidth and r < iheight and image.getpixel((c, r)) > 128
else 1 for c in range(width)]
for r in range(height)]
bmat = [[xor(imgmat[r][c], keymat[r][c]) for c in range(width)] for r in range(height)]
svgimg = svgwrite.Drawing(filename = ifname + "-plain.svg",
size = (str(xsize) + "px", str(ysize) + "px"))
draw_matrix(svgimg, imgmat, xsize, ysize, plain=True, encoding=triangle_encoding)
svgimg.save()
svgb = svgwrite.Drawing(filename = ifname + "-share.svg",
size = (str(xsize) + "px", str(ysize) + "px"))
if colored:
svgb.add(svgb.rect(insert = (0, 0), size = (xsize, ysize), fill = 'rgb(200,200,255)'))
draw_matrix(svgb, bmat, xsize, ysize, encoding=triangle_encoding)
svgb.save()
svgimg = svgwrite.Drawing(filename = ifname + "-both.svg",
size = (str(xsize) + "px", str(ysize) + "px"))
draw_both(svgimg, keymat, bmat, xsize, ysize, plain=True, encoding=triangle_encoding)
svgimg.save()
if __name__ == "__main__":
from argparse import ArgumentParser
import math
parser = ArgumentParser()
parser.add_argument("-s", "--seed", dest = "seed",
type=int,
help="set seed (key, a number) for random number generator")
parser.add_argument("-g", "--size", dest = "size",
type=int, nargs = 2,
help = "set the image size (two numbers, horizontal, vertical")
parser.add_argument("-x", "--xsize", dest = "xsize",
type=int,
help = "set the image size (horizontal; vertical is scaled to page)")
parser.add_argument("-k", "--keyfile", dest = "keyfilename",
help="write key to output file")
parser.add_argument("-c", "--colored", dest = "colored",
help="use a background color",
type = bool,
default = False)
parser.add_argument("-q", "--quiet",
action="store_false", dest="verbose", default=True,
help="don't print status messages to stdout")
parser.add_argument("-i", "--image",
type=open,
dest="image")
args = parser.parse_args()
if args.seed:
print ("Seed: " + str(args.seed))
seed = args.seed
else:
import time
seed = int(time.time())
print ("No seed, using: " +str(seed))
random.seed(int(seed))
if args.size:
width = args.size[0]
height = args.size[1]
print ("Size: " + str(width) + ", " + str(height))
assert not args.xsize
elif args.xsize:
width = args.xsize
height = math.floor(width * ASPECT)
print ("Size: " + str(width) + ", " + str(height))
else:
# read size from source image
        if not args.image:
            raise SystemExit("Error: must either provide a source image or specify size.")
        im = Image.open(args.image.name).convert('1')
        width, height = im.size
keymat = generate_key(width, height)
if args.keyfilename:
keyfile = args.keyfilename
print ("Writing key to: " + keyfile)
svgkey = svgwrite.Drawing(filename = keyfile,
size = (str(xsize) + "px", str(ysize) + "px"))
if args.colored:
svgkey.add(svgkey.rect(insert = (0, 0), size = (xsize, ysize), fill = "rgb(255,200,200)"))
draw_matrix(svgkey, keymat, xsize, ysize, encoding=triangle_encoding)
svgkey.save()
if args.image:
print ("Generating image: " + args.image.name)
        generate_image(keymat, args.image, width, height, colored=args.colored)
|
StarcoderdataPython
|
3217571
|
import warnings
import argparse
from torch import optim
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
from dogsvscats.data import get_datasets
from dogsvscats.model import train_model, load_model, MODELS
from dogsvscats.callbacks import EarlyStopping
from dogsvscats import config
warnings.filterwarnings("ignore", category=UserWarning)
parser = argparse.ArgumentParser()
parser.add_argument(
"-m",
"--model",
default=config.MODEL_NAME,
choices=MODELS,
help="Model name",
type=str,
)
parser.add_argument(
"-cp",
"--checkpoint-path",
default=config.CHECKPOINT_PATH,
help="Checkpoint Path",
type=str,
)
parser.add_argument("-w", "--workers", default=config.NW, help="Workers", type=int)
parser.add_argument(
"-bs", "--batch-size", default=config.BS, help="Batch size", type=int
)
parser.add_argument(
"-lr", "--learning-rate", default=config.LR, help="Learning rate", type=float
)
parser.add_argument("-e", "--epochs", default=config.EPOCHS, help="Epochs", type=int)
parser.add_argument(
"-sp",
"--scheduler-patience",
default=config.SCHEDULER_PATIENCE,
help="Scheduler patience",
type=int,
)
parser.add_argument(
"-esp",
"--early-stopping-patience",
default=config.EARLYSTOPPING_PATIENCE,
help="Early stopping patience",
type=int,
)
parser.add_argument("-d", "--debug", default=False, help="Debug", action="store_true")
parser.add_argument(
"-df", "--debug-frac", default=0.05, help="Debug fraction", type=float
)
parser.add_argument(
"-vf",
"--valid-frac",
default=config.VALID_FRAC,
help="Validation fraction",
type=float,
)
args = parser.parse_args()
train_ds, valid_ds, _ = get_datasets(
valid_frac=args.valid_frac, debug=args.debug, debug_frac=args.debug_frac
)
train_dl = DataLoader(train_ds, args.batch_size, shuffle=True, num_workers=args.workers)
valid_dl = DataLoader(valid_ds, args.batch_size, shuffle=True, num_workers=args.workers)
model = load_model(args.model)
optimizer = optim.SGD(model.parameters(), args.learning_rate, momentum=0.9)
scheduler = lr_scheduler.ReduceLROnPlateau(
optimizer, mode="max", patience=args.scheduler_patience, verbose=True
)
es = EarlyStopping(
patience=args.early_stopping_patience,
mode="max",
verbose=True,
path=args.checkpoint_path,
)
model = train_model(model, optimizer, scheduler, es, train_dl, valid_dl, args.epochs)
|
StarcoderdataPython
|
8154388
|
<reponame>balcilar/SemiSupervisedMarkowRandomWalk<filename>sslMarkovRandomWalks.py
import numpy as np
def sslMarkovRandomWalks(xl,yl,xu,k=10,gamma=1,t=10,improvement=10e-5):
# Written by <NAME>, <EMAIL>, France
# xl is nxd size matrix shows inputs of known data. n is number of data d is dimension
# yl is nx1 size vector shows label it is either 1 or -1
# xu is mxd size matrix shows input of unknown data. m is number of data d is dimension
    # k is the number of nearest neighbours kept in the graph for each data point
# gamma for w function
# t for degree of power
# improvement is stopping criteria if there is less than improvement in labels difference
# merge all inputs in one matrix
x=np.vstack((xl,xu))
knownlabel=len(yl)
    # calculate Euclidean distances for every pair of data points
d=np.zeros((x.shape[0],x.shape[0]),dtype='float')
for i in range(0,x.shape[0]-1):
for j in range(i+1,x.shape[0]):
d[i,j]=np.sqrt(((x[i,:]-x[j,:])**2).sum())
d[j,i]=d[i,j]
    # find the k closest data points for each data point
index=[]
for i in range(0,x.shape[0]):
index.append(np.argsort(d[i,:])[0:k])
# create w matrix
w=np.zeros((x.shape[0],x.shape[0]),dtype='float')
# fill w matrix according to paper
for i in range(0,x.shape[0]):
w[index[i],i]=np.exp(-d[index[i],i]/gamma**2)
for i in range(0,x.shape[0]):
w[i,i]=1
# initial A matirx
At=w/w.sum(axis=1)
prob=At
# take t power of A matrix
for i in range(1,t):
At=At.dot(prob)
# set initial probabilities. +1 labelled data's P is 1, -1 labelled data's P is 0
# unknonw data's P is 0.5 (means no information either -1 or 1)
P=np.zeros((x.shape[0],1))
Pold=P.copy()
posindex=np.where(yl==1)[0]
P[posindex]=1
negindex=np.where(yl==-1)[0]
uindex=[i for i in range(len(yl),x.shape[0])]
P[uindex]=0.5
cSums = At.sum(axis=0)
#Expectation Maximization step
    while ((P-Pold)**2).sum() > improvement:  # keep updating P while the change exceeds the improvement threshold
# update mechanism of P
Pold=P.copy()
Ppos=At*P
P[:,0]=Ppos.sum(axis=0)/cSums
        # even if the known data's P values drift during the update, force
        # them back to their known values; this way only the unknown data's
        # P values actually change
P[posindex,0]=1
P[negindex,0]=0
# make final decision
yu=P[:,0].copy()
    for i in range(0,P.shape[0]):  # if P is at least 0.5 assign label 1, otherwise -1
if yu[i]>=0.5:
yu[i]=1
else:
yu[i]=-1
# return assigned label and probabilities
return yu[uindex],P[uindex,0]
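
# A hedged usage sketch on synthetic two-cluster data (shapes follow the
# docstring above: labelled inputs xl/yl, unlabelled inputs xu):
if __name__ == '__main__':
    np.random.seed(0)
    xl = np.vstack((np.random.randn(5, 2), np.random.randn(5, 2) + 4))
    yl = np.array([1] * 5 + [-1] * 5)
    xu = np.vstack((np.random.randn(10, 2), np.random.randn(10, 2) + 4))
    yu, pu = sslMarkovRandomWalks(xl, yl, xu, k=5, gamma=1, t=5)
    print(yu)  # predicted labels (+1/-1) for the 20 unlabelled points
    print(pu)  # probability of the +1 class for each unlabelled point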
|
StarcoderdataPython
|
5139362
|
"""
q7.py
Created on 2020-08-21
Updated on 2020-10-30
Copyright <NAME> 2020
Description: A file which holds the designated question class.
"""
# IMPORTS
from sympy import latex, binomial
from sympy.parsing.sympy_parser import parse_expr
from the_challenge.questions.questionClasses.questionBaseClass import Question
# CLASSES
class Q7(Question):
"""
Q7:
Determine the r-th term of a binomial expansion.
"""
def calculations(self):
# Generate the binomial expression
a = self.random.randint(1, 9)
b = self.random.randint(1, 3)
c = self.random.randint(1, 9)
d = self.random.randint(1, 9)
e = self.random.randint(1, 3)
f = self.random.randint(4, 8)
sign = self.random.choice(["+", "-"])
binomial_expression = latex(parse_expr(f"({a} * x ** {b} {sign} {c} / ({d} * x ** {e})) ** {f}"))
# Generate the term which the user is supposed to calculate
r = self.random.randint(2, f - 1)
# Generate that term
rth_term = f"{binomial(f, r - 1)} * (({a} * x ** {b}) ** {f - r + 1}) * (({sign} {c} / ({d} * x ** {e})) " \
f"** {r - 1})"
rth_term = latex(parse_expr(rth_term))
# Save variables to `self.question` and `self.answer`
self.question = [r, binomial_expression]
self.answer = rth_term
def generate_question(self):
string = f"Determine the {self.ordinal(self.question[0])} term in the binomial expansion of " \
f"$${self.question[1]}$$"
return string
def generate_answer(self):
return self.answer
def generate_input_fields_prefixes(self):
return ["Answer:"]
@staticmethod
def ordinal(n):
n = int(n)
suffix = ["th", "st", "nd", "rd", "th"][min(n % 10, 4)]
if 11 <= n % 100 <= 13:
suffix = "th"
return f"{n}<sup>{suffix}</sup>"
# DEBUG CODE
if __name__ == "__main__":
question = Q7(seed_value=1123581321)
question.calculations()
print(question.generate_question())
print("[ANSWER]", question.generate_answer())
|
StarcoderdataPython
|
8133183
|
<reponame>kevingduck/transmission
# Copyright 2019 ShipChain, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.migrations.executor import MigrationExecutor
from django.http.response import HttpResponse
from rest_framework import permissions
from rest_framework.decorators import api_view, permission_classes, authentication_classes
@api_view(['GET'])
@authentication_classes(())
@permission_classes((permissions.AllowAny,))
def health_check(request):
"""
This endpoint (/health) returns 200 if no migrations are pending, else 503
https://engineering.instawork.com/elegant-database-migrations-on-ecs-74f3487da99f
"""
executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS])
plan = executor.migration_plan(executor.loader.graph.leaf_nodes())
return HttpResponse(status=503 if plan else 200)
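
# Hypothetical URLconf entry (not part of this file) exposing the check;
# a load balancer can then treat 503 as "draining" while migrations run:
#
#     from django.urls import path
#     urlpatterns = [path('health', health_check, name='health-check')]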
|
StarcoderdataPython
|
9708230
|
<reponame>mickypaganini/IPRNN
import glob
import pandas as pd
import numpy as np
from numpy.lib.recfunctions import stack_arrays
from root_numpy import root2rec
def root2panda(files_path, tree_name, mask = False, **kwargs):
'''
Args:
-----
files_path: a string like './data/*.root', for example
tree_name: a string like 'Collection_Tree' corresponding to the name of the folder inside the root
file that we want to open
kwargs: arguments taken by root2rec, such as branches to consider, etc
Returns:
--------
output_panda: a panda dataframe like allbkg_df in which all the info from the root file will be stored
Note:
-----
if you are working with .root files that contain different branches, you might have to mask your data
in that case, return pd.DataFrame(ss.data)
'''
files = glob.glob(files_path)
# -- check whether a name was passed for the tree_name --> for root files with only one tree and no folders,
# -- you do not need to specify any name (I believe)
if (tree_name == ''):
ss = stack_arrays([root2rec(fpath, **kwargs) for fpath in files])
else:
ss = stack_arrays([root2rec(fpath, tree_name, **kwargs) for fpath in files])
if (mask):
return pd.DataFrame(ss.data)
else:
try:
return pd.DataFrame(ss)
        except Exception:
return pd.DataFrame(ss.data)
def flatten(column):
'''
Args:
-----
column: a column of a pandas df whose entries are lists (or regular entries -- in which case nothing is done)
e.g.: my_df['some_variable']
Returns:
--------
flattened out version of the column.
For example, it will turn:
[1791, 2719, 1891]
[1717, 1, 0, 171, 9181, 537, 12]
[82, 11]
...
into:
1791, 2719, 1891, 1717, 1, 0, 171, 9181, 537, 12, 82, 11, ...
'''
try:
return np.array([v for e in column for v in e])
except (TypeError, ValueError):
return column
def match_shape(arr, ref):
'''
Objective:
----------
reshaping 1d array into array of arrays to match event-jets structure
Args:
-----
arr: 1d flattened array of values
ref: reference array carrying desired event-jet structure
Returns:
--------
arr in the shape of ref
'''
shape = [len(a) for a in ref]
if len(arr) != np.sum(shape):
raise ValueError('Incompatible shapes: len(arr) = {}, total elements in ref: {}'.format(len(arr), np.sum(shape)))
    # split arr into consecutive chunks whose lengths follow ref's event-jet structure
    return [arr[ptr:(ptr + nobj)].tolist() for (ptr, nobj) in zip(np.cumsum([0] + shape[:-1]), shape)]
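# Quick self-check (illustrative only; not part of the original helpers):
# flatten followed by match_shape should round-trip a jagged structure.
if __name__ == '__main__':
    jagged = [[1, 2, 3], [4], [5, 6]]
    flat = flatten(pd.Series(jagged))
    assert match_shape(flat, jagged) == jagged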
|
StarcoderdataPython
|
1958187
|
import logging
import os
import time
import numpy as np
from simulation import sim as vrep
from utils import utils
class SimRobot:
def __init__(self, sim_port, obj_mesh_dir, num_obj, workspace_limits,
is_testing, test_preset_cases, test_preset_file, place_enabled):
self.sim_port = sim_port
self.workspace_limits = workspace_limits
self.place_enabled = place_enabled
# Define home position
self.home_position = [-0.3, 0.0, 0.45]
# Define colors for object meshes (Tableau palette)
self.color_space = np.asarray([[78.0, 121.0, 167.0], # blue
[89.0, 161.0, 79.0], # green
[156, 117, 95], # brown
[242, 142, 43], # orange
[237.0, 201.0, 72.0], # yellow
[186, 176, 172], # gray
[255.0, 87.0, 89.0], # red
[176, 122, 161], # purple
[118, 183, 178], # cyan
[255, 157, 167]]) / 255.0 # pink
# Read files in object mesh directory
self.obj_mesh_dir = obj_mesh_dir
self.num_obj = num_obj
self.num_obj_clear = 0
self.mesh_list = os.listdir(self.obj_mesh_dir)
# Randomly choose objects to add to scene
self.obj_mesh_ind = np.random.randint(0, len(self.mesh_list), size=self.num_obj)
self.obj_mesh_color = self.color_space[np.asarray(range(self.num_obj)) % 10, :]
self.is_testing = is_testing
self.test_preset_cases = test_preset_cases
self.test_preset_file = test_preset_file
# Setup simulation
self.setup_sim()
# If testing, read object meshes and poses from test case file
if self.test_preset_cases:
file = open(self.test_preset_file, 'r')
file_content = file.readlines()
self.test_obj_mesh_files = []
self.test_obj_mesh_colors = []
self.test_obj_positions = []
self.test_obj_orientations = []
for object_idx in range(self.num_obj):
file_content_curr_object = file_content[object_idx].split()
self.test_obj_mesh_files.append(os.path.join(self.obj_mesh_dir, file_content_curr_object[0]))
self.test_obj_mesh_colors.append(
[float(file_content_curr_object[1]), float(file_content_curr_object[2]),
float(file_content_curr_object[3])])
self.test_obj_positions.append([float(file_content_curr_object[4]), float(file_content_curr_object[5]),
float(file_content_curr_object[6])])
self.test_obj_orientations.append(
[float(file_content_curr_object[7]), float(file_content_curr_object[8]),
float(file_content_curr_object[9])])
file.close()
self.obj_mesh_color = np.asarray(self.test_obj_mesh_colors)
# Add objects to simulation environment
self.add_objects()
def setup_sim(self):
# Connect to simulator
self.sim_client = -1
vrep.simxFinish(-1) # Just in case, close all opened connections
logging.info('Connecting to simulation...')
while self.sim_client == -1:
self.sim_client = vrep.simxStart('127.0.0.1', self.sim_port, True, True, 5000, 5)
if self.sim_client == -1:
logging.error('Failed to connect to simulation. Trying again..')
time.sleep(5)
else:
logging.info('Connected to simulation.')
self.restart_sim()
break
# Get handle to camera
sim_ret, self.cam_handle = vrep.simxGetObjectHandle(self.sim_client, 'Vision_sensor_persp',
vrep.simx_opmode_blocking)
# Get camera pose and intrinsics in simulation
sim_ret, cam_position = vrep.simxGetObjectPosition(self.sim_client, self.cam_handle, -1,
vrep.simx_opmode_blocking)
sim_ret, cam_orientation = vrep.simxGetObjectOrientation(self.sim_client, self.cam_handle, -1,
vrep.simx_opmode_blocking)
cam_trans = np.eye(4, 4)
cam_trans[0:3, 3] = np.asarray(cam_position)
cam_orientation = [-cam_orientation[0], -cam_orientation[1], -cam_orientation[2]]
cam_rotm = np.eye(4, 4)
cam_rotm[0:3, 0:3] = np.linalg.inv(utils.euler2rotm(cam_orientation))
self.cam_pose = np.dot(cam_trans, cam_rotm) # Compute rigid transformation representating camera pose
self.cam_intrinsics = np.asarray([[618.62, 0, 320], [0, 618.62, 240], [0, 0, 1]])
self.cam_depth_scale = 1
# Get background image
self.bg_color_img, self.bg_depth_img = self.get_camera_data()
self.bg_depth_img = self.bg_depth_img * self.cam_depth_scale
def add_objects(self):
# Add each object to robot workspace at x,y location and orientation (random or pre-loaded)
logging.info('Adding objects to the scene..')
self.object_handles = []
self.num_obj_clear = 0
for object_idx in range(len(self.obj_mesh_ind)):
curr_mesh_file = os.path.join(self.obj_mesh_dir, self.mesh_list[self.obj_mesh_ind[object_idx]])
if self.test_preset_cases:
curr_mesh_file = self.test_obj_mesh_files[object_idx]
curr_shape_name = 'shape_%02d' % object_idx
drop_x = (self.workspace_limits[0][1] - self.workspace_limits[0][0] - 0.2) * np.random.random_sample() + \
self.workspace_limits[0][0] + 0.1
drop_y = (self.workspace_limits[1][1] - self.workspace_limits[1][0] - 0.2) * np.random.random_sample() + \
self.workspace_limits[1][0] + 0.1
object_position = [drop_x, drop_y, 0.15]
object_orientation = [2 * np.pi * np.random.random_sample(), 2 * np.pi * np.random.random_sample(),
2 * np.pi * np.random.random_sample()]
if self.test_preset_cases:
object_position = [self.test_obj_positions[object_idx][0], self.test_obj_positions[object_idx][1],
self.test_obj_positions[object_idx][2]]
object_orientation = [self.test_obj_orientations[object_idx][0],
self.test_obj_orientations[object_idx][1],
self.test_obj_orientations[object_idx][2]]
object_color = [self.obj_mesh_color[object_idx][0], self.obj_mesh_color[object_idx][1],
self.obj_mesh_color[object_idx][2]]
ret_resp, ret_ints, ret_floats, ret_strings, ret_buffer = vrep.simxCallScriptFunction(self.sim_client,
'remoteApiCommandServer',
vrep.sim_scripttype_childscript,
'importShape',
[0, 0, 255, 0],
object_position + object_orientation + object_color,
[curr_mesh_file,
curr_shape_name],
bytearray(),
vrep.simx_opmode_blocking)
if ret_resp == 8:
logging.error('Failed to add new objects to simulation. Restarting..')
self.setup_sim()
else:
curr_shape_handle = ret_ints[0]
self.object_handles.append(curr_shape_handle)
if not self.test_preset_cases:
time.sleep(0.5)
self.prev_obj_positions = []
self.obj_positions = []
def restart_sim(self):
sim_ret, self.UR5_target_handle = vrep.simxGetObjectHandle(self.sim_client, 'UR5_target',
vrep.simx_opmode_blocking)
vrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (-0.5, 0, 0.3),
vrep.simx_opmode_blocking)
vrep.simxStopSimulation(self.sim_client, vrep.simx_opmode_blocking)
vrep.simxStartSimulation(self.sim_client, vrep.simx_opmode_blocking)
time.sleep(1)
sim_ret, self.RG2_tip_handle = vrep.simxGetObjectHandle(self.sim_client, 'UR5_tip', vrep.simx_opmode_blocking)
sim_ret, gripper_position = vrep.simxGetObjectPosition(self.sim_client, self.RG2_tip_handle, -1,
vrep.simx_opmode_blocking)
while gripper_position[2] > 0.4: # V-REP bug requiring multiple starts and stops to restart
vrep.simxStopSimulation(self.sim_client, vrep.simx_opmode_blocking)
vrep.simxStartSimulation(self.sim_client, vrep.simx_opmode_blocking)
time.sleep(1)
sim_ret, gripper_position = vrep.simxGetObjectPosition(self.sim_client, self.RG2_tip_handle, -1,
vrep.simx_opmode_blocking)
def check_sim(self):
# Check if simulation is stable by checking if gripper is within workspace
sim_ret, gripper_position = vrep.simxGetObjectPosition(self.sim_client, self.RG2_tip_handle, -1,
vrep.simx_opmode_blocking)
sim_ok = self.workspace_limits[0][0] - 0.1 < gripper_position[0] < self.workspace_limits[0][1] + 0.1 and \
self.workspace_limits[1][0] - 0.1 < gripper_position[1] < self.workspace_limits[1][1] + 0.1 and \
self.workspace_limits[2][0] < gripper_position[2] < self.workspace_limits[2][1]
if not sim_ok:
logging.info('Simulation unstable. Restarting environment.')
self.restart_sim()
self.add_objects()
def get_task_score(self):
key_positions = np.asarray([[-0.625, 0.125, 0.0], # red
[-0.625, -0.125, 0.0], # blue
[-0.375, 0.125, 0.0], # green
[-0.375, -0.125, 0.0]]) # yellow
obj_positions = np.asarray(self.get_obj_positions())
obj_positions.shape = (1, obj_positions.shape[0], obj_positions.shape[1])
obj_positions = np.tile(obj_positions, (key_positions.shape[0], 1, 1))
key_positions.shape = (key_positions.shape[0], 1, key_positions.shape[1])
key_positions = np.tile(key_positions, (1, obj_positions.shape[1], 1))
key_dist = np.sqrt(np.sum(np.power(obj_positions - key_positions, 2), axis=2))
key_nn_idx = np.argmin(key_dist, axis=0)
return np.sum(key_nn_idx == np.asarray(range(self.num_obj)) % 4)
def check_goal_reached(self):
goal_reached = self.get_task_score() == self.num_obj
return goal_reached
def get_obj_positions(self):
obj_positions = []
for object_handle in self.object_handles:
sim_ret, object_position = vrep.simxGetObjectPosition(self.sim_client, object_handle, -1,
vrep.simx_opmode_blocking)
obj_positions.append(object_position)
return obj_positions
def get_obj_positions_and_orientations(self):
obj_positions = []
obj_orientations = []
for object_handle in self.object_handles:
sim_ret, object_position = vrep.simxGetObjectPosition(self.sim_client, object_handle, -1,
vrep.simx_opmode_blocking)
sim_ret, object_orientation = vrep.simxGetObjectOrientation(self.sim_client, object_handle, -1,
vrep.simx_opmode_blocking)
obj_positions.append(object_position)
obj_orientations.append(object_orientation)
return obj_positions, obj_orientations
def reposition_objects(self, workspace_limits):
# Move gripper out of the way
self.move_to([-0.1, 0, 0.3], None)
for object_handle in self.object_handles:
# Drop object at random x,y location and random orientation in robot workspace
drop_x = (workspace_limits[0][1] - workspace_limits[0][0] - 0.2) * np.random.random_sample() + \
workspace_limits[0][0] + 0.1
drop_y = (workspace_limits[1][1] - workspace_limits[1][0] - 0.2) * np.random.random_sample() + \
workspace_limits[1][0] + 0.1
object_position = [drop_x, drop_y, 0.15]
object_orientation = [2 * np.pi * np.random.random_sample(), 2 * np.pi * np.random.random_sample(),
2 * np.pi * np.random.random_sample()]
vrep.simxSetObjectPosition(self.sim_client, object_handle, -1, object_position, vrep.simx_opmode_blocking)
vrep.simxSetObjectOrientation(self.sim_client, object_handle, -1, object_orientation,
vrep.simx_opmode_blocking)
time.sleep(2)
def get_camera_data(self):
# Get color image from simulation
sim_ret, resolution, raw_image = vrep.simxGetVisionSensorImage(self.sim_client, self.cam_handle, 0,
vrep.simx_opmode_blocking)
color_img = np.asarray(raw_image)
color_img.shape = (resolution[1], resolution[0], 3)
        color_img = color_img.astype(float) / 255
color_img[color_img < 0] += 1
color_img *= 255
color_img = np.fliplr(color_img)
color_img = color_img.astype(np.uint8)
# Get depth image from simulation
sim_ret, resolution, depth_buffer = vrep.simxGetVisionSensorDepthBuffer(self.sim_client, self.cam_handle,
vrep.simx_opmode_blocking)
depth_img = np.asarray(depth_buffer)
depth_img.shape = (resolution[1], resolution[0])
depth_img = np.fliplr(depth_img)
zNear = 0.01
zFar = 10
depth_img = depth_img * (zFar - zNear) + zNear
return color_img, depth_img
def close_gripper(self):
gripper_motor_velocity = -0.5
gripper_motor_force = 100
sim_ret, RG2_gripper_handle = vrep.simxGetObjectHandle(self.sim_client, 'RG2_openCloseJoint',
vrep.simx_opmode_blocking)
sim_ret, gripper_joint_position = vrep.simxGetJointPosition(self.sim_client, RG2_gripper_handle,
vrep.simx_opmode_blocking)
vrep.simxSetJointForce(self.sim_client, RG2_gripper_handle, gripper_motor_force, vrep.simx_opmode_blocking)
vrep.simxSetJointTargetVelocity(self.sim_client, RG2_gripper_handle, gripper_motor_velocity,
vrep.simx_opmode_blocking)
gripper_fully_closed = False
while gripper_joint_position > -0.045: # Block until gripper is fully closed
sim_ret, new_gripper_joint_position = vrep.simxGetJointPosition(self.sim_client, RG2_gripper_handle,
vrep.simx_opmode_blocking)
# logging.info(gripper_joint_position)
if new_gripper_joint_position >= gripper_joint_position:
return gripper_fully_closed
gripper_joint_position = new_gripper_joint_position
gripper_fully_closed = True
return gripper_fully_closed
def open_gripper(self):
gripper_motor_velocity = 0.5
gripper_motor_force = 20
sim_ret, RG2_gripper_handle = vrep.simxGetObjectHandle(self.sim_client, 'RG2_openCloseJoint',
vrep.simx_opmode_blocking)
sim_ret, gripper_joint_position = vrep.simxGetJointPosition(self.sim_client, RG2_gripper_handle,
vrep.simx_opmode_blocking)
vrep.simxSetJointForce(self.sim_client, RG2_gripper_handle, gripper_motor_force, vrep.simx_opmode_blocking)
vrep.simxSetJointTargetVelocity(self.sim_client, RG2_gripper_handle, gripper_motor_velocity,
vrep.simx_opmode_blocking)
while gripper_joint_position < 0.03: # Block until gripper is fully open
sim_ret, gripper_joint_position = vrep.simxGetJointPosition(self.sim_client, RG2_gripper_handle,
vrep.simx_opmode_blocking)
def move_to(self, tool_position, tool_orientation):
sim_ret, UR5_target_position = vrep.simxGetObjectPosition(self.sim_client, self.UR5_target_handle, -1,
vrep.simx_opmode_blocking)
move_direction = np.asarray(
[tool_position[0] - UR5_target_position[0], tool_position[1] - UR5_target_position[1],
tool_position[2] - UR5_target_position[2]])
move_magnitude = np.linalg.norm(move_direction)
move_step = 0.02 * move_direction / move_magnitude
try:
num_move_steps = int(np.floor(move_magnitude / 0.02))
except ValueError:
return False
for step_iter in range(num_move_steps):
vrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (
UR5_target_position[0] + move_step[0], UR5_target_position[1] + move_step[1],
UR5_target_position[2] + move_step[2]), vrep.simx_opmode_blocking)
sim_ret, UR5_target_position = vrep.simxGetObjectPosition(self.sim_client, self.UR5_target_handle, -1,
vrep.simx_opmode_blocking)
vrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1,
(tool_position[0], tool_position[1], tool_position[2]), vrep.simx_opmode_blocking)
def go_home(self):
self.move_to(self.home_position, None)
# Primitives ----------------------------------------------------------
    def grasp(self, position, heightmap_rotation_angle, grasp_vertical_offset=-0.04, grasp_location_margin=0.15):
logging.info('Executing: grasp at (%f, %f, %f)' % (position[0], position[1], position[2]))
# Compute tool orientation from heightmap rotation angle
tool_rotation_angle = (heightmap_rotation_angle % np.pi) - np.pi / 2
# Avoid collision with floor
position = np.asarray(position).copy()
position[2] = max(position[2] + grasp_vertical_offset, self.workspace_limits[2][0] + 0.02)
# Move gripper to location above grasp target
location_above_grasp_target = (position[0], position[1], position[2] + grasp_location_margin)
# Compute gripper position and linear movement increments
tool_position = location_above_grasp_target
sim_ret, UR5_target_position = vrep.simxGetObjectPosition(self.sim_client, self.UR5_target_handle, -1,
vrep.simx_opmode_blocking)
move_direction = np.asarray(
[tool_position[0] - UR5_target_position[0], tool_position[1] - UR5_target_position[1],
tool_position[2] - UR5_target_position[2]])
move_magnitude = np.linalg.norm(move_direction)
move_step = 0.05 * move_direction / move_magnitude
try:
num_move_steps = int(np.floor(move_direction[0] / move_step[0]))
except ValueError:
return False
# Compute gripper orientation and rotation increments
sim_ret, gripper_orientation = vrep.simxGetObjectOrientation(self.sim_client, self.UR5_target_handle, -1,
vrep.simx_opmode_blocking)
rotation_step = 0.3 if (tool_rotation_angle - gripper_orientation[1] > 0) else -0.3
num_rotation_steps = int(np.floor((tool_rotation_angle - gripper_orientation[1]) / rotation_step))
# Simultaneously move and rotate gripper
for step_iter in range(max(num_move_steps, num_rotation_steps)):
vrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (
UR5_target_position[0] + move_step[0] * min(step_iter, num_move_steps),
UR5_target_position[1] + move_step[1] * min(step_iter, num_move_steps),
UR5_target_position[2] + move_step[2] * min(step_iter, num_move_steps)), vrep.simx_opmode_blocking)
vrep.simxSetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, (
np.pi / 2, gripper_orientation[1] + rotation_step * min(step_iter, num_rotation_steps), np.pi / 2),
vrep.simx_opmode_blocking)
vrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1,
(tool_position[0], tool_position[1], tool_position[2]), vrep.simx_opmode_blocking)
vrep.simxSetObjectOrientation(self.sim_client, self.UR5_target_handle, -1,
(np.pi / 2, tool_rotation_angle, np.pi / 2), vrep.simx_opmode_blocking)
# Ensure gripper is open
self.open_gripper()
# Approach grasp target
self.move_to(position, None)
# Close gripper to grasp target
gripper_full_closed = self.close_gripper()
# Move gripper to location above grasp target
self.move_to(location_above_grasp_target, None)
# Check if grasp is successful
gripper_full_closed = self.close_gripper()
grasp_success = not gripper_full_closed
# Move the grasped object elsewhere
if grasp_success:
if self.place_enabled:
self.go_home()
else:
self.num_obj_clear += 1
object_positions = np.asarray(self.get_obj_positions())
object_positions = object_positions[:, 2]
grasped_object_ind = np.argmax(object_positions)
grasped_object_handle = self.object_handles[grasped_object_ind]
vrep.simxSetObjectPosition(self.sim_client, grasped_object_handle, -1,
(-0.5, 0.5 + 0.05 * float(grasped_object_ind), 0.1), vrep.simx_opmode_blocking)
return grasp_success
def push(self, position, heightmap_rotation_angle, push_vertical_offset=0.01, pushing_point_margin=0.1):
logging.info('Executing: push at (%f, %f, %f)' % (position[0], position[1], position[2]))
# Compute tool orientation from heightmap rotation angle
tool_rotation_angle = (heightmap_rotation_angle % np.pi) - np.pi / 2
# Adjust pushing point to be on tip of finger
position[2] = position[2] + push_vertical_offset
# Compute pushing direction
push_orientation = [1.0, 0.0]
push_direction = np.asarray([push_orientation[0] * np.cos(heightmap_rotation_angle) - push_orientation[
1] * np.sin(heightmap_rotation_angle),
push_orientation[0] * np.sin(heightmap_rotation_angle) + push_orientation[
1] * np.cos(heightmap_rotation_angle)])
# Move gripper to location above pushing point
location_above_pushing_point = (position[0], position[1], position[2] + pushing_point_margin)
# Compute gripper position and linear movement increments
tool_position = location_above_pushing_point
sim_ret, UR5_target_position = vrep.simxGetObjectPosition(self.sim_client, self.UR5_target_handle, -1,
vrep.simx_opmode_blocking)
move_direction = np.asarray(
[tool_position[0] - UR5_target_position[0], tool_position[1] - UR5_target_position[1],
tool_position[2] - UR5_target_position[2]])
move_magnitude = np.linalg.norm(move_direction)
move_step = 0.05 * move_direction / move_magnitude
try:
num_move_steps = int(np.floor(move_direction[0] / move_step[0]))
except ValueError:
return False
# Compute gripper orientation and rotation increments
sim_ret, gripper_orientation = vrep.simxGetObjectOrientation(self.sim_client, self.UR5_target_handle, -1,
vrep.simx_opmode_blocking)
rotation_step = 0.3 if (tool_rotation_angle - gripper_orientation[1] > 0) else -0.3
num_rotation_steps = int(np.floor((tool_rotation_angle - gripper_orientation[1]) / rotation_step))
# Simultaneously move and rotate gripper
for step_iter in range(max(num_move_steps, num_rotation_steps)):
vrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (
UR5_target_position[0] + move_step[0] * min(step_iter, num_move_steps),
UR5_target_position[1] + move_step[1] * min(step_iter, num_move_steps),
UR5_target_position[2] + move_step[2] * min(step_iter, num_move_steps)), vrep.simx_opmode_blocking)
vrep.simxSetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, (
np.pi / 2, gripper_orientation[1] + rotation_step * min(step_iter, num_rotation_steps), np.pi / 2),
vrep.simx_opmode_blocking)
vrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1,
(tool_position[0], tool_position[1], tool_position[2]), vrep.simx_opmode_blocking)
vrep.simxSetObjectOrientation(self.sim_client, self.UR5_target_handle, -1,
(np.pi / 2, tool_rotation_angle, np.pi / 2), vrep.simx_opmode_blocking)
# Ensure gripper is closed
self.close_gripper()
# Approach pushing point
self.move_to(position, None)
# Compute target location (push to the right)
push_length = 0.1
target_x = min(max(position[0] + push_direction[0] * push_length, self.workspace_limits[0][0]),
self.workspace_limits[0][1])
target_y = min(max(position[1] + push_direction[1] * push_length, self.workspace_limits[1][0]),
self.workspace_limits[1][1])
push_length = np.sqrt(np.power(target_x - position[0], 2) + np.power(target_y - position[1], 2))
# Move in pushing direction towards target location
self.move_to([target_x, target_y, position[2]], None)
# Move gripper to location above grasp target
self.move_to([target_x, target_y, location_above_pushing_point[2]], None)
push_success = True
return push_success
def place(self, position, heightmap_rotation_angle, place_vertical_offset=0.04):
logging.info('Executing: place at (%f, %f, %f)' % (position[0], position[1], position[2]))
# Ensure gripper is closed
gripper_fully_closed = self.close_gripper()
if gripper_fully_closed:
# There is no object present, so we cannot possibly place!
return False
# Compute tool orientation from heightmap rotation angle
tool_rotation_angle = (heightmap_rotation_angle % np.pi) - np.pi / 2
# Avoid collision with floor
position[2] = max(position[2] + place_vertical_offset, self.workspace_limits[2][0] + 0.02)
# Move gripper to location above place target
place_location_margin = 0.1
sim_ret, UR5_target_handle = vrep.simxGetObjectHandle(self.sim_client, 'UR5_target', vrep.simx_opmode_blocking)
location_above_place_target = (position[0], position[1], position[2] + place_location_margin)
self.move_to(location_above_place_target, None)
sim_ret, gripper_orientation = vrep.simxGetObjectOrientation(self.sim_client, UR5_target_handle, -1,
vrep.simx_opmode_blocking)
if tool_rotation_angle - gripper_orientation[1] > 0:
increment = 0.2
else:
increment = -0.2
while abs(tool_rotation_angle - gripper_orientation[1]) >= 0.2:
vrep.simxSetObjectOrientation(self.sim_client, UR5_target_handle, -1,
(np.pi / 2, gripper_orientation[1] + increment, np.pi / 2),
vrep.simx_opmode_blocking)
time.sleep(0.01)
sim_ret, gripper_orientation = vrep.simxGetObjectOrientation(self.sim_client, UR5_target_handle, -1,
vrep.simx_opmode_blocking)
vrep.simxSetObjectOrientation(self.sim_client, UR5_target_handle, -1,
(np.pi / 2, tool_rotation_angle, np.pi / 2), vrep.simx_opmode_blocking)
# Approach place target
self.move_to(position, None)
# Ensure gripper is open
self.open_gripper()
# Move gripper to location above place target
self.move_to(location_above_place_target, None)
return True
def shutdown(self):
logging.info('Shutting down simulation..')
vrep.simxStopSimulation(self.sim_client, vrep.simx_opmode_oneshot)
vrep.simxSynchronousTrigger(self.sim_client)
time.sleep(1)
vrep.simxFinish(self.sim_client)
vrep.simxFinish(-1)
logging.info("Disconnected from simulation.")
|
StarcoderdataPython
|
9621756
|
#-*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
import paddle.fluid as fluid
from utils import *
import utils
import contextlib
import os
import math
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
class multires_unet(object):
def __init__(self, rows=512, cols=512):
self.rows = rows
self.cols = cols
def ChannelSE(self, input, num_channels, reduction_ratio=16):
"""
Squeeze and Excitation block, reimplementation inspired by
https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/senet.py
"""
pool = fluid.layers.pool2d(
input=input, pool_size=0, pool_type='avg', global_pooling=True)
stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
squeeze = fluid.layers.fc(input=pool,
size=num_channels // reduction_ratio,
act='relu',
param_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.Uniform(
-stdv, stdv)))
stdv = 1.0 / math.sqrt(squeeze.shape[1] * 1.0)
excitation = fluid.layers.fc(input=squeeze,
size=num_channels,
act='sigmoid',
param_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.Uniform(
-stdv, stdv)))
scale = fluid.layers.elementwise_mul(x=input, y=excitation, axis=0)
return scale
def SpatialSE(self, input):
"""
Spatial squeeze and excitation block (applied across spatial dimensions)
"""
conv = conv_bn_layer(input=input, num_filters=input.shape[1],
filter_size=1, stride=1, act='sigmoid')
conv = fluid.layers.elementwise_mul(x=input, y=conv, axis=0)
return conv
def scSE_block(self, x, channels):
'''
Implementation of Concurrent Spatial and Channel ‘Squeeze & Excitation’ in Fully Convolutional Networks
https://arxiv.org/abs/1803.02579
'''
cse = self.ChannelSE(input = x, num_channels = channels)
sse = self.SpatialSE(input = x)
x = fluid.layers.elementwise_add(x=cse, y=sse)
return x
def multi_res_block(self, inputs,filter_size1,filter_size2,filter_size3,filter_size4):
conv1 = conv_bn_layer(
input=inputs, num_filters=filter_size1, filter_size=3, stride=1, act='relu')
conv2 = conv_bn_layer(
input=conv1, num_filters=filter_size2, filter_size=3, stride=1, act='relu')
conv3 = conv_bn_layer(
input=conv2, num_filters=filter_size3, filter_size=3, stride=1, act='relu')
conv = conv_bn_layer(
input=inputs, num_filters=filter_size4, filter_size=1, stride=1, act='relu')
concat = fluid.layers.concat([conv1, conv2, conv3], axis = 1) #merge in channel
add = fluid.layers.elementwise_add(concat,y=conv)
return add
def res_path(self, inputs,filter_size,path_number):
        def block(x, fl):
            conv1 = conv_bn_layer(
                input=x, num_filters=fl, filter_size=3, stride=1, act='relu')
            conv2 = conv_bn_layer(
                input=x, num_filters=fl, filter_size=1, stride=1, act='relu')
            add = fluid.layers.elementwise_add(conv1, conv2)
            return add
cnn = block(inputs, filter_size)
if path_number <= 3:
cnn = block(cnn,filter_size)
if path_number <= 2:
cnn = block(cnn,filter_size)
if path_number <= 1:
cnn = block(cnn,filter_size)
return cnn
def model(self, inputs):
res_block1 = self.multi_res_block(inputs,8,17,26,51)
res_block1 = self.scSE_block(res_block1, res_block1.shape[1])
pool1 = fluid.layers.pool2d(
input=res_block1, pool_size=3, pool_stride=2, pool_padding=1, \
pool_type='max')
print(pool1.shape)
res_block2 = self.multi_res_block(pool1,17,35,53,105)
res_block2 = self.scSE_block(res_block2, res_block2.shape[1])
pool2 = fluid.layers.pool2d(
input=res_block2, pool_size=3, pool_stride=2, pool_padding=1, \
pool_type='max')
print(pool2.shape)
res_block3 = self.multi_res_block(pool2,31,72,106,209)
res_block3 = self.scSE_block(res_block3, res_block3.shape[1])
pool3 = fluid.layers.pool2d(
input=res_block3, pool_size=3, pool_stride=2, pool_padding=1, \
pool_type='max')
print(pool3.shape)
res_block4 = self.multi_res_block(pool3,71,142,213,426)
res_block4 = self.scSE_block(res_block4, res_block4.shape[1])
pool4 = fluid.layers.pool2d(
input=res_block4, pool_size=3, pool_stride=2, pool_padding=1, \
pool_type='max')
print(pool4.shape)
res_block5 = self.multi_res_block(pool4,142,284,427,853)
upsample = deconv_bn_layer(res_block5, num_filters = 853, filter_size=4, stride=2,
act='relu')
print("upsample.shape",upsample.shape)
res_path4 = self.res_path(res_block4,256,4)
concat = fluid.layers.concat([upsample,res_path4], axis = 1) #merge in channel
print("concat.shape",concat.shape)
res_block6 = self.multi_res_block(concat,71,142,213,426)
res_block6 = self.scSE_block(res_block6, res_block6.shape[1])
upsample = deconv_bn_layer(res_block6, num_filters = 426, filter_size=4, stride=2,
act='relu')
print("upsample.shape",upsample.shape)
res_path3 = self.res_path(res_block3,128,3)
concat = fluid.layers.concat([upsample,res_path3], axis = 1) #merge in channel
print("concat.shape",concat.shape)
res_block7 = self.multi_res_block(concat,31,72,106,209)
res_block7 = self.scSE_block(res_block7, res_block7.shape[1])
upsample = deconv_bn_layer(res_block7, num_filters = 209, filter_size=4, stride=2,
act='relu')
print("upsample.shape",upsample.shape)
res_path2 = self.res_path(res_block2,64,2)
concat = fluid.layers.concat([upsample,res_path2], axis = 1) #merge in channel
print("concat.shape",concat.shape)
res_block8 = self.multi_res_block(concat,17,35,53,105)
res_block8 = self.scSE_block(res_block8, res_block8.shape[1])
upsample = deconv_bn_layer(res_block8, num_filters = 105, filter_size=4, stride=2,
act='relu')
print("upsample.shape",upsample.shape)
res_path1 = self.res_path(res_block1,32,1)
concat = fluid.layers.concat([upsample,res_path1], axis = 1) #merge in channel
print("concat.shape",concat.shape)
res_block9 = self.multi_res_block(concat,8,17,26,51)
res_block9 = self.scSE_block(res_block9, res_block9.shape[1])
conv9 = conv_bn_layer(
input=res_block9, num_filters=9, filter_size=1, stride=1, act='relu')
conv9_transpose = fluid.layers.transpose(x=conv9, perm=[0, 2, 3, 1])
modelOut = fluid.layers.reshape(conv9_transpose, shape=[-1, 9])
modelOut = fluid.layers.softmax(modelOut)
return modelOut
|
StarcoderdataPython
|
102192
|
<gh_stars>1-10
#! /home/johk/anaconda3/envs/slam/bin/python
import numpy as np
import OpenGL.GL as gl
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import pangolin
import pickle
class D3Engine:
def __init__(self):
self.realPoints = []
        self.creative_mode = True
def display(self):
points = None
W = 990
H = 540
pangolin.CreateWindowAndBind('JSLAM', W, H)
gl.glEnable(gl.GL_DEPTH_TEST)
# Define Projection and initial ModelView matrix
scam = pangolin.OpenGlRenderState(
pangolin.ProjectionMatrix(W, H, 420, 420, 320, 240, 0.2, 100),
pangolin.ModelViewLookAt(0, 2, 15, 2, -3, -5, pangolin.AxisDirection.AxisY))
handler = pangolin.Handler3D(scam)
# Create Interactive View in window
dcam = pangolin.CreateDisplay()
dcam.SetBounds(0.0, 1.0, 0.0, 1.0, -640.0 / 480.0)
dcam.SetHandler(handler)
x = 0
y = 0
z = 0
# Perspective coordinates
xc = 0
yc = 0
zc = 0
animation_counter = 0
while not pangolin.ShouldQuit():
failed_to_load = False
try:
points = pickle.load(open("data/points.p", "rb"))
except Exception:
                failed_to_load = True
if not failed_to_load:
self.realPoints = []
for i, (xp, yp) in enumerate(points):
self.realPoints.append([xp, yp, z])
gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
gl.glClearColor(0, 0, 0, 0)
dcam.Activate(scam)
# Draw Point Cloud
if not failed_to_load:
points = np.random.random((100000, 3)) * 10
gl.glPointSize(2)
gl.glColor3f(1.0, 0.0, 0.0)
pangolin.DrawPoints(points)
# Load the camera
print("Animation counter: {}".format(animation_counter))
pose = np.identity(4)
pose[:3, 3] = [x, y, z]
gl.glLineWidth(2)
gl.glColor3f(0.0, 1.0, 0.0)
pangolin.DrawCamera(pose, 0.5, 0.75, 0.8)
if not self.creative_mode or animation_counter > 100:
zc += 0.1
scam = pangolin.OpenGlRenderState(
pangolin.ProjectionMatrix(W, H, 420, 420, 320, 240, 0.2, 100),
pangolin.ModelViewLookAt(0, 2, 15+zc, 2, -3, -5, pangolin.AxisDirection.AxisY))
handler = pangolin.Handler3D(scam)
dcam.SetBounds(0.0, 1.0, 0.0, 1.0, -640.0 / 480.0)
dcam.SetHandler(handler)
z += 0.1
animation_counter += 1
pangolin.FinishFrame()
|
StarcoderdataPython
|
6618057
|
<reponame>mukul20-21/python_datastructure
n = int(input())
item = list(map(int,input().split()))
uqi = set(item)
print(len(uqi))
|
StarcoderdataPython
|
6698680
|
<filename>disaster_response_classifier/disaster_response_classifier/train_classifier.py
# import libraries
import os
import argparse
import pandas as pd
import numpy as np
import re
import pathlib
from sqlalchemy import create_engine
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score, f1_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.multioutput import MultiOutputClassifier
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
import joblib
import nltk
def get_project_path():
"""
this function get project absolute path regardless of we the python script being executed.
relative path for loading data or model can be define give project absolute path
return project absolute path
:return:
"""
if len(__file__.split("/")) > 1:
project_path = str(pathlib.Path(__file__).parent.parent.absolute())
else:
project_path = ".."
return project_path
def load_data(database_filepath):
"""
load stored preprocessed data from sqlite database given path to be use for generating plots and analysis.
returns train data, train labels, list of class/category names
:param database_filepath:
:return:
"""
engine = create_engine("".join(["sqlite:///", database_filepath]))
table_name = "".join([database_filepath.split("/")[-1], "Table"])
df = pd.read_sql_query("select * from DisasterResponseData", con=engine)
X = df.iloc[:, 1]
Y = df.iloc[:, 4 : df.shape[1]]
category_names = df.columns[4 : df.shape[1]].to_list()
return X, Y, category_names
def tokenize(text):
"""
receives a text message and breaks it down to relevant tokens using NLP techniques
the resulting word array will be used for feature extraction in classification pipeline
return array of tokens
:param text:
:return:
"""
text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower())
tokens = nltk.tokenize.word_tokenize(text)
lemmatizer = nltk.stem.WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
if tok.lower() not in nltk.corpus.stopwords.words("english"):
clean_tok = lemmatizer.lemmatize(tok, pos="v").lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
def build_model_simple():
"""
This function helps in building the model.
Creating the pipeline
Return the model
:return:
"""
pipeline = Pipeline(
[
("vect", CountVectorizer(tokenizer=tokenize)),
("tfidf", TfidfTransformer()),
("clf", MultiOutputClassifier(RandomForestClassifier())),
]
)
return pipeline
def build_model():
"""
This function helps in building the model.
Creating the pipeline
Applying Grid search
Return the model
"""
# creating pipeline
pipeline = Pipeline(
[
("vect", CountVectorizer(tokenizer=tokenize)),
("tfidf", TfidfTransformer()),
("clf", MultiOutputClassifier(AdaBoostClassifier())),
]
)
# parameters
parameters = {
"vect__ngram_range": ((1, 1), (1, 2)),
"vect__max_df": (0.8, 1.0),
"vect__max_features": (None, 10000),
"clf__estimator__n_estimators": [50, 100],
"clf__estimator__learning_rate": [0.1, 1.0],
}
# grid search
cv = GridSearchCV(pipeline, parameters, cv=3, n_jobs=-1)
return cv
def build_optimized_model():
"""
This function helps in building the model.
Creating the pipeline
Applying Grid search
Return the model
:return:
"""
clf = MultiOutputClassifier(SVC())
tune_parameters = {
"clf_estimator__gamma": [1e-1, 1e-2, 1e-3],
"clf_estimator__C": [1, 10, 100],
"vect__ngram_range": ((1, 1), (1, 2)),
"vect__max_df": (0.8, 1.0),
"vect__max_features": (None, 10000),
}
pipeline = Pipeline(
[
("vect", CountVectorizer(tokenizer=tokenize)),
("tfidf", TfidfTransformer()),
("clf", clf),
]
)
clf_grid = GridSearchCV(
estimator=pipeline, n_jobs=-1, cv=3, param_grid=tune_parameters
)
return clf_grid
def display_evaluation_results(y_test, y_pred, label_names):
"""
applies classification metrics on predicted classes and prints out f1 score, accuracy and confusion matrix per class
:param y_test:
:param y_pred:
:param label_names
:return:
"""
for i, l_name in enumerate(label_names):
labels = np.unique(y_pred)
target_names = ["".join(["not ", l_name]), l_name]
print(
classification_report(
y_pred=y_pred[:, i],
y_true=y_test.iloc[:, i].to_numpy(),
labels=labels,
target_names=target_names,
)
)
print("")
print(
"average accuracy {}".format(
sum(
[
accuracy_score(y_test.iloc[:, i].to_numpy(), y_pred[:, i])
for i in range(y_pred.shape[1])
]
)
/ y_pred.shape[1]
)
)
print(
"average f1_score {}".format(
sum(
[
f1_score(y_test.iloc[:, i].to_numpy(), y_pred[:, i])
for i in range(y_pred.shape[1])
]
)
/ y_pred.shape[1]
)
)
def evaluate_model(model, X_test, Y_test, category_names):
"""
runs evaluation on test data and displays the results
:param model:
:param X_test:
:param Y_test:
:param category_names:
:return:
"""
y_pred = model.predict(X_test)
display_evaluation_results(Y_test, y_pred, category_names)
def save_model(model, model_filepath, model_name="dr_trained_model.lzma"):
"""
saves trained model in given path
:param model:
:param model_filepath:
:param model_name:
:return:
"""
# save
m_f = "".join([model_filepath, model_name])
if os.path.exists(m_f):
os.remove(m_f)
joblib.dump(value=model, filename=m_f, compress=("lzma", 9))
def generate_arg_parser():
"""
    Parses the command-line arguments for the training script.
:return:
"""
project_path = get_project_path()
# load data
default_db_path = "".join([project_path, "/data/DisasterResponseDataBase.db"])
default_model_path = "".join([str(project_path), "/models/"])
parser = argparse.ArgumentParser(
description="Load data from database and train classifier and dump the trained model."
)
parser.add_argument(
"--db_file",
action="store",
dest="db_file",
type=str,
default=default_db_path,
help="Path to disaster response database",
)
parser.add_argument(
"--model_file",
action="store",
dest="model_file",
type=str,
default=default_model_path,
help="path to store trained machine leaning model.",
)
return parser.parse_args(), parser
def main():
args_params, parser = generate_arg_parser()
if not args_params.db_file or not args_params.model_file:
parser.print_help()
exit(1)
print("\n Downloading required NLTK libraries....\n")
nltk.download(["punkt", "wordnet", "stopwords", "averaged_perceptron_tagger"])
print("Loading data...\n DATABASE: {}".format(args_params.db_file))
X, Y, category_names = load_data(args_params.db_file)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
# build/train/evaluate/save model
print("Building model...")
model = build_model()
print("Training model...")
model.fit(X_train, Y_train)
print("Evaluating model...")
evaluate_model(model, X_test, Y_test, category_names)
print("Saving model...\n MODEL: {}".format(args_params.model_file))
save_model(
model,
args_params.model_file,
)
# build/train/evaluate/save optimized model
print("Building optimized model...")
opt_model = build_optimized_model()
print("Training optimized model...")
opt_model.fit(X_train, Y_train)
print("Evaluating optimized model...")
evaluate_model(opt_model, X_test, Y_test, category_names)
print("Saving optimized model...\n MODEL: {}".format(args_params.model_file))
save_model(opt_model, args_params.model_file, "dr_trained_opt_model.lzma")
print("Trained model saved!")
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
11373048
|
import matplotlib.pyplot as plt
from collections import Counter
def pieChart(cats):
labels = []
percentages = []
    most = Counter(cats).most_common(n=10)
    cat_sum = sum(value for _, value in most)
    for label, count in most:
        labels.append(label)
        percentages.append(count / cat_sum)
fig1, ax1 = plt.subplots()
ax1.pie(percentages, labels=labels, autopct='%1.1f%%',
shadow=True, startangle=90)
ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
plt.show()
return
def columnChart(heavy_hitters, title, amount):
words = []
counts = []
HHs = heavy_hitters[title]
for i in range(0, len(HHs[:amount*2]), 2):
words.append(HHs[i])
counts.append(float(HHs[i+1]))
fig, axs = plt.subplots()
fig.suptitle(title, y=1.0)
axs.bar(words, counts)
axs.set_ylabel('Word count')
axs.set_xlabel('Word')
locs, labels = plt.xticks()
plt.setp(labels, rotation=90)
plt.show()
return
def uniqueWords(heavy_hitters, cat, top=10):
unique_words = set([heavy_hitters[cat][i] for i in range(0, len(heavy_hitters[cat][:top*2]), 2)])
for k, _ in heavy_hitters.items():
if k == cat:
continue
unique_words = unique_words - set([heavy_hitters[k][i] for i in range(0, len(heavy_hitters[k][:top*2]), 2)])
return unique_words
if __name__ == "__main__":
result = "logs.txt"
HHs = {}
Cats = {}
with open(result, 'r', encoding="UTF-8") as f:
for line in f:
if 'Counter' in line:
line = line.replace("(", "").replace("{", "").replace(")", "").replace("}", "").replace("Counter", "")\
.replace(":", "").replace(",", "").replace("\'", "").replace("\n", "").split(" ")
HHs[line[0]] = line[1:]
else:
line = line.replace(" mapped to", "").replace("\n", "").split(" ")
Cats[line[1]] = int(line[0])
#pieChart(Cats)
columnChart(HHs, 'Sports', 10)
#print(uniqueWords(HHs, 'Sports'))
|
StarcoderdataPython
|
9610406
|
<filename>wplay/save_chat.py
# region IMPORTS
from pathlib import Path
from wplay.utils import browser_config
from wplay.utils import target_search
from wplay.utils import target_select
from wplay.utils.helpers import save_chat_folder_path
from wplay.utils.Logger import Logger
# endregion
# region LOGGER
__logger = Logger(Path(__file__).name)
# endregion
async def save_chat(target):
"""
Save the whole chat of the target person in .txt file.
"""
page, _ = await browser_config.configure_browser_and_load_whatsapp()
if target is not None:
try:
await target_search.search_and_select_target(page, target)
except Exception as e:
print(e)
await page.reload()
await target_search.search_and_select_target_without_new_chat_button(page, target)
else:
target = await target_select.manual_select_target(page)
# selectors
selector_values = "#main > div > div > div > div > div > div > div > div"
selector_sender = "#main > div > div > div > div > div > div > div > div > div.copyable-text"
# Getting all the messages of the chat
try:
__logger.info("Saving chats with target")
await page.waitForSelector(selector_values)
values = await page.evaluate(f'''() => [...document.querySelectorAll('{selector_values}')]
.map(element => element.textContent)''')
sender = await page.evaluate(f'''() => [...document.querySelectorAll('{selector_sender}')]
.map(element => element.getAttribute("data-pre-plain-text"))''')
final_values = [x[:-8] for x in values]
new_list = [a + b for a, b in zip(sender, final_values)]
# opens chat file of the target person
with open(save_chat_folder_path / f'chat_{target}.txt', 'w') as output:
for s in new_list:
output.write("%s\n" % s)
    except Exception as e:
        print(e)
    else:
        # the `with` block above already closed the file
        print(f'\nChat file saved in: {str(save_chat_folder_path/"chat_")}{target}.txt')
|
StarcoderdataPython
|
1715805
|
import tensorflow as tf
global_step = tf.train.get_or_create_global_step()
a=tf.constant([
[[1.0,2.0,3.0,4.0],
[5.0,6.0,7.0,8.0],
[9,10,11,12],
[13,14,15,16]],
[[17,18,19,20],
[21,22,23,24],
[25,26,27,28],
[29,30,31,32]]
])
a=tf.reshape(a,[1,4,4,2])
pooling=tf.nn.max_pool(a,[1,2,2,1],[1,1,1,1],padding='VALID')
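# With a 2x2 window, stride 1 and VALID padding, the 4x4 input maps to 3x3
# per channel, so `pooling` has shape (1, 3, 3, 2).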
with tf.Session() as sess:
    print("image:")
    image = sess.run(a)
    print(image)
    print("result:")
    result = sess.run(pooling)
    print(result)
    print('{:}'.format(global_step))
|
StarcoderdataPython
|
9697010
|
class DecisionNode:
"""
A Decision Node asks a question.
This holds a reference to the question, and to the two child nodes.
"""
def __init__(self,
question,
true_branch,
false_branch):
self.question = question
self.true_branch = true_branch
self.false_branch = false_branch
|
StarcoderdataPython
|
11260985
|
import random
class Chromosome:
def __init__(self, genes, fitness):
self.Genes = genes
self.Fitness = fitness
def createPop(popSize, min, max):
D = len(min)
pop = []
for i in range(0, popSize):
chrom = []
for j in range(0, D):
chrom.append(random.uniform(min[j],max[j]))
fitness = None
pop.append(Chromosome(chrom, fitness))
return pop
def mutate(chrom, min, max):
index = random.randrange(0, len(min))
newGene = random.uniform(min[index], max[index])
Genes = chrom.Genes[:]
Genes[index] = newGene
return Chromosome(Genes, None)
def crossover(chrom1, chrom2):
index = random.randrange(0, len(chrom1.Genes))
newChrom1 = Chromosome(chrom1.Genes[:index]+chrom2.Genes[index:], None)
newChrom2 = Chromosome(chrom2.Genes[:index]+chrom1.Genes[index:], None)
return (newChrom1, newChrom2)
def minFit(pop):
N = len(pop)
idx = 0
val = pop[0].Fitness
for i in range(1, N):
if pop[i].Fitness<val:
val = pop[i].Fitness
idx = i
return (val, idx)
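
# Illustrative driver (not part of the original module): minimize the sphere
# function f(x) = sum(x_i ** 2) with the helpers above.
if __name__ == "__main__":
    lo_b, hi_b = [-5.0, -5.0], [5.0, 5.0]
    pop = createPop(20, lo_b, hi_b)
    best_val = None
    for _ in range(100):
        for chrom in pop:
            chrom.Fitness = sum(g * g for g in chrom.Genes)
        best_val, best_idx = minFit(pop)
        child1, child2 = crossover(pop[best_idx], random.choice(pop))
        pop[0] = mutate(child1, lo_b, hi_b)  # crude replacement scheme
        pop[1] = child2
    print("best fitness found:", best_val)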
|
StarcoderdataPython
|
1862614
|
<gh_stars>1-10
from config import *
import pandas as pd
import numpy as np
import networkx as nx
import scipy.stats
from sklearn import metrics
import bct
import matplotlib.pyplot as plt
def get_adjmtx(corrmtx,density,verbose=False):
assert density<=1
cutoff=scipy.stats.scoreatpercentile(corrmtx[np.triu_indices_from(corrmtx,1)],
100-(100*density))
# scipy.stats.scoreatpercentile:
# Calculate the score at a given percentile of the input sequence.
# np.triu_indices_from
# Return the indices for the upper-triangle of arr.
if verbose:
print('cutoff:%0.3f'%cutoff)
adjmtx=(corrmtx>cutoff).astype('int')
adjmtx[np.diag_indices_from(adjmtx)]=0
return(adjmtx)
# now generate a graph using NetworkX
# created previously using get_yeo_assignments.py
labeldir = '%s/references/HCP-MMP1/MMP_yeo2011_networks.csv'%(rootdir)
labeldata = pd.read_csv(labeldir)
def gengraph(adjmtx):
G=nx.from_numpy_array(adjmtx)
# get giant component
Gc = max(nx.connected_components(G), key=len)
Gc=G.subgraph(Gc)
print('Giant component includes %d out of %d total nodes'%(len(Gc.nodes),len(G.nodes)))
labeldata_Gc=labeldata.loc[list(Gc.nodes)]
cl={0:'black',1:'red',2:'yellow',3:'green',4:'blue',5:'orange',6:'gray',7:'magenta'}
colors=[cl[labeldata['Yeo7'].iloc[i]] for i in Gc.nodes]
degrees=np.array([Gc.degree(i) for i in Gc.nodes])
layout=nx.spring_layout(Gc)
nx.draw_networkx(Gc,pos=layout,with_labels=False,node_color=colors,
node_size=degrees)
_=plt.axis('off')
yeodict={0:'Undefined',1:'Visual',2:'Somatomotor',3:'DorsalAttention',
4:'VentralAttention',5:'Limbic',
6:'Frontoparietal',7:'Default'}
for i in yeodict:
print(cl[i],':',yeodict[i])
def comdetc(corrmtx, adjmtx, density):
# get adj matrix for giant component
G=nx.from_numpy_array(adjmtx)
# get giant component
Gc = max(nx.connected_components(G), key=len)
Gc = G.subgraph(Gc)
print('Giant component includes %d out of %d total nodes'%(len(Gc.nodes),len(G.nodes)))
labeldata_Gc=labeldata.loc[list(Gc.nodes)]
Gc_nodelist=list(Gc.nodes)
tmp=corrmtx[Gc_nodelist,:]
corrmtx_Gc=tmp[:,Gc_nodelist]
adjmtx=get_adjmtx(corrmtx_Gc,density)
mod_binary=bct.modularity_louvain_und(adjmtx)
print('modularity:',mod_binary[1])
print('Multilevel modularity optimization identifed %d communities'%len(np.unique(mod_binary[0])))
ari=metrics.adjusted_rand_score(mod_binary[0],
labeldata_Gc['Yeo7'])
print('Adjusted Rand index compared to Yeo 7 networks: %0.3f'%ari)
degrees=np.array([Gc.degree(i) for i in Gc.nodes])
layout=nx.spring_layout(Gc)
nx.draw_networkx(Gc,pos=layout,with_labels=False,
node_color=[mod_binary[0][i] for i in range(len(Gc.nodes))],
node_size=degrees,cmap='viridis')
_=plt.axis('off')
def clk(G,k):
"""computes average clustering coefficient for nodes with degree k"""
ls= list(G.degree(nx.nodes(G)))
s=0
c=0
for i in ls:
if i[1]==k:
s=s+ nx.clustering(G, i[0])
c=c+1
return s/c
#small world
def ml(G,l):
"""
    Computes the average number of nodes within a distance less than or equal to l
    from any given vertex.
"""
s=0
for j in G.nodes:
        s=s+len(nx.single_source_shortest_path_length(G, j, cutoff=l))-1  # -1 because it counts distance(i,i)=0
return s/nx.number_of_nodes(G)
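
# Illustrative usage on a small random graph (not part of the original analysis):
if __name__ == "__main__":
    G_demo = nx.erdos_renyi_graph(50, 0.1, seed=0)
    k = max(dict(G_demo.degree()).values())  # a degree guaranteed to occur
    print("average clustering at degree %d: %0.3f" % (k, clk(G_demo, k)))
    print("average neighbourhood size within l=2: %0.3f" % ml(G_demo, 2))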
|
StarcoderdataPython
|
337598
|
import unittest
from unittest import TestCase
from pytheons.pyunit.decorators.test import test
class ExampleTest(TestCase):
@test
def something(self):
self.assertEqual(True, False)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
4832782
|
# Generated by Django 2.0 on 2018-04-04 10:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mixnet', '0002_auto_20180216_1617'),
]
operations = [
migrations.AddField(
model_name='mixnet',
name='auth_position',
field=models.PositiveIntegerField(default=0),
),
]
|
StarcoderdataPython
|
12859296
|
from django.shortcuts import render, get_object_or_404
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from products.models import Product
def productList(request, productName):
"""产品的列表页"""
submenu = productName
if productName == 'robot':
productName = '家用机器人'
elif productName == 'monitor':
productName = '智能门锁'
else:
productName = '人脸识别解决方案'
product_list = Product.objects.filter(product_type=productName).order_by('-publish_date')
    # Pagination handling
    # Show 2 items per page
p = Paginator(product_list, 2)
if p.num_pages <= 1:
page_data = ''
else:
        # Get the current page, defaulting to 1
        page = int(request.GET.get('page', 1))
        # Paginate the page numbers
product_list = p.page(page)
left = []
right = []
left_has_more = False
right_has_more = False
first = False
last = False
        total_pages = p.num_pages  # total number of pages
        page_range = p.page_range  # iterable over page numbers
if page == 1:
right = page_range[page:page + 2]
if right[-1] < total_pages - 1:
right_has_more = True
if right[-1] < total_pages:
last = True
elif page == total_pages:
left = page_range[(page - 3) if (page - 3) > 0 else 0:page - 1]
if left[0] > 2:
left_has_more = True
if left[0] > 1:
first = True
else:
left = page_range[(page - 3) if (page - 3) > 0 else 0:page - 1]
right = page_range[page:page + 2]
if left[0] > 2:
left_has_more = True
if left[0] > 1:
first = True
if right[-1] < total_pages - 1:
right_has_more = True
if right[-1] < total_pages:
last = True
page_data = {
'left': left,
'right': right,
'left_has_more': left_has_more,
'right_has_more': right_has_more,
'first': first,
'last': last,
'total_pages': total_pages,
'page': page,
}
context = {
'active_menu': 'products',
'sub_menu': submenu,
'productName': productName,
'productList': product_list,
'pageData': page_data
}
return render(request, 'products/productList.html', context)
def productDetail(request, id):
"""产品的详情页"""
product = get_object_or_404(Product, id=id) # 按id进行查找,没有时返回404
product.product_views += 1 # 浏览数加1
product.save()
return render(request, 'products/productDetail.html', {
'active_menu': 'products',
'product': product,
})
|
StarcoderdataPython
|
1923624
|
<reponame>luposdate/reviz<filename>reviz.py
import argparse
from grobid.grobid import run_grobid
from model.graph_model import run_graph
from views.flow_diagram_view import run_flow
from views.bibliography_view import run_bib
import os
import json
from views.graph_view import view_sugiyama, view_sugiyama_summary
from utils.utils import bib_to_json
import sys
parser = argparse.ArgumentParser()
parser.add_argument("action", help="""
(1) bib2json: convert bib-file with included publications to required format, required if not using parsifal export
(2) grobid: gather citations from included papers using grobid
(3) graph-model: generate json model for the citation graph with nodes and edges
(4) draw: generate pdf of citation graph
(5) draw-summary: include optimalisations for the citation graph
(6) flow: generate flow diagram, only possible if using parsifal export
""", choices=["bib2json", "grobid", "flow", "graph-model", "draw", "draw-summary"])
parser.add_argument("json", help="path for json-file")
parser.add_argument("--bib-file", help="path for input bib-file, will be written to parameter 'json'", type=str, default=None)
parser.add_argument("--pdf", help="destination folder for pdf-files, default: ./pdf-files", default="./pdf-files")
parser.add_argument("--tei", help="destination folder for tei-files, default: ./tei-files", default="./tei-files")
parser.add_argument("--tex", help="destination folder for generated tex-files, default: ./tex-files", default="./tex-files")
parser.add_argument("--bibliography", dest='bib', help="generation of a pdf bibliography for the citation graph", action="store_true")
parser.add_argument("--deviation", help="maximum number of edge deviations allowed for node summarization, default=0", type=float, default=0)
parser.add_argument("--transitivities", help="reduce number of edges by considering transitivities", action="store_true")
parser.add_argument("--transitivities-bold", help="adapt line width of transitive edges", action="store_true")
parser.add_argument("--citation-counts", help="show number of direct and indirect citations for every node", action="store_true")
parser.add_argument("--authors-colored", help="threshold for showing publications with same authors using colors, use a value between 0 and 1", type=float, default=-1 )
parser.add_argument("--with-single-nodes", help="nodes without any edge are displayed in the graph", action="store_true")
parser.add_argument("--minimum-citations", help="only nodes with the given minimum number of citations are displayed", type=float, default=0)
parser.add_argument("--original-bibtex-keys", help="the original bibtex keys are used instead of md5 hashes", action="store_true")
parser.add_argument("--without-dummy-nodes", help="avoid dummy nodes for better placement of nodes for long edges", action="store_true")
parser.add_argument("--dont-show-edge-corrections", help="do not show the list of edge corrections", action="store_true")
parser.add_argument("--y-factor", help="factor for spacing between boxes", type=float, default=1)
parser.add_argument("--without-interactive-queries", help="with this option you are not asked for manual confirmation if our algorithm is not completely certain that a citation match was found", action="store_true")
args = parser.parse_args()
sys.setrecursionlimit(1000000000)
if args.action == "bib2json":
if args.bib_file is None:
        parser.error("argument --bib-file is required for the bib2json action")
bib_to_json(args.json, args.bib_file)
if args.action == "grobid":
if not os.path.exists(args.pdf):
os.makedirs(args.pdf)
if not os.path.exists(args.tei):
os.makedirs(args.tei)
run_grobid(args.json, args.pdf, args.tei)
if args.action == "graph-model":
if not os.path.exists(args.tex):
os.makedirs(args.tex)
run_graph(args.json, args.tei, args.tex, args.original_bibtex_keys, args.without_interactive_queries)
if args.action == "flow":
if not os.path.exists(args.tex):
os.makedirs(args.tex)
run_flow(args.json, args.tex)
if 'draw' in args.action:
with open(os.path.join(args.tex, 'graph-model.json')) as f:
graph = json.load(f)
if args.action == "draw":
view_sugiyama(graph, args.tex, args.with_single_nodes, args.without_dummy_nodes)
if args.action == "draw-summary":
view_sugiyama_summary(graph, args.tex, args.deviation, args.transitivities, args.transitivities_bold,
args.citation_counts, args.authors_colored, args.with_single_nodes, args.minimum_citations, args.without_dummy_nodes, args.dont_show_edge_corrections, args.y_factor)
if args.bib:
run_bib(args.tex)
|
StarcoderdataPython
|
76142
|
import os
import argparse
import pyautogui
import time
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--path", help="absolute path to store screenshot.", default=r"./images")
parser.add_argument("-t", "--type", help="h (in hour) or m (in minutes) or s (in seconds)", default='h')
parser.add_argument("-f", "--frequency", help="frequency for taking screenshot per h/m/s.", default=1, type=int)
args = parser.parse_args()
sec = 0.
if args.type == 'h':
    sec = 60 * 60 / args.frequency
elif args.type == 'm':
    sec = 60 / args.frequency
else:  # 's': screenshots per second
    sec = 1 / args.frequency
if sec < 1.:
    sec = 1.
if os.path.isdir(args.path) != True:
os.mkdir(args.path)
try:
while True:
t = time.localtime()
current_time = time.strftime("%H_%M_%S", t)
file = current_time + ".jpg"
image = pyautogui.screenshot(os.path.join(args.path,file))
print(f"{file} saved successfully.\n")
time.sleep(sec)
except KeyboardInterrupt:
print("End of script by user interrupt")
|
StarcoderdataPython
|
11382764
|
<gh_stars>1-10
#!/usr/bin/env python
# coding: utf-8
# # Methods & Results
# We are going to classify the animals' type with several algorithms, using 16 variables (hair, feathers, eggs, milk, airborne, aquatic, predator, toothed, backbone, breathes, venomous, fins, legs, tail, domestic and catsize) as our predictors. The algorithm behind each classifier will be briefly explained before its implementation.
# In[1]:
{
"tags": [
"hide-cell"
]
}
import pandas as pd
from sklearn.model_selection import train_test_split, GridSearchCV
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.append( '..' )
from src.pre_processing import *
from src.train_and_predict_model import *
from src.line_plot import *
from src.para_optimize import *
from src.std_acc import *
from src.line_plot import *
# The first thing is to import the data. The data set is downloaded from [UCI repository]("https://archive-beta.ics.uci.edu/ml/datasets/zoo"). It is then saved as a csv file in this project repository. Some exploratory data analysis needs to be run before running the actual analyses on the data set. Here is a preview of pre-processed data set:
# In[2]:
{
"tags": [
"hide-input"
]
}
zoo_data = pd.read_csv("../results/csv/head.csv")
zoo_data.columns.values[0] = "index"
zoo_data
# We checked that there are no missing values in the data set, so we can conclude that it is clean according to the data summary generated above. Since most features are binary and categorical, there is no need for normalization or standardization.
# ```{figure-md} f1
# <img src="../results/figures/fig1.png" alt="num" class="bg-primary mb-1" width="800px">
#
# A summary table of the data set
# ```
# As shown in [fig.1](f1), the histograms of each feature are generated. The ones with skewed distributions might be more decisive in the prediction. However, since the data set is relatively small, all the features except `animalName` will be used as predictors. In the next part, we split the data into a training set and a testing set. After that, different classification models will be trained and evaluated.
# ## Classification
# Now we will use the training set to build an accurate model, whereas the testing set is used to report the accuracy of the models. Here is a list of algorithms we will use in the following section:
#
# - K Nearest Neighbor(KNN)
# - Decision Tree
# - Support Vector Machine
# - Logistic Regression
# To train and evaluate each model, we split the dataset into training and testing sets: 80% of the data is used to train the models, and the remaining 20% to test them (a sketch of this split follows below).
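# In[ ]:
# A minimal sketch of the 80/20 split described above. `X_demo` and `y_demo`
# are hypothetical stand-ins for the pre-processed predictors and class
# labels; the real notebook splits the zoo data instead.
X_demo = np.arange(80).reshape(40, 2)    # 40 observations, 2 features
y_demo = np.array([0, 1] * 20)           # two balanced classes for illustration
X_train, X_test, y_train, y_test = train_test_split(
    X_demo, y_demo, test_size=0.2, random_state=123)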
# ### KNN
# KNN captures the idea of similarity (sometimes called distance, proximity, or closeness) with some basic mathematics: in terms of geometry we can always calculate the distance between points on a graph. Using KNN we group similar points together and predict the target from our feature variables (x).
#
# First, we train the model for different values of K and find the best K value.
# Then we plot the accuracy for each K value.
# ```{figure-md} f2
# <img src="../results/figures/k_accuracy.png" alt="num" class="bg-primary mb-1" width="500px">
#
# A plot reveals the relationship between K and corresponding accuracy
# ```
# As shown in [fig.2](f2), smaller K values provide higher accuracy. To find the best K value, we tuned the hyperparameter using the GridSearch algorithm. After tuning, the best K value is 1 (a sketch of this search follows below).
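# In[ ]:
# A minimal sketch of the K search described above, reusing the GridSearchCV
# import from the top of this notebook (hypothetical `X_train`/`y_train`
# from the earlier split sketch).
from sklearn.neighbors import KNeighborsClassifier
knn_grid = GridSearchCV(
    KNeighborsClassifier(),
    param_grid={"n_neighbors": list(range(1, 11))},
    cv=5)
knn_grid.fit(X_train, y_train)
print(knn_grid.best_params_)    # the notebook reports K=1 on the zoo data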
# ### KNN final model & Evaluation
# After fitting the model using `K=1`, we evaluate the KNN model by Cross Validation and calculating the precision, recall, f1-score and support.
# KNN Cross Validation Result:
# In[3]:
{
"tags": [
"hide-input"
]
}
knn_cross_validate_result = pd.read_csv("../results/csv/knn_cross_validate_result.csv")
knn_cross_validate_result.columns=["criteria", "score"]
knn_cross_validate_result
# KNN Classification Report:
# In[4]:
{
"tags": [
"hide-input"
]
}
knn_classification_report= pd.read_csv("../results/csv/knn_classification_report.csv")
knn_classification_report.columns.values[0]="index"
knn_classification_report
# ### Decision Tree
# A decision tree is a decision support tool that uses a tree-like model of decisions and their possible consequences, including chance event outcomes, resource costs, and utility. The goal of using a Decision Tree is to create a training model that can be used to predict the class or value of the target variable by learning simple decision rules inferred from prior data (training data).
# ```{figure-md} f3
# <img src="../results/figures/dt_accuracy.png" alt="num" class="bg-primary mb-1" width="500px">
#
# A plot reveals the relationship between depth and corresponding accuracy
# ```
# As shown in [fig.3](f3), the best depth of the Decision Tree is small. After tuning the hyperparameter and comparing accuracies, we can confirm that the best depth is 5 (a sketch of this search follows below).
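# In[ ]:
# A minimal sketch of the depth search described above (hypothetical
# `X_train`/`y_train` as before).
from sklearn.tree import DecisionTreeClassifier
dt_grid = GridSearchCV(
    DecisionTreeClassifier(random_state=123),
    param_grid={"max_depth": list(range(1, 11))},
    cv=5)
dt_grid.fit(X_train, y_train)
print(dt_grid.best_params_)     # the notebook reports a best depth of 5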
# ### Decision Tree final model & evaluation
# After training the model, we obtain the Cross Validation score, as well as the precision, recall, f1-score and support.
# DT Cross Validation Result:
# In[9]:
{
"tags": [
"hide-input"
]
}
dt_cross_validate_result = pd.read_csv("../results/csv/dt_cross_validate_result.csv")
dt_cross_validate_result.columns=["criteria", "score"]
dt_cross_validate_result
# DT Classification Report:
# In[11]:
{
"tags": [
"hide-input"
]
}
dt_classification_report = pd.read_csv("../results/csv/dt_classification_report.csv")
dt_classification_report.columns.values[0]="index"
dt_classification_report
# ### Support Vector Machine
# SVM or Support Vector Machine is a linear model for classification and regression problems. It can solve linear and non-linear problems and work well for many practical problems. The idea of SVM is simple: The algorithm creates a line or a hyperplane which separates the data into classes{cite:p}`towards-dsci`.
#
# The final SVM is retrained on the split data for better training and prediction. An SVM evaluation as well as the final model is provided below (a fitting sketch follows).
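# In[ ]:
# A minimal sketch of fitting and scoring an SVM classifier (hypothetical
# `X_train`/`y_train`/`X_test`/`y_test` from the earlier split sketch).
from sklearn.svm import SVC
svm_model = SVC(kernel="rbf").fit(X_train, y_train)
print(svm_model.score(X_test, y_test))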
# ### SVM training model Jaccard Score, final model and evaluation
# SVM Classification Report:
# In[7]:
{
"tags": [
"hide-input"
]
}
svm_classification_report= pd.read_csv("../results/csv/svm_classification_report.csv")
svm_classification_report.columns.values[0]="index"
svm_classification_report
# ### Logistic Regression
# Logistic Regression is a "supervised machine learning" algorithm that can be used to model the probability of a certain class or event. It is used when the data is linearly separable and the outcome is binary or dichotomous in nature; that means logistic regression is usually used for binary classification problems{cite:p}`ibm-dsci`. (A fitting sketch follows below.)
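# In[ ]:
# A minimal sketch of fitting a logistic regression classifier; note that
# scikit-learn's implementation also handles multi-class targets such as the
# seven zoo classes (hypothetical `X_train`/`y_train` as before).
from sklearn.linear_model import LogisticRegression
lr_model = LogisticRegression(max_iter=1000).fit(X_train, y_train)
print(lr_model.score(X_test, y_test))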
# ### Logistic Regression training model Jaccard Score, final model and evaluation
#
# LR Classification Report:
# In[1]:
{
"tags": [
"hide-input"
]
}
lr_classification_report= pd.read_csv("../results/csv/lr_classification_report.csv")
lr_classification_report.columns.values[0]="index"
lr_classification_report
# In[ ]:
|
StarcoderdataPython
|
4820939
|
<gh_stars>1-10
from natsbeat import BaseTest
import os
class Test(BaseTest):
def test_base(self):
"""
Basic test with exiting Natsbeat normally
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*"
)
natsbeat_proc = self.start_beat()
self.wait_until(lambda: self.log_contains("natsbeat is running"))
exit_code = natsbeat_proc.kill_and_wait()
assert exit_code == 0
|
StarcoderdataPython
|
3266685
|
<filename>hooks/check_missing_requirements.py<gh_stars>1-10
"""Checks to see if the package requirements are all present in the current
python environment.
"""
import subprocess
import sys
from pathlib import Path
from typing import List
import requirements
from .utils import Hook
def _parse_package_name(name: str) -> str:
"""
Force lower case and replace underscore with dash to compare environment
packages (see https://www.python.org/dev/peps/pep-0426/#name)
Args:
name: Unformatted package name
Returns:
Formatted package name
"""
return name.lower().replace("_", "-")
def _get_installed_packages() -> List[str]:
"""
Get a formatted list of packages installed in the current environment as
returned by ``pip freeze``
Returns:
List of formatted names of installed packages
"""
return [
_parse_package_name(req.name)
for req in requirements.parse(
subprocess.check_output(
[sys.executable, "-m", "pip", "list", "--format", "freeze"]
).decode()
)
]
def _get_required_packages(filepath: str) -> List[str]:
"""
Get a formatted list of packages from a pip requirements file.
Note:
This filters out ``None`` values, which may occur if the requirements
file includes an install direct from a git repo without the package name
in a ``#egg=`` ending.
Args:
        filepath: Path to requirements txt file
Returns:
List of formatted names of installed packages
"""
return [
_parse_package_name(req.name)
for req in requirements.parse(Path(filepath).read_text())
if req.name
]
class CheckMissingRequirements(Hook): # pylint: disable=too-few-public-methods
"""Hook to check all requirements are installed within the current dev
environment."""
def run(self) -> int:
"""
Checks to see if the package requirements are all present in the current
environment.
Note:
this hook should be run before ``isort`` in order to prevent
unwanted import sorting based on an incorrect environment spec.
"""
# assemble dict containing lists of any unmet requirements against each file:
missing_requirements = {
filepath: list(
set(_get_required_packages(filepath)) - set(_get_installed_packages())
)
for filepath in self.args.filenames
}
        if not any(missing_requirements.values()):
return 0
# assemble output string of all unmet dependencies:
output_str = "\n".join(
[
f" - {file}: {errors}"
for file, errors in missing_requirements.items()
if errors
]
)
print(
f"These requirements are missing from your current environment:"
f"\n{output_str}\n"
f"\nPlease install the above or rebuild your environment to prevent "
f"CI issues"
)
return 1
def main():
"""Hook entry point"""
return CheckMissingRequirements().run()
if __name__ == "__main__":
sys.exit(main())
|
StarcoderdataPython
|
5150846
|
<gh_stars>1-10
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import Any, Callable, List, Optional, Union
import torch
from torch import Tensor
from torchmetrics.metric import Metric
from torchmetrics.utilities.data import dim_zero_cat
class BaseAggregator(Metric):
"""Base class for aggregation metrics.
Args:
fn: string specifying the reduction function
default_value: default tensor value to use for the metric state
nan_strategy: options:
            - ``'error'``: if any `nan` values are encountered, a RuntimeError is raised
            - ``'warn'``: if any `nan` values are encountered, a warning is issued and computation continues
            - ``'ignore'``: all `nan` values are silently removed
            - a float: if a float is provided, any `nan` values are imputed with this value
compute_on_step:
Forward only calls ``update()`` and returns None if this is
set to False. default: True
dist_sync_on_step:
Synchronize metric state across processes at each ``forward()``
before returning the value at the step.
process_group:
Specify the process group on which synchronization is called.
default: None (which selects the entire world)
dist_sync_fn:
Callback that performs the allgather operation on the metric state.
When `None`, DDP will be used to perform the allgather.
Raises:
ValueError:
If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float
"""
value: Tensor
is_differentiable = None
higher_is_better = None
def __init__(
self,
fn: Union[Callable, str],
default_value: Union[Tensor, List],
nan_strategy: Union[str, float] = "error",
compute_on_step: bool = True,
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
dist_sync_fn: Callable = None,
):
super().__init__(
compute_on_step=compute_on_step,
dist_sync_on_step=dist_sync_on_step,
process_group=process_group,
dist_sync_fn=dist_sync_fn,
)
allowed_nan_strategy = ("error", "warn", "ignore")
if nan_strategy not in allowed_nan_strategy and not isinstance(nan_strategy, float):
raise ValueError(
f"Arg `nan_strategy` should either be a float or one of {allowed_nan_strategy}"
f" but got {nan_strategy}."
)
self.nan_strategy = nan_strategy
self.add_state("value", default=default_value, dist_reduce_fx=fn)
def _cast_and_nan_check_input(self, x: Union[float, Tensor]) -> Tensor:
"""Converts input x to a tensor if not already and afterwards checks for nans that either give an error,
warning or just ignored."""
if not isinstance(x, Tensor):
x = torch.as_tensor(x, dtype=torch.float32, device=self.device)
nans = torch.isnan(x)
if any(nans.flatten()):
if self.nan_strategy == "error":
raise RuntimeError("Encounted `nan` values in tensor")
if self.nan_strategy == "warn":
warnings.warn("Encounted `nan` values in tensor. Will be removed.", UserWarning)
x = x[~nans]
elif self.nan_strategy == "ignore":
x = x[~nans]
else:
x[nans] = self.nan_strategy
return x.float()
def update(self, value: Union[float, Tensor]) -> None: # type: ignore
"""Overwrite in child class."""
pass
def compute(self) -> Tensor:
"""Compute the aggregated value."""
return self.value.squeeze() if isinstance(self.value, Tensor) else self.value
class MaxMetric(BaseAggregator):
"""Aggregate a stream of value into their maximum value.
Args:
nan_strategy: options:
            - ``'error'``: if any `nan` values are encountered, a RuntimeError is raised
            - ``'warn'``: if any `nan` values are encountered, a warning is issued and computation continues
            - ``'ignore'``: all `nan` values are silently removed
            - a float: if a float is provided, any `nan` values are imputed with this value
compute_on_step:
Forward only calls ``update()`` and returns None if this is
set to False. default: True
dist_sync_on_step:
Synchronize metric state across processes at each ``forward()``
before returning the value at the step.
process_group:
Specify the process group on which synchronization is called.
default: None (which selects the entire world)
dist_sync_fn:
Callback that performs the allgather operation on the metric state.
When `None`, DDP will be used to perform the allgather.
Raises:
ValueError:
If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float
Example:
>>> from torchmetrics import MaxMetric
>>> metric = MaxMetric()
>>> metric.update(1)
>>> metric.update(torch.tensor([2, 3]))
>>> metric.compute()
tensor(3.)
"""
def __init__(
self,
nan_strategy: Union[str, float] = "warn",
compute_on_step: bool = True,
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
dist_sync_fn: Callable = None,
):
super().__init__(
"max",
-torch.tensor(float("inf")),
nan_strategy,
compute_on_step,
dist_sync_on_step,
process_group,
dist_sync_fn,
)
def update(self, value: Union[float, Tensor]) -> None: # type: ignore
"""Update state with data.
Args:
value: Either a float or tensor containing data. Additional tensor
dimensions will be flattened
"""
value = self._cast_and_nan_check_input(value)
        if value.numel():  # make sure tensor not empty (a zero value is still valid data)
self.value = torch.max(self.value, torch.max(value))
class MinMetric(BaseAggregator):
"""Aggregate a stream of value into their minimum value.
Args:
nan_strategy: options:
            - ``'error'``: if any `nan` values are encountered, a RuntimeError is raised
            - ``'warn'``: if any `nan` values are encountered, a warning is issued and computation continues
            - ``'ignore'``: all `nan` values are silently removed
            - a float: if a float is provided, any `nan` values are imputed with this value
compute_on_step:
Forward only calls ``update()`` and returns None if this is
set to False. default: True
dist_sync_on_step:
Synchronize metric state across processes at each ``forward()``
before returning the value at the step.
process_group:
Specify the process group on which synchronization is called.
default: None (which selects the entire world)
dist_sync_fn:
Callback that performs the allgather operation on the metric state.
When `None`, DDP will be used to perform the allgather.
Raises:
ValueError:
If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float
Example:
>>> from torchmetrics import MinMetric
>>> metric = MinMetric()
>>> metric.update(1)
>>> metric.update(torch.tensor([2, 3]))
>>> metric.compute()
tensor(1.)
"""
def __init__(
self,
nan_strategy: Union[str, float] = "warn",
compute_on_step: bool = True,
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
dist_sync_fn: Callable = None,
):
super().__init__(
"min",
torch.tensor(float("inf")),
nan_strategy,
compute_on_step,
dist_sync_on_step,
process_group,
dist_sync_fn,
)
def update(self, value: Union[float, Tensor]) -> None: # type: ignore
"""Update state with data.
Args:
value: Either a float or tensor containing data. Additional tensor
dimensions will be flattened
"""
value = self._cast_and_nan_check_input(value)
        if value.numel():  # make sure tensor not empty (a zero value is still valid data)
self.value = torch.min(self.value, torch.min(value))
class SumMetric(BaseAggregator):
"""Aggregate a stream of value into their sum.
Args:
nan_strategy: options:
            - ``'error'``: if any `nan` values are encountered, a RuntimeError is raised
            - ``'warn'``: if any `nan` values are encountered, a warning is issued and computation continues
            - ``'ignore'``: all `nan` values are silently removed
            - a float: if a float is provided, any `nan` values are imputed with this value
compute_on_step:
Forward only calls ``update()`` and returns None if this is
set to False. default: True
dist_sync_on_step:
Synchronize metric state across processes at each ``forward()``
before returning the value at the step.
process_group:
Specify the process group on which synchronization is called.
default: None (which selects the entire world)
dist_sync_fn:
Callback that performs the allgather operation on the metric state.
When `None`, DDP will be used to perform the allgather.
Raises:
ValueError:
If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float
Example:
>>> from torchmetrics import SumMetric
>>> metric = SumMetric()
>>> metric.update(1)
>>> metric.update(torch.tensor([2, 3]))
>>> metric.compute()
tensor(6.)
"""
def __init__(
self,
nan_strategy: Union[str, float] = "warn",
compute_on_step: bool = True,
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
dist_sync_fn: Callable = None,
):
super().__init__(
"sum", torch.zeros(1), nan_strategy, compute_on_step, dist_sync_on_step, process_group, dist_sync_fn
)
def update(self, value: Union[float, Tensor]) -> None: # type: ignore
"""Update state with data.
Args:
value: Either a float or tensor containing data. Additional tensor
dimensions will be flattened
"""
value = self._cast_and_nan_check_input(value)
self.value += value.sum()
class CatMetric(BaseAggregator):
"""Concatenate a stream of values.
Args:
nan_strategy: options:
            - ``'error'``: if any `nan` values are encountered, a RuntimeError is raised
            - ``'warn'``: if any `nan` values are encountered, a warning is issued and computation continues
            - ``'ignore'``: all `nan` values are silently removed
            - a float: if a float is provided, any `nan` values are imputed with this value
compute_on_step:
Forward only calls ``update()`` and returns None if this is
set to False. default: True
dist_sync_on_step:
Synchronize metric state across processes at each ``forward()``
before returning the value at the step.
process_group:
Specify the process group on which synchronization is called.
default: None (which selects the entire world)
dist_sync_fn:
Callback that performs the allgather operation on the metric state.
When `None`, DDP will be used to perform the allgather.
Raises:
ValueError:
If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float
Example:
>>> from torchmetrics import CatMetric
>>> metric = CatMetric()
>>> metric.update(1)
>>> metric.update(torch.tensor([2, 3]))
>>> metric.compute()
tensor([1., 2., 3.])
"""
def __init__(
self,
nan_strategy: Union[str, float] = "warn",
compute_on_step: bool = True,
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
dist_sync_fn: Callable = None,
):
super().__init__("cat", [], nan_strategy, compute_on_step, dist_sync_on_step, process_group, dist_sync_fn)
def update(self, value: Union[float, Tensor]) -> None: # type: ignore
"""Update state with data.
Args:
value: Either a float or tensor containing data. Additional tensor
dimensions will be flattened
"""
value = self._cast_and_nan_check_input(value)
        if value.numel():  # make sure tensor not empty (a zero value is still valid data)
self.value.append(value)
def compute(self) -> Tensor:
"""Compute the aggregated value."""
if isinstance(self.value, list) and self.value:
return dim_zero_cat(self.value)
return self.value
class MeanMetric(BaseAggregator):
"""Aggregate a stream of value into their mean value.
Args:
nan_strategy: options:
            - ``'error'``: if any `nan` values are encountered, a RuntimeError is raised
            - ``'warn'``: if any `nan` values are encountered, a warning is issued and computation continues
            - ``'ignore'``: all `nan` values are silently removed
            - a float: if a float is provided, any `nan` values are imputed with this value
compute_on_step:
Forward only calls ``update()`` and returns None if this is
set to False. default: True
dist_sync_on_step:
Synchronize metric state across processes at each ``forward()``
before returning the value at the step.
process_group:
Specify the process group on which synchronization is called.
default: None (which selects the entire world)
dist_sync_fn:
Callback that performs the allgather operation on the metric state.
When `None`, DDP will be used to perform the allgather.
Raises:
ValueError:
If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float
Example:
>>> from torchmetrics import MeanMetric
>>> metric = MeanMetric()
>>> metric.update(1)
>>> metric.update(torch.tensor([2, 3]))
>>> metric.compute()
tensor([2.])
"""
def __init__(
self,
nan_strategy: Union[str, float] = "warn",
compute_on_step: bool = True,
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
dist_sync_fn: Callable = None,
):
super().__init__(
"sum", torch.zeros(1), nan_strategy, compute_on_step, dist_sync_on_step, process_group, dist_sync_fn
)
self.add_state("weight", default=torch.zeros(1), dist_reduce_fx="sum")
def update(self, value: Union[float, Tensor], weight: Union[float, Tensor] = 1.0) -> None: # type: ignore
"""Update state with data.
Args:
value: Either a float or tensor containing data. Additional tensor
dimensions will be flattened
weight: Either a float or tensor containing weights for calculating
the average. Shape of weight should be able to broadcast with
                the shape of `value`. Defaults to `1.0`, corresponding to a
                simple arithmetic average.
"""
value = self._cast_and_nan_check_input(value)
weight = self._cast_and_nan_check_input(weight)
# broadcast weight to values shape
if not hasattr(torch, "broadcast_to"):
if weight.shape == ():
weight = torch.ones_like(value) * weight
if weight.shape != value.shape:
raise ValueError("Broadcasting not supported on PyTorch <1.8")
else:
weight = torch.broadcast_to(weight, value.shape)
self.value += (value * weight).sum()
self.weight += weight.sum()
def compute(self) -> Tensor:
"""Compute the aggregated value."""
return self.value / self.weight
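
# A minimal usage sketch (not part of the upstream library source): a float
# ``nan_strategy`` imputes NaNs before aggregation.
if __name__ == "__main__":
    metric = MeanMetric(nan_strategy=0.0)            # NaNs are replaced with 0.0
    metric.update(torch.tensor([1.0, float("nan"), 3.0]))
    print(metric.compute())                          # (1.0 + 0.0 + 3.0) / 3 ~= 1.33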
|
StarcoderdataPython
|
11297870
|
<filename>space_shooter.py
import pygame as pg
import random
import math
import os
WIDTH,HEIGHT=600,400
pg.init()
screen=pg.display.set_mode((WIDTH,HEIGHT),0,32)
def write(msg,color=(255,255,255)):
font=pg.font.SysFont("none",15)
text=font.render(msg,True,color)
text.convert()
return text
clock=pg.time.Clock()
FPS=60
running=True
screen.fill((50,50,50))
class Enemies(pg.sprite.Sprite):
r=5
targetpos=(WIDTH/2,HEIGHT/2)
def __init__(self,pos=[0,0],area=screen.get_rect(),):
pg.sprite.Sprite.__init__(self,self.groups)
self.image=pg.Surface((Enemies.r*2,Enemies.r*2))
pg.draw.circle(self.image,(200,0,0),(Enemies.r,Enemies.r),Enemies.r)
self.image.set_colorkey((0,0,0))
self.image.convert()
self.radius=Enemies.r
self.rect=self.image.get_rect()
self.pos=pos.copy()
self.area=area
def update(self,time):
if self.area.contains(self.rect):
x1=Enemies.targetpos[0]
y1=Enemies.targetpos[1]
x2=self.pos[0]
y2=self.pos[1]
d=50
self.dx=((x1-x2)/((x1-x2)**2+(y1-y2)**2)**0.5)*d*time
self.dy=((y1-y2)/((x1-x2)**2+(y1-y2)**2)**0.5)*d*time
self.pos[0]+=self.dx
self.rect.centerx=self.pos[0]
self.pos[1]+=self.dy
self.rect.centery=self.pos[1]
else:
self.kill()
Enemies.r=7
class Hero(pg.sprite.Sprite):
r=5
def __init__(self,area=screen.get_rect()):
pg.sprite.Sprite.__init__(self,self.groups)
self.image=pg.Surface((self.r*2,self.r*2))
pg.draw.circle(self.image,(0,0,200),(self.r,self.r),self.r)
self.image.set_colorkey((0,0,0))
self.image.convert()
self.rect=self.image.get_rect()
self.radius=self.r
self.area=area
self.pos=[20,20]
self.speed=250
def update(self,time):
key=pg.key.get_pressed()
dx,dy=0,0
if key[pg.K_w]:
dy=-self.speed*time
if key[pg.K_s]:
dy=self.speed*time
if key[pg.K_d]:
dx=self.speed*time
if key[pg.K_a]:
dx=-self.speed*time
if dx+self.rect.left>self.area.left and dx+self.rect.right<self.area.right:
self.pos[0]+=dx
if dy+self.rect.top>self.area.top and dy+self.rect.bottom<self.area.bottom:
self.pos[1]+=dy
self.rect.centerx=self.pos[0]
self.rect.centery=self.pos[1]
class Bullets(pg.sprite.Sprite):
r=2
def __init__(self,pos=[0,0],targetpos=[0,0],area=screen.get_rect()):
pg.sprite.Sprite.__init__(self,self.groups)
self.image=pg.Surface((self.r*2,self.r*2))
pg.draw.circle(self.image,(210,210,210),(self.r,self.r),self.r)
self.image.set_colorkey((0,0,0))
self.image.convert()
self.radius=self.r
self.rect=self.image.get_rect()
self.pos=pos.copy()
self.area=area
self.targetpos=targetpos
self.angle=math.atan2((self.targetpos[1]-self.pos[1]),(self.targetpos[0]-self.pos[0]))
self.v=700
def update(self,time):
if self.area.contains(self.rect):
self.pos[0]+=self.v*math.cos(self.angle)*time
self.pos[1]+=self.v*math.sin(self.angle)*time
self.rect.centerx=self.pos[0]
self.rect.centery=self.pos[1]
else:
self.kill()
allgroups=pg.sprite.Group()
enemygroup=pg.sprite.Group()
bulletgroup=pg.sprite.Group()
Enemies.groups=allgroups,enemygroup
Hero.groups=allgroups
Bullets.groups=allgroups,bulletgroup
background=pg.Surface(screen.get_rect()[2:])
background.fill((50,50,50))
screen.blit(background,(0,0))
frequency=1
bulletfrequency=0.2
f=0
bf=0
maxenemynumber=100
score=0
bullettarget=[]
#spawnpoints=([2*Enemies.r,2*Enemies.r],[WIDTH-2*Enemies.r,2*Enemies.r],[2*Enemies.r,HEIGHT-2*Enemies.r],[WIDTH-2*Enemies.r,HEIGHT-2*Enemies.r])
spawnpoints=([WIDTH/2-2*Enemies.r,HEIGHT/2-2*Enemies.r],[WIDTH/2+2*Enemies.r,HEIGHT/2-2*Enemies.r],[WIDTH/2-2*Enemies.r,HEIGHT/2+2*Enemies.r],[WIDTH/2+2*Enemies.r,HEIGHT/2+2*Enemies.r])
hero=Hero()
allgroups.add(hero)
while running:
screen.fill((50,50,50))
time=clock.tick(FPS)/1000.0
#print(1/time)
for event in pg.event.get():
if event.type==pg.QUIT:
running=False
if event.type==pg.KEYDOWN:
if event.key==pg.K_ESCAPE:
running=False
f+=time
bf+=time
if f>frequency and len(enemygroup)<=maxenemynumber:
for s in spawnpoints:
enemy=Enemies(s)
enemygroup.add(enemy)
allgroups.add(enemy)
f=0
Enemies.targetpos=hero.pos
if bf>bulletfrequency and pg.mouse.get_pressed()[0]:
mousepos=pg.mouse.get_pos()
bullet=Bullets(hero.pos,mousepos)
bulletgroup.add(bullet)
allgroups.add(bullet)
bf=0
    collision=pg.sprite.spritecollide(hero,enemygroup,True,pg.sprite.collide_circle)
    if collision:
        # hero was hit: the colliding enemies are removed, but no damage or
        # score penalty is applied yet
        pass
for bullet in bulletgroup.sprites():
if pg.sprite.spritecollide(bullet,enemygroup,True,pg.sprite.collide_circle):
score+=1
allgroups.clear(screen,background)
allgroups.update(time)
    group=enemygroup.sprites()
    i=0
    # resolve elastic collisions between pairs of enemies so they push apart
    while i <len(group):
        enemy=group[i]
        tgroup=group.copy()
        tgroup.remove(enemy)
        for enemy2 in tgroup:
            xDistance=enemy.pos[0]-enemy2.pos[0]
            yDistance=enemy.pos[1]-enemy2.pos[1]
            # circles touch when the centre distance is within the sum of radii
            if enemy.radius+enemy2.radius>=(xDistance**2+yDistance**2)**0.5:
                xVelocity=enemy2.dx-enemy.dx
                yVelocity=enemy2.dy-enemy.dy
                dotProduct=xDistance*xVelocity+yDistance*yVelocity
                # only respond if the enemies are moving towards each other
                if(dotProduct>0):
                    collisionScale=dotProduct/(xDistance**2+yDistance**2)
                    xCollision=xDistance*collisionScale
                    yCollision=yDistance*collisionScale
                    # exchange velocity components along the collision axis
                    enemy.dx+=xCollision-enemy.dx
                    enemy.dy+=yCollision-enemy.dy
                    enemy2.dx-=xCollision+enemy2.dx
                    enemy2.dy-=yCollision+enemy2.dy
                    enemy.pos[0]+=enemy.dx
                    enemy.pos[1]+=enemy.dy
                    enemy2.pos[0]+=enemy2.dx
                    enemy2.pos[1]+=enemy2.dy
        i+=1
allgroups.draw(screen)
text=write("SCORE: "+str(score))
screen.blit(text,(0,0))
pg.display.update(text.get_rect())
pg.display.flip()
pg.quit()
|
StarcoderdataPython
|
8098992
|
<filename>pxtrade/compliance/base.py
"""
Before trades can be sent for execution they need to pass
any defined compliance rules. These rules may include position
limits, restricted securities, ...
Here compliance rules have been arranged using a composite pattern
to check portfolio positions should the trade be fully executed.
"""
from abc import ABC, abstractmethod
from .. import assets
class ComplianceRule(ABC):
@abstractmethod
def passes(self, portfolio) -> bool:
raise NotImplementedError # pragma: no cover
class Compliance(ComplianceRule):
"""All compliance rule components have to pass for
compliance as a whole to pass.
"""
def __init__(self):
self._rules = set()
def add_rule(self, rule):
if not isinstance(rule, ComplianceRule):
raise TypeError("Expecting Compliance Rule instance.")
self._rules.add(rule)
return self
def remove_rule(self, rule):
self._rules.discard(rule)
return self
def passes(self, portfolio):
if not isinstance(portfolio, assets.Portfolio):
raise TypeError("Expecting Portfolio instance.")
for rule in self._rules:
if not rule.passes(portfolio):
return False
return True
def __len__(self):
return len(self._rules)
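
class RestrictedSecurities(ComplianceRule):
    """A minimal sketch of a concrete rule (hypothetical, not part of this
    package): compliance fails if the portfolio holds any security on the
    restricted list. The ``portfolio.positions`` accessor is an assumption.
    """

    def __init__(self, restricted_codes):
        self._restricted = set(restricted_codes)

    def passes(self, portfolio):
        return not self._restricted.intersection(portfolio.positions)

# Rules compose through the Compliance container, e.g.:
#   compliance = Compliance().add_rule(RestrictedSecurities({"XYZ"}))
#   compliance.passes(portfolio)  # True only when every rule passes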
|
StarcoderdataPython
|
1634342
|
<reponame>romulus97/HYDROWIRES
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 31 14:31:32 2018
@author: YSu
"""
from __future__ import division
from scipy.optimize import differential_evolution
import pandas as pd
import numpy as np
from datetime import datetime
# ORCA flow data
df_flows = pd.read_excel('reservoir_inflows.xlsx',sheet_name='Hist_flows',header=0)
sites = list(df_flows.loc[:,'Oroville':])
# WECC hydro time series
df_hydro = pd.read_excel('reservoir_inflows.xlsx',sheet_name='WECC_daily',header=0)
dams = list(df_hydro.loc[:,'Balch 1':])
df_useful_data = pd.read_excel('reservoir_inflows.xlsx',sheet_name='Full_flows',header=0)
#Useful simulated ORCA sites name
ORCA_sites = list(df_useful_data)
ORCA_sites = ORCA_sites[3:]
O_site=[]
Data_needed=[]
# With all of the outflow data available we use range(0,24); with only the inflow data, range(0,15) would be used instead.
for i in range(0,24):
O_site=np.append(O_site,str(ORCA_sites[i]))
Data_needed=np.append(O_site,['datetime'])
Validation_Year=[2005,2010,2011]
# ORCA flow data
df_flows = pd.read_excel('reservoir_inflows.xlsx',sheet_name='Hist_flows',header=0)
sites = list(df_flows.loc[:,'Oroville':])
Hist_flows = pd.read_excel('cord-data.xlsx',header=0)
#Add year colum to the dataframe
Year=[]
Month=[]
Day=[]
for i in range(0,len(Hist_flows)):
datetime_object=datetime.strftime(Hist_flows.loc[i]['datetime'],'%Y-%m-%d %H:%M:%S')
Date=datetime.strptime(datetime_object,'%Y-%m-%d %H:%M:%S')
Year=np.append(Year,Date.year)
Month=np.append(Month,Date.month)
Day=np.append(Day,Date.day)
Hist_flows['Year']=Year
Hist_flows['Month']=Month
Hist_flows['Day']=Day
His_selection = Hist_flows.loc[Hist_flows.loc[:,'Year']==2001]
flows=[]
for y in Validation_Year:
His_selection_2 = Hist_flows.loc[Hist_flows.loc[:,'Year']==y]
His_selection=His_selection.append(His_selection_2)
for z in [3,7,8,9]:
#for z in [9]:
#
s = str(dams[z])
k = str(df_hydro.loc[1][s])
I_O=str(df_hydro.loc[2][s])
if k =='Oroville' and I_O =='Inflows':
site_name=['ORO_fnf']
elif k =='Oroville' and I_O =='Outflows':
site_name=['ORO_otf']
elif k =='Pine Flat' and I_O =='Inflows':
site_name=['PFT_fnf']
elif k =='Pine Flat' and I_O =='Outflows':
site_name=['PFT_otf']
elif k =='Shasta' and I_O =='Inflows':
site_name=['SHA_fnf']
elif k =='Shasta' and I_O =='Outflows':
site_name=['SHA_otf']
elif k =='New Melones' and I_O =='Inflows':
site_name=['NML_fnf']
elif k =='New Melones' and I_O =='Outflows':
site_name=['NML_otf']
elif k =='Pardee' and I_O =='Inflows':
site_name=['PAR_fnf']
elif k =='Pardee' and I_O =='Outflows':
site_name=['PAR_otf']
elif k =='<NAME>' and I_O =='Inflows':
site_name=['EXC_fnf']
elif k =='New Exchequer' and I_O =='Outflows':
site_name=['EXC_otf']
elif k =='Folsom' and I_O =='Inflows':
site_name=['FOL_fnf']
elif k =='Folsom' and I_O =='Outflows':
site_name=['FOL_otf']
elif k =='<NAME>' and I_O =='Inflows':
site_name=['DNP_fnf']
elif k =='<NAME>' and I_O =='Outflows':
site_name=['DNP_otf']
elif k =='Millerton' and I_O =='Inflows':
site_name=['MIL_fnf']
elif k =='Millerton' and I_O =='Outflows':
site_name=['MIL_otf']
elif k =='Isabella' and I_O =='Inflows':
site_name=['ISB_fnf']
elif k =='Isabella' and I_O =='Outflows':
site_name=['ISB_otf']
elif k =='Yuba' and I_O =='Inflows':
site_name=['YRS_fnf']
elif k =='Yuba' and I_O =='Outflows':
site_name=['YRS_otf']
else:
print(s)
if k in sites:
d = []
# pull relevant hydropower and flow data
h = df_hydro.loc[:,s]
flow_ts = His_selection.loc[:,site_name].values
hydro_ts = h.loc[3:].values
weeks = int(np.floor(len(hydro_ts)/7))
years = int(len(hydro_ts)/365)
# upper bound of hydropower production
upper = np.ceil(np.max(hydro_ts))
# function definition
def res_fit(params,optimizing=True):
est_power = []
refill_1_date,evac_date,peak_end,refill_2_date,storage,power_cap,starting,ending= params
#Not really assuming storage to be 0.
#This assumes there is a set starting storage level
# iterate through every week of the year
for day in range(0,365):
# available hydro production based on water availability
avail_power = flow_ts[year*365+day]*eff
# if it's during first refill
if day < refill_1_date:
gen =starting- ((starting-min_power)/refill_1_date)*day
storage = avail_power-gen
# if it maintains the water
elif day >= refill_1_date and day < evac_date:
gen=min_power
storage= storage + (avail_power- gen)
# if it's in evac period 2
elif day >= evac_date and day < peak_end:
gen= min_power+ ((power_cap-min_power)/(peak_end-evac_date)* (day- evac_date))
if gen > power_cap:
gen=power_cap
storage= storage + (avail_power- gen)
else:
storage= storage + (avail_power- gen)
                # if it's in the peak period (hold at capacity)
elif day >= peak_end and day < refill_2_date:
gen= power_cap
if gen > power_cap:
gen=power_cap
storage= storage + (avail_power- gen)
else:
storage= storage + (avail_power- gen)
elif day >=refill_2_date :
gen = power_cap-((power_cap-ending)/(365- refill_2_date)* (day-refill_2_date))
est_power = np.append(est_power,gen)
if optimizing:
rmse = np.sqrt(((est_power-hydro_ts[year*365:year*365+365])**2).mean())
return rmse
else:
return est_power
# years: 2001, 2005, 2010, 2011
est_power2=[]
Save_Results=[]
for year in range(0,4):
# identify date of maximum inflow at ORCA site
annual = flow_ts[year*365:year*365+365]
annual_power = hydro_ts[year*365:year*365+365]
eff=np.sum(annual_power)/np.sum(annual)
max_power = np.max(annual_power)
min_power =np.min(annual_power)
max_flow = np.max(annual[105:260])
L = list(annual)
peak_flow = L.index(max_flow)
d = np.append(d,peak_flow)
best_rmse = 100000
#
for i in range(0,100):
# optimize with DE
result = differential_evolution(res_fit, bounds=[(5,15),(15,30),(20,40),(25,50),(1000,3000),(0.5*max_power,max_power),(min_power,max_power),(min_power,max_power)], maxiter=10000, popsize=1000,polish=False)
parameters = result.x
refill_1_date = parameters[0]
evac_date = parameters[1]
peak_end=parameters[2]
refill_2_date = parameters[3]
storage = parameters[4]
power_cap= parameters[5]
starting=parameters[6]
ending=parameters[7]
est_power = []
# iterate through every day of the year
for day in range(0,365):
# available hydro production based on water availability
                    avail_power = flow_ts[year*365+day]*eff
# if it's during first refill
if day < refill_1_date:
gen =starting- ((starting-min_power)/refill_1_date)*day
storage = avail_power-gen
# if it maintains the water
elif day >= refill_1_date and day < evac_date:
gen=min_power
storage= storage + (avail_power- gen)
# if it's in evac period 2
elif day >= evac_date and day < peak_end:
gen= min_power+ ((power_cap-min_power)/(peak_end-evac_date)* (day- evac_date))
if gen > power_cap:
gen=power_cap
storage= storage + (avail_power- gen)
else:
storage= storage + (avail_power- gen)
                    # if it's in the peak period (hold at capacity)
elif day >= peak_end and day < refill_2_date:
gen= power_cap
if gen > power_cap:
gen=power_cap
storage= storage + (avail_power- gen)
else:
storage= storage + (avail_power- gen)
elif day >=refill_2_date :
gen = power_cap-((power_cap-ending)/(365-refill_2_date)* (day-refill_2_date))
est_power = np.append(est_power,gen)
rmse = np.sqrt(((est_power-hydro_ts[year*365:year*365+365])**2).mean())
if rmse < best_rmse:
best_rmse = rmse
best_parameters = result.x
# use best fit parameters
refill_1_date = best_parameters[0]
evac_date = best_parameters[1]
peak_end=best_parameters[2]
refill_2_date = best_parameters[3]
storage = best_parameters[4]
power_cap= best_parameters[5]
starting=best_parameters[6]
ending=best_parameters[7]
Results=[d[year],starting,ending,refill_1_date,evac_date,peak_end,refill_2_date,storage,power_cap,eff,min_power]
exec('Results%d=np.array(Results)' %(year))
surplus = 0
transfer = 0
est_power = []
# iterate through every day of the year
for day in range(0,365):
# available hydro production based on water availability
avail_power = flow_ts[year*365+day]*eff
# if it's during first refill
if day < refill_1_date:
gen =starting- ((starting-min_power)/refill_1_date)*day
storage = avail_power-gen
# if it maintains the water
elif day >= refill_1_date and day < evac_date:
gen=min_power
storage= storage + (avail_power- gen)
# if it's in evac period 2
elif day >= evac_date and day < peak_end:
gen= min_power+ ((power_cap-min_power)/(peak_end-evac_date)* (day- evac_date))
if gen > power_cap:
gen=power_cap
storage= storage + (avail_power- gen)
else:
storage= storage + (avail_power- gen)
                # if it's in the peak period (hold at capacity)
elif day >= peak_end and day < refill_2_date:
gen= power_cap
if gen > power_cap:
gen=power_cap
storage= storage + (avail_power- gen)
else:
storage= storage + (avail_power- gen)
elif day >=refill_2_date :
gen = power_cap-((power_cap-ending)/(365-refill_2_date)* (day-refill_2_date))
est_power = np.append(est_power,gen)
est_power2= np.append(est_power2,est_power)
Save_Results=np.array([Results0,Results1,Results2,Results3])
exec("np.savetxt('1.0_FNF_Storage_Rule_%s.txt',Save_Results,delimiter = ' ')" %(s))
print(s)
else:
print(s)
pass
|
StarcoderdataPython
|
86629
|
<reponame>brewst001/RAPID
from django.conf.urls import patterns, include, url
from django.contrib import admin
import core.urls
import core.views
import profiles.urls
import pivoteer.urls
import monitors.urls
urlpatterns = patterns('',
url(r'^$', core.views.HomePage.as_view(), name="home"),
url(r'^navigation/', include(core.urls)),
url(r'^profile/', include(profiles.urls)),
url(r'^pivoteer/', include(pivoteer.urls)),
url(r'^monitors/', include(monitors.urls)),
url(r'^admin/', include(admin.site.urls)),
)
|
StarcoderdataPython
|
1743971
|
<reponame>sensenatchanan/flood-warning-system
from .utils import sorted_by_key
from floodsystem.analysis import slope_finder
#from .station import typical_range_consistent, relative_water_level
def stations_level_over_threshold(stations, tol):
output_list = []
for station in stations:
        if station.typical_range_consistent():
relative_level = station.relative_water_level()
if relative_level is not None and relative_level > tol:
output_tuple = (station.name , relative_level)
output_list.append(output_tuple)
else:
pass
return sorted_by_key(output_list, 1, reverse= True)
def stations_highest_rel_level(stations, N):
output_list = []
for station in stations:
        if station.typical_range_consistent():
relative_level = station.relative_water_level()
if relative_level is not None:
output_tuple = (station.name , relative_level)
output_list.append(output_tuple)
else:
pass
result = sorted_by_key(output_list, 1, reverse= True)
return result[:N]
def flood_risk(station):
"""This fuction calculates the risk index for a particular station"""
level = station.relative_water_level()
risk = 0
rise = slope_finder(station)
    if level is not None:
if level > 10.0:
risk = 4
if level <= 10.0 and level >1.0:
if rise == True:
risk = 4
else:
risk = 3
if level <= 1.0 and level >0.5:
if rise == True:
risk = 3
else:
risk = 2
if level<= 0.5:
if rise == True:
risk = 2
else:
risk = 1
return risk
def risk_town_average(town_list):
"""This fuction takes the list of stations in the same town to calculate
an averaged risk value"""
town_risk = 0
N = 0
for s in town_list:
        level = s.relative_water_level()
        if level is not None:
town_risk += flood_risk(s)
N += 1
if N > 0:
average_town_risk = town_risk/N
else:
average_town_risk = 0
return average_town_risk
def town_by_risk(dictionary):
"""This function extracts the list of stations of the town from a
dictionary and calculate the town's average risks value. Finally, return
as a list of tuples consists of (town,risk value)"""
towns_risk =[]
for town in dictionary:
town_risk = risk_town_average(dictionary[town])
towns_risk.append((town, town_risk))
return towns_risk
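
# A usage sketch for town_by_risk: build the town -> stations dictionary and
# rank towns by average risk. The `town` attribute on station objects is
# assumed here, as elsewhere in this project.
def towns_sorted_by_risk(stations):
    towns = {}
    for station in stations:
        towns.setdefault(station.town, []).append(station)
    return sorted(town_by_risk(towns), key=lambda pair: pair[1], reverse=True)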
|
StarcoderdataPython
|
1776473
|
<filename>sentiment-analysis/backend/src/pca/pca-transcribe-eventbridge.py<gh_stars>0
import json
import boto3
import time
import os
TABLE = os.environ["TableName"]
# Total number of retry attempts to make
RETRY_LIMIT = 2
def lambda_handler(event, context):
# Pick off our event values
transcribe = boto3.client("transcribe")
jobName = event["detail"]["TranscriptionJobName"]
response = transcribe.get_transcription_job(TranscriptionJobName = jobName)["TranscriptionJob"]
jobStatus = response["TranscriptionJobStatus"]
# Read tracking entry between Transcribe job and its Step Function
ddbClient = boto3.client("dynamodb")
tracking = ddbClient.get_item(Key={'PKJobId': {'S': jobName}},
TableName=TABLE)
    # It's unlikely, but if we didn't get a value (e.g. a race condition where
    # the job finished before the token was written), wait for 5 seconds and
    # try again, just once. This may never happen.
if "Item" not in tracking:
# Just sleep for a few seconds and try again
time.sleep(5)
tracking = ddbClient.get_item(Key={'PKJobId': {'S': jobName}},
TableName=TABLE)
# Did we have a result?
if "Item" in tracking:
# Delete entry in DDB table - there's no way we'll be processing this again
ddbClient.delete_item(Key={'PKJobId': {'S': jobName}},
TableName=TABLE)
# Extract the Step Functions task and previous event status
taskToken = tracking["Item"]["taskToken"]['S']
eventStatus = json.loads(tracking["Item"]["taskState"]['S'])
# If the job has FAILED then we need to check if it's a service failure,
# as this can happen, then we want to re-try the job another time
finalResponse = jobStatus
if jobStatus == "FAILED":
errorMesg = response["FailureReason"]
if errorMesg.startswith("Internal"):
# Internal failure - we want to retry a few times, but only once
retryCount = eventStatus.pop("retryCount", 0)
# Not retried enough yet - let's try another time
if (retryCount < RETRY_LIMIT):
eventStatus["retryCount"] = retryCount + 1
finalResponse = "RETRY"
# All complete - continue our workflow with this status/retry count
eventStatus["transcribeStatus"] = finalResponse
sfnClient = boto3.client("stepfunctions")
sfnClient.send_task_success(taskToken=taskToken,
output=json.dumps(eventStatus))
return {
'statusCode': 200,
'body': json.dumps('Success.')
}
# Main entrypoint for testing
# Note, Status could be COMPLETED or FAILED
if __name__ == "__main__":
event = {
'version': '0',
'id': '0029c6b1-7c8e-1f61-fed5-7ef256b3660b',
'detail-type': 'Transcribe Job State Change',
'source': 'aws.transcribe',
'account': '710514874879',
'time': '2020-08-05T13:32:03Z',
'region': 'us-east-1',
'resources': [],
'detail': {
'TranscriptionJobName': '0a.93.a0.3e.00.00-13.29.09.061-09-16-2019.wav',
'TranscriptionJobStatus': 'COMPLETED'
}
}
lambda_handler(event, "")
|
StarcoderdataPython
|
3413692
|
# -*- coding: utf-8 -*-
#####################################################################################
#
# Copyright (c) <NAME>. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# <EMAIL>. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
##
## Test str/byte equivalence for built-in string methods
##
## Please, Note:
## i) All commented test cases are for bytes/extensible string combination
## ii) In version 3.x the str/bytes mixing below becomes a set of "Raises" test cases
##
##
import sys
import unittest
from iptest import run_test
class ExtensibleStringClass(str):
pass
esa = ExtensibleStringClass("a")
esb = ExtensibleStringClass("b")
esc = ExtensibleStringClass("c")
esx = ExtensibleStringClass("x")
class StrBytesTest(unittest.TestCase):
def test_contains(self):
self.assertTrue(esa.__contains__("a"))
self.assertTrue(esa.__contains__(b"a"))
self.assertTrue(esa.__contains__(esa))
self.assertTrue("a".__contains__("a"))
self.assertTrue("a".__contains__(b"a"))
self.assertTrue("a".__contains__(esa))
self.assertTrue(b"a".__contains__("a"))
self.assertTrue(b"a".__contains__(b"a"))
self.assertTrue(b"a".__contains__(esa))
def test_format(self):
self.assertEqual("%s" % b"a", "a")
# self.assertEqual(b"%s" % b"a", b"a")
# self.assertEqual("%s" % b"a", b"%s" % "a")
def test_count(self):
self.assertEqual("aa".count(b"a"), 2)
self.assertEqual("aa".count(b"a", 0), 2)
self.assertEqual("aa".count(b"a", 0, 1), 1)
self.assertEqual("aa".count(esa), 2)
self.assertEqual("aa".count(esa, 0), 2)
self.assertEqual("aa".count(esa, 0, 1), 1)
self.assertEqual(b"aa".count("a"), 2)
self.assertEqual(b"aa".count("a", 0), 2)
self.assertEqual(b"aa".count("a", 0, 1), 1)
# self.assertEqual(b"aa".count(esa), 2)
# self.assertEqual(b"aa".count(esa, 0), 2)
# self.assertEqual(b"aa".count(esa, 0, 1), 1)
def test_find(self):
self.assertTrue("abc".find(b"b"))
self.assertTrue("abc".find(b"b", 1))
self.assertTrue("abc".find(b"b", 1, 2))
self.assertTrue("abc".find(b"b", 1L))
self.assertTrue("abc".find(b"b", 1L, 2L))
self.assertTrue("abc".find(esb))
self.assertTrue("abc".find(esb, 1))
self.assertTrue("abc".find(esb, 1, 2))
self.assertTrue("abc".find(esb, 1L))
self.assertTrue("abc".find(esb, 1L, 2L))
self.assertTrue(b"abc".find("b"))
self.assertTrue(b"abc".find("b", 1))
self.assertTrue(b"abc".find("b", 1, 2))
# self.assertTrue(b"abc".find(esb))
# self.assertTrue(b"abc".find(esb, 1))
# self.assertTrue(b"abc".find(esb, 1, 2))
def test_lstrip(self):
self.assertEqual("xa".lstrip(b"x"), "a")
self.assertEqual("xa".lstrip(esx), "a")
self.assertEqual(b"xa".lstrip("x"), b"a")
# self.assertEqual(b"xa".lstrip(esx), b"a")
def test_partition(self):
self.assertEqual("abc".partition(b"b"), ("a", "b", "c"))
self.assertEqual("abc".partition(esb), ("a", "b", "c"))
self.assertEqual(b"abc".partition("b"), (b"a", b"b", b"c"))
# self.assertEqual(b"abc".partition(esb), (b"a", b"b", b"c"))
def test_replace(self):
self.assertEqual("abc".replace(b"a", "x"), "xbc")
self.assertEqual("abc".replace(b"a", b"x"), "xbc")
self.assertEqual("abc".replace("a", b"x"), "xbc")
self.assertEqual("abc".replace(b"a", "x", 1), "xbc")
self.assertEqual("abc".replace(b"a", b"x", 1), "xbc")
self.assertEqual("abc".replace("a", b"x", 1), "xbc")
self.assertEqual("abc".replace(b"a", buffer("x")), "xbc")
self.assertEqual("abc".replace(buffer("a"), "x"), "xbc")
self.assertEqual("abc".replace(buffer("a"), buffer("x")), "xbc")
self.assertEqual("abc".replace(b"a", bytearray(b"x")), "xbc")
self.assertEqual("abc".replace(bytearray(b"a"), "x"), "xbc")
self.assertEqual("abc".replace(bytearray(b"a"), bytearray(b"x")), "xbc")
self.assertEqual("abc".replace("a", esx), "xbc")
self.assertEqual("abc".replace(b"a", esx), "xbc")
self.assertEqual("abc".replace(esa, esx), "xbc")
self.assertEqual("abc".replace(esa, b"x"), "xbc")
self.assertEqual("abc".replace("a", esx, 1), "xbc")
self.assertEqual("abc".replace(b"a", esx, 1), "xbc")
self.assertEqual("abc".replace(esa, esx, 1), "xbc")
self.assertEqual("abc".replace("a", esx, 1), "xbc")
self.assertEqual(b"abc".replace(b"a", "x"), "xbc")
self.assertEqual(b"abc".replace("a", "x"), "xbc")
self.assertEqual(b"abc".replace("a", b"x"), "xbc")
self.assertEqual(b"abc".replace(b"a", "x", 1), "xbc")
self.assertEqual(b"abc".replace("a", "x", 1), "xbc")
self.assertEqual(b"abc".replace("a", b"x", 1), "xbc")
# self.assertEqual(b"abc".replace("a", esx), "xbc")
# self.assertEqual(b"abc".replace(b"a", esx), "xbc")
# self.assertEqual(b"abc".replace(esa, esx), "xbc")
# self.assertEqual(b"abc".replace(esa, b"x"), "xbc")
# self.assertEqual(b"abc".replace("a", esx, 1), "xbc")
# self.assertEqual(b"abc".replace(b"a", esx, 1), "xbc")
# self.assertEqual(b"abc".replace(esa, esx, 1), "xbc")
# self.assertEqual(b"abc".replace("a", esx, 1), "xbc")
def test_rfind(self):
self.assertEqual("abc".rfind(b"c"), 2)
self.assertEqual("abc".rfind(b"c", 1), 2)
self.assertEqual("abc".rfind(b"c", 1, 3), 2)
self.assertEqual("abc".rfind(b"c", 1L), 2)
self.assertEqual("abc".rfind(b"c", 1L, 3L), 2)
self.assertEqual("abc".rfind(esc), 2)
self.assertEqual("abc".rfind(esc, 1), 2)
self.assertEqual("abc".rfind(esc, 1, 3), 2)
self.assertEqual("abc".rfind(esc, 1L), 2)
self.assertEqual("abc".rfind(esc, 1L, 3L), 2)
self.assertEqual(b"abc".rfind("c"), 2)
self.assertEqual(b"abc".rfind("c", 1), 2)
self.assertEqual(b"abc".rfind("c", 1, 3), 2)
# self.assertEqual(b"abc".rfind(esc), 2)
# self.assertEqual(b"abc".rfind(esc, 1), 2)
# self.assertEqual(b"abc".rfind(esc, 1, 3), 2)
def test_rindex(self):
self.assertEqual("abc".rindex(b"c"), 2)
self.assertEqual("abc".rindex(b"c", 1), 2)
self.assertEqual("abc".rindex(b"c", 1, 3), 2)
self.assertEqual("abc".rindex(b"c", 1L), 2)
self.assertEqual("abc".rindex(b"c", 1L, 3L), 2)
self.assertEqual("abc".rindex(esc), 2)
self.assertEqual("abc".rindex(esc, 1), 2)
self.assertEqual("abc".rindex(esc, 1, 3), 2)
self.assertEqual("abc".rindex(esc, 1L), 2)
self.assertEqual("abc".rindex(esc, 1L, 3L), 2)
self.assertEqual(b"abc".rindex("c"), 2)
self.assertEqual(b"abc".rindex("c", 1), 2)
self.assertEqual(b"abc".rindex("c", 1, 3), 2)
# self.assertEqual(b"abc".rindex(esc), 2)
# self.assertEqual(b"abc".rindex(esc, 1), 2)
# self.assertEqual(b"abc".rindex(esc, 1, 3), 2)
def test_rpartition(self):
self.assertEqual("abc".rpartition(b"b"), ("a", "b", "c"))
self.assertEqual("abc".rpartition(esb), ("a", "b", "c"))
self.assertEqual(b"abc".rpartition("b"), (b"a", b"b", b"c"))
# self.assertEqual(b"abc".rpartition(esb), (b"a", b"b", b"c"))
def test_rsplit(self):
self.assertEqual("abc".rsplit(b"b"), ["a", "c"])
self.assertEqual("abc".rsplit(b"b", 1), ["a", "c"])
self.assertEqual("abc".rsplit(esb), ["a", "c"])
self.assertEqual("abc".rsplit(esb, 1), ["a", "c"])
self.assertEqual(b"abc".rsplit("b"), [b"a", b"c"])
self.assertEqual(b"abc".rsplit("b", 1), [b"a", b"c"])
# self.assertEqual(b"abc".rsplit(esb), [b"a", b"c"])
# self.assertEqual(b"abc".rsplit(esb, 1), [b"a", b"c"])
def test_rstrip(self):
self.assertEqual("ax".rstrip(b"x"), "a")
self.assertEqual("ax".rstrip(esx), "a")
self.assertEqual(b"ax".rstrip("x"), b"a")
# self.assertEqual(b"ax".rstrip(esx), b"a")
def test_split(self):
self.assertEqual("abc".split(b"b"), ["a", "c"])
self.assertEqual("abc".split(b"b", 1), ["a", "c"])
self.assertEqual("abc".split(esb), ["a", "c"])
self.assertEqual("abc".split(esb, 1), ["a", "c"])
self.assertEqual(b"abc".split("b"), [b"a", b"c"])
self.assertEqual(b"abc".split("b", 1), [b"a", b"c"])
# self.assertEqual(b"abc".split(esb), [b"a", b"c"])
# self.assertEqual(b"abc".split(esb, 1), [b"a", b"c"])
def test_strip(self):
self.assertEqual("xax".strip(b"x"), "a")
self.assertEqual("xax".strip(esx), "a")
self.assertEqual(b"xax".strip("x"), b"a")
# self.assertEqual(b"xax".strip(esx), b"a")
def test_startswith(self):
self.assertTrue("abc".startswith(b"a"))
self.assertTrue("abc".startswith(b"a", 0))
self.assertTrue("abc".startswith(b"a", 0, 1))
self.assertTrue("abc".startswith(esa))
self.assertTrue("abc".startswith(esa, 0))
self.assertTrue("abc".startswith(esa, 0, 1))
self.assertTrue(b"abc".startswith("a"))
self.assertTrue(b"abc".startswith("a", 0))
self.assertTrue(b"abc".startswith("a", 0, 1))
# self.assertTrue(b"abc".startswith(esa))
# self.assertTrue(b"abc".startswith(esa, 0))
# self.assertTrue(b"abc".startswith(esa, 0, 1))
def test_endswith(self):
self.assertTrue("abc".endswith(b"c"))
self.assertTrue("abc".endswith(b"c", 0))
self.assertTrue("abc".endswith(b"c", 0, 3))
self.assertTrue("abc".endswith(esc))
self.assertTrue("abc".endswith(esc, 0))
self.assertTrue("abc".endswith(esc, 0, 3))
self.assertTrue(b"abc".endswith("c"))
self.assertTrue(b"abc".endswith("c", 0))
self.assertTrue(b"abc".endswith("c", 0, 3))
# self.assertTrue(b"abc".endswith(esc))
# self.assertTrue(b"abc".endswith(esc, 0))
# self.assertTrue(b"abc".endswith(esc, 0, 3))
def test_join(self):
self.assertEqual("abc", "b".join([b"a", b"c"]))
self.assertEqual("b", "a".join([b"b"]))
self.assertEqual("abc", "b".join([esa, esc]))
self.assertEqual("b", "a".join([esb]))
self.assertEqual(b"abc", b"b".join(["a", "c"]))
self.assertEqual(b"b", b"a".join(["b"]))
#self.assertEqual(b"abc", b"b".join([esb, esc]))
# self.assertEqual(b"b", b"a".join([esb]))
run_test(__name__)
|
StarcoderdataPython
|
4938589
|
<reponame>tristone13th/pdf-annotations
import argparse
import datetime
import io
import os
import sys
import re
from pathlib import Path
from typing import List
import pdfminer.pdftypes as pdftypes
import pdfminer.settings
import pdfminer.utils
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams, LTAnno, LTChar, LTContainer, LTTextBox
from pdfminer.pdfdocument import (PDFDestinationNotFound, PDFDocument,
PDFNoOutlines)
from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfparser import PDFParser
from pdfminer.psparser import PSLiteral, PSLiteralTable
pdfminer.settings.STRICT = False
SUBSTITUTIONS = {
u'ff': 'ff',
u'fi': 'fi',
u'fl': 'fl',
u'ffi': 'ffi',
u'ffl': 'ffl',
u'‘': "'",
u'’': "'",
u'“': '"',
u'”': '"',
u'…': '...',
}
ANNOTATION_SUBTYPES = frozenset(
{'Text', 'Highlight', 'Squiggly', 'StrikeOut', 'Underline'})
ANNOTATION_NITS = frozenset({'Squiggly', 'StrikeOut', 'Underline'})
COLUMNS_PER_PAGE = 2 # default only, changed via a command-line parameter
def box_hit(item, box):
(x0, y0, x1, y1) = box
assert item.x0 <= item.x1 and item.y0 <= item.y1
assert x0 <= x1 and y0 <= y1
# does most of the item area overlap the box?
# http://math.stackexchange.com/questions/99565/simplest-way-to-calculate-the-intersect-area-of-two-rectangles
x_overlap = max(0, min(item.x1, x1) - max(item.x0, x0))
y_overlap = max(0, min(item.y1, y1) - max(item.y0, y0))
overlap_area = x_overlap * y_overlap
item_area = (item.x1 - item.x0) * (item.y1 - item.y0)
assert overlap_area <= item_area
if item_area == 0:
return False
else:
return overlap_area >= 0.5 * item_area
class RectExtractor(TextConverter):
def __init__(self, resource_manager, codec='utf-8', page_number=1, la_params=None):
dummy = io.StringIO()
TextConverter.__init__(self, resource_manager, outfp=dummy,
codec=codec, pageno=page_number, laparams=la_params)
self.annotations = set()
self._last_hit = frozenset()
self._cur_line = set()
def set_annotations(self, annotations):
self.annotations = {a for a in annotations if a.boxes}
# main callback from parent PDFConverter
def receive_layout(self, lt_page):
self.render(lt_page)
self._last_hit = frozenset()
self._cur_line = set()
def test_boxes(self, item):
hits = frozenset({a for a in self.annotations if any(
{box_hit(item, b) for b in a.boxes})})
self._last_hit = hits
self._cur_line.update(hits)
return hits
# "broadcast" newlines to _all_ annotations that received any text on the
# current line, in case they see more text on the next line, even if the
# most recent character was not covered.
def capture_newline(self):
for a in self._cur_line:
a.capture('\n')
self._cur_line = set()
def render(self, item):
# If it's a container, recurse on nested items.
if isinstance(item, LTContainer):
for child in item:
self.render(child)
# Text boxes are a subclass of container, and somehow encode newlines
# (this weird logic is derived from pdfminer.converter.TextConverter)
if isinstance(item, LTTextBox):
self.test_boxes(item)
self.capture_newline()
# Each character is represented by one LTChar, and we must handle
# individual characters (not higher-level objects like LTTextLine)
# so that we can capture only those covered by the annotation boxes.
elif isinstance(item, LTChar):
for a in self.test_boxes(item):
a.capture(item.get_text())
# Annotations capture whitespace not explicitly encoded in
# the text. They don't have an (X,Y) position, so we need some
# heuristics to match them to the nearby annotations.
elif isinstance(item, LTAnno):
text = item.get_text()
if text == '\n':
self.capture_newline()
else:
for a in self._last_hit:
a.capture(text)
class Page:
def __init__(self, page_number, media_box):
self.page_number = page_number
self.media_box = media_box
self.annotations = []
def __eq__(self, other):
return self.page_number == other.page_number
def __lt__(self, other):
return self.page_number < other.page_number
class Pos:
def __init__(self, page, x, y):
self.page = page
self.x = x if x else 0
self.y = y if y else 0
def __lt__(self, other):
"""
        Compare two positions: first by page, then by column (for
        multi-column layouts), then top-to-bottom within the column.
        :param other: the other position
        :return: True if self precedes other in reading order
"""
if self.page < other.page:
return True
elif self.page == other.page:
assert self.page is other.page
(sx, sy) = self.normalise_to_media_box()
(ox, oy) = other.normalise_to_media_box()
(x0, y0, x1, y1) = self.page.media_box
colwidth = (x1 - x0) / COLUMNS_PER_PAGE
self_col = (sx - x0) // colwidth
other_col = (ox - x0) // colwidth
return self_col < other_col or (self_col == other_col and sy > oy)
else:
return False
def normalise_to_media_box(self):
"""
        Clamp the position to the page's media box so that out-of-range
        coordinates cannot skew the column calculation.
        :return: the clamped x and y
"""
x, y = self.x, self.y
(x0, y0, x1, y1) = self.page.media_box
if x < x0:
x = x0
elif x > x1:
x = x1
if y < y0:
y = y0
elif y > y1:
y = y1
return x, y
class Annotation:
def __init__(self, page, tag_name, coords=None, rect=None, contents=None, author=None):
self.page = page
self.tag_name = tag_name
self.contents = None if contents == '' else contents
self.rect = rect
self.author = author
self.text = ''
if coords is None:
self.boxes = None
else:
assert len(coords) % 8 == 0
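            # QuadPoints lists four (x, y) corners per highlighted region,
            # i.e. eight numbers per box; each group is reduced below to an
            # axis-aligned bounding box.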
self.boxes = []
while coords:
(x0, y0, x1, y1, x2, y2, x3,
y3), coords = coords[:8], coords[8:]
x_values = (x0, x1, x2, x3)
y_values = (y0, y1, y2, y3)
box = (min(x_values), min(y_values),
max(x_values), max(y_values))
self.boxes.append(box)
def capture(self, text):
if text == '\n':
# Kludge for latex: elide hyphens
if self.text.endswith('-'):
self.text = self.text[:-1]
# Join lines, treating newlines as space, while ignoring successive
# newlines. This makes it easier for the renderer to
# "broadcast" LTAnno newlines to active annotations regardless of
# box hits. (Detecting paragraph breaks is tricky anyway, and left
            # for future work!)
elif not self.text.endswith(' '):
self.text += ' '
else:
self.text += text
def get_text(self):
if self.boxes:
if self.text:
# replace tex ligatures (and other common odd characters)
return ''.join([SUBSTITUTIONS.get(c, c) for c in self.text.strip()])
else:
# something's strange -- we have boxes but no text for them
return "(XXX: missing text!)"
else:
return None
def get_start_pos(self):
if self.rect:
(x0, y0, x1, y1) = self.rect
elif self.boxes:
(x0, y0, x1, y1) = self.boxes[0]
else:
return None
return Pos(self.page, min(x0, x1), max(y0, y1))
def __lt__(self, other):
if isinstance(other, Annotation):
return self.get_start_pos() < other.get_start_pos()
return self.get_start_pos() < other.pos
class Outline:
def __init__(self, level, title: str, dest, pos: Pos):
self.level = level
        # strip stray whitespace from the title
assert title
self.title = title.strip()
self.dest = dest
self.pos = pos
def __lt__(self, other):
if isinstance(other, Annotation):
return self.pos < other.get_start_pos()
return self.pos < other.pos
class NoInputFileError(Exception):
pass
# substitution for comment and title text
sub_dict_comment_title = {
r'\_': '\\_',
r'\$': '\\$',
r'\{': '\\{',
r'\}': '\\}',
r'\|': '\\|',
}
# substitution for quotation text
sub_dict_text = {
r'[\\]*\$': '\\$',
r'[\\]*\&': '&',
r'[\\]*\{': '\\{',
r'[\\]*\_': '\\_',
r'[\\]*\}': '\\}',
r'[\\]*\|': '\\|',
}
def format_annotation(annotation, extra=None):
rawtext = annotation.get_text()
comment = [l for l in annotation.contents.splitlines(
) if l] if annotation.contents else []
text = [l for l in rawtext.strip().splitlines() if l] if rawtext else []
# we are either printing: item text and item contents, or one of the two
# if we see an annotation with neither, something has gone wrong
assert text or comment
# compute the formatted position (and extra bit if needed) as a label
label = "Page %d (%s)." % (
annotation.page.page_number + 1, extra if extra else "")
ret = ""
labelized = False
if comment:
ret += '\n'.join(comment)
for ori, new in sub_dict_comment_title.items():
ret = re.sub(ori, new, ret)
else:
assert text
ret += label
labelized = True
if text:
ret += '\n\n'
for index, para in enumerate(text):
for ori, new in sub_dict_text.items():
para = re.sub(ori, new, para)
ret += "> " + para
if index == len(text) - 1 and not labelized:
ret += " \\| " + label
ret += "\n"
ret += "\n"
else:
ret += " \\| " + label + "\n\n"
return ret
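# Example output for a hypothetical Highlight on page 3 with both a typed
# comment and highlighted text (the label moves to the last quoted line
# because the comment already leads):
#
#   A note the reader typed.
#
#   > The highlighted passage. \| Page 3 (Highlight).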
def filter_outlines(all_items: List) -> List:
    """Drop outlines (headings) with no annotations beneath them.
    """
stack = []
res = []
for item in all_items:
if not isinstance(item, Outline):
res.extend(stack)
res.append(item)
stack = []
continue
while stack and item.level <= stack[-1].level:
stack.pop()
stack.append(item)
return res
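# Example: for the reading-order sequence [H1, H2, annotation, H3], the
# stack flushes H1 and H2 when the annotation is reached, so the result
# is [H1, H2, annotation]; H3 is dropped because no annotation follows it.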
class PrettyPrinter:
"""
Pretty-print the extracted annotations according to the output options.
    strict_mode: output every outline as a markdown header, regardless
    of whether any annotation falls beneath it.
"""
def __init__(self, stem: str, strict_mode: bool = False):
self.stem = stem
self.strict_mode = strict_mode
def print_all(self, outlines: List[Outline], annotations: List[Annotation], outfile):
# print yaml header
print('---', file=outfile)
print('categories: Notes', file=outfile)
print('title: Reading Notes for ' + self.stem, file=outfile)
print('---\n', file=outfile)
# print outlines and annotations
all_items = sorted(outlines + annotations)
if not self.strict_mode:
            all_items = filter_outlines(all_items)
for a in all_items:
if isinstance(a, Outline):
for ori, new in sub_dict_comment_title.items():
a.title = re.sub(ori, new, a.title)
print("#" * a.level + " " + a.title + "\n", file=outfile)
else:
print(format_annotation(a, a.tag_name), file=outfile)
def resolve_dest(doc, dest):
if isinstance(dest, bytes):
dest = pdftypes.resolve1(doc.get_dest(dest))
elif isinstance(dest, PSLiteral):
dest = pdftypes.resolve1(doc.get_dest(dest.name))
if isinstance(dest, dict):
dest = dest['D']
return dest
def get_outlines(doc, page_list, page_dict) -> List[Outline]:
result = []
for (level, title, dest_name, action_ref, _) in doc.get_outlines():
if dest_name is None and action_ref:
action = pdftypes.resolve1(action_ref)
if isinstance(action, dict):
subtype = action.get('S')
if subtype is PSLiteralTable.intern('GoTo'):
dest_name = action.get('D')
if dest_name is None:
continue
# some may not have a link
try:
dest = resolve_dest(doc, dest_name)
except PDFDestinationNotFound:
continue
# The PDF specification knows these destination types:
# [page /XYZ left top zoom]
# [page /Fit]
# [page /FitH top]
# [page /FitV left]
# [page /FitR left bottom right top]
# [page /FitB] (PDF 1.1)
# [page /FitBH top] (PDF 1.1)
# [page /FitBV left] (PDF 1.1)
# ISO 32000-1, Table 151 – Destination syntax)
# for more, see: https://stackoverflow.com/questions/43742984/changing-zoom-in-links-in-pdf-files
if dest[1] is PSLiteralTable.intern('XYZ'):
(page_ref, _, target_x, target_y) = dest[:4]
elif dest[1] is PSLiteralTable.intern('FitH') or dest[1] is PSLiteralTable.intern('FitBH'):
page_ref, target_y = dest[0], dest[2]
target_x = 0
elif dest[1] is PSLiteralTable.intern('FitV') or dest[1] is PSLiteralTable.intern('FitBV'):
page_ref, target_x = dest[0], dest[2]
target_y = float('inf')
elif dest[1] is PSLiteralTable.intern('Fit') or dest[1] is PSLiteralTable.intern('FitR') or dest[1] is PSLiteralTable.intern('FitB'):
page_ref = dest[0]
target_x, target_y = 0, float('inf')
else:
continue
if type(page_ref) is int:
page = page_list[page_ref]
elif isinstance(page_ref, pdftypes.PDFObjRef):
page = page_dict[page_ref.objid]
else:
sys.stderr.write(
'Warning: unsupported page reference in outline: %s\n' % page_ref)
page = None
if page:
pos = Pos(page, target_x, target_y)
result.append(Outline(level, title, dest_name, pos))
return result
def get_annotations(pdf_annotations, page):
annotations = []
for pa in pdf_annotations:
subtype = pa.get('Subtype')
        if subtype is None or subtype.name not in ANNOTATION_SUBTYPES:
continue
contents = pa.get('Contents')
if contents is not None:
# decode as string, normalise line endings, replace special characters
contents = pdfminer.utils.decode_text(contents)
contents = contents.replace('\r\n', '\n').replace('\r', '\n')
contents = ''.join([SUBSTITUTIONS.get(c, c) for c in contents])
coords = pdftypes.resolve1(pa.get('QuadPoints'))
rect = pdftypes.resolve1(pa.get('Rect'))
author = pdftypes.resolve1(pa.get('T'))
if author is not None:
author = pdfminer.utils.decode_text(author)
a = Annotation(page, subtype.name, coords,
rect, contents, author=author)
annotations.append(a)
return annotations
def process_file(fh):
resource_manager = PDFResourceManager()
la_params = LAParams()
device = RectExtractor(resource_manager, la_params=la_params)
interpreter = PDFPageInterpreter(resource_manager, device)
parser = PDFParser(fh)
doc = PDFDocument(parser)
page_list = [] # pages in page order
page_dict = {} # map from PDF page object ID to Page object
all_annotations = []
for (page_number, pdf_page) in enumerate(PDFPage.create_pages(doc)):
print("Current processing page: ", page_number)
page = Page(page_number, pdf_page.mediabox)
page_list.append(page)
page_dict[pdf_page.pageid] = page
if pdf_page.annots:
pdf_annotations = []
for a in pdftypes.resolve1(pdf_page.annots):
if isinstance(a, pdftypes.PDFObjRef):
pdf_annotations.append(a.resolve())
else:
sys.stderr.write('Warning: unknown annotation: %s\n' % a)
page.annotations = get_annotations(pdf_annotations, page)
page.annotations.sort()
device.set_annotations(page.annotations)
interpreter.process_page(pdf_page) # add text to annotation
all_annotations.extend(page.annotations)
outlines = []
try:
outlines = get_outlines(doc, page_list, page_dict)
except PDFNoOutlines:
sys.stderr.write("Document doesn't include outlines (\"bookmarks\")\n")
except Exception as ex:
sys.stderr.write("Warning: failed to retrieve outlines: %s\n" % ex)
device.close()
return outlines, all_annotations
def parse_args():
p = argparse.ArgumentParser(description=__doc__)
p.add_argument("input", metavar="INFILE", type=argparse.FileType("rb"), default=sys.stdin, nargs='?',
help="PDF file to process")
p.add_argument("-o", metavar="OUTFILE", type=argparse.FileType("w", encoding="UTF-8"), dest="output",
default=sys.stdout, const=sys.stdout, nargs='?', help="output file (default is stdout)")
p.add_argument("-n", "--cols", default=1, type=int, metavar="COLS", dest="cols",
help="number of columns per page in the document (default: 1)")
return p.parse_args()
def main():
args = parse_args()
# reset input file
if args.input is sys.stdin:
f_list = os.listdir()
for f in f_list:
suffix = Path(f).suffix
if suffix == '.pdf' or suffix == '.PDF':
args.input = open(f, "rb")
break
if args.input is sys.stdin:
raise NoInputFileError
# reset output file
stem = Path(args.input.name).stem.strip()
args.output = open(str(datetime.date.today()) + '-' + stem + '.md',
"w", encoding="UTF-8") if args.output is sys.stdout else args.output
# reset columns
global COLUMNS_PER_PAGE
COLUMNS_PER_PAGE = args.cols
# processing
outlines, annotations = process_file(args.input)
pp = PrettyPrinter(stem)
pp.print_all(outlines, annotations, args.output)
return 0
if __name__ == "__main__":
try:
main()
except NoInputFileError:
print("No PDF file under current directory.")
|
StarcoderdataPython
|
1945113
|
#!/usr/bin/env python
#
# Cut labelled segments out of audio files with sox, train a classifier
# on them with bextract, and print the resulting .mpl file.
#
import sys
import os
import time
import subprocess
import json

from optparse import OptionParser


def run(inJsonFilename):
    with open(inJsonFilename) as inJson:
        data = json.load(inJson)

    # Add the input path and a name for the output file that will be generated
    for item in data:
        newAudioFile = item['audioFile'].replace("/", "_")
        item['inputFile'] = "/data/django/orchive/audio/%s.wav" % (item['audioFile'])
        item['outputFile'] = "/tmp/%s-%s-%s.wav" % (newAudioFile, item['startSec'], item['endSec'])

    # Run sox on each input file to trim out the requested segment
    for item in data:
        startSec = float(item['startSec'])
        endSec = float(item['endSec'])
        lengthSec = endSec - startSec
        command = "sox %s %s trim %f %f" % (item['inputFile'], item['outputFile'], startSec, lengthSec)
        subprocess.getoutput(command)

    # Make a .mf file listing each segment and its label
    ts = time.time()
    mfFilename = "/tmp/bextract-%i.mf" % ts
    with open(mfFilename, "w") as mfFile:
        for item in data:
            mfFile.write("%s\t%s\n" % (item['outputFile'], item['label']))

    # Run bextract on the audio files
    mplFilename = "/tmp/bextract-%i.mpl" % ts
    command = "bextract %s -pm -p %s" % (mfFilename, mplFilename)
    subprocess.getoutput(command)

    # Read the .mpl file back as text
    with open(mplFilename) as mplFile:
        mplData = mplFile.read()

    # Remove temporary files when done
    for item in data:
        os.remove(item['outputFile'])
    os.remove(mfFilename)
    os.remove(mplFilename)

    print(mplData)


if __name__ == "__main__":
    usage = "usage: %prog [options] in.json"
    parser = OptionParser(usage)
    (options, args) = parser.parse_args()
    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit(1)
    run(args[0])
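# Expected input format (hypothetical values; field names taken from the
# code above):
# [
#   {"audioFile": "2005/tape1", "startSec": "1.0", "endSec": "3.5", "label": "orca"},
#   {"audioFile": "2005/tape2", "startSec": "0.0", "endSec": "2.0", "label": "noise"}
# ]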
|
StarcoderdataPython
|
11214783
|
import argparse
import logging
from . import __version__
from . import constants as c
from .main import latex2plos
def main():
# Setup command line option parser
parser = argparse.ArgumentParser(
description='Automated preparation of your LaTeX paper for submission in PLOS journals',
)
parser.add_argument(
'input_filename',
help='Filename of the main LaTeX document',
)
parser.add_argument(
'-b',
'--build-dir',
metavar='DIRECTORY',
default=c.DEFAULT_BUILD_DIR,
help="DIRECTORY where the main LaTeX document has successfully been built, '%s' by default" % c.DEFAULT_BUILD_DIR,
)
parser.add_argument(
'-e',
'--export-dir',
metavar='DIRECTORY',
default=c.DEFAULT_EXPORT_DIR,
help="Export to selected DIRECTORY, '%s' by default" % c.DEFAULT_EXPORT_DIR,
)
parser.add_argument(
'-q',
'--quiet',
action='store_const',
const=logging.WARN,
dest='verbosity',
help='Be quiet, show only warnings and errors',
)
parser.add_argument(
'-v',
'--verbose',
action='store_const',
const=logging.DEBUG,
dest='verbosity',
help='Be very verbose, show debug information',
)
parser.add_argument(
'--version',
action='version',
version="%(prog)s " + __version__,
)
args = parser.parse_args()
# Configure logging
log_level = args.verbosity or logging.INFO
logging.basicConfig(level=log_level, format="[%(levelname)s] %(message)s")
latex2plos(args.input_filename, args.build_dir, args.export_dir)
if __name__ == '__main__':
main()
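# Example invocation (assuming a console script named latex2plos is
# installed for this package):
#   latex2plos paper.tex --build-dir build --export-dir export_for_plos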
|
StarcoderdataPython
|
3434386
|
import logging
import os
import pickle
import re
import scrapy
from functools import partial
from . import base_path, config, notification
seen_directory = os.path.join(base_path, 'seen')
os.makedirs(seen_directory, exist_ok=True)
seen_filename_template = os.path.join(seen_directory, '{name}.pickle')
without_whitespace = partial(re.sub, r'\s', '')
class SearchKeywordPipeline:
def __init__(self):
self.logger = logging.getLogger('searchkeywordpipeline')
self.logger.setLevel(logging.INFO)
self.keywords = [
(without_whitespace(item), item) if isinstance(item, str)
else (without_whitespace(item[0]), item[1])
for item in config.keywords
]
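        # config.keywords entries may be plain strings or (pattern, label)
        # pairs, e.g. (hypothetical): ['nintendo switch', ('ps5', 'PS5')]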
self.crawled = set()
self.seen = set()
def open_spider(self, spider):
seen_filename = seen_filename_template.format(name=spider.name)
if os.path.exists(seen_filename):
with open(seen_filename, 'rb') as input_file:
self.seen = pickle.load(input_file)
def close_spider(self, spider):
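        # Persist only the items observed during this run, so products
        # that have disappeared from the site drop out of the seen set.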
seen_filename = seen_filename_template.format(name=spider.name)
with open(seen_filename, 'wb') as output_file:
pickle.dump(self.crawled, output_file)
def process_item(self, item, spider):
if item['id'] in self.crawled:
raise scrapy.exceptions.DropItem('Duplicated item: {id}'.format(id=item['id']))
self.crawled.add(item['id'])
for keyword_without_space, keyword in self.keywords:
if keyword_without_space in without_whitespace(item['name']):
if item['id'] not in self.seen:
self.logger.info('Product found: {name} ({keyword})'.format(
name=item['name'],
keyword=keyword_without_space
))
self.notify(item, keyword)
break
return item
def notify(self, item, keyword):
notification.send(
title='{keyword} - ₩{price:,} (-{discount_rate:.0f}%)'.format(
keyword=keyword,
price=item['price'],
discount_rate=item['discount_rate'] * 100
),
message=item['name'],
url=item['url'],
image_url=item['image_url']
)
self.logger.info('Notification sent: {message}'.format(message=item['name']))
|
StarcoderdataPython
|