3397843
|
<filename>src/roadimage.py
from road import Road
from mark_tracker import MarkTracker
from roadgraphics import *
from crosswalk import CrossWalk
import random
import math
import numpy as np  # needed by getAgingMatrix below
import drawings as drw
class RoadImage:
def __init__(self, dimensions, path, background_images, asphalt_textures, templates_collection, seed=0, set_seed=True):
self.w, self.h = dimensions
self.path = path # Where the image will be saved
self.road = Road(self.w, self.h)
# Defining the dictionaries containing random images
self.templates_collection = templates_collection
self.backgrounds = background_images
self.grounds = asphalt_textures
if set_seed:
random.seed(seed)
def setSeed(self, seed):
random.seed(seed)
def defineLanes(self, min_lanes, max_lanes, variation):
number_of_lanes = random.randint(min_lanes, max_lanes)
self.number_of_lanes = number_of_lanes
default_lane_proportion = (self.w / float(number_of_lanes))/float(self.w)
# Variation is given in percentage
# Creating the lanes:
lane_sizes = []
for i in range(number_of_lanes - 1):
# "addition" is the variation in the specific lane
addition = random.randint(-variation, variation) / 100.00 * default_lane_proportion
lane_width = addition + default_lane_proportion
lane_width = round(lane_width, 2) # Rounding the value to simplify the sizes
# Adding the lane sizes in a list to create the last lane without any size issue:
lane_sizes.append(lane_width)
self.road.newLane(math.ceil(lane_width*self.w))
# Creating the last lane
lanes_size = 0
for i in lane_sizes:
lanes_size += i
lane_width = self.w - math.floor(lanes_size * self.w)
self.road.newLane(lane_width)
def getRoad(self):
return self.road
def randomMark(self): # Returns a random template name
templates_names = tuple(self.templates_collection.labels)
random_key = templates_names[random.randint(0,len(templates_names) - 1)]
return random_key
def randomBackground(self): # Returns a random background name
backgrounds = self.backgrounds
return backgrounds[random.randint(0, len(backgrounds) - 1)]
def randomGround(self):
grounds = self.grounds
return grounds[random.randint(0, len(grounds) - 1)]
def getRotation(self, minx, maxx, miny, maxy, minz, maxz):
def randomRotation(mind, maxd):
multiplier = random.randint(0, 1000) / 1000
difference = maxd - mind
return multiplier * difference + mind
# Getting Y and Z rotation signs (positive or negative):
ysig = random.choice((-1, 1))
zsig = random.choice((-1, 1))
# Getting rotations, in degrees
x = -randomRotation(minx, maxx)
y = ysig * randomRotation(miny, maxy)
z = zsig * randomRotation(minz, maxz)
# Converting to radians:
x = x/180.00 * math.pi
y = y/180.00 * math.pi
z = z/180.00 * math.pi
# Returning rotations:
return (x, y, z)
def getShift(self, minx, maxx, miny, maxy):
# Getting shifts, in pixels
x = random.randint(0, 100) / 100 * (maxx - minx) + minx
y = random.randint(0, 100) / 100 * (maxy - miny) + miny
return (x, y)
def getRandomLane(self):
lanes = len(self.road.lanes)
return random.randint(0, lanes-1)
def getRandomSeparator(self, minwidth, maxwidth, mindotsize, maxdotsize, mindotdist, maxdotdist, minxdist, maxxdist):
# Defining colors:
colors = [
(255, 255, 255), # WHITE
(255, 255, 0), # YELLOW
(128, 128, 0) # DARK YELLOW
]
# Getting random color:
color = colors[random.randint(0,len(colors)-1)]
# Getting random dot_size:
dot_size = random.randint(mindotsize, maxdotsize)
# Getting random dot_dist:
dot_dist = random.randint(mindotdist, maxdotdist)
# Getting random x_dist:
x_dist = random.randint(minxdist, maxxdist)
# Getting random width:
width = random.randint(minwidth, maxwidth)
# Getting random true or false:
is_true = bool(random.getrandbits(1))
return (width, color, is_true, dot_size, dot_dist, x_dist)
def getLanesNumber(self):
return self.number_of_lanes
def insert_templates_at_lanes(self, delta_x, delta_y, min_h, max_h, min_w, max_w):
"""
min_h, max_h, min_w and max_w are proportions, they must be between 0 and 1
delta_x and delta_y are proportions, they must be between 0 and 1
"""
labels = self.templates_collection.labels
m = len(labels) # m : number of loaded templates.
road = self.getRoad()
# L is a vector which each index represents a lane in the road:
L = [math.ceil(m * random.randint(0, 100) / 100) for i in range(self.number_of_lanes)]
# Creating one empty lane:
if len(L) > 1:
L[random.randint(0, len(L) - 1)] = -1 # -1 means that there will be no template at that lane.
# Defining the exact position and vectors of the to-be-inserted templates:
templates = []
for l in range(len(L)):
Ln = L[l] - 1
if Ln == -1: continue # Skipping the "supposed-to-be-empty" lanes
lane = road.lanes[l]
# Defining the template's dimensions:
min_size = (min_h + min_w) / 2 * lane.w
max_size = (max_h + max_w) / 2 * lane.w
base_siz = random.randint(0, 100) / 100 * (max_size - min_size) + min_size
base_dim = (int(base_siz), int(base_siz))
# Getting the template vector:
template = self.templates_collection.get(labels[Ln], base_dim)
# Inserting the template at the lane:
dx, dy = lane.getAbsoluteCoordinates(int(delta_x * lane.w), int(delta_y * lane.h))
template.displacement = dx, dy
templates.append(template)
return templates
def draw_templates(self, img, templates):
for template in templates:
img = template.draw(img, (255, 255, 255, 255))
return img
def getTransform(self, maxblur, maxcontrast, maxbrightness):
contrast = random.randint(0, maxcontrast)
brightness = random.randint(0, maxbrightness)
blurvalues = [1, 1, 3, 3, 5, 5, 7, 7, 9, 9]
blur = blurvalues[random.randint(0, maxblur)]
return blur, contrast/100, brightness/100
def getAgingMatrix(self, max_age):
h, w = self.h, self.w
aging_matrix = np.abs(np.random.randn(h, w))
aging_matrix = np.clip(aging_matrix, 0, 0.01 * max_age)
return aging_matrix
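# A minimal usage sketch (hypothetical arguments; the background, asphalt and
# template collections come from the surrounding project and are assumed to be
# loaded elsewhere):
#
# image = RoadImage((1280, 720), "out/road_0.png", backgrounds, asphalts,
#                   templates, seed=42)
# image.defineLanes(min_lanes=2, max_lanes=4, variation=10)
# print(image.getLanesNumber(), "lanes generated")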
|
StarcoderdataPython
|
6619224
|
from omero.gateway import (
BlitzObjectWrapper,
_DatasetWrapper,
_ImageWrapper,
)
from qtpy.QtCore import QModelIndex
from qtpy.QtGui import QStandardItem, QStandardItemModel
from .gateway import QGateWay
from typing import Dict
class OMEROTreeItem(QStandardItem):
def __init__(self, wrapper: BlitzObjectWrapper):
super().__init__()
self.wrapper = wrapper
self.setData(wrapper)
# self._has_fetched = False
if self.hasChildren():
self.setText(f"{self.wrapper.getName()} ({self.numChildren()})")
else:
self.setText(f"{self.wrapper.getName()}")
# def canFetchMore(self) -> bool:
# if self._has_fetched or not self.hasChildren():
# return False
# return self.wrapper.countChildren() > 0
# def fetchChildren(self):
# for child in self.wrapper.listChildren():
# self.appendRow(OMEROTreeItem(child))
# self._has_fetched = True
def hasChildren(self):
return bool(self.wrapper.CHILD_WRAPPER_CLASS)
def numChildren(self) -> int:
return self.wrapper.countChildren()
def isDataset(self) -> bool:
return isinstance(self.wrapper, _DatasetWrapper)
def isImage(self) -> bool:
return isinstance(self.wrapper, _ImageWrapper)
class OMEROTreeModel(QStandardItemModel):
def __init__(self, gateway: QGateWay, parent=None):
super().__init__(parent)
self.gateway = gateway
self.gateway.connected.connect(
lambda g: self.gateway._submit(self._populate_tree)
)
self._wrapper_map: Dict[BlitzObjectWrapper, QModelIndex] = {}
def _populate_tree(self):
if not self.gateway.isConnected():
return
root = self.invisibleRootItem()
projects = []
for project in list(self.gateway.conn.listProjects()):
item = OMEROTreeItem(project)
root.appendRow(item)
projects.append(item)
self._wrapper_map[project.getId()] = self.indexFromItem(item)
yield
if not self.gateway.isConnected():
return
for item in projects:
for dataset in list(item.wrapper.listChildren()):
dchild = OMEROTreeItem(dataset)
item.appendRow(dchild)
self._wrapper_map[dataset.getId()] = self.indexFromItem(dchild)
yield
if not self.gateway.isConnected():
return
for image in list(dataset.listChildren()):
ichild = OMEROTreeItem(image)
dchild.appendRow(ichild)
self._wrapper_map[image.getId()] = self.indexFromItem(ichild)
yield
# def canFetchMore(self, index: QModelIndex) -> bool:
# item = self.itemFromIndex(index)
# return bool(item and item.canFetchMore())
# def fetchMore(self, index: QModelIndex) -> None:
# self.itemFromIndex(index).fetchChildren()
def hasChildren(self, index: QModelIndex) -> bool:
item = self.itemFromIndex(index)
if item is not None:
return item.hasChildren() and item.numChildren() > 0
return True
def itemFromIndex(self, index: QModelIndex) -> OMEROTreeItem:
return super().itemFromIndex(index)
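# A minimal wiring sketch (hypothetical; assumes a connected QGateWay and a
# running Qt event loop provided by the host application):
#
# from qtpy.QtWidgets import QTreeView
#
# gateway = QGateWay()
# model = OMEROTreeModel(gateway)
# view = QTreeView()
# view.setModel(model)
# view.show()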
|
StarcoderdataPython
|
1636941
|
from datetime import datetime
from django.db.models import Count
import olympia.core.logger
from olympia.amo.celery import task
from olympia.amo.decorators import use_primary_db
from .models import Collection, CollectionAddon
log = olympia.core.logger.getLogger('z.task')
@task
@use_primary_db
def collection_meta(*ids, **kw):
log.info(f'[{len(ids)}@{collection_meta.rate_limit}] Updating collection metadata.')
qs = CollectionAddon.objects.filter(collection__in=ids).values_list('collection')
counts = dict(qs.annotate(Count('id')))
now = datetime.now()
for collection_id, old_count in Collection.objects.filter(id__in=ids).values_list(
'pk', 'addon_count'
):
addon_count = counts.get(collection_id, 0)
if addon_count == old_count:
continue
# We want to set addon_count & modified without triggering post_save
# as it would cause an infinite loop (this task is called on
# post_save). So we use queryset.update() and set modified ourselves
# instead of relying on auto_now behaviour.
Collection.objects.filter(id=collection_id).update(
addon_count=addon_count, modified=now
)
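# Illustrative invocation (hypothetical collection ids; in production this
# task is normally queued from a post_save signal handler rather than called
# directly):
#
# collection_meta.delay(1, 2, 3)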
|
StarcoderdataPython
|
3388643
|
<filename>zinki_smachine/src/zinki_smachine/__init__.py<gh_stars>0
from .state import *
from .state_machine import *
from .state_name import *
from .transition_name import *
|
StarcoderdataPython
|
1922768
|
'''
Server class for the extraction of IoCs.
'''
# pylint: disable=C0413, C0411
import os
import sys
import json
import pytz
import re
import iocextract as ioce
sys.path.append('..')
from io import StringIO
from threading import Thread
from kafka.producer import KafkaProducer
from kafka.consumer import KafkaConsumer
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfparser import PDFParser
from ioc_finder import find_iocs
from flask import Flask
from flask import request
from flask import render_template
from flask_script import Server
from flask_apscheduler import APScheduler
from flask_dropzone import Dropzone
from libs.core.filter import filter_dict_values
from libs.core.filter import filter_by_blacklist
from libs.core.merge_dicts import merge_dicts
from libs.core.environment import envvar
from libs.kafka.topichandler import create_topic_if_not_exists
from libs.kafka.logging import LogMessage
from libs.kafka.logging import send_health_message
from libs.extensions.loader import load_extensions
from libs.gitlabl.files import read_file_from_gitlab
from libs.gitlabl.sanitize_title import sanitize_title
from libs.text_summarization.tsummarization import summarize
import traceback
# ENVIRONMENT-VARS
SERVICENAME = envvar("SERVICENAME", "Extractor")
IOC_TOPIC_NAME = envvar("IOC_TOPIC", "ioc")
SCRAPER_TOPIC_NAME = envvar("SCRAPER_TOPIC", "datascraper")
# nosec
KAFKA_SERVER = envvar("KAFKA_SERVER", "0.0.0.0:9092")
HEALTHTOPIC = envvar("HEALTH_TOPIC", "health_report")
# nosec
GITLAB_SERVER = envvar("GITLAB_SERVER", "0.0.0.0:10082")
GITLAB_TOKEN = envvar("GITLAB_TOKEN", "<PASSWORD>")
GITLAB_REPO_NAME = envvar("GITLAB_REPO_NAME", "IOCFindings")
DOCKER_REPORTS_PATH = "/app/iocextractor/reports"
class Config:
'''
Config class with configs for flask.
'''
SCHEDULER_API_ENABLED = True
app = Flask(SERVICENAME, template_folder='templates', static_folder="static/", static_url_path='/static')
app.config.from_object(Config())
app.config['DROPZONE_ALLOWED_FILE_CUSTOM'] = True
app.config['DROPZONE_ALLOWED_FILE_TYPE'] = '.pdf'
app.config['DROPZONE_MAX_FILE_SIZE'] = 10
app.config['DROPZONE_MAX_FILES'] = 100
app.config['UPLOADED_PATH'] = os.path.join(DOCKER_REPORTS_PATH, 'uploads')
dropzone = Dropzone(app)
scheduler = APScheduler()
scheduler.init_app(app)
def flaskapp():
'''
flaskapp will return the FLASK_APP.
@return a flask_app
'''
return app
class Extractor(Server):
'''
Extractor will be the class for the extractor-server.
'''
EXTENSIONS = load_extensions(SERVICENAME)
BLACKLIST = {}
@app.route('/', methods=['GET', 'POST'])
def file_dropzone():
'''
file_dropzone will render a drag and drop view for the reports.
@return a rendered template.
'''
if request.method == 'POST':
files = request.files.get('file')
file_path = os.path.join(DOCKER_REPORTS_PATH, files.filename)
files.save(file_path)
return render_template('index.html')
@staticmethod
@scheduler.task("interval", id="refetch", seconds=30, timezone=pytz.UTC)
def refetch_blacklist():
'''
refetch_blacklist will fetch the blacklist from the master every 30 seconds.
'''
content = {}
try:
if Extractor.BLACKLIST is None or len(Extractor.BLACKLIST) <= 0:
LogMessage("Using local blacklist.", LogMessage.LogTyp.INFO, SERVICENAME).log()
with open(os.path.abspath("../datasets/blacklist.json")) as blacklist_file:
content = json.load(blacklist_file)
else:
LogMessage("Using blacklist from gitlab.", LogMessage.LogTyp.INFO, SERVICENAME).log()
content = read_file_from_gitlab(gitlabserver=GITLAB_SERVER, token=GITLAB_TOKEN, repository=GITLAB_REPO_NAME, file="blacklist.json", servicename=SERVICENAME, branch_name="master")
content = json.loads(content)
if content is not None:
Extractor.BLACKLIST = content
except Exception as error:
LogMessage(str(error), LogMessage.LogTyp.ERROR, SERVICENAME).log()
@staticmethod
def pushfindings(findings):
'''
pushfindings will push all findings to KAFKA.
@param findings will be the findings.
'''
try:
producer = KafkaProducer(bootstrap_servers=KAFKA_SERVER, client_id='ioc_extractor', api_version=(2, 7, 0))
message = str(json.dumps(findings)).encode('UTF-8')
producer.send(IOC_TOPIC_NAME, message)
except Exception as error:
LogMessage(str(error), LogMessage.LogTyp.ERROR, SERVICENAME).log()
@staticmethod
def extensions(string):
'''
extensions will execute extensions for this server.
@param string will be the string to check against.
@return findings in the string matching the extensions-rules.
'''
findings = {}
try:
for i in Extractor.EXTENSIONS:
try:
l_findings = re.findall(i.get_pattern(), string)
if len(l_findings) > 0 and isinstance(l_findings[0], tuple):
findings[str(i.field)] = [
element[i.get_group()] if len(element) - 1 >= i.get_group() else element.group(0) for
element in l_findings]
else:
findings[str(i.field)] = l_findings
except Exception as error:
LogMessage(str(error), LogMessage.LogTyp.ERROR, SERVICENAME).log()
except Exception as error:
LogMessage(str(error), LogMessage.LogTyp.ERROR, SERVICENAME).log()
return findings
@staticmethod
def extract_ioc(pdftext):
'''
extract_ioc will extract all IoCs from a given text.
@param pdftext will be the text to search through.
@return will return a dictionary with all IoCs or an empty
dict in case of no IoCs or an error.
'''
iocs = {}
try:
iocs = find_iocs(pdftext)
urls = [rule for rule in ioce.extract_urls(pdftext, refang=True)]
iocs['urls'] = list(dict.fromkeys(urls))
yara_rules = [rule for rule in ioce.extract_yara_rules(pdftext)]
iocs['yara_rules'] = yara_rules
if len(pdftext) > 200: iocs['textsummary'] = summarize(pdftext, SERVICENAME)
ex_ioc = Extractor.extensions(pdftext)
iocs = merge_dicts(iocs, filter_dict_values(ex_ioc, SERVICENAME), SERVICENAME)
iocs = filter_by_blacklist(iocs, Extractor.BLACKLIST, SERVICENAME)
except Exception as error:
LogMessage(f"{str(error)} {''.join(traceback.format_tb(error.__traceback__))}", LogMessage.LogTyp.ERROR, SERVICENAME).log()
return iocs
@staticmethod
def extract(reportpath):
'''
extract will take a PDF-File as path and try to extract all IoC's. After the
Extraction, the file will be removed. The IoC's will be pushed to KAFKA
by calling the pushfindings-Function.
@param reportpath will be the path to the PDF-File.
'''
try:
pdf_content = StringIO()
LogMessage(f"Extract ioc's from file: {reportpath}", LogMessage.LogTyp.INFO, SERVICENAME).log()
with open(reportpath, 'rb') as file:
resource_manager = PDFResourceManager()
device = TextConverter(resource_manager, pdf_content, laparams=LAParams())
interpreter = PDFPageInterpreter(resource_manager, device)
for page in PDFPage.create_pages(PDFDocument(PDFParser(file))):
interpreter.process_page(page)
pdftext = pdf_content.getvalue()
iocs = Extractor.extract_ioc(pdftext)
input_filename = sanitize_title(unsanitized_title=str((os.path.basename(reportpath))), servicename=SERVICENAME)
iocs['input_filename'] = input_filename
Extractor.pushfindings(iocs)
os.remove(reportpath)
LogMessage(f"The ioc's had been extracted from the file and the file has been removed: {reportpath}", LogMessage.LogTyp.INFO, SERVICENAME).log()
except Exception as error:
LogMessage(str(error), LogMessage.LogTyp.ERROR, SERVICENAME).log()
@scheduler.task("interval", id="health_push", seconds=5, timezone=pytz.UTC)
def healthpush():
'''
healthpush will send a health message to KAFKA.
'''
try:
send_health_message(KAFKA_SERVER, HEALTHTOPIC, SERVICENAME)
except Exception as error:
LogMessage(str(error), LogMessage.LogTyp.ERROR, SERVICENAME).log()
@staticmethod
def handle_scraper_feed(data):
'''
handle_scraper_feed will take the data from the scraper,
extract all IoCs and push the result to
KAFKA for the ioc pusher.
@param data will be the data from KAFKA.
'''
try:
if (json_data := json.loads(data.value.decode("utf-8"))) is not None:
iocs = Extractor.extract_ioc(json_data.get('content'))
if iocs is not None and len(iocs) > 0:
input_filename = sanitize_title(unsanitized_title=str(json_data.get('title')), servicename=SERVICENAME)
iocs['input_filename'] = input_filename
Extractor.pushfindings(iocs)
except Exception as error:
LogMessage(str(error), LogMessage.LogTyp.ERROR, SERVICENAME).log()
@staticmethod
def consume_findings_from_scraper():
'''
consume_findings_from_scraper will consume all findings from KAFKA and
push them into the gitlab repository.
'''
try:
consumer = KafkaConsumer(SCRAPER_TOPIC_NAME, bootstrap_servers=KAFKA_SERVER, client_id='ioc_extractor',
api_version=(2, 7, 0), )
for report in consumer:
Thread(target=Extractor.handle_scraper_feed, args=(report,), daemon=True).start()
except Exception as error:
LogMessage(str(error), LogMessage.LogTyp.ERROR, SERVICENAME).log()
@scheduler.task("interval", id="execute", seconds=10, timezone=pytz.UTC, misfire_grace_time=900)
def execute():
'''
execute will run the service, search for PDFs and start a thread for
every file. The thread will execute the extract-Function and extract all IoC's
in a file.
'''
try:
if (reports := os.listdir(DOCKER_REPORTS_PATH)) is not None and len(reports) > 0:
threads = []
for report in reports:
if report.endswith(".pdf"):
threads.append(
Thread(target=Extractor.extract, args=(os.path.join(DOCKER_REPORTS_PATH, report),)))
for instance in threads:
instance.start()
for instance in threads:
instance.join()
except Exception as error:
LogMessage(str(error), LogMessage.LogTyp.ERROR, SERVICENAME).log()
def __call__(self, app, *args, **kwargs):
'''
__call__ will be executed before the server is created and runs some functions
on startup: a topic will be created for the IoC's and the scheduler
will be started for the cron-jobs.
@param self is the Server-Object.
@param app will be the app passed to the __call__ function of the server-class
@param *args and **kwargs will be the vargs passed to the __call__ function
of the server-class
'''
create_topic_if_not_exists(KAFKA_SERVER, IOC_TOPIC_NAME)
Extractor.refetch_blacklist()  # populates Extractor.BLACKLIST as a side effect
scheduler.start()
Thread(target=Extractor.consume_findings_from_scraper, daemon=True).start()
return Server.__call__(self, app, *args, **kwargs)
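# A minimal launch sketch (assumes the conventional flask_script setup; the
# manage-module wiring is not shown in this file):
#
# from flask_script import Manager
#
# manager = Manager(flaskapp())
# manager.add_command("runserver", Extractor())
# manager.run()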
|
StarcoderdataPython
|
8102763
|
import sys
def get_python_version():
major = sys.version_info.major
minor = sys.version_info.minor
micro = sys.version_info.micro
releaselevel = sys.version_info.releaselevel
serial = sys.version_info.serial
version = f"{major}.{minor}.{micro}"
if releaselevel != "final":
version += f"-{releaselevel}.{serial}"
return version
PLUGIN_METADATA = {
"id": "python",
"version": get_python_version(),
"name": f"Python {get_python_version()}",
"description": f"Python {sys.version}",
"link": "https://python.org"
}
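# Illustrative values (they depend on the running interpreter): under CPython
# 3.11.4, a "final" release, get_python_version() returns "3.11.4"; under a
# release candidate such as 3.12.0rc1 it returns "3.12.0-candidate.1".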
|
StarcoderdataPython
|
1643902
|
from django.apps import AppConfig
class ShareimgConfig(AppConfig):
name = 'shareimg'
|
StarcoderdataPython
|
3218065
|
<reponame>daVinciCEB/Basic-Python-Package
import unittest
from context import core
class ExampleTest(unittest.TestCase):
"""An example test in unittest fashion."""
def setUp(self):
pass
def test_will_pass(self):
self.assertEqual(1, 1)
def test_will_not_pass(self):
self.assertEqual(1, 2)  # intentionally failing, to match the test name
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
3570868
|
# Training
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, TensorBoard, CSVLogger
from keras.optimizers import SGD
from keras import backend as K
from models.model3 import M_b_Xception_896
trainset_dir = 'data/train/'
valset_dir = 'data/val/'
num_classes = 6
learning_rate = 1e-3
batch_size = 8
input_shape = (229, 229, 3)
momentum = 0.9
train_datagen = ImageDataGenerator(rescale=1. / 255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
fill_mode='nearest')
val_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
trainset_dir,
target_size=(input_shape[0], input_shape[1]),
batch_size=batch_size)
val_generator = val_datagen.flow_from_directory(
valset_dir,
target_size=(input_shape[0], input_shape[1]),
batch_size=batch_size,
shuffle=False)
K.clear_session()
optim = SGD(lr=learning_rate, momentum=momentum)
model = M_b_Xception_896(input_shape, num_classes)
model.compile(optimizer=optim, loss='categorical_crossentropy',
metrics=['acc'])
csv_path = 'result_show/M_b_Xception_896.csv'
log_dir = 'result_show/M_b_Xception_896/'
save_weights_path = 'weights/M_b_Xception_896/trash-model-weight-ep-{epoch:02d}-val_loss-{val_loss:.4f}-val_acc-{val_acc:.4f}.h5'
checkpoint = ModelCheckpoint(save_weights_path, monitor='val_acc', verbose=1,
save_weights_only=True, save_best_only=True)
reduce_lr = ReduceLROnPlateau(monitor='val_acc', factor=0.5, patience=20, verbose=1, min_lr=1e-4)
#earlystop = EarlyStopping(monitor='val_acc', patience=25, verbose=1)
logging = TensorBoard(log_dir=log_dir, batch_size=batch_size)
csvlogger = CSVLogger(csv_path, append=True)
callbacks = [checkpoint, reduce_lr, logging, csvlogger]
num_epochs = 5000
model.fit_generator(train_generator,
steps_per_epoch=len(train_generator),
epochs=num_epochs,
verbose=1,
callbacks=callbacks,
validation_data=val_generator,
validation_steps=len(val_generator),
workers=1)
# fit_generator(self, generator, steps_per_epoch, epochs=1, verbose=1,
# callbacks=None, validation_data=None, validation_steps=None,
# class_weight=None, max_q_size=10, workers=1, pickle_safe=False, initial_epoch=0)
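# A minimal evaluation sketch (the checkpoint filename below is hypothetical;
# it depends on the epoch/metric values ModelCheckpoint interpolates at train time):
#
# model.load_weights('weights/M_b_Xception_896/trash-model-weight-ep-42-val_loss-0.1234-val_acc-0.9567.h5')
# loss, acc = model.evaluate_generator(val_generator, steps=len(val_generator))
# print('val_loss: %.4f, val_acc: %.4f' % (loss, acc))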
|
StarcoderdataPython
|
6428700
|
<filename>tests/conftest.py
import asyncio
import pytest
@pytest.fixture(scope='session')
def simple_gen():
return _simple_gen
async def _simple_gen(sequence, delay=0):
for item in sequence:
yield await asyncio.sleep(delay, item)
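# A minimal consumer sketch (hypothetical test module; assumes pytest-asyncio
# or an equivalent plugin runs the coroutine test):
#
# import pytest
#
# @pytest.mark.asyncio
# async def test_simple_gen(simple_gen):
#     items = [item async for item in simple_gen([1, 2, 3])]
#     assert items == [1, 2, 3]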
|
StarcoderdataPython
|
3496106
|
<filename>glycan_profiling/output/xml.py
import os
import re
import bisect
from collections import defaultdict, OrderedDict, namedtuple, deque
from brainpy import mass_charge_ratio
import glypy
from glypy.composition import formula
from glypy.io.nomenclature import identity
from glypy.structure.glycan_composition import (
MonosaccharideResidue,
FrozenMonosaccharideResidue, SubstituentResidue, FrozenGlycanComposition)
from glycopeptidepy.structure import parser, modification
from psims.mzid import components
from psims.mzid.writer import MzIdentMLWriter
from psims.controlled_vocabulary.controlled_vocabulary import load_gno
from ms_deisotope.output import mzml
from glycan_profiling import task, serialize, version
from glycan_profiling.chromatogram_tree import Unmodified
from glycan_profiling.chromatogram_tree.chromatogram import group_by
class mass_term_pair(namedtuple("mass_term_pair", ('mass', 'term'))):
def __lt__(self, other):
return self.mass < float(other)
def __gt__(self, other):
return self.mass > float(other)
def __float__(self):
return self.mass
valid_monosaccharides = [
glypy.MonosaccharideResidue.from_iupac_lite("Hex"),
glypy.MonosaccharideResidue.from_iupac_lite("HexNAc"),
glypy.MonosaccharideResidue.from_iupac_lite("dHex"),
glypy.MonosaccharideResidue.from_iupac_lite("NeuAc"),
glypy.MonosaccharideResidue.from_iupac_lite("NeuGc"),
glypy.MonosaccharideResidue.from_iupac_lite("Pen"),
glypy.MonosaccharideResidue.from_iupac_lite("Fuc"),
# glypy.MonosaccharideResidue.from_iupac_lite("HexA"),
# glypy.MonosaccharideResidue.from_iupac_lite("HexN"),
]
def monosaccharide_to_term(monosaccharide):
if glypy.MonosaccharideResidue.from_iupac_lite("dHex") == monosaccharide:
return 'dHex'
return str(monosaccharide)
substituent_map = {
"S": "sulfate",
"P": "phosphate",
# "Me": "methyl",
# "Ac": "acetyl",
}
inverted_substituent_map = {
v: k for k, v in substituent_map.items()
}
substituent_map['Sulpho'] = "sulfate"
substituent_map['Phospho'] = "phosphate"
def mparam(name, value=None, accession=None, cvRef="PSI-MS", **kwargs):
if isinstance(name, dict):
value = name.pop('value', None)
accession = name.pop('accession')
cvRef = name.pop('cvRef', cvRef)
name_ = name.pop("name")
kwargs.update(name)  # carry over any remaining entries from the dict form
name = name_
return components.CVParam(
name=name,
value=value,
accession=accession,
cvRef=cvRef,
**kwargs)
def parse_glycan_formula(glycan_formula):
gc = FrozenGlycanComposition()
if glycan_formula.startswith("\""):
glycan_formula = glycan_formula[1:-1]
for mono, count in re.findall(r"([^0-9]+)\((\d+)\)", glycan_formula):
count = int(count)
if mono in substituent_map:
parsed = SubstituentResidue(substituent_map[mono])
elif mono in ("Sia", ):
continue
elif mono in ("Pent", ):
mono = "Pen"
parsed = FrozenMonosaccharideResidue.from_iupac_lite(mono)
elif mono == 'Xxx':
continue
elif mono == 'X':
continue
else:
parsed = FrozenMonosaccharideResidue.from_iupac_lite(mono)
gc[parsed] += count
return gc
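# Illustrative call (hypothetical input): parse_glycan_formula("Hex(5)HexNAc(2)")
# returns a FrozenGlycanComposition holding 5 Hex and 2 HexNAc residues, while a
# token such as "S" would be mapped to SubstituentResidue("sulfate") via
# substituent_map above.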
class GNOmeResolver(object):
def __init__(self, cv=None):
if cv is None:
cv = load_gno()
self.cv = cv
self.build_mass_search_index()
self.add_glycan_compositions()
def add_glycan_compositions(self):
formula_key = "GNO:00000202"
for term in self.cv.terms.values():
glycan_formula = term.get(formula_key)
if glycan_formula:
term['glycan_composition'] = parse_glycan_formula(glycan_formula)
def build_mass_search_index(self):
mass_index = []
for term in self.cv.terms.values():
match = re.search(r"weight of (\d+\.\d+) Da", term.definition)
if match:
mass = float(match.group(1))
term['mass'] = mass
mass_index.append(mass_term_pair(mass, term))
mass_index.sort()
self.mass_index = mass_index
def _find_mass_match(self, mass):
i = bisect.bisect_left(self.mass_index, mass)
lo = self.mass_index[i - 1]
lo_err = abs(lo.mass - mass)
hi = self.mass_index[i]
hi_err = abs(hi.mass - mass)
if hi_err < lo_err:
term = hi.term
elif hi_err > lo_err:
term = lo.term
else:
raise ValueError(
"Ambiguous duplicate masses (%0.2f, %0.2f)" % (lo.mass, hi.mass))
return term
def resolve_gnome(self, glycan_composition):
mass = glycan_composition.mass()
term = self._find_mass_match(mass)
recast = glycan_composition.clone().reinterpret(valid_monosaccharides)
visit_queue = deque(term.children)
while visit_queue:
child = visit_queue.popleft()
gc = child.get("glycan_composition")
if gc is None:
visit_queue.extend(child.children)
elif gc == recast:
return child
def glycan_composition_to_terms(self, glycan_composition):
out = []
term = self.resolve_gnome(glycan_composition)
if term is not None:
out.append({
"accession": term.id,
"name": term.name,
"cvRef": term.vocabulary.name
})
reinterpreted = glycan_composition.clone().reinterpret(valid_monosaccharides)
for mono, count in reinterpreted.items():
if isinstance(mono, SubstituentResidue):
subst = inverted_substituent_map.get(
mono.name.replace("@", ""))
if subst is not None:
out.append({
"name": "monosaccharide count",
"value": ("%s:%d" % (subst, count)),
"accession": "MS:XXXXX2",
"cvRef": "PSI-MS"
})
else:
out.append({
"name": "unknown monosaccharide count",
"value": ("%s:%0.3f:%d" % (mono.name.replace("@", ""), mono.mass(), count)),
"accession": "MS:XXXXX3",
"cvRef": "PSI-MS"
})
elif isinstance(mono, MonosaccharideResidue):
for known in valid_monosaccharides:
if identity.is_a(mono, known):
out.append({
"name": "monosaccharide count",
"value": ("%s:%d" % (monosaccharide_to_term(known), count)),
"accession": "MS:XXXXX2",
"cvRef": "PSI-MS"
})
break
else:
out.append({
"name": "unknown monosaccharide count",
"value": ("%s:%0.3f:%d" % (monosaccharide_to_term(mono), mono.mass(), count)),
"accession": "MS:XXXXX3",
"cvRef": "PSI-MS"
})
else:
raise TypeError("Cannot handle unexpected component of type %s" % (type(mono), ))
return out
def convert_to_protein_dict(protein, include_sequence=True):
data = {
"id": protein.id,
"accession": protein.name,
"search_database_id": 1,
}
if include_sequence:
data["sequence"] = protein.protein_sequence
return data
def convert_to_peptide_evidence_dict(glycopeptide, id_tracker):
data = {
"start_position": glycopeptide.protein_relation.start_position,
"end_position": glycopeptide.protein_relation.end_position,
"peptide_id": id_tracker(glycopeptide),
"db_sequence_id": glycopeptide.protein_relation.protein_id,
"is_decoy": False,
"id": glycopeptide.id
}
return data
def convert_to_identification_item_dict(spectrum_match, seen=None, id_tracker=None):
if seen is None:
seen = set()
charge = spectrum_match.scan.precursor_information.charge
if spectrum_match.target.id not in seen:
return None
data = {
"charge_state": charge,
"experimental_mass_to_charge": mass_charge_ratio(
spectrum_match.scan.precursor_information.neutral_mass, charge),
"calculated_mass_to_charge": mass_charge_ratio(
spectrum_match.target.total_mass, charge),
"peptide_id": id_tracker(spectrum_match.target),
"peptide_evidence_id": spectrum_match.target.id,
"score": mparam({
"name": "GlycReSoft:total score",
"value": spectrum_match.score,
"accession": "MS:XXX10A",
}),
"params": [
components.CVParam(**{
"name": "glycan dissociating, peptide preserving",
"accession": "MS:XXX111", "cvRef": "PSI-MS"}),
components.CVParam(**{
"name": "glycan eliminated, peptide dissociating",
"accession": "MS:XXX114", "cvRef": "PSI-MS"}),
{
"name": "scan start time",
"value": spectrum_match.scan.scan_time,
"unit_name": "minute"
}
],
"id": spectrum_match.id
}
if spectrum_match.is_multiscore():
score_params = [
mparam("GlycReSoft:peptide score",
spectrum_match.score_set.peptide_score, "MS:XXX10C"),
mparam("GlycReSoft:glycan score",
spectrum_match.score_set.glycan_score, "MS:XXX10B"),
mparam("GlycReSoft:glycan coverage",
spectrum_match.score_set.glycan_coverage, "MS:XXX10H"),
mparam("GlycReSoft:joint q-value",
spectrum_match.q_value, "MS:XXX10G"),
mparam("GlycReSoft:peptide q-value",
spectrum_match.q_value_set.peptide_q_value,
"MS:XXX10E"),
mparam("GlycReSoft:glycan q-value",
spectrum_match.q_value_set.glycan_q_value, "MS:XXX10F"),
mparam("GlycReSoft:glycopeptide q-value",
spectrum_match.q_value_set.glycopeptide_q_value, "MS:XXX10D"),
]
data['params'].extend(score_params)
else:
data['params'].extend([
mparam("GlycReSoft:glycopeptide q-value",
spectrum_match.q_value, "MS:XXX10D"),
])
if spectrum_match.mass_shift.name != Unmodified.name:
data['params'].append(
mparam("GlycReSoft:mass shift", "%s:%0.3f:%0.3f" % (
spectrum_match.mass_shift.name,
spectrum_match.mass_shift.mass,
spectrum_match.mass_shift.tandem_mass),
"MS:XXX10I"))
return data
def convert_to_spectrum_identification_dict(spectrum_solution_set, seen=None, id_tracker=None):
data = {
"spectra_data_id": 1,
"spectrum_id": spectrum_solution_set.scan.scan_id,
"id": spectrum_solution_set.id
}
idents = []
for item in spectrum_solution_set:
d = convert_to_identification_item_dict(item, seen=seen, id_tracker=id_tracker)
if d is None:
continue
idents.append(d)
data['identifications'] = idents
return data
class MzMLExporter(task.TaskBase):
def __init__(self, source, outfile):
self.reader = mzml.ProcessedMzMLDeserializer(source)
self.outfile = outfile
self.writer = None
self.n_spectra = None
def make_writer(self):
self.writer = mzml.MzMLScanSerializer(
self.outfile, sample_name=self.reader.sample_run.name,
n_spectra=self.n_spectra)
def aggregate_scan_bunches(self, scan_ids):
scans = defaultdict(list)
for scan_id in scan_ids:
scan = self.reader.get_scan_by_id(scan_id)
scans[scan.precursor_information.precursor_scan_id].append(
scan)
bunches = []
for precursor_id, products in scans.items():
products.sort(key=lambda x: x.scan_time)
precursor = self.reader.get_scan_by_id(precursor_id)
bunches.append(mzml.ScanBunch(precursor, products))
bunches.sort(key=lambda bunch: bunch.precursor.scan_time)
return bunches
def begin(self, scan_bunches):
self.n_spectra = sum(len(b.products) for b in scan_bunches) + len(scan_bunches)
self.make_writer()
for bunch in scan_bunches:
self.put_scan_bunch(bunch)
def put_scan_bunch(self, bunch):
self.writer.save_scan_bunch(bunch)
def extract_chromatograms_from_identified_glycopeptides(self, glycopeptide_list):
by_chromatogram = group_by(
glycopeptide_list, lambda x: (
x.chromatogram.chromatogram if x.chromatogram is not None else None))
i = 0
for chromatogram, members in by_chromatogram.items():
if chromatogram is None:
continue
self.enqueue_chromatogram(chromatogram, i, params=[
{"name": "GlycReSoft:profile score", "value": members[0].ms1_score},
{"name": "GlycReSoft:assigned entity", "value": str(members[0].structure)}
])
i += 1
def enqueue_chromatogram(self, chromatogram, chromatogram_id, params=None):
if params is None:
params = []
chromatogram_data = dict()
rt, signal = chromatogram.as_arrays()
chromatogram_dict = OrderedDict(zip(rt, signal))
chromatogram_data['chromatogram'] = chromatogram_dict
chromatogram_data['chromatogram_type'] = 'selected ion current chromatogram'
chromatogram_data['id'] = chromatogram_id
chromatogram_data['params'] = params
self.writer.chromatogram_queue.append(chromatogram_data)
def complete(self):
self.writer.complete()
self.writer.format()
class SequenceIdTracker(object):
def __init__(self):
self.mapping = dict()
def convert(self, glycopeptide):
s = str(glycopeptide)
if s in self.mapping:
return self.mapping[s]
else:
self.mapping[s] = glycopeptide.id
return self.mapping[s]
def __call__(self, glycopeptide):
return self.convert(glycopeptide)
def dump(self):
for key, value in self.mapping.items():
print(value, key)
def glycosylation_type_to_term(glycosylation_type):
remap = {
"N-Linked": {
"name": "N-glycan",
"accession": "MS:XXXXX5",
"cvRef": "PSI-MS",
},
"O-Linked": {
"name": "<NAME>",
"accession": "MS:XXXXX6",
"cvRef": "PSI-MS",
},
"GAG linker": {
"name": "glycosaminoglycan",
"accession": "MS:XXXXX7",
"cvRef": "PSI-MS",
},
}
return remap[glycosylation_type]
class MzIdentMLSerializer(task.TaskBase):
def __init__(self, outfile, glycopeptide_list, analysis, database_handle,
q_value_threshold=0.05, ms2_score_threshold=0,
export_mzml=True, source_mzml_path=None,
output_mzml_path=None, embed_protein_sequences=True):
self.outfile = outfile
self.database_handle = database_handle
self._glycopeptide_list = glycopeptide_list
self.protein_list = None
self.analysis = analysis
self.scan_ids = set()
self._id_tracker = SequenceIdTracker()
self.q_value_threshold = q_value_threshold
self.ms2_score_threshold = ms2_score_threshold
self.export_mzml = export_mzml
self.source_mzml_path = source_mzml_path
self.output_mzml_path = output_mzml_path
self.embed_protein_sequences = embed_protein_sequences
self.gnome_resolver = GNOmeResolver()
@property
def glycopeptide_list(self):
return self._glycopeptide_list
def extract_proteins(self):
self.protein_list = [self.database_handle.query(
serialize.Protein).get(i) for i in
{gp.protein_relation.protein_id for gp in self.glycopeptide_list}]
def convert_to_peptide_dict(self, glycopeptide, id_tracker):
data = {
"id": glycopeptide.id,
"peptide_sequence": parser.strip_modifications(glycopeptide),
"modifications": []
}
i = 0
# TODO: handle N-terminal and C-terminal modifications
glycosylation_event_count = len(glycopeptide.convert().glycosylation_manager)
glycosylation_events_handled = 0
for _pos, mods in glycopeptide:
i += 1
if not mods:
continue
else:
mod = mods[0]
if mod.rule.is_a("glycosylation"):
glycosylation_events_handled += 1
is_aggregate_stub = False
mod_params = [
glycosylation_type_to_term(
str(mod.rule.glycosylation_type))
]
if mod.rule.is_core:
mod_params.extend(
self.gnome_resolver.glycan_composition_to_terms(glycopeptide.glycan_composition.clone()))
mass = glycopeptide.glycan_composition.mass()
if glycosylation_event_count == 1:
mod_params.append({
"name": "glycan composition",
"cvRef": "PSI-MS",
"accession": "MS:XXXX14"
})
else:
mod_params.append({
"name": "glycan aggregate",
"cvRef": "PSI-MS",
"accession": "MS:XXXX15"
})
if glycosylation_events_handled > 1:
mass = 0
is_aggregate_stub = True
if not is_aggregate_stub:
mod_params.append({
"accession": 'MS:1000864',
"cvRef": "PSI-MS",
"name": "chemical formula",
"value": formula(glycopeptide.glycan_composition.total_composition()),
})
else:
mod_params.append({
"accession": 'MS:1000864',
"cvRef": "PSI-MS",
"name": "chemical formula",
"value": formula(mod.rule.composition),
})
if mod.rule.is_composition:
mod_params.extend(self.gnome_resolver.glycan_composition_to_terms(mod.rule.glycan.clone()))
mod_params.append({
"name": "glycan composition",
"cvRef": "PSI-MS",
"accession": "MS:XXXX14"
})
else:
mod_params.append({
"name": "glycan structure",
"cvRef": "PSI-MS",
"accession": "MS:XXXXXXX"
})
mass = mod.mass
mod_dict = {
"monoisotopic_mass_delta": mass,
"location": i,
# "name": "unknown modification",
"name": "glycosylation modification",
"params": [components.CVParam(**x) for x in mod_params]
}
data['modifications'].append(mod_dict)
else:
mod_dict = {
"monoisotopic_mass_delta": mod.mass,
"location": i,
"name": mod.name,
}
data['modifications'].append(mod_dict)
return data
def extract_peptides(self):
self.log("Extracting Proteins")
self.extract_proteins()
self._peptides = []
seen = set()
self.log("Extracting Peptides")
for gp in self.glycopeptide_list:
d = self.convert_to_peptide_dict(gp.structure, self._id_tracker)
if self._id_tracker(gp.structure) == gp.structure.id:
self._peptides.append(d)
seen.add(gp.structure.id)
self.log("Extracting PeptideEvidence")
self._peptide_evidence = [
convert_to_peptide_evidence_dict(
gp.structure, self._id_tracker) for gp in self.glycopeptide_list
]
self._proteins = [
convert_to_protein_dict(prot, self.embed_protein_sequences)
for prot in self.protein_list
]
def extract_spectrum_identifications(self):
self.log("Extracting SpectrumIdentificationResults")
spectrum_identifications = []
seen_scans = set()
accepted_solution_ids = {gp.structure.id for gp in self.glycopeptide_list}
for gp in self.glycopeptide_list:
for solution in gp.spectrum_matches:
if solution.scan.scan_id in seen_scans:
continue
if solution.best_solution().q_value > self.q_value_threshold:
continue
if solution.score < self.ms2_score_threshold:
continue
seen_scans.add(solution.scan.scan_id)
d = convert_to_spectrum_identification_dict(
solution, seen=accepted_solution_ids,
id_tracker=self._id_tracker)
if len(d['identifications']):
spectrum_identifications.append(d)
self.scan_ids = seen_scans
self._spectrum_identification_list = {
"id": 1,
"identification_results": spectrum_identifications
}
def software_entry(self):
software = {
"name": "GlycReSoft",
"version": version.version,
"uri": None
}
return [software]
def search_database(self):
hypothesis = self.analysis.hypothesis
spec = {
"name": hypothesis.name,
"location": self.database_handle._original_connection,
"id": 1
}
if "fasta_file" in hypothesis.parameters:
spec['file_format'] = 'fasta format'
spec['location'] = hypothesis.parameters['fasta_file']
elif "mzid_file" in hypothesis.parameters:
spec['file_format'] = 'mzIdentML format'
return spec
def source_file(self):
spec = {
"location": self.database_handle._original_connection,
"file_format": "data stored in database",
"id": 1
}
return spec
def spectra_data(self):
spec = {
"location": self.analysis.parameters['sample_path'],
"file_format": 'mzML format',
"spectrum_id_format": "multiple peak list nativeID format",
"id": 1
}
return spec
def protocol(self):
hypothesis = self.analysis.hypothesis
analysis = self.analysis
mods = []
def transform_modification(mod):
if isinstance(mod, str):
mod_inst = modification.Modification(mod)
target = modification.extract_targets_from_rule_string(mod)
new_rule = mod_inst.rule.clone({target})
return new_rule
return mod
def pack_modification(mod, fixed=True):
mod_spec = {
"fixed": fixed,
"mass_delta": mod.mass,
"residues": [res.symbol for rule in mod.targets
for res in rule.amino_acid_targets],
"params": [
mod.name
]
}
return mod_spec
for mod in hypothesis.parameters.get('constant_modifications', []):
mod = transform_modification(mod)
mods.append(pack_modification(mod, True))
for mod in hypothesis.parameters.get('variable_modifications', []):
mod = transform_modification(mod)
mods.append(pack_modification(mod, False))
strategy = analysis.parameters.get("search_strategy")
if strategy == "multipart-target-decoy-competition":
fdr_params = [
{"name": "peptide glycopeptide false discovery rate control strategy",
"accession": "MS:XXX106", "cvRef": "PSI-MS"},
{"name": "glycan glycopeptide false discovery rate control strategy",
"accession": "MS:XXX107", "cvRef": "PSI-MS"},
{"name": "total glycopeptide false discovery rate control strategy",
"accession": "MS:XXX108", "cvRef": "PSI-MS"},
{"name": "joint glycopeptide false discovery rate control strategy",
"accession": "MS:XXX11A", "cvRef": "PSI-MS"},
]
else:
fdr_params = [
{"name": "total glycopeptide false discovery rate control strategy",
"accession": "MS:XXX108", "cvRef": "PSI-MS"},
]
spec = {
"enzymes": [
{"name": getattr(e, 'name', e), "missed_cleavages": hypothesis.parameters.get(
'max_missed_cleavages', None), "id": i}
for i, e in enumerate(hypothesis.parameters.get('enzymes'))
],
"fragment_tolerance": (analysis.parameters['fragment_error_tolerance'] * 1e6, None, "parts per million"),
"parent_tolerance": (analysis.parameters['mass_error_tolerance'] * 1e6, None, "parts per million"),
"modification_params": mods,
"id": 1,
"additional_search_params": [
{
"name": "glycopeptide search",
"accession": "MS:XXX101",
"cvRef": "PSI-MS",
}
] + fdr_params
}
spec['additional_search_params'] = [components.CVParam(**x) for x in spec['additional_search_params']]
return spec
def run(self):
f = MzIdentMLWriter(self.outfile, vocabularies=[
components.CV(
id='GNO', uri="http://purl.obolibrary.org/obo/gno.obo", full_name='GNO'),
])
self.log("Loading Spectra Data")
spectra_data = self.spectra_data()
self.log("Loading Search Database")
search_database = self.search_database()
self.log("Building Protocol")
protocol = self.protocol()
source_file = self.source_file()
self.extract_peptides()
self.extract_spectrum_identifications()
had_specified_mzml_path = self.source_mzml_path is not None
if self.source_mzml_path is None:
self.source_mzml_path = spectra_data['location']
if self.source_mzml_path is None:
did_resolve_mzml_path = False
else:
did_resolve_mzml_path = os.path.exists(self.source_mzml_path)
if not did_resolve_mzml_path:
self.log("Could not locate source mzML file.")
if not had_specified_mzml_path:
self.log("If you did not specify an alternative location to "
"find the mzML path, please do so.")
if self.export_mzml and did_resolve_mzml_path:
if self.output_mzml_path is None:
prefix = os.path.splitext(self.outfile.name)[0]
self.output_mzml_path = "%s.export.mzML" % (prefix,)
exporter = None
self.log("Begin Exporting mzML")
with open(self.output_mzml_path, 'wb') as handle:
exporter = MzMLExporter(self.source_mzml_path, handle)
self.log("... Aggregating Scan Bunches")
scan_bunches = exporter.aggregate_scan_bunches(self.scan_ids)
self.log("... Exporting Spectra")
exporter.begin(scan_bunches)
self.log("... Exporting Chromatograms")
exporter.extract_chromatograms_from_identified_glycopeptides(
self.glycopeptide_list)
self.log("... Finalizing mzML")
exporter.complete()
self.log("mzML Export Finished")
analysis = [[spectra_data['id']], [search_database['id']]]
with f:
f.controlled_vocabularies()
f.providence(software=self.software_entry())
f.register("SpectraData", spectra_data['id'])
f.register("SearchDatabase", search_database['id'])
f.register("SpectrumIdentificationList", self._spectrum_identification_list['id'])
with f.sequence_collection():
for prot in self._proteins:
f.write_db_sequence(**prot)
for pep in self._peptides:
f.write_peptide(**pep)
for pe in self._peptide_evidence:
f.write_peptide_evidence(**pe)
with f.analysis_protocol_collection():
f.spectrum_identification_protocol(**protocol)
with f.element("AnalysisCollection"):
f.SpectrumIdentification(*analysis).write(f)
with f.element("DataCollection"):
f.inputs(source_file, search_database, spectra_data)
with f.element("AnalysisData"):
with f.spectrum_identification_list(id=self._spectrum_identification_list['id']):
for result_ in self._spectrum_identification_list['identification_results']:
result = dict(result_)
identifications = result.pop("identifications")
result = f.spectrum_identification_result(**result)
with result:
for item in identifications:
f.write_spectrum_identification_item(**item)
f.outfile.close()
|
StarcoderdataPython
|
3480722
|
<gh_stars>1-10
from django.shortcuts import get_object_or_404
from django.views.generic import ListView, DetailView
from articles.models import Article, Tag
class ArticleListView(ListView):
model = Article
ordering = ['-first_commit']
paginate_by = 10
def get_context_data(self, **kwargs):
context = super(ArticleListView, self).get_context_data(**kwargs)
context['tags'] = Tag.objects.iterator()
context['article_count'] = Article.objects.count()
return context
class TaggedArticleListView(ListView):
template_name = 'articles/tagged_article_list.html'
paginate_by = 10
def tags(self):
tag_list = self.kwargs['tags_with_plus'].split('+')
for tag_name in set(tag_list):
tag = get_object_or_404(Tag, name=tag_name)
yield tag
def get_queryset(self):
articles = Article.objects
for tag in self.tags():
articles = articles.filter(tags=tag)
return articles.order_by('-first_commit')
def get_context_data(self, **kwargs):
context = super(TaggedArticleListView, self).get_context_data(**kwargs)
context['current_tags'] = self.tags
context['tags'] = Tag.objects.iterator()
context['article_count'] = Article.objects.count()
return context
class ArticleDetailView(DetailView):
model = Article
def get_object(self):
obj = super(ArticleDetailView, self).get_object()
obj.views += 1
obj.save()
return obj
# def search(request):
# q = request.GET.get('q')
# # TODO
# context = {}
# return render(request, 'article/articles.html', context)
class SearchView(ListView):
model = Article
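# A minimal URLconf sketch (hypothetical route and name choices; this would
# live in the app's urls.py rather than in this views module):
#
# from django.urls import path
# from articles.views import (ArticleListView, TaggedArticleListView,
#                             ArticleDetailView)
#
# urlpatterns = [
#     path('', ArticleListView.as_view(), name='article-list'),
#     path('tag/<str:tags_with_plus>/', TaggedArticleListView.as_view(),
#          name='tagged-article-list'),
#     path('<int:pk>/', ArticleDetailView.as_view(), name='article-detail'),
# ]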
|
StarcoderdataPython
|
1918914
|
<reponame>1323ED5/tic-tac-toe-AI
from src.dimension import Dimension
from src.game_mechanic import GameMechanic
from src.turn import generate_turns
from src.utils import clear_console
class AIMixin:
def bot_turn(self):
turns = generate_turns(self.area)
root_dimension = Dimension(self.area, self.active_player)
calculations = root_dimension.born()
winnable_result = max(calculations)
index_of_max = calculations.index(winnable_result)
winnable_turn = turns[index_of_max]
cell_id = winnable_turn.cell_id
self.make_turn(cell_id)
class ConsoleGame(GameMechanic, AIMixin):
def display_area(self):
print()
print("", " | ".join(map(lambda x: " " if x is None else x, self.area[:3])))
print("-" * 11)
print("", " | ".join(map(lambda x: " " if x is None else x, self.area[3:6])))
print("-" * 11)
print("", " | ".join(map(lambda x: " " if x is None else x, self.area[6:9])))
print()
def player_turn(self):
cell_id = int(input("cell_id: "))
self.make_turn(cell_id)
def start(self):
while True:
clear_console()
self.display_area()
if self.active_player == 0:
self.player_turn()
else:
self.bot_turn()
gameover = self.check_game_over()
if gameover is not None:
print("WON:", {1: "AI", 0: "TIE", -1: "YOU"}.get(gameover))
self.display_area()
break
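# A minimal entry point sketch (assumes GameMechanic's constructor takes no
# arguments, which is not shown in this file):
#
# if __name__ == "__main__":
#     ConsoleGame().start()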
|
StarcoderdataPython
|
353960
|
<reponame>hechth/vimms<filename>vimms/scripts/box_controller.py
import itertools
import math  # used by compare_scores (math.isclose) below
import random
from time import perf_counter
from vimms.Box import GenericBox, DictGrid, ArrayGrid, LocatorGrid, AllOverlapGrid, IdentityDrift
from vimms.GridEstimator import GridEstimator
from vimms.ChemicalSamplers import DatabaseFormulaSampler
from vimms.Chemicals import ChemicalMixtureCreator
from vimms.Common import *
from vimms.Controller.box import NonOverlapController
from vimms.Environment import Environment
from vimms.MassSpec import IndependentMassSpectrometer
from vimms.Noise import GaussianPeakNoise
class BoxEnv():
def __init__(self, min_rt, max_rt, max_mz, min_xlen, max_xlen, min_ylen, max_ylen):
self.min_rt, self.max_rt = min_rt, max_rt
self.max_mz = max_mz
self.min_x1, self.max_x1 = min_rt, max_rt - max_xlen
self.min_xlen, self.max_xlen, self.min_ylen, self.max_ylen = min_xlen, max_xlen, min_ylen, max_ylen
self.grid = None
def init_grid(self, grid_class, rt_box_size, mz_box_size):
self.grid = grid_class(self.min_rt, self.max_rt, rt_box_size, 0, self.max_mz, mz_box_size)
def generate_box(self):
x1 = random.uniform(self.min_x1, self.max_x1)
y1 = random.uniform(0, self.max_mz - self.max_ylen)
xlen = random.uniform(self.min_xlen, self.max_xlen)
ylen = random.uniform(self.min_ylen, self.max_ylen)
return GenericBox(x1, x1 + xlen, y1, y1 + ylen, intensity=1)
@classmethod
def random_boxenv(cls):
min_rt, max_rt = 0, random.randint(1000, 2000)
max_mz = random.randint(1000, 3000)
min_xlen = random.randint(1, 4)
max_xlen = random.randint(min_xlen, 10)
min_ylen = random.randint(1, 5)
max_ylen = random.randint(min_ylen, 10)
return BoxEnv(min_rt, max_rt, max_mz, min_xlen, max_xlen, min_ylen, max_ylen)
def box_score(self, box): return self.grid.non_overlap(box)
def register_box(self, box): self.grid.register_box(box)
class TestEnv(BoxEnv):
def __init__(self, min_rt, max_rt, max_mz, min_xlen, max_xlen, min_ylen, max_ylen):
super().__init__(min_rt, max_rt, max_mz, min_xlen, max_xlen, min_ylen, max_ylen)
self.boxes_by_injection = [[]]
@classmethod
def random_boxenv(cls, boxes_per_injection, no_injections):
boxenv = super().random_boxenv()
boxenv = TestEnv(boxenv.min_rt, boxenv.max_rt, boxenv.max_mz, boxenv.min_xlen, boxenv.max_xlen, boxenv.min_ylen,
boxenv.max_ylen)
boxenv.boxes_by_injection = [[boxenv.generate_box() for j in range(boxes_per_injection)] for i in
range(no_injections)]
return boxenv
def test_simple_splitter(self):
return [
[LocatorGrid.splitting_non_overlap(box, itertools.chain(*self.boxes_by_injection[:i], inj[:j])) for j, box
in enumerate(inj)] for i, inj in enumerate(self.boxes_by_injection)]
def test_non_overlap(self, grid_class, rt_box_size, mz_box_size):
self.init_grid(grid_class, rt_box_size, mz_box_size)
def score_box(box):
score = self.grid.non_overlap(box)
self.grid.register_box(box)
return score
return [[score_box(b) for b in inj] for inj in self.boxes_by_injection]
def test_intensity_non_overlap(self, grid_class, rt_box_size, mz_box_size):
self.init_grid(grid_class, rt_box_size, mz_box_size)
def score_box(box):
score = self.grid.intensity_non_overlap(box, box.intensity, {"theta1" : 1})
self.grid.register_box(box)
return score
return [[score_box(b) for b in inj] for inj in self.boxes_by_injection]
def run_vimms(no_injections, rt_box_size, mz_box_size):
rt_range = [(0, 1440)]
min_rt, max_rt = rt_range[0]
ionisation_mode, isolation_width = POSITIVE, 1
N, rt_tol, mz_tol, min_ms1_intensity = 10, 15, 10, 5000
min_roi_intensity, min_roi_length, min_roi_length_for_fragmentation = 500, 3, 3
grid = GridEstimator(LocatorGrid(min_rt, max_rt, rt_box_size, 0, 3000, mz_box_size), IdentityDrift())
hmdbpath = os.path.join(os.path.abspath(os.getcwd()), "..", "..", "tests", "fixtures", "hmdb_compounds.p")
hmdb = load_obj(hmdbpath)
df = DatabaseFormulaSampler(hmdb, min_mz=100, max_mz=1000)
cm = ChemicalMixtureCreator(df, adduct_prior_dict={POSITIVE: {"M+H": 1}})
chemicals = cm.sample(2000, 1)
boxes = []
for i in range(no_injections):
mz_noise = GaussianPeakNoise(0.1)
mass_spec = IndependentMassSpectrometer(POSITIVE, chemicals, mz_noise=mz_noise)
controller = NonOverlapController(
ionisation_mode, isolation_width, mz_tol, min_ms1_intensity, min_roi_intensity,
min_roi_length, N, grid, rt_tol=rt_tol, min_roi_length_for_fragmentation=min_roi_length_for_fragmentation
)
env = Environment(mass_spec, controller, min_rt, max_rt, progress_bar=True)
set_log_level_warning()
env.run()
boxes.append([r.to_box(0.01, 0.01) for r in controller.roi_builder.get_rois()])
return boxes
def main():
class Timer():
def __init__(self): self.time = None
def start_time(self): self.time = perf_counter()
def end_time(self): return perf_counter() - self.time
def time_f(self, f):
self.start_time()
result = f()
return result, self.end_time()
def run_area_calcs(boxenv, rt_box_size, mz_box_size):
def pretty_print(scores):
print({i: x for i, x in enumerate(itertools.chain(*scores)) if x > 0.0 and x < 1.0})
print("\nRun area calcs start!")
print("\nDictGrid Scores:")
scores_by_injection, dict_time = Timer().time_f(
lambda: boxenv.test_non_overlap(DictGrid, rt_box_size, mz_box_size))
pretty_print(scores_by_injection)
print("\nBoolArrayGrid Scores:")
scores_by_injection_2, array_time = Timer().time_f(
lambda: boxenv.test_non_overlap(ArrayGrid, rt_box_size, mz_box_size))
pretty_print(scores_by_injection_2)
print("\nExact Scores:")
scores_by_injection_3, exact_time = Timer().time_f(lambda: boxenv.test_simple_splitter())
pretty_print(scores_by_injection_3)
print("\nExact Scores Grid:")
rt_box_size, mz_box_size = (boxenv.max_rt - boxenv.min_rt) / 50, boxenv.max_mz / 50
scores_by_injection_4, exact_grid_time = Timer().time_f(
lambda: boxenv.test_non_overlap(LocatorGrid, rt_box_size, mz_box_size))
pretty_print(scores_by_injection_4)
def compare_scores(scores_1, scores_2):
return {i: (x, y) for i, (x, y) in enumerate(zip(itertools.chain(*scores_1), itertools.chain(*scores_2))) if
not math.isclose(x, y)}
print("Differences between grid + no grid:", compare_scores(scores_by_injection_3, scores_by_injection_4))
# note: below non_overlap (not multiplied by intensity) + intensity_non_overlap should have same behaviour assuming that all box intensities are 1
print("Differences between no intensity and intensity overlap:", compare_scores(scores_by_injection_4,
boxenv.test_intensity_non_overlap(
AllOverlapGrid, rt_box_size,
mz_box_size)))
print("\nDictGrid Time Taken: {}".format(dict_time))
print("BoolArray Time Taken: {}".format(array_time))
print("BoxSplitting Time Taken: {}".format(exact_time))
print("BoxSplitting with Grid Time Taken {}".format(exact_grid_time))
def box_adjust(boxenv, *no_boxes):
for x_n, y_n in no_boxes:
rt_box_size, mz_box_size = (boxenv.max_rt - boxenv.min_rt) / x_n, boxenv.max_mz / y_n
_, exact_grid_time = Timer().time_f(lambda: boxenv.test_non_overlap(LocatorGrid, rt_box_size, mz_box_size))
print("Time with {}, {} Boxes: {}".format(x_n, y_n, exact_grid_time))
boxenv = TestEnv.random_boxenv(200, 3)
run_area_calcs(boxenv, (boxenv.max_rt - boxenv.min_rt) / 10000, boxenv.max_mz / 10000)
boxenv = TestEnv(0, 50, 50, 2, 3, 2, 3)
boxenv.boxes_by_injection = [[GenericBox(0, 10, 0, 30, intensity=1), GenericBox(5, 15, 0, 30, intensity=2),
GenericBox(0, 10, 15, 45, intensity=3), GenericBox(0, 17, 0, 30, intensity=4)]]
run_area_calcs(boxenv, 0.2, 0.2)
print("Intensity Non-Overlap Scores: ", boxenv.test_intensity_non_overlap(AllOverlapGrid, 0.2, 0.2))
print()
box = GenericBox(0, 10, 0, 10)
other_boxes = [[GenericBox(0 + x, 10 + x, 0, 10) for x in range(0, 11)],
[GenericBox(0, 10, 0 + y, 10 + y) for y in range(0, 11)],
[GenericBox(0 + n, 10 + n, 0 + n, 10 + n) for n in range(0, 11)]]
for ls in other_boxes:
print([box.overlap_2(b) for b in ls])
print()
boxenv = TestEnv(0, 1440, 1500, 0, 0, 0, 0)
vimms_boxes = run_vimms(20, (boxenv.max_rt - boxenv.min_rt) / 150, boxenv.max_mz / 150)
boxenv.boxes_by_injection = vimms_boxes
run_area_calcs(boxenv, 0.2, 0.01)
print()
for ratio in range(1, 11):
print("---Ratio of {}---\n".format(ratio))
box_adjust(boxenv, *((n // ratio, n) for n in range(ratio, 1001, 10 * ratio)))
from statistics import mean
def box_lengths(b):
return b.pt2.x - b.pt1.x, b.pt2.y - b.pt1.y
print("Avg. xlen == {}, Avg. ylen == {}".format(
*map(mean, zip(*(box_lengths(b) for inj in boxenv.boxes_by_injection for b in inj)))))
boxenv = TestEnv(0, 1440, 1500, 0, 0, 0, 0)
boxenv.boxes_by_injection = vimms_boxes
grid = AllOverlapGrid(0, 2000, 100, 0, 3000, 100)
_, time = Timer().time_f(lambda: grid.boxes_by_overlaps(boxes=itertools.chain(*boxenv.boxes_by_injection)))
print(f"Time taken for split all no grid: {time}")
def split_all():
for b in itertools.chain(*boxenv.boxes_by_injection): grid.register_box(b)
return grid.boxes_by_overlaps()
_, time = Timer().time_f(split_all)
print(f"Time taken for split all grid: {time}")
if __name__ == "__main__": main()
|
StarcoderdataPython
|
4970731
|
from app.api.models.LXDModule import LXDModule
from pylxd import Client
import logging
logging = logging.getLogger(__name__)
class LXCProfile(LXDModule):
def __init__(self, input):
logging.info('Connecting to LXD')
super().__init__()
self.input = input
    def info(self, name=None):
        # name defaults to the profile named in self.input
        if name is None:
            name = self.input.get('name')
        try:
            logging.info('Reading profile {} information'.format(name))
            return self.client.api.profiles[name].get().json()['metadata']
        except Exception as e:
            logging.error('Failed to retrieve information for profile {}'.format(name))
            logging.exception(e)
            raise ValueError(e)
def createProfile(self):
try:
logging.info('Creating profile {}'.format(self.input.get('name')))
self.client.profiles.create(self.input.get('name'), config=self.input.get('config'),
devices=self.input.get('devices'))
return self.client.api.profiles[self.input.get('name')].get().json()['metadata']
except Exception as e:
            logging.error('Failed to create profile {}'.format(self.input.get('name')))
logging.exception(e)
raise ValueError(e)
def deleteProfile(self):
try:
logging.info('Deleting profile {}'.format(self.input.get('name')))
return self.client.api.profiles[self.input.get('name')].delete(json=self.input).json()
except Exception as e:
logging.error('Failed to delete profile {}'.format(self.input.get('name')))
logging.exception(e)
raise ValueError(e)
def updateProfile(self):
try:
logging.info('Updating profile {}'.format(self.input.get('name')))
self.client.api.profiles[self.input.get('name')].put(json={'config': self.input.get('config'), 'devices': self.input.get('devices')})
if self.input.get('new_name'):
if self.input.get('new_name') != self.input.get('name'):
return self.rename()
return self.info(self.input.get('name'))
except Exception as e:
logging.error('Failed to update profile {}'.format(self.input.get('name')))
logging.exception(e)
raise ValueError(e)
def rename(self):
try:
logging.info('Renaming profile {}'.format(self.input.get('name')))
profile = self.client.profiles.get(self.input.get('name'))
profile.rename(self.input.get('new_name'))
return self.info(self.input.get('new_name'))
except Exception as e:
logging.error('Failed to rename profile {}'.format(self.input.get('name')))
logging.exception(e)
raise ValueError(e)
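# A minimal usage sketch, not part of the original module; it assumes a reachable
# local LXD daemon and shows the input keys the class reads above:
#
#   profile = LXCProfile({'name': 'web', 'config': {}, 'devices': {}})
#   profile.createProfile()
#   print(profile.info('web'))
#   profile.deleteProfile()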
|
StarcoderdataPython
|
12841803
|
<filename>proxy_server/helpers.py
import base64
def generate_service_url(function_path, params=None, encrypted=False):
if not params:
return function_path
else:
        path_end = ''
        for key, value in params.items():
            if encrypted:
                value = base64.urlsafe_b64encode(str(value).encode()).decode().replace('=', '')
            else:
                value = str(value)
if not path_end:
path_end += '?{0}={1}'.format(key, value)
else:
path_end += '&{0}={1}'.format(key, value)
return function_path + path_end
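# A quick, hedged usage sketch (hypothetical paths and params):
if __name__ == "__main__":
    print(generate_service_url('/api/users'))  # -> /api/users
    print(generate_service_url('/api/users', params={'id': 42}))  # -> /api/users?id=42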
|
StarcoderdataPython
|
209005
|
import time, math, board, busio, adafruit_mprls, adafruit_mma8451, serial, picamera
cam = picamera.PiCamera()
#path = "/sys/bus/w1/devices"
#tempData = open(path+"w1_slave", "r")
i2c = busio.I2C(board.SCL, board.SDA)
mpr = adafruit_mprls.MPRLS(i2c, psi_min=0, psi_max=25)
mma = adafruit_mma8451.MMA8451(i2c, address=0x1D)
mhz = serial.Serial("/dev/ttyS0",9600,timeout=1)
packet = [0xff,0x01,0x86,0x00,0x00,0x00,0x00,0x00,0x79]  # command frame requesting a CO2 reading (0x86), trailing byte is the checksum
def read_temp():
'''read_temp() -> float
Reads data from thermometer
Returns temperature (Celsius)'''
    try:
        # tempData (the 1-Wire sensor file) is commented out above, so this
        # currently raises NameError and falls through to return -1.
        lines = tempData.readlines()
        while lines[0].strip()[-3:] != "YES":
            lines = tempData.readlines()
        return float(lines[1][lines[1].find("t=")+2:])/1000
    except Exception:
        return -1
def read_baro():
'''read_barometer() -> tuple
Reads data from barometer
Returns pressure (hPa), altitude (m)'''
try:
p = mpr.pressure
return (p, (10**(math.log10(p/1013.25)/5.2558797) - 1) / 6.8755856 * -1 * 10**6 / 3.2808)
except:
return (-1, -1)
def read_accel():
'''read_accel() -> tuple
Reads data from accelerometer
Returns acceleration in x, y, z directions'''
try:
x, y, z = mma.acceleration
return (x, y, z)
except:
return (0, 0, 0)
def read_co2():
'''read_co2() -> float (int?)
Reads data from CO2 sensor
Returns concentration of CO2 (ppm)'''
try:
mhz.write(bytearray(packet))
res = mhz.read(size=9)
res = bytearray(res)
return (res[2]<<8)|res[3]
except:
return -1
# 1 big log file
log = open("sensor-data-log", "w")
while True:
#temp = read_temp()
baro = read_baro()
accel = read_accel()
co2 = read_co2()
    log.write("{} {:4.3f} hPa {:6.3f} m X: {:.3f} m/s\u00b2 Y: {:.3f} m/s\u00b2 Z: {:.3f} m/s\u00b2 {} ppm\n".format(time.strftime("%H:%M:%S", time.gmtime(time.time()-14400)), baro[0], baro[1], accel[0], accel[1], accel[2], co2))
    log.flush()  # flush each sample so data survives an abrupt power loss
print("{} {:4.3f} hPa {:6.3f} m X: {:.3f} m/s\u00b2 Y: {:.3f} m/s\u00b2 Z: {:.3f} m/s\u00b2 {} ppm".format(time.strftime("%H:%M:%S", time.gmtime(time.time()-14400)), baro[0], baro[1], accel[0], accel[1], accel[2], co2))
#log.write("{} {:6.3f} K {:4.3f} hPa {:6.3f} m X: {:.3f} m/s\u00b2 Y: {:.3f} m/s\u00b2 Z: {:.3f} m/s\u00b2 {} ppm".format(time.strftime("%H:%M:%S", time.gmtime(time.time()-14400)), temp, baro[0], baro[1], accel[0], accel[1], accel[2], co2))
cam.capture("IMG_" + time.strftime("%H%M%S", time.gmtime(time.time()-14400)) + ".jpg")
time.sleep(120)
|
StarcoderdataPython
|
6446362
|
"""
Handles the connections to the database
get_trending_scores puts the trending_scores from the
database in a dictionary
get_train_matrix fetches data from the database and puts
it into a training matrix for the lightFM model
get_test_matrix fetches data from the database and puts
it into a test matrix from the lightFM model
get_movie_title returns the movie title for one movie id
"""
from scipy.sparse import coo_matrix
# import random
from Product.Database.DatabaseManager.Retrieve.RetrieveMovie import RetrieveMovie
from Product.Database.DatabaseManager.Retrieve.RetrieveRating import RetrieveRating
from Product.Database.DatabaseManager.Retrieve.RetrieveUser import RetrieveUser
def get_train_matrix():
"""
Author: <NAME> / <NAME>
Date: 2017-10-02
Last update: 2017-11-14 by <NAME>
Purpose:
returns the train matrix. The matrix is 80% (4/5) of the user ratings at the moment
OBS! coo_matrix is a sparse matrix and will (most likely) have the same
dimensions for train_matrix, test_matrix and new_user_matrix
:return: training matrix in the form of a numpy matrix
"""
user_list = []
movie_list = []
rating_list = []
ratings = RetrieveRating().retrieve_ratings()
counter = 0
# Puts everything but every 5th row (1, 2, 3, 4, 6, 7, 8, 9, 11...) in train_matrix
for rating in ratings:
if counter % 5 != 0:
user_list.append(rating.user_id)
movie_list.append(rating.movie_id)
rating_list.append(rating.rating)
counter += 1
    # Added +1 because otherwise the matrix will be too small
# TODO Change dimensions to greater than 1 if problem with dimensions
train_matrix = coo_matrix((rating_list, (user_list, movie_list)),
shape=(RetrieveUser().retrieve_largest_user_id()+1,
RetrieveMovie().retrieve_largest_movie_id()+1))
return train_matrix
def get_test_matrix():
"""
Author: <NAME> / <NAME>
Date: 2017-11-06
Last update: 2017-11-14 by <NAME>
Purpose:
returns the test matrix. The matrix is 10% of the user ratings at the moment
:return: test matrix in the form of a numpy matrix
"""
test_user_list = []
test_movie_list = []
test_rating_list = []
ratings = RetrieveRating().retrieve_ratings()
counter = 0
# Puts every 10th row (5, 15, 25...) in test_matrix
for rating in ratings:
if counter % 5 == 0 and counter % 2 == 1:
test_user_list.append(rating.user_id)
test_movie_list.append(rating.movie_id)
test_rating_list.append(rating.rating)
counter += 1
# TODO Change dimensions to greater than 1 if problem with dimensions
test_matrix = coo_matrix((test_rating_list, (test_user_list, test_movie_list)),
shape=(RetrieveUser().retrieve_largest_user_id()+1,
RetrieveMovie().retrieve_largest_movie_id()+1))
return test_matrix
def get_new_users_matrix():
"""
Author: <NAME>
Date: 2017-11-06
Last update: 2017-11-14 by <NAME>
Purpose: returns the new users matrix. The matrix is 10 % of the user ratings.
Is used for showing that model is evolving
:return: new users matrix in the form of a numpy matrix
"""
user_list = []
movie_list = []
rating_list = []
ratings = RetrieveRating().retrieve_ratings()
counter = 0
# Puts every 10th row (10, 20, 30...) in new_users_matrix
for rating in ratings:
if counter % 10 == 0:
user_list.append(rating.user_id)
movie_list.append(rating.movie_id)
rating_list.append(rating.rating)
counter += 1
# TODO Change dimensions to greater than 1 if problem with dimensions
new_users_matrix = coo_matrix((rating_list, (user_list, movie_list)),
shape=(RetrieveUser().retrieve_largest_user_id()+1,
RetrieveMovie().retrieve_largest_movie_id()+1))
return new_users_matrix
def get_movie_title(movie_id):
"""
Author: <NAME>
Date: 2017-11-01
Last update: 2017-11-13
Purpose: returns the movie title from a movie id input
:param movie_id:
:return: movie name as string
"""
return RetrieveMovie().retrieve_movie(movie_id).title
|
StarcoderdataPython
|
4826894
|
# coding:utf-8
from django import template
register = template.Library()
@register.filter
def appc(value):
return str(value) + "1222"
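# A hedged usage sketch: once this tag library is loaded in a template (the load
# name below assumes this file is saved as templatetags/appc_tags.py), the filter
# appends "1222" to the rendered value:
#
#   {% load appc_tags %}
#   {{ user.id|appc }}   {# e.g. renders "71222" for user.id == 7 #}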
|
StarcoderdataPython
|
6533242
|
<gh_stars>1-10
# -*- mode: python; -*-
"""
Support code related to OS detection in general. System specific facilities or customization
hooks live in mongo_platform_<PLATFORM>.py files.
"""
import os
# --- OS identification ---
#
# This needs to precede the options section so that we can only offer some options on certain
# operating systems.
# This function gets the running OS as identified by Python
# It should only be used to set up defaults for options/variables, because
# its value could potentially be overridden by setting TARGET_OS on the
# command-line. Treat this output as the value of HOST_OS
def get_running_os_name():
running_os = os.sys.platform
if running_os.startswith('linux'):
running_os = 'linux'
elif running_os.startswith('freebsd'):
running_os = 'freebsd'
elif running_os.startswith('openbsd'):
running_os = 'openbsd'
elif running_os == 'sunos5':
running_os = 'solaris'
elif running_os == 'win32':
running_os = 'windows'
elif running_os == 'darwin':
running_os = 'macOS'
else:
running_os = 'unknown'
return running_os
def env_get_os_name_wrapper(self):
return self['TARGET_OS']
def is_os_raw(target_os, os_list_to_check):
darwin_os_list = [ 'macOS', 'tvOS', 'tvOS-sim', 'iOS', 'iOS-sim' ]
linux_os_list = [ 'android', 'linux' ]
posix_os_list = [ 'openbsd', 'freebsd', 'solaris' ] + darwin_os_list + linux_os_list
os_families = {
"darwin": darwin_os_list,
"posix": posix_os_list,
"linux": linux_os_list,
}
    # use a distinct loop name so the os module imported above is not shadowed
    for candidate in os_list_to_check:
        if candidate == target_os or (candidate in os_families and target_os in os_families[candidate]):
            return True
    return False
# This function tests the running OS as identified by Python
# It should only be used to set up defaults for options/variables, because
# its value could potentially be overridden by setting TARGET_OS on the
# command-line. Treat this output as the value of HOST_OS
def is_running_os(*os_list):
return is_os_raw(get_running_os_name(), os_list)
def env_os_is_wrapper(self, *os_list):
return is_os_raw(self['TARGET_OS'], os_list)
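# A small sketch (not in the original file) showing how the helpers compose;
# is_os_raw() expands OS families, so 'posix' matches any member such as 'linux':
if __name__ == "__main__":
    print(get_running_os_name())           # e.g. 'linux'
    print(is_running_os('posix'))          # True on any POSIX-family host
    print(is_os_raw('macOS', ['darwin']))  # True: macOS is in the darwin family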
|
StarcoderdataPython
|
6636881
|
from tkinter import (
Tk,
Label,
Button,
PhotoImage,
LEFT
)
from tkinter.ttk import Separator
class MainWindow:
def __init__(self, dolar: str, euro: str, bitcoin: str) -> None:
"""Construtor da classe MainWindow."""
self.__lista_cotacoes: list = list([dolar, euro, bitcoin])
self.__main_window: Tk = Tk()
self.__main_window.iconbitmap("images/money01.ico")
self.__main_window.title("Cotações")
self.__main_window.configure(background="#FFFFFF")
self.__main_window.resizable(0, 0)
self.__main_window.geometry(
newGeometry="%dx%d+%d+%d" % (
400,
200,
self.__main_window.winfo_screenwidth() / 2 - 400 / 2,
self.__main_window.winfo_screenheight() / 2 - 200 - 2
)
)
# Images:
self.__exit = PhotoImage(file="images/exit_01.png")
# Make Widgets:
self.__make_labels(self.__lista_cotacoes[0], self.__lista_cotacoes[1], self.__lista_cotacoes[2])
self.__make_separators()
self.__make_buttons()
def __make_labels(self, dolar: str, euro: str, bitcoin: str) -> None:
"""Método responsável por criar todas as labels da janela principal."""
Label(
master=self.__main_window,
text="Cotações do Dia",
background="#FFFFFF",
foreground="#808080",
font=("Candara Light Italic", 20)
).place(x=110, y=5)
Label(master=self.__main_window, text="DOLAR", background="#FFFFFF", foreground="#436253", font=("Arial", 13)).place(x=38, y=60)
Label(master=self.__main_window, text="EURO", background="#FFFFFF", foreground="#3e4758", font=("Arial", 13)).place(x=167, y=60)
Label(master=self.__main_window, text="BITCOIN", background="#FFFFFF", foreground="#8ea97c", font=("Arial", 13)).place(x=280, y=60)
Label(master=self.__main_window,
text=f"R$ {dolar}",
background="#FFFFFF",
foreground="#436253",
font=("Arial Black", 14)
).place(x=25, y=85)
Label(master=self.__main_window,
text=f"R$ {euro}",
background="#FFFFFF",
foreground="#3e4758",
font=("Arial Black", 14)
).place(x=148, y=85)
Label(master=self.__main_window,
text=f"R$ {bitcoin}",
background="#FFFFFF",
foreground="#8ea97c",
font=("Arial Black", 14)
).place(x=260, y=85)
def __make_buttons(self):
Button(
master=self.__main_window,
text="Sair",
font=("Calibri Bold", 15),
foreground="#FF0000",
background="#ffffff",
width=100,
command=lambda: exit(0),
image=self.__exit,
            cursor="hand2",
compound=LEFT).place(x=140, y=150)
def __make_separators(self) -> None:
"""Método responsável por criar todos os separadores da janela principal."""
Separator(master=self.__main_window, orient="horizontal").place(x=25, y=40, width=350)
def run(self) -> None:
self.__main_window.mainloop()
|
StarcoderdataPython
|
3405142
|
<reponame>Bhuvan-21/SyferText
import torch
from torchvision import transforms
class ToTensor:
def __init__(self):
self.transform = transforms.ToTensor()
def __call__(self, x):
return self.transform(x)
class Resize:
def __init__(self, size):
self.transform = transforms.Resize(size)
def __call__(self, x):
return self.transform(x)
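# A minimal usage sketch (assumes Pillow is installed alongside torchvision):
if __name__ == "__main__":
    from PIL import Image
    img = Image.new("RGB", (64, 32))
    tensor = ToTensor()(Resize((16, 16))(img))  # resize first, then convert
    print(tensor.shape)  # torch.Size([3, 16, 16])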
|
StarcoderdataPython
|
3576856
|
<reponame>Omarzintan/bumblebee-ai
from features.default import BaseFeature
import wolframalpha
from features import wiki_search
class Feature(BaseFeature):
def __init__(self, bumblebee_api):
self.tag_name = "wolfram_search"
self.patterns = [
"calculate",
"evaluate",
"how many",
"how much",
"how long",
"compute"
]
self.api = bumblebee_api
self.bs = bumblebee_api.get_speech()
self.config = bumblebee_api.get_config()
def action(self, spoken_text, arguments_list: list = []):
search_query = self.get_search_query(spoken_text, self.patterns)
app_id = self.config["Api_keys"]["wolframalpha"]
try:
client = wolframalpha.Client(app_id)
except Exception:
self.bs.respond("I cannot use the wolframalpha api key")
return
try:
res = client.query(search_query, width=200)
answer = next(res.results).text
self.bs.respond(answer)
except Exception:
# Trying Wikipedia
wiki_search_obj = wiki_search.Feature(self.api)
wiki_search_obj.action(spoken_text)
return
'''
Parses spoken text to retrieve a search query for Wolframalpha
Argument: <list> spoken_text (tokenized. i.e. list of words), OR
<str> spoken_text (not tokenized),
<list> patterns
Return type: <string> spoken_text (this is actually the search query as
retrieved from spoken_text.)
'''
def get_search_query(self, spoken_text, patterns):
search_terms = patterns
query_found = False
for search_term in search_terms:
if search_term in spoken_text:
search_index = spoken_text.index(search_term)
# get everything after the search term
spoken_text = spoken_text[search_index+1:]
query_found = True
break
# In case none of the search terms are included in spoken_text.
if not query_found:
for phrase in patterns:
# split the phrase into individual words
phrase_list = phrase.split(' ')
# remove phrase list from spoken_text
spoken_text = [
word for word in spoken_text if word not in phrase_list
]
return ' '.join(spoken_text)
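# A hedged illustration of get_search_query (not in the original file): with a
# tokenized utterance, everything after the first matching pattern word is kept.
#
#   feature.get_search_query(['calculate', '2', 'plus', '2'], feature.patterns)
#   # -> '2 plus 2'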
|
StarcoderdataPython
|
1655051
|
<filename>StimControl/Experiments/Quest.py
#!/usr/bin/env python
# Copyright (c) 1996-2002 <NAME>
# Copyright (c) 1996-9 <NAME>
# Copyright (c) 2004-7 <NAME>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# a. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# b. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# c. Neither the name of the Enthought nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
__all__ = ['QuestObject']
import math
import copy
import warnings
import random
import sys
import time
import numpy as num
num.seterr(all='ignore')
def getinf(x):
return num.nonzero( num.isinf( num.atleast_1d(x) ) )
class QuestObject:
"""Measure threshold using a Weibull psychometric function.
Threshold 't' is measured on an abstract 'intensity' scale, which
usually corresponds to log10 contrast.
The Weibull psychometric function:
p2=delta*gamma+(1-delta)*(1-(1-gamma)*exp(-10**(beta*(x2+xThreshold))))
where x represents log10 contrast relative to threshold. The
Weibull function itself appears only in recompute(), which uses
the specified parameter values in self to compute a psychometric
function and store it in self. All the other methods simply use
the psychometric function stored as instance
variables. recompute() is called solely by __init__() and
beta_analysis() (and possibly by a few user programs). Thus, if
you prefer to use a different kind of psychometric function,
called Foo, you need only subclass QuestObject, overriding
__init__(), recompute(), and (if you need it) beta_analysis().
instance variables:
tGuess is your prior threshold estimate.
tGuessSd is the standard deviation you assign to that guess.
pThreshold is your threshold criterion expressed as probability of
response==1. An intensity offset is introduced into the
psychometric function so that threshold (i.e. the midpoint of the
table) yields pThreshold.
beta, delta, and gamma are the parameters of a Weibull
psychometric function.
beta controls the steepness of the psychometric
function. Typically 3.5.
delta is the fraction of trials on which the observer presses
blindly. Typically 0.01.
gamma is the fraction of trials that will generate response 1 when
intensity==-inf.
grain is the quantization of the internal table. E.g. 0.01.
scope is the intensity difference between the largest and smallest
intensity that the internal table can store. E.g. 5. This interval
will be centered on the initial guess tGuess,
i.e. [tGuess-scope/2, tGuess+scope/2]. QUEST assumes that
intensities outside of this interval have zero prior probability,
i.e. they are impossible.
"""
def __init__(self,tGuess,tGuessSd,pThreshold,beta,delta,gamma,grain=0.01,scope=None):
"""Initialize Quest parameters.
Create an instance of QuestObject with all the information
necessary to measure threshold.
This was converted from the Psychtoolbox's QuestCreate function.
"""
grain = float(grain) # make sure grain is a float
if scope is None:
dim = 500
else:
if scope <= 0:
raise ValueError('argument "scope" must be greater than zero.')
dim=scope/grain
dim=2*math.ceil(dim/2.0) # round up to even integer
self.updatePdf = True
self.warnPdf = True
self.normalizePdf = False
self.tGuess = tGuess
self.tGuessSd = tGuessSd
self.pThreshold = pThreshold
self.beta = beta
self.delta = delta
self.gamma = gamma
self.grain = grain
self.dim = dim
self.recompute()
def beta_analysis(self,stream=None):
"""Analyze the quest function with beta as a free parameter.
It returns the mean estimates of alpha (as logC) and
beta. Gamma is left at whatever value the user fixed it at.
"""
def beta_analysis1(stream=None):
"""private function called by beta_analysis()"""
if stream is None:
stream=sys.stdout
q2 = []
for i in range(1,17):
q_copy=copy.copy(self)
q_copy.beta=2**(i/4.0)
q_copy.dim=250
q_copy.grain=0.02
q_copy.recompute()
q2.append(q_copy)
na = num.array # shorthand
t2 = na([q2i.mean() for q2i in q2])
p2 = na([q2i.pdf_at(t2i) for q2i,t2i in zip(q2,t2)])
sd2 = na([q2i.sd() for q2i in q2])
beta2 = na([q2i.beta for q2i in q2])
i=num.argsort(p2)[-1]
t=t2[i]
sd=q2[i].sd()
p=num.sum(p2)
betaMean=num.sum(p2*beta2)/p
betaSd=math.sqrt(num.sum(p2*beta2**2)/p-(num.sum(p2*beta2)/p)**2)
iBetaMean=num.sum(p2/beta2)/p
iBetaSd=math.sqrt(num.sum(p2/beta2**2)/p-(num.sum(p2/beta2)/p)**2)
stream.write('%5.2f %5.2f %5.2f %4.1f %4.1f %6.3f\n'%(t,10**t,sd,1/iBetaMean,betaSd,self.gamma))
        print('Now re-analyzing with beta as a free parameter. . . .')
        if stream is None:
            stream = sys.stdout
        stream.write('logC C sd beta sd gamma\n')
        beta_analysis1(stream)
def mean(self):
"""Mean of Quest posterior pdf.
Get the mean threshold estimate.
This was converted from the Psychtoolbox's QuestMean function.
"""
return self.tGuess + num.sum(self.pdf*self.x)/num.sum(self.pdf)
def mode(self):
"""Mode of Quest posterior pdf.
t,p=q.mode()
't' is the mode threshold estimate
'p' is the value of the (unnormalized) pdf at t.
This was converted from the Psychtoolbox's QuestMode function.
"""
iMode = num.argsort(self.pdf)[-1]
p=self.pdf[iMode]
t=self.x[iMode]+self.tGuess
return t,p
def p(self,x):
"""probability of correct response at intensity x.
p=q.p(x)
The probability of a correct (or yes) response at intensity x,
assuming threshold is at x=0.
This was converted from the Psychtoolbox's QuestP function.
"""
if x < self.x2[0]:
return self.x2[0]
if x > self.x2[-1]:
return self.x2[-1]
return num.interp(x,self.x2,self.p2)
def pdf_at(self,t):
"""The (unnormalized) probability density of candidate threshold 't'.
This was converted from the Psychtoolbox's QuestPdf function.
"""
        i = int(round((t - self.tGuess)/self.grain)) + 1 + self.dim//2  # integer index into the pdf table
i=min(len(self.pdf),max(1,i))-1
p=self.pdf[i]
return p
def quantile(self,quantileOrder=None):
"""Get Quest recommendation for next trial level.
intensity=q.quantile([quantileOrder])
Gets a quantile of the pdf in the struct q. You may specify
the desired quantileOrder, e.g. 0.5 for median, or, making two
calls, 0.05 and 0.95 for a 90confidence interval. If the
'quantileOrder' argument is not supplied, then it's taken from
the QuestObject instance. __init__() uses recompute() to
compute the optimal quantileOrder and saves that in the
QuestObject instance; this quantileOrder yields a quantile
that is the most informative intensity for the next trial.
This was converted from the Psychtoolbox's QuestQuantile function.
"""
if quantileOrder is None:
quantileOrder = self.quantileOrder
p = num.cumsum(self.pdf)
if len(getinf(p[-1])[0]):
raise RuntimeError('pdf is not finite')
if p[-1]==0:
raise RuntimeError('pdf is all zero')
m1p = num.concatenate(([-1],p))
index = num.nonzero( m1p[1:]-m1p[:-1] )[0]
if len(index) < 2:
raise RuntimeError('pdf has only %g nonzero point(s)'%len(index))
ires = num.interp([quantileOrder*p[-1]],p[index],self.x[index])[0]
return self.tGuess+ires
def sd(self):
"""Standard deviation of Quest posterior pdf.
Get the sd of the threshold distribution.
This was converted from the Psychtoolbox's QuestSd function."""
p=num.sum(self.pdf)
sd=math.sqrt(num.sum(self.pdf*self.x**2)/p-(num.sum(self.pdf*self.x)/p)**2)
return sd
def simulate(self,tTest,tActual):
"""Simulate an observer with given Quest parameters.
response=QuestSimulate(q,intensity,tActual)
Simulate the response of an observer with threshold tActual.
This was converted from the Psychtoolbox's QuestSimulate function."""
t = min( max(tTest-tActual, self.x2[0]), self.x2[-1] )
response= num.interp([t],self.x2,self.p2)[0] > random.random()
return response
def recompute(self):
"""Recompute the psychometric function & pdf.
Call this immediately after changing a parameter of the
psychometric function. recompute() uses the specified
parameters in 'self' to recompute the psychometric
function. It then uses the newly computed psychometric
function and the history in self.intensity and self.response
to recompute the pdf. (recompute() does nothing if q.updatePdf
is False.)
This was converted from the Psychtoolbox's QuestRecompute function."""
if not self.updatePdf:
return
if self.gamma > self.pThreshold:
warnings.warn( 'reducing gamma from %.2f to 0.5'%self.gamma)
self.gamma = 0.5
        self.i = num.arange(-self.dim//2, self.dim//2 + 1)
self.x = self.i * self.grain
self.pdf = num.exp(-0.5*(self.x/self.tGuessSd)**2)
self.pdf = self.pdf/num.sum(self.pdf)
i2 = num.arange(-self.dim,self.dim+1)
self.x2 = i2*self.grain
self.p2 = self.delta*self.gamma+(1-self.delta)*(1-(1-self.gamma)*num.exp(-10**(self.beta*self.x2)))
if self.p2[0] >= self.pThreshold or self.p2[-1] <= self.pThreshold:
raise RuntimeError('psychometric function range [%.2f %.2f] omits %.2f threshold'%(self.p2[0],self.p2[-1],self.pThreshold)) # XXX
if len(getinf(self.p2)[0]):
raise RuntimeError('psychometric function p2 is not finite')
index = num.nonzero( self.p2[1:]-self.p2[:-1] )[0] # strictly monotonic subset
if len(index) < 2:
raise RuntimeError('psychometric function has only %g strictly monotonic points'%len(index))
self.xThreshold = num.interp([self.pThreshold],self.p2[index],self.x2[index])[0]
self.p2 = self.delta*self.gamma+(1-self.delta)*(1-(1-self.gamma)*num.exp(-10**(self.beta*(self.x2+self.xThreshold))))
if len(getinf(self.p2)[0]):
raise RuntimeError('psychometric function p2 is not finite')
self.s2 = num.array( ((1-self.p2)[::-1], self.p2[::-1]) )
if not hasattr(self,'intensity') or not hasattr(self,'response'):
self.intensity = []
self.response = []
if len(getinf(self.s2)[0]):
raise RuntimeError('psychometric function s2 is not finite')
eps = 1e-14
pL = self.p2[0]
pH = self.p2[-1]
pE = pH*math.log(pH+eps)-pL*math.log(pL+eps)+(1-pH+eps)*math.log(1-pH+eps)-(1-pL+eps)*math.log(1-pL+eps)
pE = 1/(1+math.exp(pE/(pL-pH)))
self.quantileOrder=(pE-pL)/(pH-pL)
if len(getinf(self.pdf)[0]):
raise RuntimeError('prior pdf is not finite')
# recompute the pdf from the historical record of trials
        for k, (intensity, response) in enumerate(zip(self.intensity, self.response)):
inten = max(-1e10,min(1e10,intensity)) # make intensity finite
ii = len(self.pdf) + self.i-round((inten-self.tGuess)/self.grain)-1
if ii[0]<0:
ii = ii-ii[0]
if ii[-1]>=self.s2.shape[1]:
ii = ii+self.s2.shape[1]-ii[-1]-1
iii = ii.astype(num.int_)
if not num.allclose(ii,iii):
raise ValueError('truncation error')
self.pdf = self.pdf*self.s2[response,iii]
if self.normalizePdf and k%100==0:
self.pdf = self.pdf/num.sum(self.pdf) # avoid underflow; keep the pdf normalized
if self.normalizePdf:
self.pdf = self.pdf/num.sum(self.pdf) # avoid underflow; keep the pdf normalized
if len(getinf(self.pdf)[0]):
raise RuntimeError('prior pdf is not finite')
def update(self,intensity,response):
"""Update Quest posterior pdf.
Update self to reflect the results of this trial. The
historical records self.intensity and self.response are always
updated, but self.pdf is only updated if self.updatePdf is
true. You can always call QuestRecompute to recreate q.pdf
from scratch from the historical record.
This was converted from the Psychtoolbox's QuestUpdate function."""
if response < 0 or response > self.s2.shape[0]:
raise RuntimeError('response %g out of range 0 to %d'%(response,self.s2.shape[0]))
if self.updatePdf:
inten = max(-1e10,min(1e10,intensity)) # make intensity finite
ii = len(self.pdf) + self.i-round((inten-self.tGuess)/self.grain)-1
if ii[0]<0 or ii[-1] > self.s2.shape[1]:
if self.warnPdf:
low=(1-len(self.pdf)-self.i[0])*self.grain+self.tGuess
high=(self.s2.shape[1]-len(self.pdf)-self.i[-1])*self.grain+self.tGuess
warnings.warn( 'intensity %.2f out of range %.2f to %.2f. Pdf will be inexact.'%(intensity,low,high),
RuntimeWarning,stacklevel=2)
if ii[0]<0:
ii = ii-ii[0]
else:
ii = ii+self.s2.shape[1]-ii[-1]-1
iii = ii.astype(num.int_)
if not num.allclose(ii,iii):
raise ValueError('truncation error')
self.pdf = self.pdf*self.s2[response,iii]
if self.normalizePdf:
self.pdf=self.pdf/num.sum(self.pdf)
# keep a historical record of the trials
self.intensity.append(intensity)
self.response.append(response)
def demo():
"""Demo script for Quest routines.
By commenting and uncommenting a few lines in this function, you
can use this file to implement three QUEST-related procedures for
measuring threshold.
QuestMode: In the original algorithm of Watson & Pelli (1983) each
trial and the final estimate are at the MODE of the posterior pdf.
QuestMean: In the improved algorithm of King-Smith et al. (1994).
each trial and the final estimate are at the MEAN of the posterior
pdf.
QuestQuantile & QuestMean: In the ideal algorithm of Pelli (1987)
each trial is at the best QUANTILE, and the final estimate is at
the MEAN of the posterior pdf.
This was converted from the Psychtoolbox's QuestDemo function.
<NAME>., <NAME>., <NAME>., <NAME>.,
and <NAME>. (1994) Efficient and unbiased modifications of
the QUEST threshold method: theory, simulations, experimental
evaluation and practical implementation. Vision Res, 34 (7),
885-912.
<NAME>. (1987) The ideal psychometric
procedure. Investigative Ophthalmology & Visual Science, 28
(Suppl), 366.
<NAME>. and <NAME>. (1983) QUEST: a Bayesian adaptive
psychometric method. Percept Psychophys, 33 (2), 113-20.
"""
    print('The intensity scale is abstract, but usually we think of it as representing log contrast.')
    tActual = None
    while tActual is None:
        try:
            tActual = float(input('Specify true threshold of simulated observer: '))
        except ValueError:
            pass
    tGuess = None
    while tGuess is None:
        try:
            tGuess = float(input('Estimate threshold: '))
        except ValueError:
            pass
tGuessSd = 2.0 # sd of Gaussian before clipping to specified range
pThreshold = 0.82
beta = 3.5
delta = 0.01
gamma = 0.5
q=QuestObject(tGuess,tGuessSd,pThreshold,beta,delta,gamma)
# Simulate a series of trials.
trialsDesired=100
wrongRight = 'wrong', 'right'
timeZero=time.time()
for k in range(trialsDesired):
# Get recommended level. Choose your favorite algorithm.
tTest=q.quantile()
#tTest=q.mean()
#tTest=q.mode()
tTest=tTest+random.choice([-0.1,0,0.1])
# Simulate a trial
        timeSplit = time.time()  # omit simulation and printing from reported time/trial.
        response = q.simulate(tTest, tActual)
        print('Trial %3d at %4.1f is %s' % (k+1, tTest, wrongRight[int(response)]))
        timeZero = timeZero + time.time() - timeSplit
        # Update the pdf
        q.update(tTest, response)
# Print results of timing.
    print('%.0f ms/trial' % (1000*(time.time()-timeZero)/trialsDesired))
    # Get final estimate.
    t = q.mean()
    sd = q.sd()
    print('Mean threshold estimate is %4.2f +/- %.2f' % (t, sd))
    #t=QuestMode(q);
    #print('Mode threshold estimate is %4.2f' % t)
    print('\nQuest beta analysis. Beta controls the steepness of the Weibull function.\n')
    q.beta_analysis()
    print('Actual parameters of simulated observer:')
    print('logC beta gamma')
    print('%5.2f %4.1f %5.2f' % (tActual, q.beta, q.gamma))
if __name__ == '__main__':
demo() # run the demo
|
StarcoderdataPython
|
11312234
|
"""toolsql makes it easy to read and write from sql databases"""
from .cli import *
from .crud_utils import *
from .migrate_utils import *
from .sqlalchemy_utils import *
from .dba_utils import *
from .exceptions import *
from .schema_utils import *
from .spec import *
from .summary_utils import *
__version__ = '0.3.0'
|
StarcoderdataPython
|
4994597
|
<filename>src/pages.py
import streamlit as st
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from sklearn.metrics import roc_auc_score, accuracy_score, confusion_matrix, classification_report,plot_confusion_matrix
from sklearn.model_selection import cross_val_score
plt.style.use('fivethirtyeight')
from config import config as cf
class DataView:
def __init__(self,input_df,df,col_properties):
self.input_df = input_df
self.df = df
self.col_properties = col_properties
def app(self):
st.header('Data View')
st.subheader('Raw data')
st.dataframe(self.input_df.head())
st.markdown(f"""
data has shape `{self.input_df.shape}` \n
unique identifier: `customerId` \n
Target variable: `churn`
""")
st.subheader('Processed data')
st.dataframe(self.df.head())
st.subheader('Column properties')
st.dataframe(self.col_properties)
class PlotView:
def __init__(self,df):
self.df = df
def app(self):
st.header('Plots')
st.subheader('Correlation Matrix')
fig,ax = plt.subplots(figsize=(7,7))
ax = sns.heatmap(
self.df.corr(),
vmin=-1, vmax=1, center=0,
cmap=sns.diverging_palette(20, 220, n=200),
square=True
)
ax.set_xticklabels(
ax.get_xticklabels(),
rotation=45,
horizontalalignment='right'
)
st.pyplot(fig)
st.subheader('Stacked Histograms')
cols = ['TotalCharges','MonthlyCharges','tenure']
fig,ax = plt.subplots(1,len(cols),figsize=(20,7))
for i,col in enumerate(cols):
self.df.pivot(columns='isChurned')[col].plot(kind='hist', stacked=True,ax=ax[i],xlabel=col)
ax[i].set_xlabel(col)
st.pyplot(fig)
st.subheader('Monthly Charges vs Total Charges')
fig,ax = plt.subplots(figsize = (4,4))
self.df[self.df['isChurned']==0][['TotalCharges','MonthlyCharges','tenure','isChurned']].plot(kind='scatter',x='MonthlyCharges',y='TotalCharges'
,ax=ax,label='Not Churned',c='g',alpha=0.5)
self.df[self.df['isChurned']==1][['TotalCharges','MonthlyCharges','tenure','isChurned']].plot(kind='scatter',x='MonthlyCharges',y='TotalCharges'
, label='Churned'
,ax=ax
,c='r',alpha=0.5)
st.pyplot(fig)
st.subheader('Categorical Column distributions')
cols = ['MultipleLines','InternetService','OnlineSecurity','OnlineBackup','DeviceProtection','TechSupport','StreamingTV','StreamingMovies','Contract']
fig,ax = plt.subplots(3,3,figsize=(15,10))
for i,col in enumerate(cols):
if i < 3:
self.df[col].value_counts().plot(kind='pie',ax = ax[i,0])
elif i < 6:
self.df[col].value_counts().plot(kind='pie',ax = ax[i-3,1])
elif i < 9:
self.df[col].value_counts().plot(kind='pie',ax = ax[i-6,2])
st.pyplot(fig)
st.subheader('Churned customers by service type')
cols = ['MultipleLines','InternetService','OnlineSecurity','OnlineBackup','DeviceProtection','TechSupport','StreamingTV','StreamingMovies','Contract']
fig,ax = plt.subplots(3,3,figsize=(15,10))
for i,col in enumerate(cols):
if i < 3:
self.df.groupby('isChurned')[col].value_counts().unstack(0).plot(kind='bar',ax = ax[i,0],rot=45)
ax[i,0].set_title(col)
ax[i,0].get_legend().remove()
elif i < 6:
self.df.groupby('isChurned')[col].value_counts().unstack(0).plot(kind='bar',ax = ax[i-3,1],rot=45)
ax[i-3,1].set_title(col)
ax[i-3,1].get_legend().remove()
elif i < 9:
self.df.groupby('isChurned')[col].value_counts().unstack(0).plot(kind='bar',ax = ax[i-6,2],rot=45)
ax[i-6,2].set_title(col)
ax[i-6,2].get_legend().remove()
st.pyplot(fig)
class ModelMetrics:
def __init__(self,model_metrics):
self.model_metrics = model_metrics
def app(self):
st.header('ML Model Metrics')
accuracy_score = self.model_metrics["accuracy_score"]
roc_auc_score = self.model_metrics["roc_auc_score"]
st.dataframe(pd.DataFrame([[accuracy_score,roc_auc_score]], columns=['accuracy_score','roc_auc_score']))
st.subheader('Classification Report')
st.markdown(self.model_metrics["classification_report"])
st.subheader('Confusion Matrix')
fig,ax = plt.subplots(figsize=(5,5))
ax = sns.heatmap(self.model_metrics["confusion_matrix"],annot=True,fmt='g')
ax.set_xlabel('Predicted labels')
ax.set_ylabel('True labels')
ax.xaxis.set_ticklabels(['Not Churned', 'Churned']); ax.yaxis.set_ticklabels(['Not Churned', 'Churned'])
st.pyplot(fig)
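# A hedged sketch of a streamlit entry point that drives these page classes
# (the page labels and data objects here are assumptions, not part of this file):
#
#   page = st.sidebar.selectbox('Page', ('Data', 'Plots', 'Model Metrics'))
#   if page == 'Data':
#       DataView(input_df, df, col_properties).app()
#   elif page == 'Plots':
#       PlotView(df).app()
#   else:
#       ModelMetrics(model_metrics).app()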
|
StarcoderdataPython
|
11300837
|
<gh_stars>10-100
# BSD 3-Clause License
#
# Copyright (c) 2017, Science and Technology Facilities Council and
# The University of Nottingham
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
This testing module contains the tests for the testconnections method within
the shellwrappers module.
"""
try:
from unittest import mock
except ImportError:
import mock
import pytest
import longbow.exceptions as exceptions
from longbow.shellwrappers import checkconnections
def sshfunc(job, cmd):
"""Function to mock the throwing of exception for a test."""
if cmd[0] == "module avail" and job["resource"] == "resource1":
raise exceptions.SSHError(
"Err", ("", "bash: module: command not found", 0))
@mock.patch('longbow.shellwrappers.sendtossh')
def test_testconnections_single(mock_sendtossh):
"""
Test that the connection test is launched.
"""
jobs = {
"LongbowJob1": {
"resource": "resource1"
}
}
checkconnections(jobs)
assert mock_sendtossh.call_count == 2, "sendtossh should be called twice"
@mock.patch('longbow.shellwrappers.sendtossh')
def test_testconnections_multiple(mock_sendtossh):
"""
Test that the connection test is run only for each host once.
"""
jobs = {
"LongbowJob1": {
"resource": "resource1"
},
"LongbowJob2": {
"resource": "resource2"
},
"LongbowJob3": {
"resource": "resource1"
}
}
checkconnections(jobs)
assert mock_sendtossh.call_count == 4, "should be called four times"
@mock.patch('longbow.shellwrappers.sendtossh')
def test_testconnections_sshexcept(mock_sendtossh):
"""
Test to see that if the underlying SSH call fails, the resulting
SSHError is passed up the chain. This is important!
"""
jobs = {
"LongbowJob1": {
"resource": "resource1"
},
"LongbowJob2": {
"resource": "resource2"
},
"LongbowJob3": {
"resource": "resource1"
}
}
mock_sendtossh.side_effect = exceptions.SSHError("SSH Error", "output")
with pytest.raises(exceptions.SSHError):
checkconnections(jobs)
@mock.patch('longbow.shellwrappers.sendtossh')
def test_testconnections_envfix(mock_sendtossh):
"""
Test that the environment checking works.
"""
jobs = {
"LongbowJob1": {
"resource": "resource1",
"env-fix": "false"
},
"LongbowJob2": {
"resource": "resource2",
"env-fix": "false"
},
"LongbowJob3": {
"resource": "resource1",
"env-fix": "false"
}
}
mock_sendtossh.side_effect = sshfunc
checkconnections(jobs)
assert jobs["LongbowJob1"]["env-fix"] == "true"
assert jobs["LongbowJob2"]["env-fix"] == "false"
assert jobs["LongbowJob3"]["env-fix"] == "true"
|
StarcoderdataPython
|
4826220
|
<reponame>MeWu-IDM/scirisweb
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 22 16:57:44 2019
@author: cliffk
"""
from distributed import Scheduler
from tornado.ioloop import IOLoop
from threading import Thread
loop = IOLoop.current()
t = Thread(target=loop.start, daemon=True)
t.start()
s = Scheduler(loop=loop)
s.start('tcp://:8786') # Listen on TCP port 8786
|
StarcoderdataPython
|
3297126
|
import numpy as np
print ("hello world")
def circumference(r):
return np.pi*2*r
def surface_area (r):
return np.pi*r**2
|
StarcoderdataPython
|
9607354
|
<filename>ranking.py
import csv
READ_PATH = 'yt_wonderland/data/world-happiness-report/2017.csv'
WRITE_PATH = 'new_data.csv'
DIMENSIONS_TO_RANK = [
{
"name": "Economy_GDP_Per_Capita",
"higher_is_better": True
},
{
"name": "Generosity",
"higher_is_better": True
},
{
"name": "Family",
"higher_is_better": True
},
{
"name": "Health_Life_Expectancy",
"higher_is_better": True
},
{
"name": "Freedom",
"higher_is_better": True
},
{
"name": "Trust_Government_Corruption",
"higher_is_better": True
},
]
with open(READ_PATH, 'r') as data_src:
csv_reader = csv.reader(data_src)
lines = []
countries = []
# read csv
for line in csv_reader:
lines.append(line)
data_src.close()
legend = lines[0]
new_legend = list(legend)
lines = lines[1::]
# process into dicts
for country_ln in lines:
country = {}
    for i in range(len(legend)):
if legend[i] == 'Country':
country[legend[i]] = country_ln[i]
elif legend[i] == 'Happiness_Rank':
country[legend[i]] = int(country_ln[i])
else:
country[legend[i]] = float(country_ln[i])
countries.append(country)
def sort_and_add_rank(dimension, higher_is_better):
    countries.sort(key=lambda c: c[dimension], reverse=higher_is_better)
    for i in range(len(countries)):
        countries[i][dimension + "_Rank"] = i + 1
        print(countries[i])
# adding ranks
for d in DIMENSIONS_TO_RANK:
new_legend.append(d['name']+"_Rank")
sort_and_add_rank(d['name'], d['higher_is_better'])
# write to output
with open(WRITE_PATH, 'w') as new_file:
csv_writer = csv.writer(new_file)
csv_writer.writerow(new_legend)
for country in countries:
csv_row = []
for dimension in new_legend:
csv_row.append(country[dimension])
csv_writer.writerow(csv_row)
new_file.close()
|
StarcoderdataPython
|
4877121
|
<reponame>grice/RNAtools
import RNAtools.partAlign as m2
import os
import math
from pathlib import Path
import pytest
filepath = os.path.dirname(__file__)
data_dir = Path(f'{filepath}/../data')
@pytest.mark.skip(reason="Currently fails, not sure why though.")
def test_partAlign():
"""
Tests partition aligner..
"""
# seq1 = f'{filepath}/../data/sequence_hsa-mir-380.txt'
seq1 = data_dir / 'sequence_hsa-mir-380.txt'
# seq2 = f'{filepath}/../data/sequence_hsa-mir-383.txt'
seq2 = data_dir / 'sequence_hsa-mir-383.txt'
x = m2.RNA(seq1)
y = m2.RNA(seq2)
assert (x.count == 37)
assert (y.count == 55)
scoreMax = m2.align_RNA_partition(x, y)
assert (math.floor(scoreMax) == 192)
|
StarcoderdataPython
|
8136074
|
<gh_stars>10-100
import json
from django.db import models
from openhumans.models import OpenHumansMember
CERTAINTY_CHOICES = [
(1, "Random guess"),
(2, "Very uncertain"),
(3, "Unsure"),
(4, "Somewhat certain"),
(5, "Very certain"),
]
class RetrospectiveEvent(models.Model):
member = models.ForeignKey(OpenHumansMember, on_delete=models.CASCADE)
date = models.DateField()
certainty = models.IntegerField(choices=CERTAINTY_CHOICES)
notes = models.TextField(
blank=True,
help_text="Notes about this illness, e.g. do you know or believe it was a cold, flu, or coronavirus infection?",
)
published = models.BooleanField(default=False)
def as_json(self):
data = {
analysis.graph_type: json.loads(analysis.graph_data)
for analysis in self.retrospectiveeventanalysis_set.all()
}
data["sickness_event"] = [
{
"timestamp": self.date.isoformat(),
"data": {"certainty": self.certainty, "notes": self.notes},
}
]
return json.dumps(data, sort_keys=True)
class RetrospectiveEventAnalysis(models.Model):
event = models.ForeignKey(RetrospectiveEvent, on_delete=models.CASCADE)
graph_data = models.TextField()
graph_type = models.TextField(default="")
@property
def member(self):
return self.event.member
|
StarcoderdataPython
|
3293765
|
"""
Serializer for a request user's information
"""
# stdlib
from typing import Dict
# lib
from rest_framework import serializers
# local
from api.models import Settings
__all__ = [
'SettingsSerializer',
]
NOTIFICATION_VALUES = {True, False}
THEMES = {
'beta',
'blue',
'green',
'purple',
'red',
'traffic',
'trans',
}
class SettingsSerializer(serializers.ModelSerializer):
class Meta:
model = Settings
fields = ['notifications', 'theme']
def validate_notifications(self, notifications: Dict[str, bool]) -> Dict[str, bool]:
"""
Ensure that the notifications dict sent by the user only contains valid keys
"""
for key, value in notifications.items():
# Check that the key is in the allowed strings, and the value is a valid bool
if key not in Settings.NOTIFICATIONS:
raise serializers.ValidationError(f'"{key}" is not a valid choice.')
if value not in NOTIFICATION_VALUES:
raise serializers.ValidationError(f'"{key}" does not have a boolean for a value.')
return notifications
def validate_theme(self, theme: str) -> str:
"""
Ensure the theme is in the set of allowed themes
"""
if theme not in THEMES:
raise serializers.ValidationError(f'"{theme}" is not a valid choice.')
return theme
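# A hedged usage sketch (assumes a configured Django project and that
# Settings.NOTIFICATIONS defines the allowed notification keys):
#
#   serializer = SettingsSerializer(instance, data={
#       'theme': 'blue',
#       'notifications': {'some_key': True},
#   }, partial=True)
#   serializer.is_valid(raise_exception=True)  # rejects unknown themes/keys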
|
StarcoderdataPython
|
1834427
|
import random
from datetime import datetime
start_time = datetime.now()
def merge_sort(arr):
if len(arr) <= 1:
return arr
middle = len(arr) // 2
left = merge_sort(arr[:middle])
right = merge_sort(arr[middle:])
return merge(left, right)
def merge(left, right):
result = []
    while len(left) > 0 and len(right) > 0:  # while both halves are non-empty
        if left[0] <= right[0]:  # pick the smaller of the two head elements
            result.append(left[0])
            left = left[1:]
        else:
            result.append(right[0])
            right = right[1:]
    if len(left) > 0:  # anything remaining in the left half
        result += left  # goes straight to the result
    if len(right) > 0:
        result += right
return result
arr = [random.randint(0, 1000) for i in range(1000)]
print('Original array: \n', arr)
print('\nSorted array: ', merge_sort(arr))
end_time = datetime.now()
print('\n\nElapsed time: {}'.format(end_time - start_time))
|
StarcoderdataPython
|
8003463
|
<filename>plugin.git.browser/github/downloader.py
# -*- coding: utf-8 -*-
'''*
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*'''
import sys
import time
import xbmcgui
import requests
from commoncore import kodi
from commoncore import zipfile
from .github_api import get_version_by_name, get_version_by_xml
class downloaderException(Exception):
pass
def format_status(cached, total, speed):
cached = kodi.format_size(cached)
total = kodi.format_size(total)
speed = kodi.format_size(speed, 'B/s')
return "%s of %s at %s" % (cached, total, speed)
def test_url(url):
r = requests.head(url)
return r.status_code == requests.codes.ok
def download(url, addon_id, destination, unzip=False, quiet=False):
version = None
filename = addon_id + '.zip'
r = requests.get(url, stream=True)
kodi.log("Download: %s" % url)
if r.status_code == requests.codes.ok:
temp_file = kodi.vfs.join(kodi.get_profile(), "downloads")
if not kodi.vfs.exists(temp_file): kodi.vfs.mkdir(temp_file, recursive=True)
temp_file = kodi.vfs.join(temp_file, filename)
try:
total_bytes = int(r.headers["Content-Length"])
except:
total_bytes = 0
block_size = 1000
cached_bytes = 0
if not quiet:
pb = xbmcgui.DialogProgress()
pb.create("Downloading",filename,' ', ' ')
kodi.sleep(150)
start = time.time()
is_64bit = sys.maxsize > 2**32
if unzip and not is_64bit: zip_content = b''
with open(temp_file, 'wb') as f:
for block in r.iter_content(chunk_size=block_size):
if not block: break
                if not quiet and pb.iscanceled():
                    raise downloaderException('Download Aborted')
cached_bytes += len(block)
f.write(block)
if unzip and not is_64bit: zip_content += block
if total_bytes > 0:
delta = int(time.time() - start)
if delta:
bs = int(cached_bytes / (delta))
else: bs = 0
if not quiet:
percent = int(cached_bytes * 100 / total_bytes)
pb.update(percent, "Downloading",filename, format_status(cached_bytes, total_bytes, bs))
if not quiet: pb.close()
if unzip:
if is_64bit:
zip_ref = zipfile.ZipFile(temp_file, 'r')
else:
if kodi.strings.PY2:
import StringIO
zip_ref = zipfile.ZipFile(StringIO.StringIO(zip_content))
else:
from io import BytesIO
zip_ref = zipfile.ZipFile(BytesIO(zip_content))
zip_ref.extractall(destination)
zip_ref.close()
kodi.vfs.rm(temp_file, quiet=True)
try:
xml = kodi.vfs.read_file(kodi.vfs.join(destination, kodi.vfs.join(addon_id, 'addon.xml')), soup=True)
version = get_version_by_xml(xml)
if not version:
version = get_version_by_name(filename)
except:
kodi.log("Unable to fine version from addon.xml for addon: %s" % addon_id)
else:
kodi.vfs.mv(temp_file, kodi.vfs.join(destination, filename))
else:
kodi.close_busy_dialog()
raise downloaderException(r.status_code)
return version
|
StarcoderdataPython
|
162680
|
<filename>insert_DataBase_robot.py
import pyautogui
import time
import pyperclip
# Write the code for each database type;
pyautogui.PAUSE = 2.0
pyautogui.press('win')
pyautogui.write('pgadmin4')
pyautogui.press('enter')
|
StarcoderdataPython
|
1854484
|
<reponame>elipavlov/transport-nov-parser
# coding=utf-8
from __future__ import unicode_literals
from abc import ABCMeta, abstractmethod
import six
import math
from django.db import models
class EnumBase(six.with_metaclass(ABCMeta, object)):
@property
@abstractmethod
def as_tuple(self):
        raise NotImplementedError()
@classmethod
def as_reverse_tuple(cls):
return tuple([(item[1], item[0]) for item in cls.as_tuple])
@classmethod
def as_dict(cls):
return dict(cls.as_tuple)
@classmethod
def as_reverse_dict(cls):
        return dict(cls.as_reverse_tuple())
class GeoDirections(EnumBase):
NORTH = 'n'
NORTHEAST = 'ne'
EAST = 'e'
SOUTHEAST = 'se'
SOUTH = 's'
SOUTHWEST = 'sw'
WEST = 'w'
NORTHWEST = 'nw'
as_tuple = (
(NORTH, "Север"),
(NORTHEAST, "Северо-восток"),
(EAST, "Восток"),
(SOUTHEAST, "Юго-восток"),
(SOUTH, "Юг"),
(SOUTHWEST, "Юго-запад"),
(WEST, "Запад"),
(NORTHWEST, "Северо-запад"),
)
@classmethod
def normalize_angle(cls, angle):
if angle < 0:
return 360 + (angle % 360)
else:
return angle % 360
    @classmethod
    def from_angle(cls, angle):
        step = 360 / len(cls.as_tuple)
        angle = cls.normalize_angle(angle)
        comp_angle = round(angle + step/2, 1)
        x = 0
        for key, label in cls.as_tuple:  # 'label' avoids shadowing the repr builtin
            if x <= comp_angle < x + step:
                return key
            x += step
        else:
            return cls.NORTH
class Directions(EnumBase):
FORWARD = 'forward'
BACKWARD = 'backward'
CIRCULAR = 'circular'
as_tuple = (
(FORWARD, "Вперёд"),
(BACKWARD, "Назад"),
(CIRCULAR, "Кольцевой"),
)
class DataProviderTypes(EnumBase):
TWOGIS_ROUTE_API = '2gis_route_api'
ROUTES_HTML_PAGE = 'routes_html_page'
ROUTE_HTML_PAGE = 'route_html_page'
as_tuple = (
(TWOGIS_ROUTE_API, "2GIS route API"),
(ROUTES_HTML_PAGE, "Routes HTML-page"),
(ROUTE_HTML_PAGE, "Route HTML-page"),
)
class Point(object):
lon = 0.0
lat = 0.0
@property
def length(self):
return math.hypot(self.lon, self.lat)
def distance_to(self, other):
return math.hypot(self.lon-other.lon, self.lat-other.lat)
    def angle(self, other):
        cos_a = (self * other) / (self.length * other.length)
        # the angle is the arccosine of the normalized dot product;
        # clamp against floating-point drift outside [-1, 1]
        return math.degrees(math.acos(max(-1.0, min(1.0, cos_a))))
def __init__(self, lon=0.0, lat=0.0, repr=None):
if repr is not None:
lst = repr.replace('POINT', '').\
replace('(', '')\
.replace(')', '')\
.split(' ')
self.lon = float(lst[0])
self.lat = float(lst[1])
else:
self.lon = lon
self.lat = lat
def __add__(self, other):
return Point(
self.lon + other.lon,
self.lat + other.lat)
def __sub__(self, other):
return Point(
self.lon - other.lon,
self.lat - other.lat)
def __mul__(self, other):
return self.lon * other.lon + self.lat * other.lat
def __lshift__(self, other):
diff = self - other
return Point(
self.lon+diff.lon,
self.lat+diff.lat)
def __rshift__(self, other):
lon = self.lon-other.lon
lat = self.lat-other.lat
return Point(
self.lon-lon,
self.lat-lat)
def __repr__(self):
return 'POINT({lon:.13f} {lat:.13f})'.format(**self.__dict__)
def __str__(self):
return 'Point(lon: {lon:.6f}, lat: {lat:.6f})'.format(**self.__dict__)
def __unicode__(self):
return unicode(str(self))
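# A brief illustration of the Point helpers above (values are hypothetical):
#
#   p, q = Point(1.0, 0.0), Point(0.0, 1.0)
#   (p + q).length                        # sqrt(2)
#   p.distance_to(q)                      # sqrt(2)
#   p * q                                 # dot product -> 0.0
#   Point(repr='POINT(31.27 58.52)').lon  # 31.27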
class RouteTypes(EnumBase):
BUS = 'bus'
TROLLEYBUS = 'trolleybus'
as_tuple = (
(BUS, "Автобус"),
(TROLLEYBUS, "Троллейбус"),
)
class NameAlias(models.Model):
name = models.CharField(max_length=100)
class META:
abstract = True
class Platform(models.Model):
name = models.CharField(max_length=100)
full_name = models.CharField(max_length=200, blank=True, default='')
description = models.TextField(blank=True, default='')
geo_direction = models.CharField(max_length=16, blank=True,
choices=GeoDirections.as_tuple)
def get_queryset(self, request):
qs = super(Platform, self).get_queryset(request)
qs = qs.annotate(models.Count('stops'))
return qs
def __eq__(self, other):
if self.pk:
return super(Platform, self).__eq__(other)
else:
return self.name.strip().lower() == other.name.strip().lower()
def __str__(self):
return self.name
class PlatformAlias(NameAlias):
platform = models.ForeignKey(Platform, related_name='aliases')
class Stop(models.Model):
platform = models.ForeignKey(Platform, on_delete=models.CASCADE,
related_name='stops')
longitude = models.FloatField(blank=True, null=True,
help_text='Longitude in WGS84 system')
latitude = models.FloatField(blank=True, null=True,
help_text='Latitude in WGS84 system')
alias = models.OneToOneField(PlatformAlias, null=True,
on_delete=models.SET_NULL,
related_name='stop')
class META:
unique_together = ('platform', 'longitude', 'latitude')
def __eq__(self, other):
if self.pk:
return super(Stop, self).__eq__(other)
else:
return self.platform == other.platform\
and self.longitude == other.longitude\
and self.latitude == other.latitude
def __repr__(self):
return '{} {}'.format(self.platform, Point(self.longitude, self.latitude))
def __str__(self):
return self.__repr__()
class Route(models.Model):
name = models.CharField(max_length=32)
code = models.CharField(max_length=32, unique=True)
type = models.CharField(
max_length=32,
choices=RouteTypes.as_tuple,
default=RouteTypes.BUS,
)
canceled = models.DateField(blank=True, null=True, default=None)
class META:
unique_together = ("name", "type")
def __str__(self):
return '%s (%s)' % (self.name, self.type)
class DataProviderUrl(models.Model):
link = models.URLField(help_text='The link for getting data through certain API')
type = models.CharField(max_length=32, choices=DataProviderTypes.as_tuple)
coding = models.CharField(max_length=32, default='utf-8')
route = models.ForeignKey(Route, related_name='data_providers',
blank=True, null=True, on_delete=models.CASCADE)
route_code = models.CharField(max_length=32, blank=True, default='')
class META:
unique_together = ("name", "type")
def __str__(self):
return 'route: %s, type: %s' % (self.route, self.type)
class RouteWeekDimension(models.Model):
weekend = models.BooleanField(default=False)
weekday = models.SmallIntegerField(
default=1,
help_text="Day of week from 1 to 7, first is monday")
def __str__(self):
return 'day: %s%s' % (self.weekday, ' we' if self.weekend else '')
class RoutePoint(models.Model):
    week_dimension = models.ForeignKey(RouteWeekDimension, on_delete=models.CASCADE)
route = models.ForeignKey(Route,
on_delete=models.CASCADE,
related_name='route_points')
stop = models.ForeignKey(Stop,
on_delete=models.CASCADE,
related_name='route_points')
time = models.TimeField(blank=True, null=True)
skip = models.BooleanField(default=False)
lap = models.SmallIntegerField(default=-1)
lap_start = models.BooleanField(default=False)
direction = models.CharField(max_length=16, choices=Directions.as_tuple)
order = models.SmallIntegerField(default=999)
geo_direction = models.CharField(max_length=16, blank=True,
choices=GeoDirections.as_tuple)
angle = models.FloatField(default=0.0)
on_demand = models.BooleanField(default=False)
    class Meta:
        # 'platform' is not a field on RoutePoint; its FK 'stop' is
        # assumed to be the intended column.
        unique_together = (('week_dimension', 'route', 'stop'),
                           ('lap', 'order'))
    def next_stop(self):
        raise NotImplementedError()
    def prev_stop(self):
        raise NotImplementedError()
def __str__(self):
return '%s %s %s' % (self.route, self.stop, self.week_dimension)
class RouteDateDimension(RouteWeekDimension):
year = models.SmallIntegerField()
month = models.SmallIntegerField()
day = models.SmallIntegerField()
week = models.SmallIntegerField()
date = models.DateTimeField()
class RouteSchedule(RoutePoint):
date_dimension = models.ForeignKey(RouteDateDimension,
related_name='route_schedules',
on_delete=models.CASCADE)
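# Illustrative query sketch (assumed data): upcoming departures for a route
# on a given week dimension, in lap/stop order:
#
#   RoutePoint.objects.filter(route=route, week_dimension=wd, skip=False) \
#             .order_by('lap', 'order', 'time')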
|
StarcoderdataPython
|
6499357
|
from django.db import models
from django.contrib.auth.models import AbstractUser, BaseUserManager
class MyAccountManager(BaseUserManager):
def create_user(self, first_name, last_name, username, email, password=None):
if not email:
raise ValueError('User must have an email address')
if not username:
            raise ValueError('User must have a username')
user = self.model(
email=self.normalize_email(email),
username=username,
first_name=first_name,
last_name=last_name
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, first_name, last_name, email, username, password):
user = self.create_user(
email=self.normalize_email(email),
username=username,
password=password,
first_name=first_name,
last_name=last_name
)
user.is_admin = True
user.is_active = True
user.is_staff = True
user.is_superadmin = True
user.save(using=self._db)
return user
class Account(AbstractUser):
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
username = models.CharField(max_length=50, unique=True)
email = models.EmailField(max_length=100, unique=True)
phone_number = models.CharField(max_length=50)
# required
    date_joined = models.DateTimeField(auto_now_add=True)  # was misspelled `data_joined`
    last_login = models.DateTimeField(auto_now=True)
is_admin = models.BooleanField(default=False)
is_staff = models.BooleanField(default=False)
is_active = models.BooleanField(default=False)
is_superadmin = models.BooleanField(default=False)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['username', 'first_name', 'last_name']
objects = MyAccountManager()
def __str__(self):
return self.email
def has_perm(self, perm, obj=None):
return self.is_admin
def has_module_perms(self, app_label):
return True
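# Minimal usage sketch (assumed credentials), exercising the custom manager:
#
#   user = Account.objects.create_user(
#       first_name='Ada', last_name='Lovelace',
#       username='ada', email='ada@example.com', password='s3cret')
#   admin = Account.objects.create_superuser(
#       'Ada', 'Lovelace', 'admin@example.com', 'admin', 's3cret')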
|
StarcoderdataPython
|
173413
|
<reponame>Fritzenator/hashcode-2020
import numpy as np
import numba
@numba.jit(nopython=True)
def knapsack(values, weights, max_weight):
"""
Returns tuple (total summed value, chosen items)
"""
t = np.zeros((len(values), max_weight + 1), dtype=np.float64)
# Fill-in the value table using the recurrence formula
for i in range(len(values)):
for w in range(max_weight + 1):
            # If this item's weight exceeds the sub-problem capacity we can
            # only skip it, so take the previous best solution computed
            # without this item
if weights[i] > w:
# Check bounds. For sub-problem with 0 items we must have 0 value
if i > 0:
t[i, w] = t[i - 1, w]
else:
t[i, w] = 0
# If we can choose, see if the choice yields a higher value
else:
# If chosen the value is value of previous best with
# the weight subtracted plus the value of this item
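                # (For i == 0, t[i - 1, ...] wraps to the still-zero last
                # row, which happens to give the correct base case.)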
value_if_chosen = t[i - 1, w - weights[i]] + values[i]
# Previous row, current column is the value of the sub-problem
# for same max weight but without this item (item not chosen)
value_if_ignored = t[i - 1, w]
t[i, w] = max(value_if_chosen, value_if_ignored)
# print(t)
# Backtrack from the value table to find the items chosen
chosen_items = []
w = max_weight
for i in range(len(values) - 1, -1, -1):
# If we are in first row (sub-problem with only one item)
# we need to check if the value is greater than 0
# if it is, it means the item 0 was chosen
if i == 0 and t[i, w] != 0:
chosen_items.append(0)
# If we have higher value for choosing than for non-choosing
# It means we have chosen i
elif t[i, w] > t[i - 1, w]:
chosen_items.append(i)
w = w - weights[i]
# Otherwise do nothing as i was not chosen
return t[-1, -1], chosen_items
# Find minimal number of coins needed using DP
# Backtracking to find the actual coins is not yet implemented
@numba.jit(nopython=True)
def change_minimal_coins(money, coins):
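    # `money + 1` acts as an "infinity" sentinel: any reachable amount needs
    # at most `money` coins, so a final value of money + 1 means unreachable.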
array = [money + 1 for _ in range(money + 1)]
array[0] = 0
for i in range(1, money + 1):
array[i] = 1 + min([array[i - c] for c in coins if i - c >= 0])
# print(array)
return array[-1]
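# Quick self-check (illustrative values; knapsack expects integer weights):
if __name__ == '__main__':
    vals = np.array([60.0, 100.0, 120.0], dtype=np.float64)
    wts = np.array([10, 20, 30], dtype=np.int64)
    print(knapsack(vals, wts, 50))              # expected: (220.0, [2, 1])
    print(change_minimal_coins(11, [1, 2, 5]))  # expected: 3 (5 + 5 + 1)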
|
StarcoderdataPython
|
6435029
|
<reponame>caiges/populous<gh_stars>1-10
import unittest
import os
import time
from PIL import Image
from django.conf import settings
from populous.thumbnail.base import Thumbnail
from populous.thumbnail.main import DjangoThumbnail, get_thumbnail_setting
from populous.thumbnail.processors import dynamic_import, get_valid_options
from populous.thumbnail.tests.base import BaseTest, RELATIVE_PIC_NAME, PIC_NAME, THUMB_NAME, PIC_SIZE
PROCESSORS = dynamic_import(get_thumbnail_setting('PROCESSORS'))
VALID_OPTIONS = get_valid_options(PROCESSORS)
class ThumbnailTest(BaseTest):
def testThumbnails(self):
# Thumbnail
thumb = Thumbnail(source=PIC_NAME, dest=THUMB_NAME % 1,
requested_size=(240, 240))
self.verify_thumbnail((240, 180), thumb)
# Cropped thumbnail
thumb = Thumbnail(source=PIC_NAME, dest=THUMB_NAME % 2,
requested_size=(240, 240), opts=['crop'])
self.verify_thumbnail((240, 240), thumb)
# Thumbnail with altered JPEG quality
thumb = Thumbnail(source=PIC_NAME, dest=THUMB_NAME % 3,
requested_size=(240, 240), quality=95)
self.verify_thumbnail((240, 180), thumb)
def testRegeneration(self):
# Create thumbnail
thumb_name = THUMB_NAME % 4
thumb_size = (240, 240)
thumb = Thumbnail(source=PIC_NAME, dest=thumb_name,
requested_size=thumb_size)
self.images_to_delete.add(thumb_name)
thumb_mtime = os.path.getmtime(thumb_name)
time.sleep(1)
# Create another instance, shouldn't generate a new thumb
thumb = Thumbnail(source=PIC_NAME, dest=thumb_name,
requested_size=thumb_size)
self.assertEqual(os.path.getmtime(thumb_name), thumb_mtime)
# Recreate the source image, then see if a new thumb is generated
Image.new('RGB', PIC_SIZE).save(PIC_NAME, 'JPEG')
thumb = Thumbnail(source=PIC_NAME, dest=thumb_name,
requested_size=thumb_size)
self.assertNotEqual(os.path.getmtime(thumb_name), thumb_mtime)
class DjangoThumbnailTest(BaseTest):
def setUp(self):
super(DjangoThumbnailTest, self).setUp()
# Add another source image in a sub-directory for testing subdir and
# basedir.
self.sub_dir = os.path.join(settings.MEDIA_ROOT, 'test_thumbnail')
try:
os.mkdir(self.sub_dir)
except OSError:
pass
self.pic_subdir = os.path.join(self.sub_dir, RELATIVE_PIC_NAME)
Image.new('RGB', PIC_SIZE).save(self.pic_subdir, 'JPEG')
self.images_to_delete.add(self.pic_subdir)
def testFilenameGeneration(self):
basename = RELATIVE_PIC_NAME.replace('.', '_')
# Basic filename
thumb = DjangoThumbnail(relative_source=RELATIVE_PIC_NAME,
requested_size=(240, 120))
expected = os.path.join(settings.MEDIA_ROOT, basename)
expected += '_240x120_q85.jpg'
self.verify_thumbnail((160, 120), thumb, expected_filename=expected)
# Changed quality and cropped
thumb = DjangoThumbnail(relative_source=RELATIVE_PIC_NAME,
requested_size=(240, 120), opts=['crop'],
quality=95)
expected = os.path.join(settings.MEDIA_ROOT, basename)
expected += '_240x120_crop_q95.jpg'
self.verify_thumbnail((240, 120), thumb, expected_filename=expected)
# All options on
thumb = DjangoThumbnail(relative_source=RELATIVE_PIC_NAME,
requested_size=(240, 120), opts=VALID_OPTIONS)
expected = os.path.join(settings.MEDIA_ROOT, basename)
expected += '_240x120_bw_autocrop_crop_upscale_detail_sharpen_q85.jpg'
self.verify_thumbnail((240, 120), thumb, expected_filename=expected)
# Different basedir
basedir = 'sorl-thumbnail-test-basedir'
self.change_settings.change({'BASEDIR': basedir})
thumb = DjangoThumbnail(relative_source=self.pic_subdir,
requested_size=(240, 120))
expected = os.path.join(basedir, self.sub_dir, basename)
expected += '_240x120_q85.jpg'
self.verify_thumbnail((160, 120), thumb, expected_filename=expected)
# Different subdir
self.change_settings.change({'BASEDIR': '', 'SUBDIR': 'subdir'})
thumb = DjangoThumbnail(relative_source=self.pic_subdir,
requested_size=(240, 120))
expected = os.path.join(settings.MEDIA_ROOT,
os.path.basename(self.sub_dir), 'subdir',
basename)
expected += '_240x120_q85.jpg'
self.verify_thumbnail((160, 120), thumb, expected_filename=expected)
# Different prefix
self.change_settings.change({'SUBDIR': '', 'PREFIX': 'prefix-'})
thumb = DjangoThumbnail(relative_source=self.pic_subdir,
requested_size=(240, 120))
expected = os.path.join(self.sub_dir, 'prefix-'+basename)
expected += '_240x120_q85.jpg'
self.verify_thumbnail((160, 120), thumb, expected_filename=expected)
def tearDown(self):
super(DjangoThumbnailTest, self).tearDown()
subdir = os.path.join(self.sub_dir, 'subdir')
if os.path.exists(subdir):
os.rmdir(subdir)
os.rmdir(self.sub_dir)
|
StarcoderdataPython
|
9706338
|
<reponame>KazakovDenis/django-extensions
# -*- coding: utf-8 -*-
from django.contrib.auth.mixins import UserPassesTestMixin
class ModelUserFieldPermissionMixin(UserPassesTestMixin):
model_permission_user_field = 'user'
def get_model_permission_user_field(self):
return self.model_permission_user_field
def test_func(self):
model_attr = self.get_model_permission_user_field()
current_user = self.request.user
return current_user == getattr(self.get_queryset().first(), model_attr)
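# Typical usage sketch (assumed view and model, not from the library docs):
#
#   class InvoiceDetailView(ModelUserFieldPermissionMixin, DetailView):
#       model = Invoice
#       model_permission_user_field = 'owner'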
|
StarcoderdataPython
|
279446
|
"""
The PSD Submodule
=================
The PSD submodule provides implementations of various particle size
distributions for the use in scattering calculations.
In addition to that, :code:`artssat.scattering.psd.arts` subpackage defines
the interface for PSDs in ARTS, while the :code:`artssat.scattering.psd.data`
subpackage provides functionality for the handling of PSD data.
"""
from artssat.scattering.psd.d14 import D14, D14N, D14MN
from artssat.scattering.psd.my05 import MY05
from artssat.scattering.psd.ab12 import AB12
from artssat.scattering.psd.binned import Binned
from artssat.scattering.psd.fixed_shape import FixedShape
|
StarcoderdataPython
|
5142008
|
import setuptools
with open("README.md", "r", encoding="utf-8") as f:
long_description = f.read()
setuptools.setup(
name="madlib_generator",
version="0.1.1",
author="Adrian-at-CrimsonAuzre",
author_email="<EMAIL>",
description="A small example package",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Adrian-at-CrimsonAuzre/madlib_generator",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
package_dir={"": "src"},
packages=setuptools.find_packages(where="src"),
python_requires=">=3.6",
include_package_data=True,
package_data={'madlib': ['data/*.json.gz']},
)
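# Standard setuptools workflow for this file (run from the project root):
#   python -m pip install .    # install from the source tree
#   python -m build            # or build sdist/wheel artifacts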
|
StarcoderdataPython
|
5073707
|
from keras.layers import Embedding
# The Embedding layer takes at least two arguments:
# the number of possible tokens, here 1000 (1 + maximum word index),
# and the dimensionality of the embeddings, here 64.
embedding_layer = Embedding(1000, 64)
from keras.datasets import imdb
from keras import preprocessing
# Number of words to consider as features
max_features = 10000
# Cut texts after this number of words
# (among top max_features most common words)
maxlen = 20
# Load the data as lists of integers.
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
# This turns our lists of integers
# into a 2D integer tensor of shape `(samples, maxlen)`
x_train = preprocessing.sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = preprocessing.sequence.pad_sequences(x_test, maxlen=maxlen)
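# e.g. pad_sequences([[1, 2, 3]], maxlen=5) -> array([[0, 0, 1, 2, 3]]);
# padding and truncation are 'pre' by default, so long reviews keep their end.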
from keras.models import Sequential
from keras.layers import Flatten, Dense
model = Sequential()
# We specify the maximum input length to our Embedding layer
# so we can later flatten the embedded inputs
model.add(Embedding(10000, 8, input_length=maxlen))
# After the Embedding layer,
# our activations have shape `(samples, maxlen, 8)`.
# We flatten the 3D tensor of embeddings
# into a 2D tensor of shape `(samples, maxlen * 8)`
model.add(Flatten())
# We add the classifier on top
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
model.summary()
history = model.fit(x_train, y_train,
epochs=10,
batch_size=32,
validation_split=0.2)
import os
imdb_dir = '/home/yangsen/workspace/ai_algorithm/data/aclImdb'
train_dir = os.path.join(imdb_dir, 'train')
labels = []
texts = []
for label_type in ['neg', 'pos']:
dir_name = os.path.join(train_dir, label_type)
for fname in os.listdir(dir_name):
        if fname.endswith('.txt'):
            with open(os.path.join(dir_name, fname)) as f:
                texts.append(f.read())
if label_type == 'neg':
labels.append(0)
else:
labels.append(1)
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import numpy as np
maxlen = 100 # We will cut reviews after 100 words
training_samples = 200 # We will be training on 200 samples
validation_samples = 10000 # We will be validating on 10000 samples
max_words = 10000 # We will only consider the top 10,000 words in the dataset
tokenizer = Tokenizer(num_words=max_words)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
data = pad_sequences(sequences, maxlen=maxlen)
labels = np.asarray(labels)
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
# Split the data into a training set and a validation set
# But first, shuffle the data, since we started from data
# where sample are ordered (all negative first, then all positive).
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
x_train = data[:training_samples]
y_train = labels[:training_samples]
x_val = data[training_samples: training_samples + validation_samples]
y_val = labels[training_samples: training_samples + validation_samples]
|
StarcoderdataPython
|
3373759
|
# Copyright 2018 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mlrun
from mlrun.utils import parse_versioned_object_uri
from ..config import config
feature_separator = "."
expected_message = f"in the form feature-set{feature_separator}feature[ as alias]"
def parse_feature_string(feature):
"""parse feature string into feature set name, feature name, alias"""
# expected format: <feature-set>.<name|*>[ as alias]
if feature_separator not in feature:
raise mlrun.errors.MLRunInvalidArgumentError(
f"feature {feature} must be {expected_message}"
)
splitted = feature.split(feature_separator)
if len(splitted) > 2:
raise mlrun.errors.MLRunInvalidArgumentError(
f"feature {feature} must be {expected_message}, cannot have more than one '.'"
)
feature_set = splitted[0]
feature_name = splitted[1]
splitted = feature_name.split(" as ")
if len(splitted) > 1:
return feature_set.strip(), splitted[0].strip(), splitted[1].strip()
return feature_set.strip(), feature_name.strip(), None
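# Example (illustrative): parse_feature_string("stocks.close as price")
# -> ("stocks", "close", "price"); with no alias the third element is None,
# e.g. parse_feature_string("stocks.close") -> ("stocks", "close", None).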
def get_feature_set_by_uri(uri, project=None):
"""get feature set object from db by uri"""
db = mlrun.get_run_db()
default_project = project or config.default_project
project, name, tag, uid = parse_versioned_object_uri(uri, default_project)
return db.get_feature_set(name, project, tag, uid)
def get_feature_vector_by_uri(uri):
"""get feature vector object from db by uri"""
db = mlrun.get_run_db()
project, name, tag, uid = parse_versioned_object_uri(uri, config.default_project)
return db.get_feature_vector(name, project, tag, uid)
|
StarcoderdataPython
|
396212
|
<gh_stars>1-10
from django.shortcuts import render, get_object_or_404 as G404
from .models import (
PageSkin as S,
PageNames as P,
AboutPageNames,
RotatorEditorPageNames,
)
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_page
from rekruter.models import (
RotationPlan,
RotationStep,
RotationSubStep,
)
from rotator.models import (
Crop,
CropFamily,
CropDataSource as CDS,
CropDataFamilySource as FDS,
CropDataTagSource as TDS,
CropTag,
CropInteraction,
CropBookString,
)
from crop_rotator.settings import LANGUAGES as L
from core.classes import (
PageElement as pe,
PageLoad,
# count_sources_pages,
)
from core.snippets import (
flare,
check_slaves,
slice_list_3,
summarize_plans,
list_crops_to,
none_ify,
remove_repeating,
)
from operator import attrgetter
from random import shuffle
from django.views import View
import copy
# Home page view.
def home(request):
pe_rp_published = RotationPlan.objects.filter(published=True)
pe_rp_shuffled = list(pe_rp_published)
    shuffle(pe_rp_shuffled)  # Samples randomly from the plans visible on the home page.
pe_rp_shuffled = pe_rp_shuffled[:4]
plans_list = summarize_plans(pe_rp_shuffled, RotationStep, RotationSubStep)
context = {
"rotation_plans": plans_list,
}
pl = PageLoad(P, L)
context_lazy = pl.lazy_context(skins=S, context=context)
template = "strona/home.html"
return render(request, template, context_lazy)
# Widok "O programie"
def about(request):
pe_apn = pe(AboutPageNames).baseattrs
    # count families that are not "slaves"
    crf = CropFamily.objects.filter(is_family_slave=False)
    num_families = len(crf)
    # count all crops
    pe_c = pe(Crop).allelements
    num_crops = len(pe_c)
    # count all categories
    pe_ctag = pe(CropTag).allelements
    num_categories = len(pe_ctag)
    # count all interactions
    pe_interact = pe(CropInteraction).allelements
    num_interactions = len(pe_interact)
    # count all sources
    pe_sources = pe(CropBookString).allelements
num_sources = len(pe_sources)
context = {
"about_us": pe_apn,
"num_families": num_families,
"num_crops": num_crops,
"num_categories": num_categories,
"num_interactions": num_interactions,
"num_sources": num_sources,
}
pl = PageLoad(P, L)
context_lazy = pl.lazy_context(skins=S, context=context)
template = "strona/about.html"
return render(request, template, context_lazy)
# Widok "O źródłach"
def about_sources(request):
pe_sources = pe(CropBookString).allelements
# count_sources_pages(CDS)
context = {
"sources": pe_sources,
}
pl = PageLoad(P, L)
context_lazy = pl.lazy_context(skins=S, context=context)
template = "strona/about_sources.html"
return render(request, template, context_lazy)
# Widok strony "O nawozach"
def fertilize(request):
pe_apn = pe(AboutPageNames).baseattrs
context = {
"about_us": pe_apn,
}
pl = PageLoad(P, L)
context_lazy = pl.lazy_context(skins=S, context=context)
template = "strona/about.html"
return render(request, template, context_lazy)
# Index of all families, excluding "pseudo-families" (e.g. oat within the grasses).
class AllPlantFamilies(View):
crf = CropFamily.objects.filter(is_family_slave=False)
redirect_link = "family"
template = "strona/all_plant_families.html"
def get(self, request, *args, **kwargs):
sl3 = slice_list_3(self.crf)
context = {
"redir": self.redirect_link,
"families": self.crf,
"ml1": sl3[0],
"ml2": sl3[1],
}
pl = PageLoad(P, L)
context_lazy = pl.lazy_context(skins=S, context=context)
return render(request, self.template, context_lazy)
# Index of all tags.
class AllTags(AllPlantFamilies):
redirect_link = "tag"
crf = CropTag.objects.all()
# Index of all crops.
class AllCrops(AllPlantFamilies):
redirect_link = "crop"
crf = Crop.objects.all()
# Base view for the interaction preview page. Displays "Crop" by default.
# TODO: Restore this to a readable state.
@method_decorator(cache_page(1), name='dispatch')
class InteractionPage(View):
is_family = False
is_tag = False
base_item = Crop
template = "strona/crop.html"
def get(self, request, crop_id, *args, **kwargs):
myitem = pe(self.base_item)
house = []
crop_family_from = []
crop_family_to = []
crop_tags_from = []
crop_tags_to = []
crop_from = []
crop_to = []
crop_to_new = []
crop_family_to_new = []
crop_tags_from_new = []
crop_tags_to_new = []
# Crop
if not self.is_family and not self.is_tag:
pe_c_id = myitem.by_id(G404=G404, id=crop_id)
c_family = pe_c_id.family
family_id = c_family.id
crop_id = pe_c_id.id
crop_from = none_ify(
pe_c_id.crop_relationships.all().filter(
is_server_generated=False).exclude(interaction_sign=0)
)
crop_to_c = Crop.objects.filter(
crop_relationships__about_crop=crop_id,
crop_relationships__is_server_generated=False,
).exclude(crop_relationships__interaction_sign=0)
family_to_c = CropFamily.objects.filter(
crop_relationships__about_crop=crop_id,
crop_relationships__is_server_generated=False,
).exclude(crop_relationships__interaction_sign=0)
tag_to_c = CropTag.objects.filter(
crop_relationships__about_crop=crop_id,
crop_relationships__is_server_generated=False,
).exclude(crop_relationships__interaction_sign=0)
crop_to = list_crops_to(
pe_c_id, crop_to_c, family_to_c, tag_to_c, "crop")
remove_repeating(crop_to_new, crop_to)
pe_cds = CDS.objects.filter(from_crop=crop_id)
master_family = pe_c_id.family.name
if pe_c_id.family.is_family_slave and not pe_c_id.is_fertilizer and not pe_c_id.is_crop_mix:
master_family = pe_c_id.family.family_master.name
# Family
if self.is_family:
family_id = crop_id
c_family = myitem.by_id(G404=G404, id=family_id)
c_family_sub = False
# Crop, Family
if not self.is_tag:
crop_family_from = none_ify(c_family.crop_relationships.all())
crop_to_f = Crop.objects.filter(
crop_relationships__about_family=family_id,
crop_relationships__is_server_generated=False,
).exclude(crop_relationships__interaction_sign=0)
family_to_f = CropFamily.objects.filter(
crop_relationships__about_family=family_id,
crop_relationships__is_server_generated=False,
).exclude(crop_relationships__interaction_sign=0)
tag_to_f = CropTag.objects.filter(
crop_relationships__about_family=family_id,
crop_relationships__is_server_generated=False,
).exclude(crop_relationships__interaction_sign=0)
crop_family_to = list_crops_to(
c_family, crop_to_f, family_to_f, tag_to_f, "family")
remove_repeating(crop_family_to_new, crop_family_to)
# Crop, Tag
if self.is_family:
if c_family.is_family_slave:
sub_id = c_family.family_master.id
c_family_sub = myitem.by_id(G404=G404, id=sub_id)
family_slav_list = check_slaves(
c_family,
c_family_sub,
c_family.is_family_slave,
)
pe_cds = FDS.objects.filter(from_family=family_id)
pe_c_id = c_family
for item in family_slav_list:
pe_c_all = Crop.objects.filter(family=item.id)
for crop_object in pe_c_all:
house.append(crop_object)
master_family = family_slav_list[0]
# Tag
if self.is_tag:
tag = myitem.by_id(G404=G404, id=crop_id)
for relationship in tag.crop_relationships.all():
crop_tags_from.append((tag, relationship))
remove_repeating(crop_tags_from_new, crop_tags_from)
crop_to_t = Crop.objects.filter(
crop_relationships__about_tag=tag.id,
crop_relationships__is_server_generated=False,
).exclude(crop_relationships__interaction_sign=0)
family_to_t = CropFamily.objects.filter(
crop_relationships__about_tag=tag.id,
crop_relationships__is_server_generated=False,
).exclude(crop_relationships__interaction_sign=0)
tag_to_t = CropTag.objects.filter(
crop_relationships__about_tag=tag.id,
crop_relationships__is_server_generated=False,
).exclude(crop_relationships__interaction_sign=0)
crop_tags_to_0 = list_crops_to(
tag, crop_to_t, family_to_t, tag_to_t, "tag")
for item in crop_tags_to_0:
crop_tags_to.append(item)
remove_repeating(crop_tags_to_new, crop_tags_to)
pe_cds = TDS.objects.filter(from_tag=crop_id)
master_family = tag
pe_c_id = tag
family_slav_list = Crop.objects.filter(tags=tag.id)
for item in family_slav_list:
house.append(item)
# Crop
if not self.is_family and not self.is_tag:
for tag in pe_c_id.tags.all():
for relationship in tag.crop_relationships.all():
crop_tags_from.append((tag, relationship))
remove_repeating(crop_tags_from_new, crop_tags_from)
crop_to_t = Crop.objects.filter(
crop_relationships__about_tag=tag.id,
crop_relationships__is_server_generated=False,
).exclude(crop_relationships__interaction_sign=0)
family_to_t = CropFamily.objects.filter(
crop_relationships__about_tag=tag.id,
crop_relationships__is_server_generated=False,
).exclude(crop_relationships__interaction_sign=0)
tag_to_t = CropTag.objects.filter(
crop_relationships__about_tag=tag.id,
crop_relationships__is_server_generated=False,
).exclude(crop_relationships__interaction_sign=0)
crop_tags_to_0 = list_crops_to(
tag, crop_to_t, family_to_t, tag_to_t, "tag")
for item in crop_tags_to_0:
crop_tags_to.append(item)
remove_repeating(crop_tags_to_new, crop_tags_to)
house = sorted(house, key=attrgetter('name'))
sl3 = slice_list_3(house)
translatables = pe(RotatorEditorPageNames).baseattrs
context = {
"family": master_family,
"crop": pe_c_id,
"sources": pe_cds,
"translatables": translatables,
"crop_from": crop_from,
"crop_to": crop_to_new,
"house": house,
"ml1": sl3[0],
"ml2": sl3[1],
"crop_family_from": crop_family_from,
"crop_family_to": crop_family_to_new,
"crop_tags_from": crop_tags_from_new,
"crop_tags_to": crop_tags_to_new,
}
pl = PageLoad(P, L)
context_lazy = pl.lazy_context(skins=S, context=context)
return render(request, self.template, context_lazy)
# Interaction preview view for a given family.
@method_decorator(cache_page(360), name='dispatch')
class InteractionFamily(InteractionPage):
is_family = True
base_item = CropFamily
template = "strona/family.html"
# Interaction preview view for a given tag.
@method_decorator(cache_page(360), name='dispatch')
class InteractionTag(InteractionPage):
is_tag = True
base_item = CropTag
template = "strona/family.html"
|
StarcoderdataPython
|
3543699
|
""" this script extracts a list of custom dimensions in a Google Analytics property
using the Management API and exports to CSV
"""
import argparse
import config
import csv
from apiclient.discovery import build
from oauth2client.service_account import ServiceAccountCredentials
import httplib2
from oauth2client import client
from oauth2client import file
from oauth2client import tools
from six.moves import range
def get_service(api_name, api_version, scope, key_file_location, service_account_email):
credentials = ServiceAccountCredentials.from_p12_keyfile(service_account_email, key_file_location, scopes=scope)
http = credentials.authorize(httplib2.Http())
service = build(api_name, api_version, http=http)
return service
def main():
# Refer to the config.py settings file for credentials
service_account_email = config.apiSettings['service_account_email']
key_file_location = config.apiSettings['key_file_location']
print("Reading custom dimensions from property")
scope = ['https://www.googleapis.com/auth/analytics.readonly']
service = get_service('analytics', 'v3', scope, key_file_location, service_account_email)
print("Analyzing available accounts.")
properties = service.management().webproperties().list(accountId='~all').execute()
propertiesList = properties.get("items")
for property in propertiesList:
print ("Exporting:\t"+property["id"]+"\t"+property["name"])
csvname = "exports/"+property["id"] + ".csv"
pchunks = property["id"].split("-")
dimensions = service.management().customDimensions().list(
accountId=pchunks[1],
webPropertyId=property["id"],
).execute()
dimList = dimensions.get("items")
with open(csvname, 'w', newline='') as csvfile:
dimdump = csv.writer(csvfile, delimiter=",")
dimdump.writerow(["Index","Scope","Name","Active"])
for dimension in dimList:
dimdump.writerow([str(dimension["index"]),dimension["scope"],dimension["name"],str(dimension["active"])])
print ("\nDone.\n")
if __name__ == '__main__':
main()
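# The config.py module imported above is expected to expose (illustrative):
#   apiSettings = {
#       'service_account_email': '...@<project>.iam.gserviceaccount.com',
#       'key_file_location': 'client_secrets.p12',
#   }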
|
StarcoderdataPython
|
8029692
|
<reponame>object-oriented-human/competitive
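# Prints the 1-based position of the first 'F' in the input line.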
print(list(input()).index('F')+1)
|
StarcoderdataPython
|
6620535
|
<filename>app/settings.py
import os
# Amount of seconds before a player can win, this functions as a buffer, so that nobody wins by "accident"
# Used by register_card() in office_game.py
GAME_START_TIME_BUFFER = int(os.environ.get('OG_GAME_START_TIME_BUFFER', 10))
# Amount of seconds before a new card registration times out
GAME_CARD_REGISTRATION_TIMEOUT = int(os.environ.get('OG_GAME_CARD_REGISTRATION_TIMEOUT', 60 * 60))
# Amount of seconds before another player has to register their card to start a new game
GAME_PLAYER_REGISTRATION_TIMEOUT = int(os.environ.get('OG_GAME_PLAYER_REGISTRATION_TIMEOUT', 30))
# Amount of seconds before a game session runs out (in the case when players forget to register a winner)
GAME_SESSION_TIME = int(os.environ.get('OG_GAME_SESSION_TIME', 15 * 60))
# Firebase details
FIREBASE_API_KEY = os.environ.get('OG_FIREBASE_API_KEY', None)
FIREBASE_DATABASE_URL = os.environ.get('OG_FIREBASE_DATABASE_URL', None)
FIREBASE_STORAGE_BUCKET = os.environ.get('OG_FIREBASE_STORAGE_BUCKET', None)
FIREBASE_AUTH_DOMAIN = os.environ.get('OG_FIREBASE_AUTH_DOMAIN', None)
FIREBASE_TYPE = os.environ.get('OG_FIREBASE_TYPE', 'service_account')
FIREBASE_PROJECT_ID = os.environ.get('OG_FIREBASE_PROJECT_ID', None)
FIREBASE_PRIVATE_KEY_ID = os.environ.get('OG_FIREBASE_PRIVATE_KEY_ID', None)
FIREBASE_PRIVATE_KEY = os.environ.get('OG_FIREBASE_PRIVATE_KEY', None)
FIREBASE_CLIENT_EMAIL = os.environ.get('OG_FIREBASE_CLIENT_EMAIL', None)
FIREBASE_CLIENT_ID = os.environ.get('OG_FIREBASE_CLIENT_ID', None)
FIREBASE_AUTH_URI = os.environ.get('OG_FIREBASE_AUTH_URI', 'https://accounts.google.com/o/oauth2/auth')
FIREBASE_TOKEN_URI = os.environ.get('OG_FIREBASE_TOKEN_URI', 'https://accounts.google.com/o/oauth2/token')
FIREBASE_AUTH_PROVIDER_X509_CERT_URL = os.environ.get(
'OG_FIREBASE_AUTH_PROVIDER_X509_CERT_URL',
'https://www.googleapis.com/oauth2/v1/certs'
)
FIREBASE_CLIENT_X509_CERT_URL = os.environ.get('OG_FIREBASE_CLIENT_X509_CERT_URL', None)
# Reader details
READER_VENDOR_ID = os.environ.get('OG_READER_VENDOR_ID', '0xffff')
READER_PRODUCT_ID = os.environ.get('OG_READER_PRODUCT_ID', '0x0035')
# Sentry details
SENTRY_DSN = os.environ.get('OG_SENTRY_DSN', None)
# Slack details
# Notify Slack regarding game events?
SLACK_MESSAGES_ENABLED = os.environ.get('OG_SLACK_MESSAGES_ENABLED', True)
SLACK_TOKEN = os.environ.get('OG_SLACK_TOKEN', None)
SLACK_DEV_CHANNEL = os.environ.get('OG_SLACK_DEV_CHANNEL', '#kontorspill_dev')
SLACK_CHANNEL = os.environ.get('OG_SLACK_CHANNEL', '#kontorspill')
SLACK_USERNAME = os.environ.get('OG_SLACK_USERNAME', 'Kontor Spill')
SLACK_AVATAR_URL = os.environ.get('OG_SLACK_AVATAR_URL', None)
SLACK_DEFAULT_USER_AVATAR_URL = os.environ.get('OG_SLACK_DEFAULT_USER_AVATAR_URL', 'https://capralifecycle.github.io/office-games-viewer/capra.png')
SLACK_SYNC_INTERVAL = os.environ.get('OG_SLACK_SYNC_INTERVAL', 3600)
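# All values above are overridable via environment variables, e.g. (shell):
#   export OG_GAME_SESSION_TIME=300   # shorten a game session to 5 minutes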
|
StarcoderdataPython
|
6660586
|
"""Supervisr PowerDNS DB Router"""
class PowerDNSRouter:
"""
A router to control all database operations on models in the
PowerDNS application.
"""
# pylint: disable=unused-argument
def db_for_read(self, model, **hints):
"""Attempts to read auth models go to PowerDNS."""
if model._meta.app_label == 'supervisr_provider_nix_dns':
return 'powerdns'
return None
# pylint: disable=unused-argument
def db_for_write(self, model, **hints):
"""Attempts to write auth models go to PowerDNS."""
if model._meta.app_label == 'supervisr_provider_nix_dns':
return 'powerdns'
return None
# pylint: disable=unused-argument
def allow_relation(self, obj1, obj2, **hints):
"""Allow relations if a model in the auth app is involved."""
if obj1._meta.app_label == 'supervisr_provider_nix_dns' or \
obj2._meta.app_label == 'supervisr_provider_nix_dns':
return True
return None
# pylint: disable=unused-argument, invalid-name
def allow_migrate(self, db, app_label, model_name=None, **hints):
"""Make sure the auth app only appears in the 'PowerDNS' database."""
if db == 'powerdns':
return app_label == 'supervisr_provider_nix_dns'
return None
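# Wiring sketch (assumed settings module path; adjust to the real project):
#
#   DATABASE_ROUTERS = ['supervisr_provider_nix_dns.router.PowerDNSRouter']
#   DATABASES = {
#       'default': {...},
#       'powerdns': {'ENGINE': 'django.db.backends.mysql', ...},
#   }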
|
StarcoderdataPython
|
11364402
|
<reponame>internap/redlock-fifo
# Copyright 2016 Internap
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from time import sleep
import mock
import redlock
from redlock_fifo.fifo_redlock import FIFORedlock
from tests import test_extendable_redlock
from tests.testutils import FakeRedisCustom, get_servers_pool, TestTimer, ThreadCollection
class FIFORedlockTest(test_extendable_redlock.ExtendableRedlockTest):
@mock.patch('redis.StrictRedis', new=FakeRedisCustom)
def setUp(self):
self.redlock = FIFORedlock(get_servers_pool(active=1, inactive=0))
self.redlock_with_51_servers_up_49_down = FIFORedlock(get_servers_pool(active=51, inactive=49))
self.redlock_with_50_servers_up_50_down = FIFORedlock(get_servers_pool(active=50, inactive=50))
@mock.patch('redis.StrictRedis', new=FakeRedisCustom)
def test_call_order_orchestrated(self,
critical_section=lambda lock_source, lock: None,
default_ttl=100):
connector = FIFORedlock(get_servers_pool(active=1, inactive=0),
fifo_queue_length=3,
fifo_retry_count=10,
fifo_retry_delay=0,
retry_delay=.1)
test_timer = TestTimer()
shared_memory = []
def thread_function(name, lock_source):
lock = lock_source.lock('test_call_order_orchestrated', ttl=default_ttl)
self.assertTrue(lock)
shared_memory.append((name, test_timer.get_elapsed()))
critical_section(lock_source, lock)
thread_collection = ThreadCollection()
thread_collection.start(thread_function, 'Thread A', connector)
sleep(0.05)
thread_collection.start(thread_function, 'Thread B', connector)
sleep(0.051)
thread_collection.start(thread_function, 'Thread C', connector)
thread_collection.join()
actual_order = [entry[0] for entry in shared_memory]
actual_times = [entry[1] for entry in shared_memory]
        self.assertEqual(['Thread A', 'Thread B', 'Thread C'], actual_order)
self.assertAlmostEqual(0, actual_times[0], delta=0.03)
self.assertAlmostEqual(0.15, actual_times[1], delta=0.03)
self.assertAlmostEqual(0.3, actual_times[2], delta=0.03)
def test_call_order_orchestrated_with_unlock(self):
def critical_section(lock_source, lock):
sleep(0.1)
lock_source.unlock(lock)
self.test_call_order_orchestrated(critical_section=critical_section, default_ttl=1000)
@mock.patch('redis.StrictRedis', new=FakeRedisCustom)
def test_locks_are_released_when_position0_could_not_be_reached(self):
connector = FIFORedlock([{'host': 'localhost', 'db': 'mytest'}],
fifo_retry_delay=0)
lock_A = connector.lock('pants', 10000)
self.assertIsInstance(lock_A, redlock.Lock)
lock_B = connector.lock('pants', 10000)
self.assertEqual(lock_B, False)
connector.unlock(lock_A)
for server in connector.servers:
self.assertEqual(server.keys(), [])
@mock.patch('redis.StrictRedis', new=FakeRedisCustom)
def test_ephemeral_locks_use_the_ephemeral_ttl_while_regular_locks_have_requested_ttl(self):
"""
pants
-----
A # A locks
A B # B locks, now second in queue
A B C # C locks, now third in queue
A B C D # D locks, now fourth in queue. D is retrying much faster than anyone else and with unlimited retries.
A B C D # After 1 second, the situation is still the same.
A C D # B dies unexpectedly, redis removes it due to short TTL
A C D # C and D advance one spot
C D # A unlocks
C D # Within 1 second, C becomes first place
D # C unlocks
D # Within 1 second, D becomes first place
# If D got first place, that means C hasn't correctly secured it's old position while trying to get a new one
"""
fifo_ephemeral_ttl_ms = 500
connector = FIFORedlock([{'host': 'localhost'}],
fifo_ephemeral_ttl_ms=fifo_ephemeral_ttl_ms)
locks = dict()
# A locks
locks['A'] = connector.lock('pants', 15000)
self.assertIsInstance(locks['A'], redlock.Lock)
for server in connector.servers:
            # the third positional argument is `places`; a tolerance needs delta=
            self.assertAlmostEqual(server.pttl('pants'), 15000, delta=500)
# B locks, now second in queue
def get_lock_b(connector):
            try:
                locks['B'] = connector.lock('pants', 20000)
            except Exception:
                pass
connector2 = FIFORedlock([{'host': 'localhost'}],
fifo_ephemeral_ttl_ms=fifo_ephemeral_ttl_ms)
thread_B = threading.Thread(target=get_lock_b, args=(connector2, ))
thread_B.start()
# C locks, now third in queue
def get_lock_c(connector):
locks['C'] = connector.lock('pants', 30000)
thread_C = threading.Thread(target=get_lock_c, args=(connector, ))
thread_C.start()
# D locks, now fourth in queue. D is retrying much faster than anyone else.
def get_lock_d(connector):
locks['D'] = connector.lock('pants', 25000)
connector3 = FIFORedlock([{'host': 'localhost'}],
fifo_retry_delay=0,
fifo_retry_count=2000,
fifo_ephemeral_ttl_ms=1001)
thread_D = threading.Thread(target=get_lock_d, args=(connector3, ))
thread_D.start()
# B dies unexpectedly, redis removes it due to short TTL
connector2.lock_instance = mock.Mock()
connector2.lock_instance.side_effect = Exception
# C and D advance one spot
# A unlocks
connector.unlock(locks['A'])
# Within 2 second, C becomes first place
sleep(2)
for server in connector.servers:
self.assertIn('C', locks)
self.assertEqual(server.get('pants'), locks['C'].key)
# C unlocks
connector.unlock(locks['C'])
# Within 2 second, D becomes first place
sleep(2)
for server in connector.servers:
self.assertIn('D', locks)
self.assertEqual(server.get('pants'), locks['D'].key)
thread_B.join()
thread_C.join()
thread_D.join()
|
StarcoderdataPython
|
6671463
|
import configparser
from mirage.tables.helpUtils import UUID, isHexadecimal, isPrintable
class Attribute:
def __init__(self, ATThandle=None, ATTtype=None, ATTvalue=None):
self.ATThandle = ATThandle
self.ATTvalue = ATTvalue
self.ATTtype = ATTtype
def __str__(self):
return '''
ATThandle -> {0}
ATTtype -> {1}
ATTvalue -> {2}
'''.format(self.ATThandle, self.ATTtype, self.ATTvalue)
def __eq__(self, other):
if type(other) is type(self):
return self.ATThandle == other.ATThandle and self.ATTvalue == other.ATTvalue and self.ATTtype == other.ATTtype
return False
class Service():
def __init__(self, beginHandle: int = -1, endHandle: int = -1, uuidValue: int = -1, serviceType: str = None):
self.beginHandle = beginHandle
self.endHandle = endHandle
self.uuidValue = UUID(data=uuidValue)
self.serviceType = serviceType
def __str__(self):
return '''
Begin Handle -> {0}
End Handle -> {1}
UUID Value -> {2}
ServiceType -> {3}
'''.format(self.beginHandle, self.endHandle, self.uuidValue, self.serviceType)
def __eq__(self, other):
if type(other) is type(self):
return self.endHandle == other.endHandle and self.uuidValue == other.uuidValue and self.serviceType == other.serviceType
return False
class Characteristic():
    def __init__(self, declarationHandle: int = -1, uuid: int = -1, valueHandle: int = -1, value: int = -1, permissions: list = None):
self.declarationHandle = declarationHandle
self.uuid = UUID(data=uuid)
self.valueHandle = valueHandle
self.value = value
self.permissions = permissions
def __str__(self):
return '''
Declaration Handle -> {0}
UUID -> {1}
Value Handle -> {2}
Value -> {3}
Permissions -> {4}
'''.format(self.declarationHandle, self.uuid, self.valueHandle, self.value, self.permissions)
def __eq__(self, other):
if type(other) is type(self):
return self.uuid == other.uuid and self.value == other.value and self.valueHandle == other.valueHandle
return False
class Descriptor():
    def __init__(self, handle: int, uuid: int, value: int):
        # (`hex` is a builtin function, not a type, so `int` is used here.)
        self.handle = handle
        self.uuid = UUID(data=uuid)
        self.value = value
def __str__(self):
return '''
Handle -> {0}
UUID -> {1}
Value -> {2}
'''.format(self.handle, self.uuid, self.value)
def __eq__(self, other):
if type(other) is type(self):
return self.handle == other.handle and self.uuid == other.uuid and self.value == other.value
return False
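# Illustrative construction of the GATT-style records above (assumed values;
# UUID(data=...) comes from mirage's helpUtils):
#
#   svc = Service(beginHandle=1, endHandle=5, uuidValue=0x1800, serviceType='primary')
#   char = Characteristic(declarationHandle=2, uuid=0x2A00, valueHandle=3,
#                         value=0x1234, permissions=['read'])
#   desc = Descriptor(handle=4, uuid=0x2902, value=0x0000)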
|
StarcoderdataPython
|
11316965
|
<reponame>AndreAloise77/TccJogosTestes
import os
from datetime import datetime
from typing import Dict, List
# Import Tree
from xml.etree.ElementTree import ElementTree
# Import Services
import Services.AGatsService
import Services.ExtractProvDataService
import Services.GatsService
# Import Utils
import Utils.UtilitiesProvConstants
import Utils.UtilitiesFilePathConstants
import Utils.UtilitiesIO
# Import Models
from Models.Graph.AGats import AGats
from Models.Graph.Gats import Gats
from Models.Graph.Graph import Graph
from Models.Provenience.ProvEdge import ProvEdge
from Models.Provenience.ProvVertex import ProvVertex
# CONSTANTS
AGATS_SERVICE = Services.AGatsService
EXTRACT_PROV_DATA_SERVICE = Services.ExtractProvDataService
GATS_SERVICE = Services.GatsService
UTILITIES_CONSTANTS = Utils.UtilitiesProvConstants.UtilitiesProvConstants
UTILITIES_IO = Utils.UtilitiesIO
UTILITIES_FILE_PATH_CONSTANTS = Utils.UtilitiesFilePathConstants.UtilitiesFilePathConstants
AGATS_FILENAME: str = 'Agats'
GATS_FILENAME: str = 'Gats'
class DataManipulationService:
def __init__(self):
self.graph: Graph = Graph()
self.read_session_time: str = ''
# Change call for the desired functionality
def main(self):
self.__create_agats()
self.__import_agats()
def __create_agats(self):
self.__set_session_time()
loop: int = 1
session_list: List[str] = []
gats_to_agats: Gats = \
self.__create_gats(loop, UTILITIES_FILE_PATH_CONSTANTS.READ_PROV_FILES_DIRECTORY, session_list)
agats: AGats = AGats()
agats.graph_agats = gats_to_agats.graph_gats
agats.set_common_edges_on_creation(session_list)
agats.set_invalid_edges_from_file(UTILITIES_FILE_PATH_CONSTANTS.INVALID_EDGE_FILENAME)
should_paint: bool = True
self.__export_agats(agats, should_paint)
self.__ask_for_invalid_edges_to_user()
def __import_agats(self):
self.__set_session_time()
folder_name_to_read: str = \
UTILITIES_IO.get_dir_base_name(UTILITIES_FILE_PATH_CONSTANTS.READ_OUT_PUT_AGATS_DIRECTORY)
agats: AGats = AGats()
agats.folder_name = folder_name_to_read
path_structure: str = UTILITIES_FILE_PATH_CONSTANTS.FORMAT_FILE_STRUCTURE
graph: Graph = agats.import_agats_file(path_structure, AGATS_FILENAME)
self.graph = graph
loop: int = 1
gats_file_name: str = '{} {}'.format(GATS_FILENAME, loop)
loop += 1
gats_to_export: Gats = Gats()
gats_to_export.graph_gats = graph
self.__export_gats(gats_to_export, gats_file_name)
session_list: List[str] = [gats_file_name]
        gats_to_agats: Gats = \
self.__create_gats(loop, UTILITIES_FILE_PATH_CONSTANTS.READ_NEW_PROV_FILES_DIRECTORY, session_list)
agats.graph_agats = gats_to_agats.graph_gats
agats.set_common_edges_from_file_read(session_list)
should_paint: bool = True
self.__export_agats(agats, should_paint)
self.__ask_for_invalid_edges_to_user()
def __create_gats(self, loop: int, directory: str, session_list: List[str]) -> Gats:
list_prov_files: List[str] = self.__get_file_path_list(directory)
gats_to_agats: Gats = Gats()
for path in list_prov_files:
file_name: str = '{} {}'.format(GATS_FILENAME, loop)
loop += 1
self.__create_graph(self.graph, path, file_name)
graph_to_export: Gats = Gats()
self.__create_graph(graph_to_export, path, file_name)
gats_to_agats.graph_gats = self.graph
self.graph.empty_lists()
session_list.append(file_name)
gats_to_export: Gats = Gats()
gats_to_export.graph_gats = graph_to_export
self.__export_gats(gats_to_export, file_name)
return gats_to_agats
def __export_agats(self, agats: AGats, should_color_graph: bool):
AGATS_SERVICE.export_agats(self.read_session_time, agats,
UTILITIES_FILE_PATH_CONSTANTS.FORMAT_FILE_STRUCTURE,
AGATS_FILENAME, should_color_graph)
def __export_gats(self, gats: Gats, file_name: str):
GATS_SERVICE.export_gats(self.read_session_time, gats,
UTILITIES_FILE_PATH_CONSTANTS.FORMAT_FILE_STRUCTURE,
file_name)
def __set_session_time(self):
time: datetime = datetime.now()
date_format: str = '%Y-%m-%d_%H-%M-%S'
str_time: str = time.strftime(date_format)
self.read_session_time = str_time
@staticmethod
def __read_session(graph: Graph, path: str, file_name: str):
prov_service = EXTRACT_PROV_DATA_SERVICE
tree: ElementTree = prov_service.get_tree_from_filename(path)
vertex_dictionary: Dict[str, ProvVertex] = prov_service.get_tree_vertices_dictionary(tree)
edge_dictionary: Dict[str, ProvEdge] = prov_service.get_tree_edges_dictionary(tree, vertex_dictionary)
edge_dictionary_filtered = \
prov_service.filter_edge_dict_by_type_and_label(edge_dictionary, UTILITIES_CONSTANTS.ACTIVITY)
graph.add_edges_to_graph(edge_dictionary_filtered, file_name)
def __create_graph(self, graph: Graph, path: str, file_name: str):
self.__read_session(graph, path, file_name)
@staticmethod
def __get_file_path_list(directory: str) -> List[str]:
file_path_list: List[str] = UTILITIES_IO.get_fullname_from_all_files_in_dir(directory)
return file_path_list
@staticmethod
def __ask_for_invalid_edges_to_user():
has_invalid_edge: bool = True
filename = UTILITIES_FILE_PATH_CONSTANTS.READ_INVALID_EDGES_FILE_NAME
file_path_and_name = os.path.join(UTILITIES_FILE_PATH_CONSTANTS.READ_INVALID_EDGES_FILES_DIRECTORY, filename)
        has_invalid_edge_message: str = "Does the model contain any invalid edge not yet reported? (S/s = yes, N/n = no): "
        has_invalid_node_edges_message: str = \
            "Enter the IDs of the regions (nodes) joined by an invalid edge (e.g. 01 -> 02):"
        invalid_entry_message: str = "Please enter a valid answer (S/s or N/n)"
with open(file_path_and_name, 'a') as file:
while has_invalid_edge:
invalid_edge_response = input(has_invalid_edge_message)
has_invalid: bool = (invalid_edge_response == 'S') or (invalid_edge_response == 's')
has_valid: bool = (invalid_edge_response == 'N') or (invalid_edge_response == 'n')
if has_valid:
has_invalid_edge = False
elif has_invalid:
invalid_edge = input(has_invalid_node_edges_message)
file.write(invalid_edge)
file.write('\n')
else:
print(invalid_entry_message)
# Method that show how many coded lines were made on the following projects
@staticmethod
def __show_lines_on_all_projects():
model_lines = EXTRACT_PROV_DATA_SERVICE.item_line_count(UTILITIES_FILE_PATH_CONSTANTS.MODELS_DIRECTORY)
interface_lines = EXTRACT_PROV_DATA_SERVICE.item_line_count(UTILITIES_FILE_PATH_CONSTANTS.INTERFACE_DIRECTORY)
services_lines = EXTRACT_PROV_DATA_SERVICE.item_line_count(UTILITIES_FILE_PATH_CONSTANTS.SERVICES_DIRECTORY)
utils_lines = EXTRACT_PROV_DATA_SERVICE.item_line_count(UTILITIES_FILE_PATH_CONSTANTS.UTILS_DIRECTORY)
resp = model_lines + interface_lines + services_lines + utils_lines
print('\nModels Total Lines: {}\nInterface Total Lines: {}\nServices Total Lines: {}\nUtils Total Lines: {}'
.format(model_lines, interface_lines, services_lines, utils_lines))
print('\nApplication Total Lines: {}'.format(resp))
|
StarcoderdataPython
|
4942374
|
<reponame>Lumonk/CNNs.PyTorch<gh_stars>1-10
from .transform import RandomLighting
from .lrscheduler import LRScheduler, LRSequential
from .sgd import SGD
from .label_smoothing import CrossEntropyLoss_LS
|
StarcoderdataPython
|
6661171
|
from more_itertools import ilen
from my.spotify import playlists, songs, Playlist, Song
def test_spotify():
items = list(playlists())
assert len(items) > 0
plist = items[0]
assert isinstance(plist, Playlist)
    plist_songs = plist.songs  # renamed to avoid shadowing the imported songs()
    assert len(plist_songs) > 0
    assert isinstance(plist_songs[0], Song)
def test_songs():
assert ilen(songs()) > 1
|
StarcoderdataPython
|
4971322
|
from pylons import tmpl_context as c
from pylons import app_globals as g
from pylons.i18n import _
from r2.config import feature
from r2.controllers import add_controller
from r2.controllers.reddit_base import RedditController
from r2.lib.errors import errors
from r2.lib.require import require, RequirementException
from r2.lib.validator import (
json_validate,
validate,
validatedForm,
VBoolean,
VExistingUname,
VGold,
VJSON,
VModhash,
VUser,
)
from reddit_gold.models import SnoovatarsByAccount
from reddit_gold.pages import (
GoldInfoPage,
Snoovatar,
SnoovatarProfilePage,
)
from reddit_gold.validators import VSnooColor
@add_controller
class GoldController(RedditController):
def GET_about(self):
return GoldInfoPage(
_("gold"),
show_sidebar=False,
page_classes=["gold-page-ga-tracking"]
).render()
def GET_partners(self):
self.redirect("/gold/about", code=301)
@validate(
vuser=VExistingUname("username"),
)
def GET_snoovatar(self, vuser):
if not vuser or vuser._deleted or not vuser.gold:
self.abort404()
snoovatar = SnoovatarsByAccount.load(vuser, "snoo")
user_is_owner = c.user_is_loggedin and c.user == vuser
if not user_is_owner:
if not snoovatar or not snoovatar["public"]:
self.abort404()
return SnoovatarProfilePage(
user=vuser,
content=Snoovatar(
editable=user_is_owner,
snoovatar=snoovatar,
username=vuser.name,
),
).render()
@add_controller
class GoldApiController(RedditController):
@validatedForm(
VUser(),
VGold(),
VModhash(),
public=VBoolean("public"),
snoo_color=VSnooColor("snoo_color"),
unvalidated_components=VJSON("components"),
)
def POST_snoovatar(self, form, jquery, public, snoo_color, unvalidated_components):
if form.has_errors("components",
errors.NO_TEXT,
errors.TOO_LONG,
errors.BAD_STRING,
):
return
if form.has_errors("snoo_color", errors.BAD_CSS_COLOR):
return
try:
tailors = g.plugins["gold"].tailors_data
validated = {}
for tailor in tailors:
tailor_name = tailor["name"]
component = unvalidated_components.get(tailor_name)
# if the tailor requires a selection, ensure there is one
if not tailor["allow_clear"]:
require(component)
# ensure this dressing exists
dressing = component.get("dressingName")
if dressing:
for d in tailor["dressings"]:
if dressing == d["name"]:
break
else:
raise RequirementException
validated[tailor_name] = component
except RequirementException:
c.errors.add(errors.INVALID_SNOOVATAR, field="components")
form.has_errors("components", errors.INVALID_SNOOVATAR)
return
SnoovatarsByAccount.save(
user=c.user,
name="snoo",
public=public,
snoo_color=snoo_color,
components=validated,
)
|
StarcoderdataPython
|
4892153
|
<gh_stars>10-100
# Copyright Contributors to the Packit project.
# SPDX-License-Identifier: MIT
from requre.online_replacing import record_requests_for_all_methods
from tests.integration.pagure.base import PagureTests
from ogr.exceptions import OperationNotSupported, PagureAPIException
@record_requests_for_all_methods()
class Forks(PagureTests):
def test_fork(self):
assert self.ogr_fork.exists()
assert self.ogr_fork.is_fork
fork_description = self.ogr_fork.get_description()
assert fork_description
a = self.ogr_fork.parent
assert a
is_forked = a.is_forked()
assert is_forked and isinstance(is_forked, bool)
fork = a.get_fork(create=False)
assert fork
assert fork.is_fork
urls = fork.get_git_urls()
assert "{username}" not in urls["ssh"]
def test_fork_in_str(self):
str_representation = str(self.ogr_fork)
assert 'username="' in str_representation
assert "is_fork=True" in str_representation
def test_nonexisting_fork(self):
ogr_project_non_existing_fork = self.service.get_project(
namespace=None,
repo="ogr-tests",
username="qwertzuiopasdfghjkl",
is_fork=True,
)
assert not ogr_project_non_existing_fork.exists()
with self.assertRaises(PagureAPIException) as ex:
ogr_project_non_existing_fork.get_description()
assert "Project not found" in ex.exception.pagure_error
def test_fork_property(self):
fork = self.ogr_project.get_fork()
assert fork
assert fork.get_description()
def test_create_fork(self):
"""
Remove your fork of ogr-tests https://pagure.io/fork/$USER/ogr-tests
before regeneration data.
But other tests needs to have already existed user fork.
So regenerate data for other tests, remove data file for this test
and regenerate it again.
"""
not_existing_fork = self.ogr_project.get_fork(create=False)
assert not not_existing_fork
assert not self.ogr_project.is_forked()
old_forks = self.ogr_project.service.user.get_forks()
self.ogr_project.fork_create()
assert self.ogr_project.get_fork().exists()
assert self.ogr_project.is_forked()
new_forks = self.ogr_project.service.user.get_forks()
assert len(old_forks) == len(new_forks) - 1
def test_create_fork_with_namespace(self):
with self.assertRaises(OperationNotSupported):
self.ogr_project.fork_create(namespace="some_random_namespace")
|
StarcoderdataPython
|
9725970
|
#----------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#----------------------------------------------------------------------------------------------
from common.IR.graph import GraphClass, NodeClass
from tensorflow.core.framework.node_def_pb2 import NodeDef
from tensorflow.core.framework import attr_value_pb2
class TensorflowGraphNode(NodeClass):
def __init__(self, layer):
super(TensorflowGraphNode, self).__init__(layer)
self.in_nodes = list()
self.out_nodes = list()
self._scope = str()
@property
def scope(self):
return self._scope
@scope.setter
def scope(self, scope):
self._scope = scope
@property
def name(self):
return self.layer.name
@property
def type(self):
return self.layer.op
@property
def tf_layer(self):
return self.layer
def get_attr(self, name, default_value = None):
if name in self.layer.attr:
attr = self.layer.attr[name]
field = attr.WhichOneof('value')
val = getattr(attr, field) if field else default_value
if isinstance(val, attr_value_pb2.AttrValue.ListValue):
return list(val.ListFields()[0][1])
else:
return val.decode('utf-8') if isinstance(val, bytes) else val
else:
return default_value
class TensorflowGraph(GraphClass):
multi_tensor_type = [
"Slice",
"Split",
"Unpack"
]
    def __init__(self, model):
        # sanity check.
        super(TensorflowGraph, self).__init__(model)
self.model = model
def build(self):
for i, layer in enumerate(self.model.node):
self.layer_map[layer.name] = TensorflowGraphNode(layer)
self.layer_name_map[layer.name] = layer.name
for pred in layer.input:
if pred not in self.layer_map:
                if pred.split(':')[0] not in self.layer_map:
new_node = NodeDef()
new_node.name = pred
new_node.op = "NoOp"
self.layer_map[pred] = TensorflowGraphNode(new_node)
self.layer_name_map[pred] = pred
self.tf_make_connection(pred, layer.name)
super(TensorflowGraph, self).build()
def tf_make_connection(self, src, dst):
if ':' not in src and self.get_node(src).type in self.multi_tensor_type:
src += ':0'
self._make_connection(src, dst)
src_node = self.get_node(src.split(':')[0])
dst_node = self.get_node(dst.split(':')[0])
        if src_node not in self.layer_map[dst.split(':')[0]].in_nodes:
            self.layer_map[dst.split(':')[0]].in_nodes.append(src_node)
        if dst_node not in self.layer_map[src.split(':')[0]].out_nodes:
            self.layer_map[src.split(':')[0]].out_nodes.append(dst_node)
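# Build sketch (assumed usage): wrap a frozen GraphDef and index its nodes.
#
#   from tensorflow.core.framework.graph_pb2 import GraphDef
#   graph_def = GraphDef()
#   with open('frozen_model.pb', 'rb') as f:
#       graph_def.ParseFromString(f.read())
#   graph = TensorflowGraph(graph_def)
#   graph.build()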
|
StarcoderdataPython
|
167559
|
from django.urls import reverse
from django.test import TestCase
from stacks.models import Stack
from people.models import Person
from rest_framework.test import APIClient
from dashboards.models import Dashboard
from domain_mappings.models import DomainMapping, MappingType
from owf_groups.models import OwfGroupPeople
requests = APIClient()
class StacksApiTests(TestCase):
fixtures = ['resources/fixtures/default_data.json', ]
def tearDown(self):
requests.logout()
def setUp(self):
self.admin_user = Person.objects.get(pk=1)
self.regular_user = Person.objects.get(pk=2)
self.stack = Stack.create(self.regular_user, {
'name': 'test stack 1',
'description': 'test description 1'
})
def test_user_can_create_stack(self):
requests.login(email='<EMAIL>', password='password')
url = reverse('stacks-list')
create_stack_payload = {
'name': 'test stack 2',
'description': 'testing user can create a stack'
}
response = requests.post(url, create_stack_payload)
user_id = 2 # coming from the fixture that creates default users
self.assertEqual(response.status_code, 201)
self.assertTrue(response.data['id'])
self.assertTrue(response.data['default_group'])
self.assertEqual(response.data['name'], create_stack_payload['name'])
self.assertEqual(response.data['description'], create_stack_payload['description'])
self.assertTrue(response.data['stack_context'])
self.assertEqual(response.data['owner']['id'], user_id)
def test_owner_of_stack_can_share_stack(self):
regular_user = Person.objects.get(pk=2)
stack = Stack.create(regular_user, {
'name': 'test stack 1',
'description': 'test description 1'
})
requests.login(email='<EMAIL>', password='password')
url = reverse('stacks-share', args=(f'{stack.id}',))
response = requests.post(url)
self.assertEqual(response.status_code, 204)
def test_nonowner_of_stack_cannot_share_stack(self):
regular_user = Person.objects.get(pk=2)
stack = Stack.create(regular_user, {
'name': 'test stack 1',
'description': 'test description 1'
})
requests.login(email='<EMAIL>', password='password')
        url = reverse('stacks-share', args=(f'{stack.id}',))
response = requests.post(url)
self.assertEqual(response.status_code, 403)
def test_user_can_restore_stack(self):
admin_user = Person.objects.get(pk=1)
regular_user = Person.objects.get(pk=2)
stack = Stack.create(admin_user, {
'name': 'test share stack',
'description': 'test description 1'
})
stack.default_group.add_user(regular_user)
regular_user.sync_dashboards()
requests.login(email='<EMAIL>', password='password')
url = reverse('stacks-restore', args=(f'{stack.id}',))
response = requests.post(url)
self.assertEqual(response.status_code, 200)
def test_admin_can_delete_stack(self):
dashboard_ids_for_stack = list(Dashboard.objects.filter(stack=self.stack).values_list("id", flat=True))
stack_default_group_id = self.stack.default_group.id
requests.login(email='<EMAIL>', password='password')
url = reverse('admin_stacks-detail', args=(f'{self.stack.id}',))
response = requests.delete(url)
self.assertEqual(response.status_code, 204)
# check that all dashboards associated with the stack are deleted
self.assertFalse(Dashboard.objects.filter(stack=self.stack).exists())
# check that all domain mappings for dashboards associated with the stack are deleted
        self.assertFalse(DomainMapping.objects.filter(
            src_id__in=dashboard_ids_for_stack,
            src_type=MappingType.dashboard).exists()
        )
        self.assertFalse(DomainMapping.objects.filter(
            dest_id__in=dashboard_ids_for_stack,
            dest_type=MappingType.dashboard).exists()
        )
# check that all domain mappings for widgets assigned to the stack are deleted
        self.assertFalse(DomainMapping.objects.filter(src_id=stack_default_group_id, src_type=MappingType.group).exists())
def test_nonadmin_can_delete_stack(self):
requests.login(email='<EMAIL>', password='password')
url = reverse('stacks-detail', args=(f'{self.stack.id}',))
        response = requests.delete(url)
self.assertFalse(OwfGroupPeople.objects.filter(
group=self.stack.default_group,
person=self.regular_user).exists()
)
|
StarcoderdataPython
|
1685523
|
import pygame as pg
from pygame.math import Vector2
from time import time
from math import sin, cos, pi, sqrt
from random import random
from enum import Enum, auto
from app import config
from app.utils.functions import distance, sign, collide_rect
from app.game.sprite import VectoredSprite
# Directions
RIGHT = 1
LEFT = -1
UP = -1
DOWN = 1
class CollideDirection(Enum):
TOP = auto()
BOTTOM = auto()
LEFT = auto()
RIGHT = auto()
CENTER = auto()
# Coordinates
X = 0
Y = 1
class MaterialObject(VectoredSprite):
"""
Basic class of material object sprite.
Moves, affected by gravity,
stops when falls on an edge or on a platform.
"""
def __init__(self,
game: "Game object",
pos: Vector2,
size: Vector2,
gravity: int,
*groups: list[pg.sprite.Group]):
"""
Initialize sprite
"""
super().__init__(pos, size, game.material_objects, *groups)
# Save game object
self.game: "Game object" = game
# Initialize object's speed
self.speed: Vector2 = Vector2(0, 0)
# Track collide direction
self.collide_direction: CollideDirection = None
# Save gravity value
self.gravity: int = gravity
        # Initialize delta-time mechanism
self.last_tick: float = time()
self.dt = 0
# Initialize some fields used by child classes
self.on_edge: bool = False
def update_dt(self):
"""
Update delta-time to apply tick
"""
now: float = time()
self.dt: float = now - self.last_tick
self.last_tick = now
def update(self):
"""
Update sprite
"""
super().update()
self.update_dt()
# Initialize variables to look if X or Y delta can be applied
x_can_move: bool = True
y_can_move: bool = True
has_collision = False
# Apply speed and gravity
new_pos: Vector2 = Vector2(self.pos)
new_speed: Vector2 = Vector2(self.speed)
new_pos += self.speed * self.dt
new_speed.y += self.gravity * self.dt
new_pos.y += (self.gravity * self.dt ** 2) / 2
# Collide with edges
# Floor
if new_pos.y + self.size.y > config.GAME_SIZE.y:
new_pos.y = config.GAME_SIZE.y - self.size.y
self.on_edge = True
y_can_move = False
has_collision = True
# Ceil
if new_pos.y < 0:
new_pos.y = 0
self.on_edge = True
y_can_move = False
has_collision = True
# Right
if new_pos.x + self.size.x > config.GAME_SIZE.x:
new_pos.x = config.GAME_SIZE.x - self.size.x
self.on_edge = True
x_can_move = False
has_collision = True
# Left
if new_pos.x < 0:
new_pos.x = 0
self.on_edge = True
x_can_move = False
has_collision = True
movement_direction: (int, int) = (
sign(new_pos.x - self.pos.x),
sign(new_pos.y - self.pos.y)
)
# Platforms
for platform in self.game.platforms:
            new_x_collide, new_y_collide = collide_rect(platform.rect, pg.Rect(*new_pos, *self.size))
            old_x_collide, old_y_collide = collide_rect(platform.rect, pg.Rect(*self.pos, *self.size))
if not (new_x_collide and new_y_collide):
continue
has_collision = True
if movement_direction[X] == LEFT:
if old_y_collide:
x_can_move = False
self.pos.x = platform.pos.x + platform.size.x
if movement_direction[X] == RIGHT:
if old_y_collide:
x_can_move = False
self.pos.x = platform.pos.x - self.size.x
if movement_direction[Y] == DOWN:
if old_x_collide:
y_can_move = False
self.pos.y = platform.pos.y - self.size.y
if movement_direction[Y] == UP:
if old_x_collide:
y_can_move = False
self.pos.y = platform.pos.y + platform.size.y
self.on_collide()
# Apply changes
if not x_can_move:
self.speed.x = 0
else:
self.speed.x = new_speed.x
self.pos.x = new_pos.x
if not y_can_move:
self.speed.y = 0
else:
self.speed.y = new_speed.y
self.pos.y = new_pos.y
if not y_can_move and movement_direction[Y] == DOWN:
self.collide_direction = CollideDirection.BOTTOM
elif not y_can_move and movement_direction[Y] == UP:
self.collide_direction = CollideDirection.TOP
elif not x_can_move and movement_direction[X] == RIGHT:
self.collide_direction = CollideDirection.RIGHT
elif not x_can_move and movement_direction[X] == LEFT:
self.collide_direction = CollideDirection.LEFT
elif not has_collision:
self.collide_direction = None
if self.on_land:
self.speed.x = 0
self.speed.y = 0
@property
def on_land(self):
return self.collide_direction == CollideDirection.BOTTOM
def on_collide(self):
pass
class Player(MaterialObject):
"""
Player sprite
"""
def __init__(self,
game: "Game object",
pos: Vector2,
color: (int, int, int),
shortcuts: dict[str, int]):
"""
Initialize Player sprite
"""
super().__init__(game,
pos,
config.PLAYER_SIZE,
config.PLAYER_GRAVITY,
game.players)
# Fill image with color and save color
self.image.fill(color)
self.color = color
# Set default direction
self.direction = RIGHT
# Save shortcuts
self.shortcuts = shortcuts
        # Initialize shoot timeout mechanism
self.shoot_from_time = 0
self.bombs = []
def update(self):
"""
Update Player sprite
"""
super().update()
self.handle_controls()
def handle_controls(self):
"""
Handle player controls
"""
pressed = pg.key.get_pressed()
if pressed[self.shortcuts['JUMP']] and self.on_land:
self.speed.y = config.PLAYER_JUMP * UP
if pressed[self.shortcuts['RIGHT']] and not pressed[self.shortcuts['LEFT']]:
self.speed.x = config.PLAYER_SPEED * RIGHT
if not pressed[self.shortcuts['RIGHT']] and pressed[self.shortcuts['LEFT']]:
self.speed.x = config.PLAYER_SPEED * LEFT
# Calculate direction
if self.speed.x > 0:
self.direction = RIGHT
elif self.speed.x < 0:
self.direction = LEFT
if pressed[self.shortcuts['SHOOT']]:
self.shoot()
if pressed[self.shortcuts['BOMB']]:
self.launch_rocket()
def shoot(self):
"""
Shoot action
"""
# Be affected by shoot timeout
if not self.shoot_from_time <= time():
return
self.shoot_from_time = time() + config.SHOOT_COOLDOWN
self.bombs.append(Bullet(
self.game,
self
))
def launch_rocket(self):
if not self.shoot_from_time <= time():
return
self.shoot_from_time = time() + config.SHOOT_COOLDOWN
self.bombs.append(Rocket(
self.game,
self
))
def kill(self):
super().kill()
while self.bombs:
self.bombs.pop().boom()
class Projectile(MaterialObject):
def __init__(self,
game: "Game object",
color: (int, int, int),
pos: Vector2,
size: Vector2,
speed: Vector2,
gravity: int,
is_killing: bool,
can_lie: bool,
shooter: Player):
super().__init__(game, pos, size, gravity)
# Fill surface with color
self.image.fill(color)
# Set speed
self.speed = speed
# Save whether to kill players
self.is_killing: bool = is_killing
# Save whether can lie
self.can_lie = can_lie
self.shooter = shooter
def update(self):
super().update()
if self.on_edge:
self.when_on_edge()
if self.on_land:
self.when_on_land()
for player in pg.sprite.spritecollide(self, self.game.players, False):
self.on_collide_player(player)
def when_on_edge(self):
if not self.can_lie:
self.kill()
def when_on_land(self):
if not self.can_lie:
self.kill()
def on_collide(self):
if not self.can_lie:
self.kill()
def on_collide_player(self, player: Player):
if self.is_killing and player != self.shooter:
player.kill()
class Bomb(Projectile):
def boom(self):
super().kill()
for i in range(config.N_PARTICLES):
FireParticle(self.game,
self.pos + (
Vector2(0, self.size.y)
if self.collide_direction == CollideDirection.TOP else
Vector2(0, -self.size.y)
if self.collide_direction == CollideDirection.BOTTOM else
Vector2(-self.size.x, 0)
if self.collide_direction == CollideDirection.RIGHT else
Vector2(self.size.x, 0)
if self.collide_direction == CollideDirection.LEFT else
Vector2(0)
),
random() * pi
if self.collide_direction == CollideDirection.BOTTOM else
random() * pi + pi
if self.collide_direction == CollideDirection.TOP else
random() * pi + pi / 2
if self.collide_direction == CollideDirection.RIGHT else
random() * pi - pi / 2
if self.collide_direction == CollideDirection.LEFT else
random() * 2 * pi,
self.shooter
)
class Bullet(Bomb):
def __init__(self,
game: "Game object",
shooter: Player):
super().__init__(game,
tuple(map(lambda x: x * 0.6, list(shooter.color))),
shooter.rect.topleft - Vector2(config.BULLET_SIZE.x + 1, 0)
if shooter.direction == LEFT
else shooter.rect.topright + Vector2(1, 0),
config.BULLET_SIZE,
Vector2(config.BULLET_SPEED * shooter.direction, 0).rotate(config.SHOOT_ANGLE * -shooter.direction),
config.BULLET_GRAVITY,
True,
False,
shooter)
def kill(self):
self.boom()
class FireParticle(Projectile):
"""
Fire particle sprite
"""
def __init__(self,
game: "Game object",
pos: Vector2,
angle: int,
shooter: Player):
super().__init__(game,
config.PARTICLE_COLOR,
pos,
config.PARTICLE_SIZE,
Vector2(-config.PARTICLE_SPEED, 0).rotate_rad(angle),
config.PARTICLE_GRAVITY,
True,
False,
shooter)
class Rocket(Bomb):
def __init__(self,
game: "Game object",
shooter: Player):
super().__init__(game,
config.ROCKET_COLOR,
Vector2(shooter.pos),
config.ROCKET_SIZE,
Vector2(config.ROCKET_SPEED, 0),
0,
False,
False,
shooter)
self.shooter: Player = shooter
self.speed.rotate_ip(self.speed.angle_to(self.get_target_direction()))
def get_target_direction(self):
min_distance: Vector2 = None
nearest_player: Player = None
for player in self.game.players:
if player == self.shooter:
continue
            if nearest_player is None \
or min_distance.length() > distance(self, player).length():
min_distance = distance(player, self)
nearest_player = player
return min_distance
def update(self):
super().update()
angle: float = self.get_target_direction().angle_to(Vector2(1, 0)) \
- self.speed.angle_to(Vector2(1, 0))
if 180 >= angle % 360 > 0:
self.image.fill((255, 0, 0))
self.speed.rotate_ip(-config.ROCKET_ROTATION * self.dt)
elif 180 < angle % 360:
self.image.fill((0, 0, 255))
self.speed.rotate_ip(config.ROCKET_ROTATION * self.dt)
def on_collide_player(self, player: Player):
if player != self.shooter:
self.kill()
def kill(self):
self.boom()
|
StarcoderdataPython
|
1796471
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 - cologler <<EMAIL>>
# ----------
#
# ----------
'''
NimbleText template:
@property
def <% $0.toWords().replace(/ /gm, '_').toLowerCase() %>(self):
return self._get('$0')
'''
class _BaseModel:
def __init__(self, data: dict):
if not isinstance(data, dict):
raise TypeError
self._data = data
def _get(self, key): return self._data.get(key)
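# Illustrative subclass (added; 'FirstName' is a made-up field) showing the
# kind of property the NimbleText template above expands to:
class _PersonModel(_BaseModel):
    @property
    def first_name(self):
        return self._get('FirstName')
# _PersonModel({'FirstName': 'Ada'}).first_name  ->  'Ada'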
|
StarcoderdataPython
|
3515736
|
"""This file is part of the trivago/rebase library.
# Copyright (c) 2018 trivago N.V.
# License: Apache 2.0
# Source: https://github.com/trivago/rebase
# Version: 1.2.2
# Python Version: 3.6
# Author: <NAME> <<EMAIL>>
"""
import uuid
from typing import Any, Dict, List
import logging
import simplejson as json
class Object(object):
def __dir__(self):
"""Return a list of attributes for the class.
Returns: a list of attributes for the class.
"""
return [*self._attributes.keys(), *['attributes', 'classname']]
def __init__(self, **kwargs):
"""Initialize the object with the given attributes.
If this method is overridden in the child, the parent implementation
should be called in order to properly assign attributes.
By default attributes are assigned dynamically to the object. But to
have more control in child classes (such as remapping), it is
recommended to override the `Object.properties()` method.
Args:
**kwargs: Arbitrary keyword argument which by default becomes an
attribute of the object unless specified otherwise in
`Object.properties()`.
"""
self._raw_attributes = kwargs
self._attributes = {}
self._id = None
self._init_attributes()
def __getattr__(self, attr_name: str) -> Any:
"""Return the value of an object attribute.
Do not call this method directly as it is a python magic function that
will be implicitly called when executing `value = object.attribute` or
`value = getattr(object, 'attribute')`
Args:
attr_name (string): the attribute name
Returns:
Any: the value of the attribute
Raises:
            AttributeError: If attribute is undefined in `Object.properties()`
"""
if attr_name in self._properties():
            return super().__getattribute__(attr_name)
elif attr_name not in self.properties():
raise AttributeError(
f'Getting unknown property: `{self.classname}.{attr_name}`.')
return self._attributes.get(attr_name)
def __setattr__(self, attr_name: str, value: Any):
"""Set the value of an object attribute.
Do not call this method directly as it is a python magic function that
will be implicitly called when executing `object.attribute = value` or
`setattr(object, 'attribute', value)`
Args:
attr_name (string): the attribute name
value (Any): the attribute value
Raises:
            AttributeError: If attribute is undefined in `Object.properties()`
"""
if attr_name in self._properties():
super().__setattr__(attr_name, value)
elif attr_name not in self.properties():
raise AttributeError(
f'Setting unknown property: `{self.classname}.{attr_name}`.')
else:
attr = self.properties().get(attr_name)
if isinstance(attr, tuple) and value is not None:
k, v = attr
if type(value) != v:
raise AttributeError(
f'`Value for {self.classname}.{attr_name}` should be of type {v}; {type(value)} provided.')
self._attributes.update({attr_name: value})
def __str__(self) -> str:
"""Return a string representation of the object in json.
Returns: a string representation of the object in json format.
"""
return json.dumps(self._debug(), use_decimal=True)
def __repr__(self) -> str:
"""Return the representation of the object at creation.
Returns:
string: the representation with constructor arguments
"""
return '{classname}(**{args})'.format(
classname=self.classname,
args=self._raw_attributes
)
def _debug(self) -> Dict[str, Any]:
return {
'_id': self.get_id(),
'attributes': self.attributes
}
def _enforce_data_type(self, data: Any, data_type: type) -> Any:
try:
if data is not None:
if isinstance(data_type, type) and isinstance(data_type(), Object):
return data_type(**data)
elif data_type in (bool, str, int, float, complex, list, tuple, range, set, dict) or callable(data_type):
return data_type(data)
except TypeError:
return data
return data
def _init_attributes(self):
"""Perform the mapping of attributes based `Object.properties()`.
Returns:
void
"""
for k, v in self.properties().items():
if isinstance(v, str):
logging.debug('Key: %s is being parsed as `str` with value: %s', k, v)
self._attributes.setdefault(
k, self._get_attr_recurse(v, self._raw_attributes))
elif isinstance(v, tuple):
logging.debug('Key: %s is being parsed as `tuple` with value: %s', k, v)
attribute, data_type = v
data = None
if attribute and isinstance(attribute, str):
data = self._get_attr_recurse(
attribute, self._raw_attributes)
elif callable(attribute):
data = attribute()
self._attributes.setdefault(k,
self._enforce_data_type(
data, data_type))
elif isinstance(v, type) and isinstance(v(), Object):
logging.debug('Key: %s is being parsed as `rebase.core.Object` with value: %s', k, v)
self._attributes.setdefault(k, v(**self._raw_attributes))
elif callable(v):
logging.debug('Key: %s is being parsed as `callable` with value: %s', k, v)
self._attributes.setdefault(k, v())
elif k in self._raw_attributes:
logging.debug('Key: %s is being parsed as `raw_attributes` with value: %s', k, v)
self._attributes.setdefault(k, self._raw_attributes.get(k))
else:
logging.debug('Key: %s is being parsed as `raw_attributes` with value: %s', k, v)
self._attributes.setdefault(k, self._raw_attributes.get(k, v))
def _get_attr_recurse(self, attr, obj, idx=0):
if isinstance(obj, Object):
return self._get_attr_recurse(attr, obj.attributes, idx)
elif obj is None:
return None
attr_list = attr.split('.')
key = attr_list.pop(idx)
if key not in obj:
return None
if len(attr_list) == idx:
return obj.get(key)
else:
return self._get_attr_recurse(attr, obj.get(key), idx+1)
def _properties(self) -> List[str]:
return ['_id', '_attributes', '_raw_attributes']
@property
def attributes(self) -> Dict[str, Any]:
"""Return the attributes of the object based on `Object.properties()`.
Return:
dict: a dictionary of the attributes of the object
"""
return self.get(*self._attributes)
@property
def classname(self) -> str:
"""Return the qualified name of this class.
Returns:
string: the qualified name of this class
"""
return self.__class__.__name__
def get(self, *attrs) -> Dict[str, Any]:
"""Return a dict of the attribute names passed as arguments.
Args:
attrs (list): comma separated name of attributes for the object
Returns:
dict: the attributes of the object if set
"""
return {
k: v.attributes
if isinstance(v, Object) else [
x.attributes
if isinstance(x, Object) else x
for x in v
]
if isinstance(v, list) else {
x: y.attributes
if isinstance(y, Object) else y
for x, y in v.items()
}
if isinstance(v, dict) else {
x.get_id(): x.attributes
for x in v
}
if isinstance(v, set) else v
for k, v in self._attributes.items() if k in attrs
}
def get_id(self):
"""Generate and return the unique id of the object.
Returns:
string: the unique id generated by uuid
"""
if not self._id:
self._id = str(uuid.uuid4())
return self._id
def properties(self) -> Dict[str, Any]:
"""Return the mapping of properties passed to the constructor.
This method can be overridden if you want more customised _properties
and do advanced mapping of your attributes.
Returns:
dict: the mapped properties passed to constructor
"""
        return self._raw_attributes.get('properties') or {
            k: k for k in self._raw_attributes
        }
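# Usage sketch (added; 'Hotel', 'hotel_name' and 'rating' are hypothetical
# names, not from the library) showing the remapping that properties() allows:
class Hotel(Object):
    def properties(self):
        return {
            'name': 'hotel_name',      # remap a raw constructor key
            'stars': ('rating', int),  # remap and coerce the value to int
        }
# h = Hotel(hotel_name='Roma', rating='4')
# h.stars       -> 4
# h.attributes  -> {'name': 'Roma', 'stars': 4}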
|
StarcoderdataPython
|
3535032
|
"""Sample module"""
import logging
def sample_func(say=True):
"""Sample func"""
logging.debug("Enter sample_func()")
if say:
logging.info("Sample func")
return True
if __name__ == "__main__":
sample_func()
|
StarcoderdataPython
|
1859073
|
<gh_stars>100-1000
"""Tasks are how scheduler identifies and executes your application."""
|
StarcoderdataPython
|
4959436
|
<reponame>ysd1123/BiliSpider
'''
Built-in data() function: returns, as a dict, the data of the Bilibili user
identified by the uuid parameter.
video_view: video view count
article_view: article view count
like: total likes
'''
import requests
def data(uuid):
def be_simple(orig_dict):
simple_dict = {'video_view': orig_dict['archive']['view'],
'article_view': orig_dict['article']['view'],
'likes': orig_dict['likes']}
return simple_dict
url = 'https://api.bilibili.com/x/space/upstat?mid='+str(uuid)
headers = {
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36'}
Up_data = requests.get(url, headers=headers)
Up_data.encoding = 'utf-8'
dict_Updata = Up_data.json()
if dict_Updata['code'] == 0:
return be_simple(dict_Updata['data'])
else:
return 'Up 404'
if __name__ == "__main__":
pass
|
StarcoderdataPython
|
11350240
|
<reponame>l33tdaima/l33tdaima<gh_stars>1-10
class Solution:
def checkValidStringV1(self, s: str) -> bool: # backtrack
WILDCARD = ["(", "", ")"]
def backtrack(wip, t):
for i in range(len(t)):
if t[i] == "*": # wildcard
for w in WILDCARD:
if backtrack(wip, w + t[i + 1 :]):
return True
return False
if t[i] == "(":
wip += 1
else: # ")"
wip -= 1
if wip < 0:
return False
return wip == 0
return backtrack(0, s)
def checkValidStringV2(self, s: str) -> bool:
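        # Greedy range check: 'lower' and 'upper' bound the count of unmatched
        # '(' so far, treating each '*' as ')' for the lower bound and as '('
        # for the upper bound; the string is valid iff lower can reach 0.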
lower, upper = 0, 0
for c in s:
if c == "(":
lower, upper = lower + 1, upper + 1
elif c == ")":
if lower > 0:
lower -= 1
upper -= 1
else:
if lower > 0:
lower -= 1
upper += 1
if upper < 0:
return False
return lower == 0
# TESTS
tests = [
["", True],
["*", True],
["(", False],
[")", False],
["()", True],
["(*)", True],
["(*))", True],
["(*()", True],
[")*()", False],
[
"(((((*(()((((*((**(((()()*)()()()*((((**)())*)*)))))))(())(()))())((*()()(((()((()*(())*(()**)()(())",
False,
],
]
for t in tests:
sol = Solution()
actual = sol.checkValidStringV2(t[0])
print("Is", t[0], "valid parenthesis string? ->", actual)
assert actual == t[1]
|
StarcoderdataPython
|
139059
|
import sys, pygame
from pygame.locals import * # Needed for Key Constants
pygame.init() # Initializes Pygame
# Declarations
size = width, height = 640, 480 # Defines Windows Size
speed = [0, 0] # X and Y Speeds
black = 0, 0, 0 # Represents black colour as RGB
# Sets Windows Size
screen = pygame.display.set_mode(size)
# Clock to cap FPS
clock = pygame.time.Clock()
# Creates ball sprite
ball = pygame.image.load("intro_ball.gif")
ballrect = ball.get_rect()
# Create Text Surface
font = pygame.font.SysFont(None, 24)
text = font.render('If you come for the king, you better not miss', True, (50, 50, 50))
# Music
pygame.mixer.music.load("music/DukeNukem.mp3")
pygame.mixer.music.set_volume(1)
pygame.mixer.music.play()
# Game Loop
while 1:
# Handle Events
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
# Handle Key Presses
keys = pygame.key.get_pressed()
if keys[K_UP] or keys[K_w]: # Up
speed = [0, -10]
ballrect = ballrect.move(speed)
if keys[K_DOWN] or keys[K_s]: # Down
speed = [0, 10]
ballrect = ballrect.move(speed)
if keys[K_LEFT] or keys[K_a]: # Left
speed = [-10, 0]
ballrect = ballrect.move(speed)
if keys[K_RIGHT] or keys[K_d]: # Right
speed = [10, 0]
ballrect = ballrect.move(speed)
# Draw to Screen
screen.fill(black)
screen.blit(text, (150, 230))
screen.blit(ball, ballrect)
# Updates Display
pygame.display.flip()
clock.tick(30) # Caps FPS at 30
|
StarcoderdataPython
|
8133953
|
<gh_stars>0
#!/usr/bin/env python
# $Id$
""" solutions"""
import puzzler
from puzzler.puzzles.pentahexes import PentahexesTriangle1
puzzler.run(PentahexesTriangle1)
|
StarcoderdataPython
|
242651
|
<reponame>hyzyla/directory.org.ua
"""Init tables
Revision ID: 8512fa6a4a52
Revises:
Create Date: 2022-01-22 17:42:33.225268
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"katottg",
sa.Column("code", sa.String(), nullable=False),
sa.Column("name", sa.String(), nullable=False),
sa.Column("level", sa.Integer(), nullable=False),
sa.Column("parent_id", sa.String(), nullable=True),
sa.Column(
"category",
sa.Enum(
"region",
"special",
"district",
"community",
"city",
"urban_village",
"village",
"small_village",
"municipal_district",
name="katottgcategory",
),
nullable=False,
),
sa.Column("children_count", sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
["parent_id"],
["katottg.code"],
),
sa.PrimaryKeyConstraint("code"),
)
op.create_index(op.f("ix_katottg_category"), "katottg", ["category"], unique=False)
op.create_index(op.f("ix_katottg_code"), "katottg", ["code"], unique=False)
op.create_index(op.f("ix_katottg_level"), "katottg", ["level"], unique=False)
op.create_index(op.f("ix_katottg_name"), "katottg", ["name"], unique=False)
op.create_index(
op.f("ix_katottg_parent_id"), "katottg", ["parent_id"], unique=False
)
op.create_table(
"koatuu",
sa.Column("code", sa.String(), nullable=False),
sa.Column(
"category",
sa.Enum(
"village",
"small_village",
"urban_village",
"city",
"municipal_district",
name="koatuucategory",
),
nullable=True,
),
sa.Column("name", sa.String(), nullable=False),
sa.Column("katottg_code", sa.String(), nullable=True),
sa.Column("katottg_name", sa.String(), nullable=True),
sa.Column(
"katottg_category",
sa.Enum(
"region",
"special",
"district",
"community",
"city",
"urban_village",
"village",
"small_village",
"municipal_district",
name="katottgcategory",
),
nullable=True,
),
sa.PrimaryKeyConstraint("code"),
)
op.create_index(op.f("ix_koatuu_category"), "koatuu", ["category"], unique=False)
op.create_index(op.f("ix_koatuu_code"), "koatuu", ["code"], unique=False)
op.create_index(
op.f("ix_koatuu_katottg_category"), "koatuu", ["katottg_category"], unique=False
)
op.create_index(
op.f("ix_koatuu_katottg_code"), "koatuu", ["katottg_code"], unique=False
)
op.create_index(
op.f("ix_koatuu_katottg_name"), "koatuu", ["katottg_name"], unique=False
)
op.create_index(op.f("ix_koatuu_name"), "koatuu", ["name"], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f("ix_koatuu_name"), table_name="koatuu")
op.drop_index(op.f("ix_koatuu_katottg_name"), table_name="koatuu")
op.drop_index(op.f("ix_koatuu_katottg_code"), table_name="koatuu")
op.drop_index(op.f("ix_koatuu_katottg_category"), table_name="koatuu")
op.drop_index(op.f("ix_koatuu_code"), table_name="koatuu")
op.drop_index(op.f("ix_koatuu_category"), table_name="koatuu")
op.drop_table("koatuu")
op.drop_index(op.f("ix_katottg_parent_id"), table_name="katottg")
op.drop_index(op.f("ix_katottg_name"), table_name="katottg")
op.drop_index(op.f("ix_katottg_level"), table_name="katottg")
op.drop_index(op.f("ix_katottg_code"), table_name="katottg")
op.drop_index(op.f("ix_katottg_category"), table_name="katottg")
op.drop_table("katottg")
# ### end Alembic commands ###
|
StarcoderdataPython
|
12824545
|
# encoding : UTF-8
from Engine.Display import debug3D_utils
from Engine.Collisions import AABBCollider
from Settings import *
import pygame as pg
from math import sqrt
from Engine.Actions import ActionObject
from Game.character_states import *
import Engine.game_engine  # needed below for Engine.game_engine.GameEngine.get_instance()
class Character(ActionObject):
def __init__(self, position=None, player_id=PlayerId.PLAYER_ID_1, max_velocity=None, jump_velocity=None):
ActionObject.__init__(self, player_id)
self._position = Vector3(position) if position is not None else Vector3()
self.previous_position = Vector3(self._position)
self.w = CHARACTER_W
self.h = CHARACTER_H
self.collider_relative_position = Vector3()
self.collider = None
self.is_colliding_ball = False
self.max_velocity = max_velocity if max_velocity is not None else RUN_SPEED # m/s
self.jump_velocity = jump_velocity if jump_velocity is not None else JUMP_VELOCITY # m/s
self.velocity = Vector3()
self.direction = Vector3()
self.team = Team()
self.state = Idling(self)
self.set_default_collider()
# sprite
self.rect = pg.Rect(0, 0, 0, 0)
self.rect_shadow = pg.Rect(0, 0, 0, 0)
@property
def position(self):
return self._position
@position.setter
def position(self, value):
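        # Keep the collider centered on the character whenever the position changes.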
self._position = value
self.collider.center = self._position + self.collider_relative_position
def draw_debug(self):
prev_rect = self.rect
prev_shadow_rect = self.rect_shadow
ground_pos = Vector3(self.position)
ground_pos.z = 0
self.rect_shadow = debug3D_utils.draw_horizontal_ellipse(ground_pos, self.w / 2)
self.rect = self.collider.draw_debug()
return [prev_shadow_rect.union(self.rect_shadow), prev_rect.union(self.rect)]
def move_rel(self, dxyz, free_displacement=FREE_DISPLACEMENT):
"""
Move object with a certain displacement.
:param pygame.Vector3 dxyz: displacement
:param bool free_displacement: True if displacement will be not limited on court
:return: None
"""
self.position += Vector3(dxyz)
if not free_displacement:
self.limit_displacement_on_court()
def move(self, direction, dt, free_displacement=FREE_DISPLACEMENT):
"""
Move object along a specified direction and amount of time.
The amplitude of displacement is dependant from :
- :var direction: magnitude
- :var self.max_velocity:
- :var dt:
:param pygame.Vector3 direction: direction of displacement
:param float dt: amount of time in ms. Usually, dt is the time between 2 frames
:param bool free_displacement: True if displacement will be not limited on court
:return: None
"""
dxyz = 0.001 * dt * direction * self.max_velocity
self.move_rel(dxyz, free_displacement)
def limit_displacement_on_court(self):
"""
Limit displacement on court. Called by move_rel method.
:return: None
"""
new_pos = self.position
# net
if self.team.id == TeamId.LEFT:
if self.collider.get_bound_coords(axis=1, m_to_p=True) + self.collider_relative_position.y > 0:
new_pos.y = -self.collider.size3.y / 2 - self.collider_relative_position.y
else:
if self.collider.get_bound_coords(axis=1, m_to_p=False) + self.collider_relative_position.y < 0:
new_pos.y = self.collider.size3.y / 2 - self.collider_relative_position.y
# out of court
f = 1.5
game_engine = Engine.game_engine.GameEngine.get_instance()
court = game_engine.court
if self.team.id == TeamId.LEFT:
new_pos.y = max(-f * court.w / 2, new_pos.y)
else:
new_pos.y = min(f * court.w / 2, new_pos.y)
new_pos.x = max(-f * court.h / 2, new_pos.x)
new_pos.x = min(f * court.h / 2, new_pos.x)
self.position = new_pos
def update_actions(self, action_events, **kwargs):
dt = kwargs["dt"] if "dt" in kwargs.keys() else 0
filtered_action_events = self.filter_action_events_by_player_id(action_events)
# state machine :
# run current state
self.state.run(filtered_action_events, dt=dt)
# eventually switch state
self.state = self.state.next(filtered_action_events, dt=dt)
def update_physics(self, dt, free_displacement=FREE_DISPLACEMENT):
self.previous_position = Vector3(self.position)
self.velocity += Vector3(0, 0, -0.001 * dt * G)
self.move_rel(0.001 * dt * self.velocity, free_displacement)
def get_hands_position(self):
"""
Return hands position of character in world coordinates.
:return: hands position
:rtype pygame.Vector3:
"""
dh = Vector3(0, 0, self.h)
dh.y = self.w / 2
if not self.team.id == TeamId.LEFT:
dh.y *= -1
return self.position + dh
def set_default_collider(self):
"""
Set default AABB Collider.
:return: None
"""
self.collider_relative_position = Vector3(0, 0, self.h / 2)
collider_size3 = Vector3(self.w, self.w, self.h)
self.collider = AABBCollider(self._position + self.collider_relative_position, collider_size3)
def set_diving_collider(self, direction):
"""
Set AABB Collider during diving.
:param pygame.Vector3 direction: direction of diving
:return: None
"""
dive_direction = Vector3(direction)
collider_size3 = Vector3()
collider_size3.x = max(self.w, self.h * abs(dive_direction.x))
collider_size3.y = max(self.w, self.h * abs(dive_direction.y))
collider_size3.z = self.w
collider_rel_center = Vector3(self.h / 2 * dive_direction.x,
self.h / 2 * dive_direction.y,
self.w / 2)
if dive_direction.x < 0:
collider_rel_center.x += self.w / 2
elif dive_direction.x > 0:
collider_rel_center.x -= self.w / 2
if dive_direction.y < 0:
collider_rel_center.y += self.w / 2
elif dive_direction.y > 0:
collider_rel_center.y -= self.w / 2
self.collider_relative_position = collider_rel_center
self.collider = AABBCollider(self._position + self.collider_relative_position, collider_size3)
def reset(self):
self.set_default_collider()
self.velocity = Vector3()
def is_state_type_of(self, state_type):
return self.state.__class__.type == state_type
def get_time_to_run_to(self, target_position, origin_pos=None):
"""
Give time that takes character by running from an origin to a target position.
Time is processed with displacements in 8 possible directions.
:param pygame.Vector3 target_position: target position
:param pygame.Vector3 origin_pos: origin position. Current character position is default value.
:return: given time in sec
:rtype: float
"""
if origin_pos is None:
origin_pos = self.position
# absolute delta position
delta_pos = target_position - origin_pos
delta_pos = Vector3([abs(delta_pos[i]) for i in (0, 1, 2)])
# diagonal travel
dist_on_each_axis = min(delta_pos.x, delta_pos.y)
diagonal_time = 1.4142 * dist_on_each_axis / self.max_velocity
# orthogonal travel
direct_time = (max(delta_pos.x, delta_pos.y) - dist_on_each_axis) / self.max_velocity
return diagonal_time + direct_time
def get_time_to_jump_to_height(self, h):
"""
Give time that takes the top of character reaches a specific height by jumping.
Given time is processed in ascending phase.
:param float h: height at which time is given
:return: given time is sec or None if there is no solution
:rtype: float or None
"""
# at t=t1, self.position.z(0) + self.h = h
# -G / 2 * t1**2 + self.jump_velocity * t1 + self.h - h = 0
a, b, c = -G/2, self.jump_velocity, self.h - h
delta = b**2 - 4 * a * c
if delta >= 0:
            return (-b + sqrt(delta)) / (2 * a)  # smaller root (ascending phase), since a < 0
else:
return None
def get_max_height_jump(self):
"""
Give max height reached by top of character by jumping.
:return: max height reached
:rtype: float
"""
a, b, c = -G/2, self.jump_velocity, self.h
delta = b**2 - 4 * a * c
return -delta / (4 * a)
class Team:
def __init__(self, team_id=TeamId.NONE, characters_list=None):
self.characters = characters_list
self.score = 0
self.id = team_id
self.set_team_to_characters()
def reset(self, **kwargs):
k = kwargs.keys()
self.characters = kwargs["characters"] if "characters" in k else None
self.set_team_to_characters()
self.score = kwargs["score"] if "score" in k else 0
self.id = kwargs["score"] if "score" in k else TeamId.NONE
def add_score(self, val=1):
self.score += val
def set_team_to_characters(self):
if self.characters is not None:
for ch in self.characters:
ch.team = self
|
StarcoderdataPython
|
9760166
|
from typing import Any
from boa3.builtin import public
from boa3.builtin.nativecontract.stdlib import StdLib
@public
def deserialize_arg(arg: bytes) -> Any:
return StdLib.deserialize(arg)
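# Companion sketch (added; assumes StdLib.serialize is available alongside
# deserialize in boa3's StdLib native-contract binding):
@public
def roundtrip(value: Any) -> Any:
    return StdLib.deserialize(StdLib.serialize(value))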
|
StarcoderdataPython
|
6570635
|
from distutils.core import setup
setup(name='point-to-define',
version='1.0',
packages=['point_to_define'],
)
|
StarcoderdataPython
|
1844142
|
<gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-11-13 15:31
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('website', '0007_auto_20161113_1346'),
]
operations = [
migrations.AlterUniqueTogether(
name='connexionrecord',
unique_together=set([]),
),
migrations.RemoveField(
model_name='connexionrecord',
name='comptoir',
),
migrations.DeleteModel(
name='ConnexionRecord',
),
]
|
StarcoderdataPython
|
4875313
|
# ===============================================================================
# Copyright 2011 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# from traits.api import HasTraits, on_trait_change, Str, Int, Float, Button
# from traitsui.api import View, Item, Group, HGroup, VGroup
# ============= standard library imports ========================
# ============= local library imports ==========================
# from agilent_gp_actuator import AgilentGPActuator
# from pychron.hardware.arduino.arduino_gp_actuator import ArduinoGPActuator
# from argus_gp_actuator import ArgusGPActuator
from __future__ import absolute_import
import time
from pychron.hardware.core.abstract_device import AbstractDevice
PACKAGES = dict(AgilentGPActuator='pychron.hardware.agilent.agilent_gp_actuator',
ArduinoGPActuator='pychron.hardware.arduino.arduino_gp_actuator',
QtegraGPActuator='pychron.hardware.actuators.qtegra_gp_actuator',
PychronGPActuator='pychron.hardware.actuators.pychron_gp_actuator',
NGXGPActuator='pychron.hardware.actuators.ngx_gp_actuator',
NMGRLFurnaceActuator='pychron.hardware.actuators.nmgrl_furnace_actuator',
DummyGPActuator='pychron.hardware.actuators.dummy_gp_actuator',
RPiGPIO='pychron.hardware.rpi_gpio')
class Actuator(AbstractDevice):
"""
"""
_type = None
def load_additional_args(self, config):
"""
"""
# self._cdevice=None
# if config.has_option('General','subsystem'):
# # if a subsystem is specified than the physical actuator is part of a larger
# # subsystem. ex One arduino can have a actuator subsystem and a data logging system
# #if a subsystem is specified dont want to create our on instance of a GPActuator
# pass
klass = name = self.config_get(config, 'General', 'type')
if 'qtegra' in klass.lower():
klass = 'QtegraGPActuator'
self._type = klass
if klass is not None:
if 'subsystem' in klass:
pass
else:
factory = self.get_factory(PACKAGES[klass], klass)
self.debug('constructing cdevice: name={}, klass={}'.format(name, klass))
self._cdevice = factory(name=name,
application=self.application,
configuration_dir_name=self.configuration_dir_name)
return True
def open_channel(self, *args, **kw):
"""
"""
if self._cdevice is not None:
r = self._cdevice.open_channel(*args, **kw)
if self.simulation:
time.sleep(0.005)
return r
else:
return True
def close_channel(self, *args, **kw):
"""
"""
if self._cdevice is not None:
r = self._cdevice.close_channel(*args, **kw)
if self.simulation:
time.sleep(0.005)
return r
else:
return True
def get_channel_state(self, *args, **kw):
"""
"""
if self._cdevice is not None:
r = self._cdevice.get_channel_state(*args, **kw)
if self.simulation:
time.sleep(0.005)
return r
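# Illustrative configuration stanza (hypothetical file contents) that
# load_additional_args() above reads via config_get(config, 'General', 'type'):
#
#   [General]
#   type = AgilentGPActuator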
# ============= EOF ====================================
|
StarcoderdataPython
|
9777722
|
<filename>Exercicio_em_python/Dados_pessoais.py
n = int(input("Quantas pessoas serão digitadas? "))
altura = [0 for x in range(n)]
genero = [0 for x in range(n)]
for i in range(n):
altura[i] = float(input(f"Altura da {i+1}a pessoa: "))
genero[i] = str(input(f"Genero da {i+1}a pessoa: "))
menor = altura[0]
maior = altura[0]
for i in range(n):
if menor > altura[i]:
menor = altura[i]
else:
if maior < altura[i]:
maior = altura[i]
print(f"Menor altura = {menor:.2f}")
print(f"Maior altura = {maior:.2f}")
mulher = 0        # sum of women's heights
qtd_mulheres = 0  # number of women
homem = 0
for i in range(n):
    if genero[i] == 'f':
        mulher = mulher + altura[i]
        qtd_mulheres = qtd_mulheres + 1
    else:
        homem = homem + 1
media = mulher / qtd_mulheres if qtd_mulheres > 0 else 0
print(f"Media das alturas das mulheres = {media:.2f}")
print(f"Numero de homens = {homem:}")
|
StarcoderdataPython
|
8193258
|
import gym
import yaml
from tqdm import tqdm
import numpy as np
import torch
from torch.utils.data import DataLoader
from src.atari_archive.utils.data import EnvDataset, Summary
from src.atari_archive.utils.networks import ConvEncoder
from src.atari_archive.utils.preprocess import preprocess_state
from src.atari_archive.agents import OfflineDQNAgent
def main():
with open('config.yml', 'r') as ymlfile:
cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)
train(cfg)
def train(cfg: dict):
print('Loading environment {}.'.format(cfg['ATARI_ENV']))
env = gym.make(cfg['ATARI_ENV'])
env.reset()
observation_space = env.observation_space.shape
action_space = 3
action_map = {0: 0, 1: 2, 2: 3}
state = torch.zeros((1, 16))
print('Creating Agent.')
agent = OfflineDQNAgent(observation_space, action_space)
summary = Summary(cfg['SUMMARY_PATH'], agent.name)
agent.print_model()
agent.add_summary_writer(summary)
print('Initializing Dataloader.')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print('Utilizing device {}'.format(device))
training_data = EnvDataset(cfg['TRAIN_DATA_PATH'])
data_loader = DataLoader(dataset=training_data,
batch_size=cfg['BATCH_SIZE'],
shuffle=True,
num_workers=4,
pin_memory=True)
print('Initializing Encoder.')
encoder = ConvEncoder()
encoder.load_state_dict(torch.load(cfg['AUTO_SAVE_PATH'] + '/encoder.pt',
map_location=device))
encoder.to(device)
encoder.eval()
print('Start training with {} epochs'.format(cfg['EPOCHS']))
for e in range(1, cfg['EPOCHS'] + 1):
for i_batch, sample_batched in enumerate(tqdm(data_loader)):
agent.learn(sample_batched)
summary.adv_step()
rewards = []
mean_reward = []
counter = 0
while counter < cfg['EVAL_EPISODES']:
action = agent.act(state)
if cfg['EVAL_RENDER']:
env.render()
state, reward, done, _ = env.step(action_map[int(action)])
state = preprocess_state(state).to(device)
state, _, _ = encoder.encode(state)
rewards.append(reward)
if done:
env.reset()
mean_reward.append(sum(rewards))
rewards = []
counter += 1
agent.save(e)
summary.add_scalar('Episode Reward', np.mean(mean_reward))
summary.adv_episode()
summary.writer.flush()
print('Closing environment.')
env.close()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
5148663
|
# -*- coding: utf-8 -*-
"""
Model Map table air_lekeage_building_distribution
:author: <NAME>
:version: 0.1
:date: 15 Dec. 2017
"""
__docformat__ = "restructuredtext"
class AirLekeageBuildingDistribution():
""" DB Entity air_lekeage_building_distribution to Python object AirLekeageBuildingDistribution """
def __init__(self):
self.__id = 0
self.__walls_perc = 0.0
self.__ceiling_perc = 0.0
self.__windows_doors_perc = 0.0
def __str__(self):
return "id:" + str(self.id) + " walls_perc:" + str(self.walls_perc) + " period_id:" + str(self.period_id) + " air_lekeage:" + str(self.air_lekeage)
@property
def id(self):
return self.__id
@id.setter
def id(self, val):
self.__id = val
@property
def walls_perc(self):
return self.__walls_perc
@walls_perc.setter
def walls_perc(self, val):
self.__walls_perc = float(val.replace(",",".")) if isinstance(val,str) else float(val)
@property
def ceiling_perc(self):
return self.__ceiling_perc
@ceiling_perc.setter
def ceiling_perc(self, val):
self.__ceiling_perc = float(val.replace(",",".")) if isinstance(val,str) else float(val)
@property
def windows_doors_perc(self):
return self.__windows_doors_perc
@windows_doors_perc.setter
def windows_doors_perc(self, val):
self.__windows_doors_perc = float(val.replace(",",".")) if isinstance(val,str) else float(val)
|
StarcoderdataPython
|
3306067
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('fleet', '0013_auto_20170814_1138'),
]
operations = [
migrations.AlterField(
model_name='historicalvehicle',
name='licence_plate',
field=models.CharField(max_length=20, verbose_name='vehicle registration', db_index=True),
),
migrations.AlterField(
model_name='historicalvehicle',
name='model_year',
field=models.CharField(default=None, max_length=4, null=True, verbose_name='year model', blank=True),
),
migrations.AlterField(
model_name='vehicle',
name='licence_plate',
field=models.CharField(unique=True, max_length=20, verbose_name='vehicle registration'),
),
migrations.AlterField(
model_name='vehicle',
name='make_n_model',
field=models.ForeignKey(verbose_name='make & model', blank=True, to='fleet.VehicleMakeAndModel', null=True),
),
migrations.AlterField(
model_name='vehicle',
name='model_year',
field=models.CharField(default=None, max_length=4, null=True, verbose_name='year model', blank=True),
),
]
|
StarcoderdataPython
|
12827571
|
<reponame>RumbleDB/rumbleml-experiments
# sklearn core
from pyspark.ml import Pipeline
# Preprocessing
from pyspark.ml.feature import StandardScaler, MaxAbsScaler, PCA, VectorAssembler, Imputer, OneHotEncoder
# Models
from pyspark.ml.regression import LinearRegression
from pyspark.ml.classification import LogisticRegression, RandomForestClassifier, LinearSVC, NaiveBayes, MultilayerPerceptronClassifier
def get_clf(mode, **kwargs):
'''
Code returning mllib classifier for pipelines
'''
if mode == 'logistic':
max_iter = kwargs.get('max_iter', 5)
model = LogisticRegression(featuresCol="transformed_features", maxIter=max_iter)
elif mode=='RandomForest':
n_estimators = kwargs.get('n_estimators', 5)
model = RandomForestClassifier(featuresCol="transformed_features", numTrees=n_estimators)
elif mode=='LinearSVC':
max_iter = kwargs.get('max_iter', 5)
model = LinearSVC(featuresCol="transformed_features", maxIter=max_iter)
elif mode=='NB':
model = NaiveBayes(featuresCol="transformed_features")
elif mode=='linear':
model = LinearRegression(featuresCol="transformed_features")
elif 'NN' in mode:
        solver = kwargs.get('solver', 'gd')  # pyspark's MLP accepts 'l-bfgs' or 'gd'
hidden_layer_sizes = kwargs.get('hidden_layer_sizes', (20,))
        if isinstance(hidden_layer_sizes, tuple):
            hidden_layer_sizes = list(hidden_layer_sizes)
activation = kwargs.get('activation', 'relu')
learning_rate_init = kwargs.get('learning_rate', 0.001)
max_iter = kwargs.get('max_iter', 5000)
if mode=='NN':
model = MultilayerPerceptronClassifier(solver=solver, layers=hidden_layer_sizes, stepSize=learning_rate_init,
maxIter=max_iter)
return model
def get_pipe_ops(mode, inputCol="features", outputCol="transformed_features"):
if mode == 'pipe_0':
# just the classifier
vecAssembler = VectorAssembler(outputCol=outputCol)
vecAssembler.setInputCols([inputCol])
ops = [vecAssembler]
elif mode == 'pipe_1':
# 1-step scaler (*map)
scaler = MaxAbsScaler(inputCol=inputCol, outputCol=outputCol)
ops = [scaler]
# elif mode == 'pipe_2':
# 2-step function scaler (*map)
# def logVar(x):
# return MaxAbsScaler(np.log(x))
# ops = [('logscaler', FunctionTransformer(logVar))]
elif mode == 'pipe_3':
# dimensionality reduction (*map)
pca = PCA(k=2, inputCol=inputCol, outputCol=outputCol)
ops = [pca]
# elif mode == 'pipe_4':
# k-means (fork)
# union = FeatureUnion([("indicator", MissingIndicator()),
# ("kmeans", KMeans(random_state=0))])
# ops = [('union', union)]
elif mode == 'pipe_5':
# TODO
# multiple dimensionality reductions (fork)
pca = PCA(k=2, inputCol=inputCol, outputCol="pca_output")
#svd = SVD()
#lda = LDA()
vecAssembler = VectorAssembler(outputCol=outputCol)
vecAssembler.setInputCols(["pca_output"])
ops = [pca, vecAssembler]
# elif mode == 'pipe_6':
# # image blurring operator
# grayify = RGB2GrayTransformer()
# def gaussian_blur(x):
# return skimage.filters.gaussian(x)
# ops = [('grayify', grayify), ('blur', FunctionTransformer(gaussian_blur))]
# elif mode == 'pipe_7':
# # complex image processing operators
# grayify = RGB2GrayTransformer()
# hogify = HogTransformer(
# pixels_per_cell=(4, 4),
# cells_per_block=(2,2),
# orientations=9,
# block_norm='L2-Hys'
# )
# ops = [('grayify', grayify), ('hogify', hogify)]
else:
raise ValueError("Invalid mode!")
return ops
def create_numerical_pipeline(ops_mode, imputer=True, clf_mode='logistic', **kwargs):
ops = get_pipe_ops(ops_mode)
clf = get_clf(clf_mode, **kwargs)
# vecAssembler = VectorAssembler(outputCol="data")
# vecAssembler.setInputCols(["col_0", "col_1", "col_2", "col_3", "col_4", "col_5", "col_6", "col_7", "col_8", "col_9", "col_10", "col_11", "col_12", "col_13"])
# ops = [vecAssembler] + ops
if imputer:
imp = Imputer(strategy='mean')
ops = [imp] + ops
ops = ops + [clf]
pipe = Pipeline(stages=ops)
return pipe
def create_tabular_pipeline(num_mode, outputCols="output", categorical_ix=["cat_features"], numerical_ix=["num_features"], imputer=True, clf_mode='logistic', **kwargs):
    num_ops = get_pipe_ops(num_mode, outputCol=outputCols)
# imp = Imputer(strategy='categorical') - mllib doesn't support categorical input
    cat_one_hot = OneHotEncoder(inputCols=categorical_ix, outputCols=["cat_features"])
ops = [cat_one_hot] + num_ops
if imputer:
        num_imputer = Imputer(inputCols=numerical_ix, strategy='median', outputCols=['data'])
ops = [num_imputer] + ops
clf = get_clf(clf_mode)
    vecAssembler = VectorAssembler(outputCol=outputCols)
    vecAssembler.setInputCols(["cat_output"])
ops = ops + [clf]
pipe = Pipeline(stages=ops)
return pipe
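# Usage sketch (added for illustration; assumes a local SparkSession and a
# toy two-row DataFrame with the default 'features'/'label' column names):
if __name__ == "__main__":
    from pyspark.sql import SparkSession
    from pyspark.ml.linalg import Vectors
    spark = SparkSession.builder.master("local[1]").getOrCreate()
    df = spark.createDataFrame(
        [(Vectors.dense([0.0, 1.0]), 0.0), (Vectors.dense([1.0, 0.0]), 1.0)],
        ["features", "label"],
    )
    # imputer=False: the mllib Imputer expects numeric columns, not vectors
    pipe = create_numerical_pipeline("pipe_1", imputer=False, clf_mode="logistic")
    model = pipe.fit(df)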
|
StarcoderdataPython
|
8125505
|
lanches = 'hambúrguer', 'suco', 'refri', 'sorvete'
# With a for loop we can print every element separately, since it accepts range() or an iterable
for comida in lanches:
    print(f'Eu comi {comida}.')
# Since tuples can be indexed and sliced, there is another way to show the elements
for c in range(len(lanches)):  # len() returns the number of items in the tuple
    print(f'O {lanches[c]} ESTÁ NA POSIÇÃO {c}')  # which also lets us show each position
for pos, comida in enumerate(lanches):  # enumerate() yields the position into pos and the item into comida
    print(f'O {comida} ESTÁ NA POSIÇÃO {pos}')
|
StarcoderdataPython
|
3575575
|
import numpy
from scipy.ndimage import zoom
from dexp.utils import xpArray
from dexp.utils.backends import Backend, NumpyBackend
def warp(
image: xpArray,
vector_field: xpArray,
vector_field_upsampling: int = 2,
vector_field_upsampling_order: int = 1,
mode: str = "border",
image_to_backend: bool = False,
internal_dtype=None,
):
"""
Applies a warp transform (piece wise linear or constant) to an image based on a vector field.
Only implemented for 1d, 2d, and 3d images.
Parameters
----------
image : image to warp
    vector_field : vector field to warp the input image with. The vector field is an array of
    dimension n+1 where n is the dimension of the input image.
    The first n dimensions can be of arbitrary lengths, and the last axis holds the warp
    vector for the corresponding image region.
    vector_field_upsampling : upsampling factor for the vector field (best use a power of two)
    vector_field_upsampling_order : upsampling order: 0-> nearest, 1->linear, 2->quadratic, ... (uses scipy zoom)
    mode : How to handle warping that reaches outside of the image bounds,
    can be: 'clamp', 'border', 'wrap', 'mirror'
    image_to_backend : By default one can directly copy a numpy array to texture memory; if needed,
    this option lets one first move the data to a cupy array before moving to texture memory. Not recommended.
    internal_dtype : internal dtype. Right now the dtype must be float32 because of CUDA texture dtype limitations.
Returns
-------
Warped image
"""
if not (image.ndim + 1 == vector_field.ndim or (image.ndim == 1 and vector_field.ndim == 1)):
raise ValueError("Vector field must have one additional dimension")
if internal_dtype is None:
internal_dtype = numpy.float32
if type(Backend.current()) is NumpyBackend:
internal_dtype = numpy.float32
original_dtype = image.dtype
if vector_field_upsampling != 1:
# Note: unfortunately numpy does support float16 zooming, and cupy does not support high-order zooming...
vector_field = Backend.to_numpy(vector_field, dtype=numpy.float32)
if image.ndim > 1:
vector_field = zoom(
vector_field, zoom=(vector_field_upsampling,) * image.ndim + (1,), order=vector_field_upsampling_order
)
else:
vector_field = zoom(vector_field, zoom=(vector_field_upsampling,), order=vector_field_upsampling_order)
# we can actually directly copy from numpy to texture mem!
if image_to_backend:
image = Backend.to_backend(image, dtype=internal_dtype)
image = image.astype(dtype=internal_dtype, copy=False)
vector_field = Backend.to_backend(vector_field, dtype=internal_dtype)
from dexp.utils.backends import CupyBackend
if type(Backend.current()) is NumpyBackend:
raise NotImplementedError("Warping not yet implemented for the Numpy backend.")
elif type(Backend.current()) is CupyBackend:
params = (image, vector_field, mode)
if image.ndim == 1:
from dexp.processing.interpolation._cupy.warp_1d import _warp_1d_cupy
result = _warp_1d_cupy(*params)
elif image.ndim == 2:
from dexp.processing.interpolation._cupy.warp_2d import _warp_2d_cupy
result = _warp_2d_cupy(*params)
elif image.ndim == 3:
from dexp.processing.interpolation._cupy.warp_3d import _warp_3d_cupy
result = _warp_3d_cupy(*params)
else:
raise NotImplementedError("Warping for ndim>3 not implemented.")
result = result.astype(original_dtype, copy=False)
return result
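# Usage sketch (added; assumes a CUDA device is available and that CupyBackend
# is usable as a context manager, as elsewhere in dexp -- shapes illustrative):
if __name__ == "__main__":
    from dexp.utils.backends import CupyBackend
    image = numpy.random.rand(64, 64).astype(numpy.float32)
    # one 2D displacement vector per 4x4 grid cell; all zeros = identity warp
    vector_field = numpy.zeros((4, 4, 2), dtype=numpy.float32)
    with CupyBackend():
        warped = warp(image, vector_field, vector_field_upsampling=2)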
|
StarcoderdataPython
|
5048568
|
<reponame>aeko-empt/ovs-dbg
import ovs_dbg.ofparse.ofp # noqa: F401
import ovs_dbg.ofparse.dp # noqa: F401
|
StarcoderdataPython
|
6614319
|
#!/usr/bin/env python3
import sys, requests, datetime
from time import sleep
urls = ['http://cpt.hopper.pw:[email protected]/nic/update']
# basic auth to hopper.pw updates
while True:
for url in urls:
try:
r = requests.get(url, auth=('cpt.hopper.pw', 'LBbRhmu3gV'))
print("response @", datetime.datetime.now(), ":", r.text)
except requests.exceptions.RequestException as e:
print(e, file=sys.stderr) # print to stderr
sleep(300) # sleep for five minutes
|
StarcoderdataPython
|
12845876
|
"""
Train and eval functions used in main.py
"""
import os
import torch
from torch.utils.data import DataLoader, DistributedSampler
import math
import sys
import time
import datetime
from typing import Iterable
from pathlib import Path
import json
import random
import numpy as np
from dataset.evaluator import SmoothedValue, MetricLogger
from model.detr import build_model
from dataset.construction_dataset import build_dataset
from dataset.evaluator import collate_fn, evaluate, save_on_master
seed = 42
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.use_deterministic_algorithms(False) # missing some deterministic impl
device = torch.device("cuda:0")
class Args:
pass
args = Args()
# Postitional encoding
args.position_embedding = "sine"
# CNN Backbone
args.backbone = "resnet50"
args.dilation = None
# Hungarian matcher
args.set_cost_class = 1
args.set_cost_bbox = 5
args.set_cost_giou = 2
# Transformer
args.hidden_dim = 256
args.dropout = 0.1
args.nheads = 8
args.dim_feedforward = 2048
args.enc_layers = 6
args.dec_layers = 6
args.pre_norm = None
# DETR
args.num_queries = 100
args.aux_loss = True # calculate loss at eache decoder layer
args.masks = True
args.frozen_weights = None
args.bbox_loss_coef = 5
args.mask_loss_coef = 1
args.dice_loss_coef = 1
args.giou_loss_coef = 2
args.eos_coef = 0.1
# Dataset
args.dataset_file = "coco_panoptic" # construction
args.coco_path = "./data"
args.coco_panoptic_path = "./data"
# Training
args.lr = 1e-4
args.weight_decay = 1e-4
args.lr_backbone = 0 # 0 means frozen backbone
args.batch_size = 3
args.epochs = 2
args.lr_drop = 200
args.clip_max_norm = 0.1
args.output_dir = "out_dir"
args.eval = False
os.makedirs("out_dir/panoptic_eval", exist_ok=True)
# set if you plan to log on wandb
ENABLE_WANDB = True
# if set not train from scratch (detre pretrained on COCO)
used_artifact = None # "2_2_attentionfreeze_aux:latest"
# set if starting a new run
wandb_experiment_name = "2_2_1_transf_unfreeze_aux"
# set to None if starting a new run
run_id = None
if ENABLE_WANDB:
import wandb
if run_id is not None:
wandb.init(project="detr", id=run_id, resume="allow")
else:
wandb.init(project="detr", name=wandb_experiment_name)
    # log all hyperparameters defined on args in one call
    wandb.config.update(vars(args))
def freeze_attn(model, args):
for i in range(args.dec_layers):
for param in model.detr.transformer.decoder.layers[i].self_attn.parameters():
param.requires_grad = False
for param in model.detr.transformer.decoder.layers[
i
].multihead_attn.parameters():
param.requires_grad = False
for i in range(args.enc_layers):
for param in model.detr.transformer.encoder.layers[i].self_attn.parameters():
param.requires_grad = False
def freeze_decoder(model, args):
for param in model.detr.transformer.decoder.parameters():
param.requires_grad = False
def freeze_first_layers(model, args):
for i in range(args.enc_layers // 2):
for param in model.detr.transformer.encoder.layers[i].parameters():
param.requires_grad = False
for i in range(args.dec_layers // 2):
for param in model.detr.transformer.decoder.layers[i].parameters():
param.requires_grad = False
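# Illustrative sanity check (not part of the original script): after calling one
# of the freeze_* helpers above, compare trainable vs. total parameter counts to
# confirm the intended layers were actually frozen.
def count_trainable_params(model):
    trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
    total = sum(p.numel() for p in model.parameters())
    return trainable, total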
def build_pretrained_model(args):
pre_trained = torch.hub.load(
"facebookresearch/detr",
"detr_resnet50_panoptic",
pretrained=True,
return_postprocessor=False,
num_classes=250,
)
model, criterion, postprocessors = build_model(args)
model.detr.backbone.load_state_dict(pre_trained.detr.backbone.state_dict())
model.detr.bbox_embed.load_state_dict(pre_trained.detr.bbox_embed.state_dict())
model.detr.query_embed.load_state_dict(pre_trained.detr.query_embed.state_dict())
model.detr.input_proj.load_state_dict(pre_trained.detr.input_proj.state_dict())
model.detr.transformer.load_state_dict(pre_trained.detr.transformer.state_dict())
model.bbox_attention.load_state_dict(pre_trained.bbox_attention.state_dict())
model.mask_head.load_state_dict(pre_trained.mask_head.state_dict())
freeze_attn(model, args)
return model, criterion, postprocessors
def train_one_epoch(
model: torch.nn.Module,
criterion: torch.nn.Module,
data_loader: Iterable,
optimizer: torch.optim.Optimizer,
device: torch.device,
epoch: int,
max_norm: float = 0,
):
model.train()
criterion.train()
metric_logger = MetricLogger(delimiter=" ")
metric_logger.add_meter("lr", SmoothedValue(window_size=1, fmt="{value:.6f}"))
metric_logger.add_meter(
"class_error", SmoothedValue(window_size=1, fmt="{value:.2f}")
)
header = "Epoch: [{}]".format(epoch)
print_freq = 10
for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
samples = samples.to(device)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
outputs = model(samples)
loss_dict = criterion(outputs, targets)
weight_dict = criterion.weight_dict
losses = sum(
loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict
)
        # single-GPU run: no distributed all-reduce is needed, so the raw
        # loss dict doubles as the "reduced" one used for logging
        loss_dict_reduced = loss_dict
loss_dict_reduced_unscaled = {
f"{k}_unscaled": v for k, v in loss_dict_reduced.items()
}
loss_dict_reduced_scaled = {
k: v * weight_dict[k]
for k, v in loss_dict_reduced.items()
if k in weight_dict
}
losses_reduced_scaled = sum(loss_dict_reduced_scaled.values())
loss_value = losses_reduced_scaled.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
print(loss_dict_reduced)
sys.exit(1)
optimizer.zero_grad()
losses.backward()
if max_norm > 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
optimizer.step()
metric_logger.update(
loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled
)
metric_logger.update(class_error=loss_dict_reduced["class_error"])
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
if ENABLE_WANDB:
wandb.log(loss_dict_reduced)
wandb.log({"loss": loss_value})
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
def train():
if args.frozen_weights is not None:
assert args.masks, "Frozen training is meant for segmentation only"
model, criterion, postprocessors = build_pretrained_model(args)
model.to(device)
if ENABLE_WANDB:
wandb.watch(model)
model_without_ddp = model
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("number of params:", n_parameters)
param_dicts = [
{
"params": [
p
for n, p in model_without_ddp.named_parameters()
if "backbone" not in n and p.requires_grad
]
},
{
"params": [
p
for n, p in model_without_ddp.named_parameters()
if "backbone" in n and p.requires_grad
],
"lr": args.lr_backbone,
},
]
optimizer = torch.optim.AdamW(
param_dicts, lr=args.lr, weight_decay=args.weight_decay
)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop)
if ENABLE_WANDB and used_artifact is not None:
artifact = wandb.use_artifact(used_artifact)
artifact_dir = artifact.download()
checkpoint = torch.load(artifact_dir + "/checkpoint.pth")
model.load_state_dict(checkpoint["model"])
if run_id is not None:
optimizer.load_state_dict(checkpoint["optimizer"])
# lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
start_epoch = checkpoint["epoch"]
else:
start_epoch = 0
dataset_train = build_dataset(image_set="train", args=args)
dataset_val = build_dataset(image_set="val", args=args)
sampler_train = torch.utils.data.RandomSampler(dataset_train)
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
batch_sampler_train = torch.utils.data.BatchSampler(
sampler_train, args.batch_size, drop_last=True
)
data_loader_train = DataLoader(
dataset_train,
batch_sampler=batch_sampler_train,
collate_fn=collate_fn,
num_workers=4,
)
data_loader_val = DataLoader(
dataset_val,
args.batch_size,
sampler=sampler_val,
drop_last=False,
collate_fn=collate_fn,
num_workers=4,
)
if args.frozen_weights is not None:
checkpoint = torch.load(args.frozen_weights, map_location="cpu")
model_without_ddp.detr.load_state_dict(checkpoint["model"])
output_dir = Path(args.output_dir)
if args.eval:
test_stats = evaluate(
model, criterion, postprocessors, data_loader_val, device, args.output_dir
)
print(test_stats)
return
print("Start training")
start_time = time.time()
for epoch in range(start_epoch + 1, args.epochs):
train_stats = train_one_epoch(
model,
criterion,
data_loader_train,
optimizer,
device,
epoch,
args.clip_max_norm,
)
lr_scheduler.step()
if args.output_dir:
checkpoint_path = output_dir / "checkpoint.pth"
save_on_master(
{
"model": model_without_ddp.state_dict(),
"optimizer": optimizer.state_dict(),
"lr_scheduler": lr_scheduler.state_dict(),
"epoch": epoch,
"args": args,
},
checkpoint_path,
)
if ENABLE_WANDB:
artifact = wandb.Artifact(wandb_experiment_name, type="model")
artifact.add_file(checkpoint_path)
wandb.log_artifact(artifact)
test_stats = evaluate(
model, criterion, postprocessors, data_loader_val, device, args.output_dir
)
log_stats = {
**{f"train_{k}": v for k, v in train_stats.items()},
**{f"test_{k}": v for k, v in test_stats.items()},
"epoch": epoch,
"n_parameters": n_parameters,
}
if ENABLE_WANDB:
wandb.log(test_stats)
if args.output_dir:
with (output_dir / "log.txt").open("a") as f:
f.write(json.dumps(log_stats) + "\n")
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print("Training time {}".format(total_time_str))
|
StarcoderdataPython
|
6701938
|
<reponame>SHI3DO/Tennessine
def prod(amount):
    # Recipe descriptor: [input items, building, output amounts, machine count, tier].
    # Field meanings inferred from context; 15/min appears to be the
    # per-Constructor production rate, hence amount / 15 machines.
    A = [["IronIngot"], "Constructor", [amount], [amount / 15], 1]
    return A
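# Illustrative call (not part of the original file):
#   prod(30) -> [['IronIngot'], 'Constructor', [30], [2.0], 1]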
|
StarcoderdataPython
|
4912152
|
<reponame>dutxubo/nni
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import tensorflow as tf
_counter = 0
def global_mutable_counting():
global _counter
_counter += 1
return _counter
class AverageMeter:
def __init__(self, name):
self.name = name
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val):
self.val = val
self.sum += val
self.count += 1
self.avg = self.sum / self.count
def __str__(self):
return '{name} {val:4f} ({avg:4f})'.format(**self.__dict__)
def summary(self):
return '{name}: {avg:4f}'.format(**self.__dict__)
class AverageMeterGroup:
def __init__(self):
self.meters = {}
def update(self, data):
for k, v in data.items():
if k not in self.meters:
self.meters[k] = AverageMeter(k)
self.meters[k].update(v)
def __str__(self):
return ' '.join(str(v) for v in self.meters.values())
def summary(self):
return ' '.join(v.summary() for v in self.meters.values())
class StructuredMutableTreeNode:
def __init__(self, mutable):
self.mutable = mutable
self.children = []
def add_child(self, mutable):
self.children.append(StructuredMutableTreeNode(mutable))
return self.children[-1]
def type(self):
return type(self.mutable)
def __iter__(self):
return self.traverse()
def traverse(self, order="pre", deduplicate=True, memo=None):
if memo is None:
memo = set()
assert order in ["pre", "post"]
if order == "pre":
if self.mutable is not None:
if not deduplicate or self.mutable.key not in memo:
memo.add(self.mutable.key)
yield self.mutable
for child in self.children:
for m in child.traverse(order=order, deduplicate=deduplicate, memo=memo):
yield m
if order == "post":
if self.mutable is not None:
if not deduplicate or self.mutable.key not in memo:
memo.add(self.mutable.key)
yield self.mutable
def fill_zero_grads(grads, weights):
ret = []
for grad, weight in zip(grads, weights):
if grad is not None:
ret.append(grad)
else:
ret.append(tf.zeros_like(weight))
return ret
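# Illustrative usage sketch (not part of the original module): AverageMeterGroup
# keeps one running average per metric key across update calls.
def _demo_average_meter_group():
    group = AverageMeterGroup()
    group.update({"loss": 0.9, "acc": 0.50})
    group.update({"loss": 0.7, "acc": 0.60})
    print(group.summary())  # -> "loss: 0.800000 acc: 0.550000"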
|
StarcoderdataPython
|
276330
|
<reponame>pomes/valiant<filename>tests/repositories/pypi/__init__.py
"""PyPi Repo tests.
Copyright (c) 2020 The Valiant Authors
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import json
import os
from typing import Dict
import py # https://py.readthedocs.io/en/latest/index.html
import pytest
from ... import MockResponse, MonkeyPatch # noqa: F401
from .validation import DATAFILE_VALIDATION # noqa: F401
_dir = os.path.dirname(os.path.realpath(__file__))
TEST_FILE_DIR = py.path.local(_dir) / "test-data"
# Setup the datafiles: https://pypi.org/project/pytest-datafiles/
FIXTURE_DIR = py.path.local(_dir) / "package-data"
_json_files = [
    os.path.join(FIXTURE_DIR / j) for j in os.listdir(FIXTURE_DIR) if j.endswith(".json")
]
ALL_PKG_FILES = pytest.mark.datafiles(*_json_files)
# End setup
def load_test_json_data(path: py.path.local, filename: str) -> Dict: # noqa: ANN
with open(path / filename, "r") as f:
package_data = json.load(f)
return package_data
|
StarcoderdataPython
|
393806
|
# -*- coding: utf-8 -*-
from setuptools import setup
import os
readmefile = os.path.join(os.path.dirname(__file__), "README.md")
with open(readmefile) as f:
readme = f.read()
setup(
name='jumanpp-batch',
version='0.1.2',
description='Apply juman++ to batch inputs in parallel',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/kota7/jumanpp-batch',
#download_url='',
long_description=readme,
long_description_content_type="text/markdown",
#packages=[],
py_modules=['jumanpp_batch'],
install_requires=['jaconv', 'ushlex'],
    tests_require=['parameterized'],
package_data={},
entry_points={},
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Natural Language :: Japanese',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
#'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
#'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8'
],
test_suite='tests'
)
|
StarcoderdataPython
|
1825824
|
<filename>colcon_acceleration/subverb/platform.py<gh_stars>1-10
# ____ ____
# / /\/ /
# /___/ \ / Copyright (c) 2021, Xilinx®.
# \ \ \/ Author: <NAME> <<EMAIL>>
# \ \
# / /
# /___/ /\
# \ \ / \
# \___\/\___\
#
# Licensed under the Apache License, Version 2.0
#
import os
from colcon_core.plugin_system import satisfies_version
from colcon_acceleration.subverb import AccelerationSubverbExtensionPoint, get_vitis_dir
from colcon_acceleration import __version__
class PlatformSubverb(AccelerationSubverbExtensionPoint):
"""Report the platform enabled in the deployed firmware."""
def __init__(self): # noqa: D107
super().__init__()
satisfies_version(AccelerationSubverbExtensionPoint.EXTENSION_POINT_VERSION, "^1.0")
def main(self, *, context): # noqa: D102
"""Platform enabled
NOTE: firmware is board-specific. Consult the README of
acceleration_firmware_kv260 and/or change branch as per your
hardware/board requirements.
NOTE 2: Location, syntax and other related matters are defined
within the `acceleration_firmware_kv260` package. Refer to it for more
details.
"""
print(self.get_platform())
|
StarcoderdataPython
|
4868717
|
<reponame>rmm-ch/ho-distribute
#!/usr/bin/env python3
'''
plot data from a triplet of log files - assumes already converted to CSV
example usage:
$ ipython
> %run csv_to_graph.py --logdir <path to where your csv files were saved>
'''
import argparse
import os.path, fnmatch
import pandas as pd
import matplotlib.pyplot as plt
#import numpy as np
def lookup_logs(pth, prefix=None):
    '''Assumes one set of logs in any given directory.'''
fn_scd, fn_hdc, fn_bat = None, None, None
if prefix is None:
prefix = ""
for fn in os.listdir(pth):
if fnmatch.fnmatch(fn, "{}*datscd.csv".format(prefix)):
fn_scd = os.path.join(pth, fn)
if fnmatch.fnmatch(fn, "{}*dathdc.csv".format(prefix)):
fn_hdc = os.path.join(pth, fn)
if fnmatch.fnmatch(fn, "{}*batlvl.csv".format(prefix)):
fn_bat = os.path.join(pth, fn)
return (fn_scd, fn_hdc, fn_bat)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verb', action='store_true')
parser.add_argument('-d', '--logdir', type=str, required=True)
args = parser.parse_args()
# lazy / fragile lookup of log filenames
fn_scd, fn_hdc, fn_bat = lookup_logs(args.logdir)
# load the scd and hdc2010 data
df_scd = pd.read_csv(fn_scd)
df_hdc = pd.read_csv(fn_hdc)
# turn the unix timestamps into interpretable datetime objects
df_scd["timestamp"] = pd.to_datetime(df_scd.timestamp, unit='s')
df_hdc["timestamp"] = pd.to_datetime(df_hdc.timestamp, unit='s')
# do some simple plots
axs_s = df_scd.plot(x="timestamp", y=["T", "RH", "CO2"], color="C0", subplots=True)
#axs_h = df_hdc.plot(x="timestamp", y=["T", "RH", ], subplots=True)
df_hdc.plot(x="timestamp", y="T", ax=axs_s[0], c='C3', label="T hdc2010")
df_hdc.plot(x="timestamp", y="RH", ax=axs_s[1], c='C3', label="RH hdc2010")
units = ["Temperature ($^o$C)", "Rel. Humidity (%)", "CO$_2$ (ppm)"]
for i, a in enumerate(axs_s):
a.set_ylabel(units[i])
a.grid(True)
plt.tight_layout()
|
StarcoderdataPython
|
8129192
|
from AST import(And,Or,Arrow,Not,Var,true,false, Pred, Forall, Exists)
from Exceptions import(LexException, ParseException)
from enum import Enum
def parse(text):
return expr(lex(text))
####################################################################
# Lexer
# converts a string of characters into a list of tokens
# so "a && b -> T"
# becomes [Token(TVAR,0,"a"), Token(TAND,2), Token(TVAR,5,"b"),
# Token(TARROW,7), Token(TTRUE,10)]
####################################################################
class TType(Enum):
TTRUE = "T"
TFALSE = "F"
TNOT = "~"
TAND = "&&"
TOR = "||"
TARROW = "->"
TVAR = "<var>"
TLPAREN = "("
TRPAREN = ")"
TEOF = "<EOF>"
TEX = "EX"
TFA = "FA"
TDOT = "."
TCOMMA = ","
def __init__(self, n):
self.n = n
def __str__(self):
return self.n
class Token():
def __init__(self, ttype, pos, val):
self.ttype = ttype
self.pos = pos
self.val = val
def __str__(self):
return str(self.ttype)
def alpha(c):
return 'a' <= c <= 'z' or 'A' <= c <= 'Z'
def lex(text):
i = 0
tokens = []
while i < len(text):
c = text[i]
if i+1 < len(text):
n = text[i+1]
else:
n = ""
if c in " \r\n\t": # skip whitespace
pass
elif c+n == "FA":
tokens.append(Token(TType.TFA,i,"FA"))
i += 1
elif c+n == "EX":
tokens.append(Token(TType.TEX,i,"EX"))
i += 1
elif c == 'T':
tokens.append(Token(TType.TTRUE,i,"T"))
elif c == 'F':
tokens.append(Token(TType.TFALSE,i,"F"))
elif c == '~':
tokens.append(Token(TType.TNOT,i,"~"))
elif c == '(':
tokens.append(Token(TType.TLPAREN,i,"("))
elif c == ')':
tokens.append(Token(TType.TRPAREN,i,")"))
elif c == '.':
tokens.append(Token(TType.TDOT,i,"."))
elif c == ',':
tokens.append(Token(TType.TCOMMA,i,","))
elif c+n == "||":
tokens.append(Token(TType.TOR,i,"||"))
i += 1
elif c+n == "&&":
tokens.append(Token(TType.TAND,i,"&&"))
i += 1
elif c+n == "->":
tokens.append(Token(TType.TARROW,i,"->"))
i += 1
elif alpha(c):
j = 0
var = ""
while i+j < len(text) and alpha(text[i+j]):
var += text[i+j]
j += 1
tokens.append(Token(TType.TVAR,i,var))
i += (j-1)
else:
raise LexException(i,c)
i += 1
tokens.append(Token(TType.TEOF,i,"<EOF>"))
return tokens
# E => FA x . E | EX x . E | I
# I => O -> I
# O => A || O
# A => N && A
# N => ~N | L
# L => var | T | F | (E)
def expr(tokens):
follow = [TType.TEOF, TType.TRPAREN]
e = None
if tokens[0].ttype == TType.TFA:
if tokens[1].ttype == TType.TVAR:
if tokens[2].ttype == TType.TDOT:
tokens.pop(0)
v = tokens.pop(0).val
tokens.pop(0)
e = Forall(v,expr(tokens))
else:
raise ParseException(tokens[2].pos,[TType.TDOT],tokens[2].val)
else:
raise ParseException(tokens[1].pos,[TType.TVAR],tokens[1].val)
elif tokens[0].ttype == TType.TEX:
if tokens[1].ttype == TType.TVAR:
if tokens[2].ttype == TType.TDOT:
tokens.pop(0)
v = tokens.pop(0).val
tokens.pop(0)
e = Exists(v,expr(tokens))
else:
raise ParseException(tokens[2].pos,[TType.TDOT],tokens[2].val)
else:
raise ParseException(tokens[1].pos,[TType.TVAR],tokens[1].val)
else:
e = arrow_expr(tokens)
if tokens[0].ttype not in follow:
raise ParseException(tokens[0].pos,follow,tokens[0].val)
return e
def arrow_expr(tokens):
follow = [TType.TEOF, TType.TRPAREN]
lhs = or_expr(tokens)
if tokens[0].ttype == TType.TARROW:
tokens.pop(0)
rhs = arrow_expr(tokens)
lhs = Arrow(lhs, rhs)
if tokens[0].ttype not in follow:
raise ParseException(tokens[0].pos,follow,tokens[0].val)
return lhs
def or_expr(tokens):
follow = [TType.TEOF, TType.TRPAREN, TType.TARROW]
lhs = and_expr(tokens)
while tokens[0].ttype == TType.TOR:
tokens.pop(0)
rhs = and_expr(tokens)
lhs = Or(lhs, rhs)
if tokens[0].ttype not in follow:
raise ParseException(tokens[0].pos,follow,tokens[0].val)
return lhs
def and_expr(tokens):
follow = [TType.TEOF, TType.TRPAREN, TType.TARROW, TType.TOR]
lhs = not_expr(tokens)
while tokens[0].ttype == TType.TAND:
tokens.pop(0)
rhs = not_expr(tokens)
lhs = And(lhs, rhs)
if tokens[0].ttype not in follow:
raise ParseException(tokens[0].pos,follow,tokens[0].val)
return lhs
def not_expr(tokens):
follow = [TType.TEOF, TType.TRPAREN, TType.TARROW, TType.TOR, TType.TAND]
e = None
if tokens[0].ttype == TType.TNOT:
tokens.pop(0)
ne = not_expr(tokens)
e = Not(ne)
else:
e = term(tokens)
if tokens[0].ttype not in follow:
raise ParseException(tokens[0].pos,follow,tokens[0].val)
return e
def term(tokens):
first = [TType.TTRUE, TType.TFALSE, TType.TVAR, TType.TLPAREN]
follow = [TType.TEOF, TType.TRPAREN, TType.TARROW, TType.TOR, TType.TAND]
e = None
if tokens[0].ttype == TType.TVAR:
e = pred(tokens)
elif tokens[0].ttype == TType.TTRUE:
e = true()
tokens.pop(0)
elif tokens[0].ttype == TType.TFALSE:
e = false()
tokens.pop(0)
elif tokens[0].ttype == TType.TLPAREN:
tokens.pop(0)
e = expr(tokens)
if tokens[0].ttype != TType.TRPAREN:
raise ParseException(tokens[0].pos,[TType.TRPAREN],tokens[0].val)
tokens.pop(0)
elif tokens[0].ttype == TType.TFA or \
tokens[0].ttype == TType.TEX:
e = expr(tokens)
else:
raise ParseException(tokens[0].pos,first,tokens[0].val)
if tokens[0].ttype not in follow:
raise ParseException(tokens[0].pos,follow,tokens[0].val)
return e
# ( v (, v)* )
def pred(tokens):
follow = [TType.TEOF, TType.TRPAREN, TType.TARROW, TType.TOR, TType.TAND]
e = None
# initial name
name = tokens.pop(0).val
# P(v {, v} )
if tokens[0].ttype == TType.TLPAREN:
tokens.pop(0)
vs = []
if tokens[0].ttype == TType.TVAR:
vs = [tokens.pop(0).val]
# P()
elif tokens[0].ttype == TType.TRPAREN:
pass
else:
raise ParseException(tokens[0].pos, [TType.TVAR], tokens[0].val)
# {, v}
while tokens[0].ttype != TType.TRPAREN:
if tokens[0].ttype == TType.TCOMMA and \
tokens[1].ttype == TType.TVAR:
tokens.pop(0)
vs.append(tokens.pop(0).val)
else:
raise ParseException(tokens[0].pos, [TType.TCOMMA], tokens[0].val)
tokens.pop(0)
e = Pred(name, vs)
# v
else:
e = Var(name)
if tokens[0].ttype not in follow:
raise ParseException(tokens[0].pos,follow,tokens[0].val)
return e
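# Illustrative usage sketch (not part of the original module). Token reprs come
# from TType.__str__; the printed form of the parsed AST depends on the unshown
# AST module, so only its node structure is noted here.
def _demo_parser():
    tokens = lex("a && b -> T")
    print([str(t) for t in tokens])  # ['<var>', '&&', '<var>', '->', 'T', '<EOF>']
    return parse("FA x . P(x) -> Q(x)")  # Forall('x', Arrow(Pred('P', ['x']), Pred('Q', ['x'])))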
|
StarcoderdataPython
|
4882580
|
github_user = "example"
github_pass = "<PASSWORD>"
gmail_user = "<EMAIL>"
gmail_pass = "<PASSWORD>"
email_text = """\
From: {}
To: {}
Subject: Github Project \'{}\'
Hi {}!
I noticed you gave my project \'{}\' ({}) a star on Github.
First of all: thank you for that! :)
I would like to improve the project by taking into consideration your specific needs and wishes. That is why I would greatly appreciate it if you could send me a short reply with some information about yourself, why you gave a star to the project or features you would like to see included in the future.
I am also open to discuss other follow up projects or good ideas that you might have. So just hit me up!
Cheers!
ExampleName
- This email was sent in an automated way.
- Check out https://github.com/RafaelKuebler/GithubSurvey for more information.
"""
|
StarcoderdataPython
|
6650114
|
<gh_stars>0
from __future__ import print_function, absolute_import, division
import os
import shutil
from os.path import join, dirname
import sys
import time
from pprint import pprint
import numpy as np
from progress.bar import Bar as Bar
from sklearn import metrics
import json
import torch
import torch.nn as nn
import torch.optim
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader, ConcatDataset
from torchvision import transforms
from torch.utils.tensorboard import SummaryWriter
from torchvision.models.resnet import ResNet, BasicBlock, Bottleneck
from src.opt import Options
import src.log as log
import src.utils as utils
from model import LinearModel, weight_init
from src.data import ToTensor, ClassificationDataset
from src.data_utils import one_hot, load_gt, mean_missing_parts, \
mpjpe_2d_openpose, calc_auc
from src.vis import create_grid
def train(train_loader, model, criterion, optimizer, num_kpts=15, num_classes=200,
lr_init=None, lr_now=None, glob_step=None, lr_decay=None, gamma=None,
max_norm=True):
losses = utils.AverageMeter()
model.train()
errs, accs = [], []
start = time.time()
batch_time = 0
bar = Bar('>>>', fill='>', max=len(train_loader))
for i, sample in enumerate(train_loader):
glob_step += 1
if glob_step % lr_decay == 0 or glob_step == 1:
lr_now = utils.lr_decay(optimizer, glob_step, lr_init, lr_decay, gamma)
inputs = sample['X'].cuda()
# NOTE: PyTorch issue with dim0=1.
if inputs.shape[0] == 1:
continue
targets = sample['Y'].reshape(-1).cuda()
outputs = model(inputs)
# calculate loss
optimizer.zero_grad()
loss = criterion(outputs, targets)
losses.update(loss.item(), inputs.size(0))
loss.backward()
if max_norm:
            nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)
optimizer.step()
# Set outputs to [0, 1].
        softmax = nn.Softmax(dim=1)
outputs = softmax(outputs)
outputs = outputs.data.cpu().numpy()
targets = one_hot(targets.data.cpu().numpy(), num_classes)
errs.append(np.mean(np.abs(outputs - targets)))
accs.append(metrics.accuracy_score(
np.argmax(targets, axis=1),
np.argmax(outputs, axis=1))
)
# update summary
if (i + 1) % 100 == 0:
batch_time = time.time() - start
start = time.time()
bar.suffix = '({batch}/{size}) | batch: {batchtime:.4}ms | Total: {ttl} | ETA: {eta:} | loss: {loss:.6f}' \
.format(batch=i + 1,
size=len(train_loader),
batchtime=batch_time * 10.0,
ttl=bar.elapsed_td,
eta=bar.eta_td,
loss=losses.avg)
bar.next()
bar.finish()
err = np.mean(np.array(errs, dtype=np.float32))
acc = np.mean(np.array(accs, dtype=np.float32))
print (">>> train error: {} <<<".format(err))
print (">>> train accuracy: {} <<<".format(acc))
return glob_step, lr_now, losses.avg, err, acc
def test(test_loader, model, criterion, num_kpts=15, num_classes=2,
batch_size=64, inference=False, log=True):
losses = utils.AverageMeter()
model.eval()
errs, accs = [], []
all_outputs, all_targets = [], []
start = time.time()
batch_time = 0
if log:
bar = Bar('>>>', fill='>', max=len(test_loader))
for i, sample in enumerate(test_loader):
inputs = sample['X'].cuda()
# NOTE: PyTorch issue with dim0=1.
if inputs.shape[0] == 1:
continue
targets = sample['Y'].reshape(-1).cuda()
outputs = model(inputs)
# calculate loss
loss = criterion(outputs, targets)
losses.update(loss.item(), inputs.size(0))
# Set outputs to [0, 1].
        softmax = nn.Softmax(dim=1)
outputs = softmax(outputs)
outputs = outputs.data.cpu().numpy()
targets = targets.data.cpu().numpy()
all_outputs.append(outputs)
all_targets.append(targets)
# errs.append(np.mean(np.abs(outputs - targets)))
# accs.append(accuracy_score(
# np.argmax(targets, axis=1),
# np.argmax(outputs, axis=1))
# )
# update summary
if (i + 1) % 100 == 0:
batch_time = time.time() - start
start = time.time()
if log:
bar.suffix = '({batch}/{size}) | batch: {batchtime:.4}ms | Total: {ttl} | ETA: {eta:} | loss: {loss:.6f}' \
.format(batch=i + 1,
size=len(test_loader),
batchtime=batch_time * 10.0,
ttl=bar.elapsed_td,
eta=bar.eta_td,
loss=losses.avg)
bar.next()
# err = np.mean(np.array(errs))
# acc = np.mean(np.array(accs))
all_outputs = np.concatenate(all_outputs)
all_targets = np.concatenate(all_targets)
pred_values = np.amax(all_outputs, axis=1)
pred_labels = np.argmax(all_outputs, axis=1)
err = np.mean(np.abs(pred_values - all_targets))
acc = np.mean(metrics.accuracy_score(all_targets, pred_labels))
auc = calc_auc(all_targets, pred_values)
prec = metrics.average_precision_score(all_targets, pred_values)
if log:
bar.finish()
print('>>> test error: {} <<<'.format(err))
print('>>> test accuracy: {} <<<'.format(acc))
return losses.avg, err, acc, auc, prec
def extract_tb_sample(test_loader, model, batch_size):
'''
Extract 2 correct and 2 wrong samples.
'''
model.eval()
num_correct = 0
num_wrong = 0
done = False
sample_idxs = [-1] * 4
NUM = 2
for bidx, batch in enumerate(test_loader):
inputs = batch['X'].cuda()
targets = batch['Y'].reshape(-1).cuda()
outputs = model(inputs)
        softmax = nn.Softmax(dim=1)
outputs = softmax(outputs)
outputs = np.argmax(outputs.data.cpu().numpy(), axis=1)
targets = targets.data.cpu().numpy()
for idx in range(outputs.shape[0]):
ttl_idx = bidx * batch_size + idx
if outputs[idx] == targets[idx]:
if num_correct < NUM:
sample_idxs[num_correct] = ttl_idx
num_correct += 1
else:
if num_wrong < NUM:
sample_idxs[NUM + num_wrong] = ttl_idx
num_wrong += 1
if num_correct == NUM and num_wrong == NUM:
done = True
break
if done:
break
if not done:
print(f'>>> WARNING: Found only {num_correct}/2 '
f'correct and {num_wrong}/2 wrong samples')
return sample_idxs
def main(opt):
start_epoch = 0
acc_best = 0.
glob_step = 0
lr_now = opt.lr
# save options
log.save_options(opt, opt.ckpt)
tb_logdir = f'./exp/{opt.name}'
if os.path.exists(tb_logdir):
shutil.rmtree(tb_logdir)
writer = SummaryWriter(log_dir=f'./exp/{opt.name}')
exp_dir_ = dirname(opt.load)
# create model
print(">>> creating model")
# TODO: This is how to avoid weird data reshaping for non-3-channel inputs.
# Have ResNet model take in grayscale rather than RGB
# model.conv1 = torch.nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
if opt.arch == 'cnn':
model = ResNet(BasicBlock, [2, 2, 2, 2], num_classes=opt.num_classes)
else:
model = LinearModel()
model = model.cuda()
model.apply(weight_init)
print(">>> total params: {:.2f}M".format(sum(p.numel() for p in model.parameters()) / 1000000.0))
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)
# load ckpt
if opt.load:
print(">>> loading ckpt from '{}'".format(opt.load))
ckpt = torch.load(opt.load)
start_epoch = ckpt['epoch']
acc_best = ckpt['acc']
glob_step = ckpt['step']
lr_now = ckpt['lr']
model.load_state_dict(ckpt['state_dict'])
optimizer.load_state_dict(ckpt['optimizer'])
print(">>> ckpt loaded (epoch: {} | acc: {})".format(start_epoch, acc_best))
if opt.resume:
logger = log.Logger(os.path.join(opt.ckpt, 'log.txt'), resume=True)
else:
logger = log.Logger(os.path.join(opt.ckpt, 'log.txt'))
logger.set_names(['epoch', 'lr', 'loss_train', 'err_train', 'acc_train',
'loss_test', 'err_test', 'acc_test'])
transforms = [
ToTensor(),
]
train_datasets = []
for dataset_name in opt.train_datasets:
train_datasets.append(ClassificationDataset(
name=dataset_name,
num_kpts=opt.num_kpts,
transforms=transforms,
split='train',
arch=opt.arch,
gt=opt.gt))
train_dataset = ConcatDataset(train_datasets)
train_loader = DataLoader(train_dataset, batch_size=opt.train_batch,
shuffle=True, num_workers=opt.job)
split = 'test' if opt.test else 'valid'
test_dataset = ClassificationDataset(
name=opt.test_dataset,
num_kpts=opt.num_kpts,
transforms=transforms,
split=split,
arch=opt.arch,
gt=opt.gt)
test_loader = DataLoader(test_dataset, batch_size=opt.test_batch,
shuffle=False, num_workers=opt.job)
subset_loaders = {}
for subset in test_dataset.create_subsets():
subset_loaders[subset.split] = DataLoader(subset,
batch_size=opt.test_batch, shuffle=False, num_workers=opt.job)
cudnn.benchmark = True
for epoch in range(start_epoch, opt.epochs):
torch.cuda.empty_cache()
print('==========================')
print('>>> epoch: {} | lr: {:.5f}'.format(epoch + 1, lr_now))
if not opt.test:
glob_step, lr_now, loss_train, err_train, acc_train = \
train(train_loader, model, criterion, optimizer,
num_kpts=opt.num_kpts, num_classes=opt.num_classes,
lr_init=opt.lr, lr_now=lr_now, glob_step=glob_step,
lr_decay=opt.lr_decay, gamma=opt.lr_gamma,
max_norm=opt.max_norm)
loss_test, err_test, acc_test, auc_test, prec_test = \
test(test_loader, model, criterion, num_kpts=opt.num_kpts,
num_classes=opt.num_classes, batch_size=opt.test_batch)
## Test subsets ##
subset_losses = {}
subset_errs = {}
subset_accs = {}
subset_aucs = {}
subset_precs = {}
subset_openpose = {}
subset_missing = {}
subset_grids = {}
if len(subset_loaders) > 0:
bar = Bar('>>>', fill='>', max=len(subset_loaders))
for key_idx, key in enumerate(subset_loaders):
loss_sub, err_sub, acc_sub, auc_sub, prec_sub = test(
subset_loaders[key], model, criterion,
num_kpts=opt.num_kpts, num_classes=opt.num_classes,
batch_size=4, log=False)
subset_losses[key] = loss_sub
subset_errs[key] = err_sub
subset_accs[key] = acc_sub
subset_aucs[key] = auc_sub
subset_precs[key] = prec_sub
sub_dataset = subset_loaders[key].dataset
if sub_dataset.gt_paths is not None:
gt_X = load_gt(sub_dataset.gt_paths)
subset_openpose[key] = mpjpe_2d_openpose(
sub_dataset.X, gt_X)
subset_missing[key] = mean_missing_parts(
sub_dataset.X)
else:
subset_openpose[key] = 0.
subset_missing[key] = 0.
sample_idxs = extract_tb_sample(
subset_loaders[key],
model,
batch_size=opt.test_batch)
sample_X = sub_dataset.X[sample_idxs]
sample_img_paths = [sub_dataset.img_paths[x]
for x in sample_idxs]
if opt.arch == 'cnn':
subset_grids[key] = create_grid(
sample_X,
sample_img_paths)
bar.suffix = f'({key_idx+1}/{len(subset_loaders)}) | {key}'
bar.next()
if len(subset_loaders) > 0:
bar.finish()
###################
if opt.test:
subset_accs['all'] = acc_test
subset_aucs['all'] = auc_test
subset_precs['all'] = prec_test
report_dict = {
'acc': subset_accs,
'auc': subset_aucs,
'prec': subset_precs
}
report_idx = 0
report_path = f'report/{opt.name}-{report_idx}.json'
while os.path.exists(f'report/{opt.name}-{report_idx}.json'):
report_idx += 1
report_path = f'report/{opt.name}-{report_idx}.json'
print(f'>>> Saving report to {report_path}...')
with open(report_path, 'w') as acc_f:
json.dump(report_dict, acc_f, indent=4)
print('>>> Exiting (test mode)...')
break
# update log file
logger.append([epoch + 1, lr_now, loss_train, err_train, acc_train,
loss_test, err_test, acc_test],
['int', 'float', 'float', 'float', 'float', 'float', 'float', 'float'])
# save ckpt
is_best = acc_test > acc_best
acc_best = max(acc_test, acc_best)
if is_best:
log.save_ckpt({'epoch': epoch + 1,
'lr': lr_now,
'step': glob_step,
'acc': acc_best,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict()},
ckpt_path=opt.ckpt,
is_best=True)
else:
log.save_ckpt({'epoch': epoch + 1,
'lr': lr_now,
'step': glob_step,
'acc': acc_best,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict()},
ckpt_path=opt.ckpt,
is_best=False)
writer.add_scalar('Loss/train', loss_train, epoch)
writer.add_scalar('Loss/test', loss_test, epoch)
writer.add_scalar('Error/train', err_train, epoch)
writer.add_scalar('Error/test', err_test, epoch)
writer.add_scalar('Accuracy/train', acc_train, epoch)
writer.add_scalar('Accuracy/test', acc_test, epoch)
for key in subset_losses:
writer.add_scalar(f'Loss/Subsets/{key}',
subset_losses[key], epoch)
writer.add_scalar(f'Error/Subsets/{key}',
subset_errs[key], epoch)
writer.add_scalar(f'Accuracy/Subsets/{key}',
subset_accs[key], epoch)
writer.add_scalar(f'OpenPose/Subsets/{key}',
subset_openpose[key], epoch)
writer.add_scalar(f'Missing/Subsets/{key}',
subset_missing[key], epoch)
if opt.arch == 'cnn':
writer.add_images(f'Subsets/{key}', subset_grids[key],
epoch, dataformats='NHWC')
logger.close()
writer.close()
if __name__ == '__main__':
option = Options().parse()
main(option)
|
StarcoderdataPython
|
8156368
|
from logics.classes.exceptions import NotWellFormed
def separate_arguments(string, comma_separator):
"""
    Given a string in format '(x,y,z...)' returns a list with format ['x', 'y', 'z', ...]
Takes into account nested parentheses. For instance, '(1,(2,3),4)' will return ['1', '(2,3)', '4']
WILL NOT ELIMINATE WHITESPACES, if you give it '(1, 2, 3)' it will return ['1', ' 2', ' 3']
"""
# If the string is in incorrect format, raises an error
if string[0] != '(' or string[-1] != ')':
raise NotWellFormed(f"'{string}' missing initial or final parentheses")
# If given a single argument '(x)', returns ['x']
elif ',' not in string:
return [string[1:-1]]
else:
num_parentheses_left = 0
num_parentheses_right = 0
comma_indexes = [0]
argum_list = list()
for x in range(0, len(string)):
if string[x] == '(':
num_parentheses_left += 1
elif string[x] == ')':
num_parentheses_right += 1
elif string[x] == comma_separator:
if num_parentheses_left == num_parentheses_right + 1:
argum_list.append(string[comma_indexes[-1] + 1:x])
comma_indexes.append(x)
argum_list.append(string[comma_indexes[-1] + 1:-1]) # the last argument
return argum_list
def get_main_constant(string, infix_cts, outer_parentheses=True):
"""
Searches for a constant that has 1 more left parenthesis open than right
(or the same amount if outer_parentheses is set to False)
Returns the constant and its index in the string
If it does not find one, returns None
If it finds more than one, raises NotWellFormed
"""
num_parentheses_left = 0
num_parentheses_right = 0
ct_present = False
binary_ct = None
binary_ct_index = None
for x in range(len(string)):
if string[x] == '(':
num_parentheses_left += 1
elif string[x] == ')':
num_parentheses_right += 1
else:
for infix_ct in infix_cts:
if string[x:x+len(infix_ct)] == infix_ct:
if (outer_parentheses and num_parentheses_left == num_parentheses_right + 1) or \
(not outer_parentheses and num_parentheses_left == num_parentheses_right):
# Do this instead of returning immediately to avoid things like (p v q v r)
if ct_present:
raise NotWellFormed(f"{string} contains more than one top-level binary operator")
binary_ct = infix_ct
binary_ct_index = x
ct_present = True
return binary_ct, binary_ct_index
def get_last_opening_parenthesis(string):
"""
    Returns the index of the parenthesis that opens the last closing parenthesis
For example, in "∀x ∈ f(x) (Px v (Qx v Rx))" will return the index of the parenthesis before Px
"""
num_parentheses_left = 0
num_parentheses_right = 0
for char_index in range(len(string)-1, -1, -1):
if string[char_index] == '(':
num_parentheses_left += 1
if num_parentheses_left == num_parentheses_right:
return char_index
elif string[char_index] == ')':
num_parentheses_right += 1
def get_closing_parenthesis(string):
"""
Given a string that starts with a parenthesis, e.g. (1+1)v0=0
returns the index of the closing parenthesis of the initial parenthesis
"""
num_parentheses_left = 0
num_parentheses_right = 0
for char_index in range(len(string)):
if string[char_index] == '(':
num_parentheses_left += 1
elif string[char_index] == ')':
num_parentheses_right += 1
if num_parentheses_right == num_parentheses_left:
return char_index
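# Illustrative checks for the parenthesis helpers above (not part of the
# original module):
def _demo_parenthesis_helpers():
    assert separate_arguments('(1,(2,3),4)', ',') == ['1', '(2,3)', '4']
    assert get_closing_parenthesis('(1+1)v0=0') == 4
    # get_main_constant finds the single top-level infix connective:
    assert get_main_constant('(p∧q)', ['∧']) == ('∧', 2)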
# ----------------------------------------------------------------------------------------------------------------------
# Standard Godel encoding and decoding
def godel_encode(string):
"""Godel encoding function for the language logics.instances.predicate.languages.arithmetic_truth_language
Codes an *unparsed sentence* (the string you would give to the parser). Works as follows:
* Constant ``"0"`` is represented by 0
* Auxiliary symbols begin with 1 (e.g. ``"("`` is 19, ``")"`` is 199)
* Connectives begin with 2 (e.g. ``"~"`` is 29, ``"∧"`` is 299, ``"∨"`` is 2999)
* Quantifiers begin with 3 (e.g. ``"∀"`` is 39, ``"∃"`` is 399)
* Predicates begin with 4 (e.g. ``"="`` is 49, ``"Tr"`` is 49999)
* Variables with 5, Predicate variables with 6 (e.g. ``"x"`` is 51, ``"x1"`` is 519, ``"X"`` is 61)
* Metavariables and sentential constants begin with 7 (e.g. ``"A"`` is 79, ``"λ"`` is 79999)
* Function symbols begin with 8 (e.g. ``"s"`` is 89, ``"+"`` is 899)
Returns
-------
str
The *numeral* representing the Godel number of the sentence
Raises
------
logics.classes.exceptions.NotWellFormed
If it detects a character that is none of the above. Note that whitespace is taken as non-recognized.
Examples
--------
>>> from logics.utils.parsers.parser_utils import godel_encode
>>> godel_encode('0=0')
'0490'
>>> godel_encode('0 = 0')
Traceback (most recent call last):
...
logics.classes.exceptions.NotWellFormed: Non-recognized character in Godel encoding
>>> godel_encode('s(0)+s(0)=s(s(0))')
'891901998998919019949891989190199199'
>>> godel_encode('1+1=2') # Remember that arithmetic has only 0 as individual constant
Traceback (most recent call last):
...
logics.classes.exceptions.NotWellFormed: Non-recognized character 1 in Godel encoding
>>> godel_encode('∀x(~0=0)')
'395119290490199'
>>> godel_encode('forall x (0=0)')
Traceback (most recent call last):
...
logics.classes.exceptions.NotWellFormed: Non-recognized character f in Godel encoding
Notes
-----
You will probably not need to call this function directly, the parser will call it for you, see below.
"""
new_string = ''
current_index = 0
skip_characters = 0
for char in string:
if skip_characters != 0:
skip_characters -= 1
else:
# Constant 0 is represented by 0
if char == '0':
new_string += '0'
# Auxiliary symbols begin with 1
elif char == '(':
new_string += '19'
elif char == ')':
new_string += '199'
elif char == ',':
new_string += '1999'
# Connectives begin with 2
elif char == '~':
new_string += '29'
elif char == '∧':
new_string += '299'
elif char == '∨':
new_string += '2999'
elif char == '→':
new_string += '29999'
elif char == '↔':
new_string += '299999'
# Quantifiers begin with 3
elif char == '∀':
new_string += '39'
elif char == '∃':
new_string += '399'
elif char == '∈':
new_string += '3999'
# Predicates 4
elif char == '=':
new_string += '49'
elif char == '>':
new_string += '499'
elif char == '<':
new_string += '4999'
            elif char == 'T' and string[current_index + 1:current_index + 2] == 'r':
                new_string += '49999'
                skip_characters += 1
# Variables 5, Predicate variables 6
elif char == 'x' or char == 'y' or char == 'z' or char == 'X' or char == 'Y' or char == 'Z':
# begin with x 51
if char == 'x':
new_string += '51'
# begin with y 52
elif char == 'y':
new_string += '52'
# begin with z 53
elif char == 'z':
new_string += '53'
# begin with X 61
elif char == 'X':
new_string += '61'
# begin with Y 62
elif char == 'Y':
new_string += '62'
# begin with Z 63
elif char == 'Z':
new_string += '63'
var_number = 0
for char2_index in range(current_index + 1, len(string)):
if string[char2_index].isdigit():
var_number = int(string[current_index+1:char2_index+1])
skip_characters += 1
else:
break
if var_number != 0:
new_string += '9' * var_number
# Metavariables and sentential constants 7
elif char == 'A':
new_string += '79'
elif char == 'B':
new_string += '799'
elif char == 'C':
new_string += '7999'
elif char == 'λ':
new_string += '79999'
# Function symbols 8
elif char == 's':
new_string += '89'
elif char == '+':
new_string += '899'
            elif char == '*' and string[current_index + 1:current_index + 2] != '*':
                new_string += '8999'
            elif char == '*' and string[current_index + 1:current_index + 2] == '*':
                new_string += '89999'
                skip_characters += 1
elif char == 'q' and string[current_index:current_index + 5] == 'quote':
new_string += '899999'
skip_characters += 4
else:
raise NotWellFormed(f'Non-recognized character {char} in Godel encoding')
current_index += 1
return new_string
def godel_decode(string):
"""Godel decoding function for the language logics.instances.predicate.languages.arithmetic_truth_language
Reverses the function above.
Examples
--------
>>> from logics.utils.parsers.parser_utils import godel_decode
>>> godel_decode('0490')
'0=0'
>>> godel_decode('891901998998919019949891989190199199')
's(0)+s(0)=s(s(0))'
>>> godel_decode('395119290490199')
'∀x(~0=0)'
"""
new_string = ''
current_index = 0
skip_characters = 0
for char in string:
if skip_characters != 0:
skip_characters -= 1
else:
# Individual constant 0
if char == '0':
new_string += '0'
# Auxiliary symbols 1
elif char == '1':
if string[current_index:current_index + 4] == '1999':
new_string += ','
skip_characters += 3
elif string[current_index:current_index + 3] == '199':
new_string += ')'
skip_characters += 2
elif string[current_index:current_index + 2] == '19':
new_string += '('
skip_characters += 1
else:
raise NotWellFormed(f'Incorrect Godel encoding')
# Connectives 2
elif char == '2':
if string[current_index:current_index + 6] == '299999':
new_string += '↔'
skip_characters += 5
elif string[current_index:current_index + 5] == '29999':
new_string += '→'
skip_characters += 4
elif string[current_index:current_index + 4] == '2999':
new_string += '∨'
skip_characters += 3
elif string[current_index:current_index + 3] == '299':
new_string += '∧'
skip_characters += 2
elif string[current_index:current_index + 2] == '29':
new_string += '~'
skip_characters += 1
else:
raise NotWellFormed(f'Incorrect Godel encoding')
# Quantifiers 3
elif char == '3':
if string[current_index:current_index + 4] == '3999':
new_string += '∈'
skip_characters += 3
elif string[current_index:current_index + 3] == '399':
new_string += '∃'
skip_characters += 2
elif string[current_index:current_index + 2] == '39':
new_string += '∀'
skip_characters += 1
else:
raise NotWellFormed(f'Incorrect Godel encoding')
# Predicates 4
elif char == '4':
if string[current_index:current_index + 5] == '49999':
new_string += 'Tr'
skip_characters += 4
elif string[current_index:current_index + 4] == '4999':
new_string += '<'
skip_characters += 3
elif string[current_index:current_index + 3] == '499':
new_string += '>'
skip_characters += 2
elif string[current_index:current_index + 2] == '49':
new_string += '='
skip_characters += 1
else:
raise NotWellFormed(f'Incorrect Godel encoding')
# Variables 5 and 6
elif char == '5' or char == '6':
if string[current_index:current_index + 2] == '51':
new_string += 'x'
elif string[current_index:current_index + 2] == '52':
new_string += 'y'
elif string[current_index:current_index + 2] == '53':
new_string += 'z'
elif string[current_index:current_index + 2] == '61':
new_string += 'X'
elif string[current_index:current_index + 2] == '62':
new_string += 'Y'
elif string[current_index:current_index + 2] == '63':
new_string += 'Z'
skip_characters += 1
var_number = 0
for char2_index in range(current_index + 2, len(string)):
if string[char2_index] == '9':
var_number += 1
skip_characters += 1
else:
break
if var_number != 0:
new_string += str(var_number)
# Metavariables 7
elif char == '7':
if string[current_index:current_index + 5] == '79999':
new_string += 'λ'
skip_characters += 4
elif string[current_index:current_index + 4] == '7999':
new_string += 'C'
skip_characters += 3
elif string[current_index:current_index + 3] == '799':
new_string += 'B'
skip_characters += 2
elif string[current_index:current_index + 2] == '79':
new_string += 'A'
skip_characters += 1
else:
raise NotWellFormed(f'Incorrect Godel encoding')
# Function symbols 8
elif char == '8':
if string[current_index:current_index + 6] == '899999':
new_string += 'quote'
skip_characters += 5
elif string[current_index:current_index + 5] == '89999':
new_string += '**'
skip_characters += 4
elif string[current_index:current_index + 4] == '8999':
new_string += '*'
skip_characters += 3
elif string[current_index:current_index + 3] == '899':
new_string += '+'
skip_characters += 2
elif string[current_index:current_index + 2] == '89':
new_string += 's'
skip_characters += 1
else:
raise NotWellFormed(f'Incorrect Godel encoding')
else:
raise NotWellFormed(f'Non-recognized character {char} in Godel encoding')
current_index += 1
return new_string
|
StarcoderdataPython
|
3202070
|
import functools
import inspect
import mwbot.cli as cli
import mwbot.cred as cred
import mwbot.util as util
from mwbot.bot import Task
__all__ = []
export = util.append_name_wrapper(__all__)
export(Task)
def require_task(func):
@functools.wraps(func)
def wrapper(cls, *args, **kwargs):
        if not isinstance(cls, type):
            raise TypeError('Not a class')
        if not issubclass(cls, Task):
            raise TypeError('Not a subclass of Task')
return func(cls, *args, **kwargs)
return wrapper
@export
@require_task
def main(cls):
last_frame = inspect.stack()[2][0]
if last_frame.f_globals['__name__'] == '__main__':
while True:
try:
cls().main()
break
except cred.CredNotFoundError:
try:
if cli.prompt_yn('User not found. Create new user?', default=True):
creds = cli.prompt_new_user()
creds.save()
else:
break
except cli.AbortInput:
print('')
break
return cls
@export
def arg(*args, **kwargs):
@require_task
def wrapper(cls):
if not hasattr(cls, '_arguments'):
cls._arguments = []
cls._arguments.append(cli.ArgWrapper(*args, **kwargs))
return cls
return wrapper
|
StarcoderdataPython
|
11326555
|
<gh_stars>0
"""
Модульные тесты для проверки задания №6 с сайта:
https://pythonworld.ru/osnovy/tasks.html
"""
import unittest
from prime import is_prime
class TestIsPrime(unittest.TestCase):
"""
Набор тестов для проверки поведения функции is_prime().
"""
def test_prime(self):
"""
Тестовый случай с числом, которое является простым.
"""
self.assertEqual(is_prime(107), True)
def test_not_prime(self):
"""
Тестовый случай с числом, которое не является простым.
"""
self.assertEqual(is_prime(32), False)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
1743014
|
import logging
import os
import azure.functions as func
from azure.storage.blob import BlobClient, BlobProperties, BlobType, ContentSettings
def main(myblob: func.InputStream):
logging.info(f"Python blob trigger function processed blob \n"
f"Name: {myblob.name}\n"
f"Blob Size: {myblob.length} bytes")
input_account_key = os.environ["input_account_key"]
input_container_name = os.environ["input_container_name"]
output_connection_string = os.environ["output_connection_string"]
output_container_name = os.environ["output_container_name"]
logging.info(
f"Instantiate client for {myblob.name} in {input_container_name}")
in_client = BlobClient.from_blob_url(myblob.uri, input_account_key)
logging.info(
f"Get Blob Properties for {myblob.name} from {input_container_name}")
properties = in_client.get_blob_properties()
name = properties.name
content_type = properties.content_settings.content_type
logging.info(
f"Instantiate client for {myblob.name} in {output_container_name}")
out_client = BlobClient.from_connection_string(
output_connection_string,
output_container_name,
name)
logging.info(f"Copy {name}\n"
f"from {input_container_name} to {output_container_name}\n"
f"with content-type {content_type}")
out_client.upload_blob(
data=myblob,
blob_type=BlobType.BlockBlob,
length=myblob.length,
overwrite=True,
content_settings=ContentSettings(content_type=content_type))
logging.info(f"Upload complete for {output_container_name}/{name}")
|
StarcoderdataPython
|
233318
|
<filename>tests/core/helpers/test_helpers_iam.py
# -*- coding: utf-8 -*-
import pytest
from cottonformation.core import helpers
from cottonformation.tests.helpers import jprint
class TestAssumeRolePolicyBuilder:
def test_build(self):
assert helpers.iam.AssumeRolePolicyBuilder(
helpers.iam.ServicePrincipal.ec2(),
helpers.iam.ServicePrincipal.awslambda(),
helpers.iam.AccountPrincipal("111122223333", external_id="ext", mfa_auth=True),
).build() == {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Action": "sts:AssumeRole"
},
{
"Effect": "Allow",
"Principal": {
"Service": "lambda.amazonaws.com"
},
"Action": "sts:AssumeRole"
},
{
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::111122223333:root"
},
"Action": "sts:AssumeRole",
"Condition": {
"StringEquals": {
"sts:ExternalId": "ext"
},
"Bool": {
"aws:MultiFactorAuthPresent": "true"
}
}
}
]
}
class TestAwsManagedPolicy:
def test(self):
_ = helpers.iam.AwsManagedPolicy.AmazonEC2FullAccess
_ = helpers.iam.AwsManagedPolicy.AWSLambdaBasicExecutionRole
if __name__ == "__main__":
import os
basename = os.path.basename(__file__)
pytest.main([basename, "-s", "--tb=native"])
|
StarcoderdataPython
|
8053788
|
<reponame>ymarkovitch/ipp-crypto
#===============================================================================
# Copyright 2017-2019 Intel Corporation
# All Rights Reserved.
#
# If this software was obtained under the Intel Simplified Software License,
# the following terms apply:
#
# The source code, information and material ("Material") contained herein is
# owned by Intel Corporation or its suppliers or licensors, and title to such
# Material remains with Intel Corporation or its suppliers or licensors. The
# Material contains proprietary information of Intel or its suppliers and
# licensors. The Material is protected by worldwide copyright laws and treaty
# provisions. No part of the Material may be used, copied, reproduced,
# modified, published, uploaded, posted, transmitted, distributed or disclosed
# in any way without Intel's prior express written permission. No license under
# any patent, copyright or other intellectual property rights in the Material
# is granted to or conferred upon you, either expressly, by implication,
# inducement, estoppel or otherwise. Any license under such intellectual
# property rights must be express and approved by Intel in writing.
#
# Unless otherwise agreed by Intel in writing, you may not remove or alter this
# notice or any other notice embedded in Materials by Intel or Intel's
# suppliers or licensors in any way.
#
#
# If this software was obtained under the Apache License, Version 2.0 (the
# "License"), the following terms apply:
#
# You may not use this file except in compliance with the License. You may
# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
#
# Intel(R) Integrated Performance Primitives (Intel(R) IPP) Cryptography
#
import sys
import os
Header = sys.argv[1] ## Intel(R) IPP Crypto dispatcher will be generated for functions in Header
OutDir = sys.argv[2] ## Output folder for generated files
headerID= False ## Header ID define to avoid multiple inclusion, like: #if !defined( __IPPCP_H__ )
from gen_disp_common import readNextFunction
HDR= open( Header, 'r' )
h= HDR.readlines()
HDR.close()
isFunctionFound = True
curLine = 0
FunName = ""
filename = "jmp_dynamic_lib"
DISP= open( os.sep.join([OutDir, filename + ".asm"]), 'w' )
DISP.write("""
option casemap :none
.code
IPPAPI MACRO name:req
extrn d&name&:qword
name proc public
jmp d&name&
name endp
ENDM
""")
while (isFunctionFound == True):
result = readNextFunction(h, curLine, headerID)
curLine = result['curLine']
FunName = result['FunName']
isFunctionFound = result['success']
if (isFunctionFound == True):
DISP.write("IPPAPI {}\n".format(FunName))
DISP.write("end\n")
DISP.close()
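# For a header declaring, e.g., a (hypothetical) function ippsFooBar, the
# generated stub in jmp_dynamic_lib.asm is "IPPAPI ippsFooBar", which the
# IPPAPI macro above expands to:
#
#   extrn dippsFooBar:qword
#   ippsFooBar proc public
#       jmp dippsFooBar
#   ippsFooBar endp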
|
StarcoderdataPython
|
3366710
|
<gh_stars>1-10
import Utils
from Utils import logCall
import wx
import six
import wx.lib.intctrl
import Model
from HighPrecisionTimeEdit import HighPrecisionTimeEdit
from Undo import undo
import sys
import random
import datetime
#------------------------------------------------------------------------------------------------
class CorrectNumberDialog( wx.Dialog ):
def __init__( self, parent, entry, id = wx.ID_ANY ):
wx.Dialog.__init__( self, parent, id, "Correct Number",
style=wx.DEFAULT_DIALOG_STYLE|wx.TAB_TRAVERSAL )
self.entry = entry
bs = wx.GridBagSizer(vgap=5, hgap=5)
self.numEdit = wx.lib.intctrl.IntCtrl( self, size=(64,-1), style=wx.TE_RIGHT, value=int(self.entry.num), allow_none=False, min=1, max=9999 )
border = 4
bs.Add( wx.StaticText( self, label = u'{}: {} {}: {}'.format(
_('Rider Lap'), self.entry.lap,
_('Race Time'), Utils.formatTime(self.entry.t, True)
)),
pos=(0,0), span=(1,2), border = border, flag=wx.GROW|wx.ALL )
bs.Add( wx.StaticText( self, label = u'{}:'.format(_("Rider"))), pos=(1,0), span=(1,1), border = border, flag=wx.LEFT|wx.TOP|wx.ALIGN_RIGHT|wx.ALIGN_CENTRE_VERTICAL )
bs.Add( self.numEdit, pos=(1,1), span=(1,2), border = border, flag=wx.RIGHT|wx.TOP|wx.ALIGN_LEFT )
choices = [u'{}:'.format(_("Race Time"))]
race = Model.race
if race and race.startTime:
choices.append( _("24 hr Clock Time:") )
self.timeChoice = wx.Choice( self, -1, choices = choices )
self.timeChoiceLastSelection = 0
self.timeChoice.SetSelection( self.timeChoiceLastSelection )
self.timeChoice.Bind( wx.EVT_CHOICE, self.doTimeChoice, self.timeChoice )
self.timeMsEdit = HighPrecisionTimeEdit( self, seconds=entry.t, size=(120, -1) )
bs.Add( self.timeChoice, pos=(2,0), span=(1,1), border = border, flag=wx.ALIGN_RIGHT|wx.LEFT|wx.BOTTOM|wx.ALIGN_CENTRE_VERTICAL )
bs.Add( self.timeMsEdit, pos=(2,1), span=(1,1), border = border, flag=wx.RIGHT|wx.BOTTOM|wx.ALIGN_LEFT )
bs.Add( wx.StaticText( self, label = u'{}:'.format(_("Lap Note"))), pos=(3,0), span=(1,1), border = border, flag=wx.LEFT|wx.TOP|wx.ALIGN_RIGHT|wx.ALIGN_CENTRE_VERTICAL )
self.noteEdit = wx.TextCtrl( self, size=(250,-1) )
if race:
self.noteEdit.SetValue( getattr(race, 'lapNote', {}).get( (self.entry.num, self.entry.lap), u'' ) )
bs.Add( self.noteEdit, pos=(3,1), span=(1,2), border = border, flag=wx.RIGHT|wx.TOP|wx.ALIGN_LEFT )
self.okBtn = wx.Button( self, wx.ID_OK )
self.Bind( wx.EVT_BUTTON, self.onOK, self.okBtn )
self.cancelBtn = wx.Button( self, wx.ID_CANCEL )
self.Bind( wx.EVT_BUTTON, self.onCancel, self.cancelBtn )
border = 4
bs.Add( self.okBtn, pos=(4, 0), span=(1,1), border = border, flag=wx.ALL )
self.okBtn.SetDefault()
bs.Add( self.cancelBtn, pos=(4, 1), span=(1,1), border = border, flag=wx.ALL )
self.SetSizerAndFit(bs)
bs.Fit( self )
self.CentreOnParent(wx.BOTH)
wx.CallAfter( self.SetFocus )
def doTimeChoice( self, event ):
iSelection = event.GetSelection()
if iSelection == self.timeChoiceLastSelection:
return
if not (Model.race and Model.race.startTime):
return
dtStart = Model.race.startTime
t = self.timeMsEdit.GetSeconds()
if iSelection == 0:
# Clock time to race time.
dtInput = datetime.datetime(dtStart.year, dtStart.month, dtStart.day) + datetime.timedelta(seconds = t)
t = (dtInput - dtStart).total_seconds()
else:
# Race time to clock time.
dtInput = dtStart + datetime.timedelta( seconds = t )
t = (dtInput - datetime.datetime(dtStart.year, dtStart.month, dtStart.day)).total_seconds()
self.timeMsEdit.SetSeconds( t )
self.timeChoiceLastSelection = iSelection
def onOK( self, event ):
num = self.numEdit.GetValue()
t = self.timeMsEdit.GetSeconds()
if self.timeChoice.GetSelection() == 1 and Model.race and Model.race.startTime:
dtStart = Model.race.startTime
dtInput = datetime.datetime(dtStart.year, dtStart.month, dtStart.day) + datetime.timedelta(seconds = t)
if dtInput < dtStart:
Utils.MessageOK( self, u'\n\n'.join( [_('Cannot Enter Clock Time Before Race Start.'), _('(reminder: clock time is in 24-hour format)')] ),
_('Time Entry Error'), iconMask = wx.ICON_ERROR )
return
t = (dtInput - dtStart).total_seconds()
		race = Model.race
		if not race:
			self.EndModal( wx.ID_CANCEL )
			return
		offset = race.getStartOffset( num )
if t <= offset:
Utils.MessageOK( self, u'{}: {}\n\n{}\n{}'.format(
_('Cannot enter a time that is before the Category Start Offset'), Utils.formatTime(offset, highPrecision=True),
_('All times earlier than the Start Offset are ignored.'),
_('Please enter a time after the Start Offset.')
), _('Time Entry Error'), iconMask = wx.ICON_ERROR
)
return
race.lapNote = getattr( race, 'lapNote', {} )
if self.noteEdit.GetValue() != race.lapNote.get( (self.entry.num, self.entry.lap), u'' ) or self.entry.num != num or self.entry.t != t:
undo.pushState()
note = self.noteEdit.GetValue().strip()
if not note:
race.lapNote.pop( (self.entry.num, self.entry.lap), None )
else:
race.lapNote[(self.entry.num, self.entry.lap)] = note
if self.entry.num != num or self.entry.t != t:
rider = race.getRider( num )
if self.entry.lap != 0:
race.numTimeInfo.change( self.entry.num, self.entry.t, t )
race.deleteTime( self.entry.num, self.entry.t )
race.addTime( num, t + ((rider.firstTime or 0.0) if race.isTimeTrial else 0.0) )
else:
race.numTimeInfo.change( self.entry.num, rider.firstTime, t )
rider.firstTime = t
race.setChanged()
Utils.refresh()
self.EndModal( wx.ID_OK )
def onCancel( self, event ):
self.EndModal( wx.ID_CANCEL )
#------------------------------------------------------------------------------------------------
class ShiftNumberDialog( wx.Dialog ):
def __init__( self, parent, entry, id = wx.ID_ANY ):
wx.Dialog.__init__( self, parent, id, "Shift Time",
style=wx.DEFAULT_DIALOG_STYLE|wx.TAB_TRAVERSAL )
self.entry = entry
bs = wx.GridBagSizer(vgap=5, hgap=5)
self.numEdit = wx.lib.intctrl.IntCtrl( self, size=(40, -1),
style=wx.TE_RIGHT,
value=int(self.entry.num),
allow_none=False, min=1, max=9999 )
shiftOptions = [_('Earlier'), _('Later')]
self.shiftBox = wx.RadioBox( self, wx.ID_ANY,
_('Shift Direction'),
wx.DefaultPosition, wx.DefaultSize,
shiftOptions, 2, wx.RA_SPECIFY_COLS )
self.Bind(wx.EVT_RADIOBOX, self.updateNewTime, self.shiftBox)
self.timeMsEdit = HighPrecisionTimeEdit( self )
self.timeMsEdit.Bind( wx.EVT_TEXT, self.updateNewTime )
self.newTime = wx.StaticText( self, label = u"00:00:00")
border = 8
bs.Add( wx.StaticText( self, label = u'{}: {} {}: {}'.format(
_('Rider Lap'), self.entry.lap,
_('Race Time'), Utils.formatTime(self.entry.t,True)) ),
pos=(0,0), span=(1,2), border = border, flag=wx.GROW|wx.ALL )
bs.Add( wx.StaticText( self, label = u'{}:'.format(_("Rider"))), pos=(1,0), span=(1,1), border = border, flag=wx.LEFT|wx.TOP|wx.ALIGN_RIGHT )
bs.Add( self.numEdit, pos=(1,1), span=(1,2), border = border, flag=wx.GROW|wx.RIGHT|wx.TOP )
bs.Add( self.shiftBox, pos=(2, 0), span=(1, 2), border = border, flag=wx.GROW|wx.LEFT|wx.RIGHT|wx.BOTTOM )
bs.Add( wx.StaticText( self, label = u'{}:'.format(_("Shift Time"))), pos=(3,0), span=(1,1), border = border, flag=wx.ALIGN_RIGHT|wx.LEFT|wx.RIGHT|wx.BOTTOM )
bs.Add( self.timeMsEdit, pos=(3,1), span=(1,1), border = border, flag=wx.GROW|wx.LEFT|wx.RIGHT )
bs.Add( self.newTime, pos=(4,0), span=(1,2), border = border, flag=wx.GROW|wx.LEFT|wx.RIGHT )
self.okBtn = wx.Button( self, wx.ID_OK )
self.Bind( wx.EVT_BUTTON, self.onOK, self.okBtn )
self.cancelBtn = wx.Button( self, wx.ID_CANCEL )
self.Bind( wx.EVT_BUTTON, self.onCancel, self.cancelBtn )
bs.Add( self.okBtn, pos=(5, 0), span=(1,1), border = border, flag=wx.ALL )
self.okBtn.SetDefault()
bs.Add( self.cancelBtn, pos=(5, 1), span=(1,1), border = border, flag=wx.ALL )
self.SetSizerAndFit(bs)
bs.Fit( self )
wx.CallAfter( self.updateNewTime )
self.CentreOnParent(wx.BOTH)
wx.CallAfter( self.SetFocus )
def getNewTime( self ):
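		# Selection 0 is 'Earlier', so the shift is subtracted; selection 1 ('Later') adds it.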
tAdjust = self.timeMsEdit.GetSeconds() * (-1 if self.shiftBox.GetSelection() == 0 else 1)
return self.entry.t + tAdjust
def onOK( self, event ):
num = self.numEdit.GetValue()
t = self.getNewTime()
if self.entry.num != num or self.entry.t != t:
undo.pushState()
with Model.LockRace() as race:
rider = race.getRider( num )
if (self.entry.lap or 0) != 0:
race.numTimeInfo.change( self.entry.num, self.entry.t, t )
race.deleteTime( self.entry.num, self.entry.t )
race.addTime( num, t + ((rider.firstTime or 0.0) if race.isTimeTrial else 0.0) )
else:
race.numTimeInfo.change( self.entry.num, rider.firstTime, t )
rider.firstTime = t
race.setChanged()
Utils.refresh()
self.EndModal( wx.ID_OK )
def onCancel( self, event ):
self.EndModal( wx.ID_CANCEL )
def updateNewTime( self, event = None ):
s = u'{}: {} {}: {}'.format(_('Was'), Utils.formatTime(self.entry.t,True), _('Now'), Utils.formatTime(self.getNewTime(),True) )
self.newTime.SetLabel( s )
#------------------------------------------------------------------------------------------------
class InsertNumberDialog( wx.Dialog ):
def __init__( self, parent, entry, id = wx.ID_ANY ):
wx.Dialog.__init__( self, parent, id, "Insert Number",
style=wx.DEFAULT_DIALOG_STYLE|wx.TAB_TRAVERSAL )
self.entry = entry
bs = wx.GridBagSizer(vgap=5, hgap=5)
self.numEdit = wx.lib.intctrl.IntCtrl( self, style=wx.TE_RIGHT, value=int(self.entry.num), allow_none=False, min=1, max=9999 )
self.okBtn = wx.Button( self, wx.ID_OK )
self.Bind( wx.EVT_BUTTON, self.onOK, self.okBtn )
self.cancelBtn = wx.Button( self, wx.ID_CANCEL )
self.Bind( wx.EVT_BUTTON, self.onCancel, self.cancelBtn )
border = 8
bs.Add( wx.StaticText( self, label = u'{}: {} {}: {}'.format(
_('Rider Lap'), self.entry.lap,
_('Race Time'), Utils.formatTime(self.entry.t,True)) ),
pos=(0,0), span=(1,2), border = border, flag=wx.GROW|wx.ALL )
bs.Add( wx.StaticText( self, label = u'{}:'.format(_('Original')) ),
pos=(1,0), span=(1,1), border = border, flag=wx.TOP|wx.BOTTOM|wx.LEFT|wx.ALIGN_RIGHT )
bs.Add( wx.StaticText( self, label = u'{}'.format(self.entry.num) ),
pos=(1,1), span=(1,1), border = border, flag=wx.TOP|wx.BOTTOM|wx.RIGHT|wx.ALIGN_BOTTOM )
shiftOptions = [_('Before Entry'), _('After Entry')]
self.beforeAfterBox = wx.RadioBox( self, wx.ID_ANY, _('Insert'), wx.DefaultPosition, wx.DefaultSize, shiftOptions, 2, wx.RA_SPECIFY_COLS )
bs.Add( self.beforeAfterBox, pos=(2,0), span=(1,2), border = border, flag=wx.TOP|wx.LEFT|wx.ALIGN_RIGHT )
bs.Add( wx.StaticText( self, label = u'{}'.format(_('Number')) ),
pos=(3,0), span=(1,1), border = border, flag=wx.TOP|wx.LEFT|wx.ALIGN_RIGHT )
bs.Add( self.numEdit,
pos=(3,1), span=(1,1), border = border, flag=wx.TOP|wx.RIGHT|wx.ALIGN_BOTTOM )
bs.Add( self.okBtn, pos=(4, 0), span=(1,1), border = border, flag=wx.ALL )
self.okBtn.SetDefault()
bs.Add( self.cancelBtn, pos=(4, 1), span=(1,1), border = border, flag=wx.ALL|wx.ALIGN_RIGHT )
self.SetSizerAndFit(bs)
bs.Fit( self )
self.CentreOnParent(wx.BOTH)
wx.CallAfter( self.SetFocus )
def onOK( self, event ):
num = self.numEdit.GetValue()
if not num or num == self.entry.num:
return
tAdjust = 0.0001 + random.random() / 10000.0 # Add some randomness so that all inserted times will be unique.
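		# The offset is 0.1-0.2 ms, well below the displayed precision, so it
		# breaks ties between otherwise identical timestamps without visibly
		# changing the entry time.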
if self.beforeAfterBox.GetSelection() == 0:
tAdjust = -tAdjust
tInsert = self.entry.t + tAdjust
undo.pushState()
with Model.LockRace() as race:
rider = race.getRider( num )
race.numTimeInfo.add( num, tInsert )
race.addTime( num, tInsert + ((rider.firstTime or 0.0) if race.isTimeTrial else 0.0) )
Utils.refresh()
self.EndModal( wx.ID_OK )
def onCancel( self, event ):
self.EndModal( wx.ID_CANCEL )
#------------------------------------------------------------------------------------------------
class SplitNumberDialog( wx.Dialog ):
def __init__( self, parent, entry, id = wx.ID_ANY ):
wx.Dialog.__init__( self, parent, id, "Split Number",
style=wx.DEFAULT_DIALOG_STYLE|wx.TAB_TRAVERSAL )
self.entry = entry
bs = wx.GridBagSizer(vgap=5, hgap=5)
self.numEdit1 = wx.lib.intctrl.IntCtrl( self, style=wx.TE_RIGHT, value=int(self.entry.num), allow_none=False, min=1, max=9999 )
self.numEdit2 = wx.lib.intctrl.IntCtrl( self, style=wx.TE_RIGHT, value=int(self.entry.num), allow_none=False, min=1, max=9999 )
self.okBtn = wx.Button( self, wx.ID_OK )
self.Bind( wx.EVT_BUTTON, self.onOK, self.okBtn )
self.cancelBtn = wx.Button( self, wx.ID_CANCEL )
self.Bind( wx.EVT_BUTTON, self.onCancel, self.cancelBtn )
border = 8
bs.Add( wx.StaticText( self, label = u'{}: {} {}: {}'.format(
_('Rider Lap'), self.entry.lap,
_('Race Time'), Utils.formatTime(self.entry.t,True)) ),
pos=(0,0), span=(1,2), border = border, flag=wx.GROW|wx.ALL )
bs.Add( wx.StaticText( self, label = _('Num1:') ),
pos=(1,0), span=(1,1), border = border, flag=wx.TOP|wx.BOTTOM|wx.LEFT|wx.ALIGN_RIGHT )
bs.Add( self.numEdit1,
pos=(1,1), span=(1,1), border = border, flag=wx.TOP|wx.BOTTOM|wx.RIGHT|wx.ALIGN_BOTTOM )
bs.Add( wx.StaticText( self, label =_('Num2:') ),
pos=(2,0), span=(1,1), border = border, flag=wx.TOP|wx.BOTTOM|wx.LEFT|wx.ALIGN_RIGHT )
bs.Add( self.numEdit2,
pos=(2,1), span=(1,1), border = border, flag=wx.TOP|wx.BOTTOM|wx.RIGHT|wx.ALIGN_BOTTOM )
bs.Add( self.okBtn, pos=(3, 0), span=(1,1), border = border, flag=wx.ALL )
self.okBtn.SetDefault()
bs.Add( self.cancelBtn, pos=(3, 1), span=(1,1), border = border, flag=wx.ALL|wx.ALIGN_RIGHT )
self.SetSizerAndFit(bs)
bs.Fit( self )
self.CentreOnParent(wx.BOTH)
wx.CallAfter( self.SetFocus )
def onOK( self, event ):
num1 = self.numEdit1.GetValue()
num2 = self.numEdit2.GetValue()
if not num1 or not num2 or num1 == num2:
return
t1 = self.entry.t
		t2 = self.entry.t + 0.0001 + random.random() / 10000.0	# Guarantee t2 > t1 so the split entries stay unique, matching InsertNumberDialog.
undo.pushState()
with Model.LockRace() as race:
rider = race.getRider( self.entry.num )
race.numTimeInfo.delete( self.entry.num, self.entry.t )
race.numTimeInfo.add( num1, t1 )
race.numTimeInfo.add( num2, t2 )
race.deleteTime( self.entry.num, self.entry.t )
race.addTime( num1, t1 + ((rider.firstTime or 0.0) if race.isTimeTrial else 0.0) )
race.addTime( num2, t2 + ((rider.firstTime or 0.0) if race.isTimeTrial else 0.0) )
Utils.refresh()
self.EndModal( wx.ID_OK )
def onCancel( self, event ):
self.EndModal( wx.ID_CANCEL )
#------------------------------------------------------------------------------------------------
@logCall
def CorrectNumber( parent, entry ):
dlg = CorrectNumberDialog( parent, entry )
dlg.ShowModal()
dlg.Destroy()
@logCall
def ShiftNumber( parent, entry ):
dlg = ShiftNumberDialog( parent, entry )
dlg.ShowModal()
dlg.Destroy()
@logCall
def InsertNumber( parent, entry ):
dlg = InsertNumberDialog( parent, entry )
dlg.ShowModal()
dlg.Destroy()
@logCall
def SplitNumber( parent, entry ):
if (entry.lap or 0) == 0:
return
dlg = SplitNumberDialog( parent, entry )
dlg.ShowModal()
dlg.Destroy()
@logCall
def DeleteEntry( parent, entry ):
if (entry.lap or 0) == 0:
return
race = Model.race
raceStartTimeOfDay = Utils.StrToSeconds(race.startTime.strftime('%H:%M:%S.%f')) if race and race.startTime else None
dlg = wx.MessageDialog(parent,
u'{}: {}\n{}: {}\n{}: {}\n{}: {}\n\n{}?'.format(
_('Bib'), entry.num,
_('Lap'), entry.lap,
_('Race Time'), Utils.formatTime(entry.t, True),
_('Clock Time'), Utils.formatTime(entry.t + raceStartTimeOfDay, True) if raceStartTimeOfDay is not None else u'',
_('Confirm Delete')), _('Delete Entry'),
wx.OK | wx.CANCEL | wx.ICON_QUESTION )
# dlg.CentreOnParent(wx.BOTH)
if dlg.ShowModal() == wx.ID_OK:
undo.pushState()
with Model.LockRace() as race:
if race:
race.numTimeInfo.delete( entry.num, entry.t )
race.deleteTime( entry.num, entry.t )
Utils.refresh()
dlg.Destroy()
@logCall
def SwapEntry( a, b ):
race = Model.race
if not race:
return
riderA = race.getRider( a.num )
riderB = race.getRider( b.num )
# Add some numeric noise if the times are equal.
if a.t == b.t:
rAdjust = random.random() / 100000.0
if a.num < b.num:
a_tNew, b_tNew = a.t, b.t + rAdjust
else:
a_tNew, b_tNew = a.t + rAdjust, b.t
else:
a_tNew, b_tNew = a.t, b.t
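	# When the timestamps are identical, one entry is nudged later by a tiny
	# random amount so both entries remain unique after the swap.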
race.numTimeInfo.change( a.num, a.t, b_tNew, Model.NumTimeInfo.Swap )
race.numTimeInfo.change( b.num, b.t, a_tNew, Model.NumTimeInfo.Swap )
race.deleteTime( a.num, a.t )
race.deleteTime( b.num, b.t )
race.addTime( a.num, b_tNew + ((riderB.firstTime or 0.0) if race.isTimeTrial else 0.0) )
race.addTime( b.num, a_tNew + ((riderA.firstTime or 0.0) if race.isTimeTrial else 0.0) )
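# Usage sketch (hypothetical entries a and b from the same race): SwapEntry(a, b)
# reassigns each timestamp to the other bib via deleteTime/addTime, adding the
# rider's start offset back in for time trials.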
class StatusChangeDialog( wx.Dialog ):
def __init__( self, parent, message, title, t=None, externalData=None, id=wx.ID_ANY ):
wx.Dialog.__init__( self, parent, id, title,
style=wx.DEFAULT_DIALOG_STYLE|wx.TAB_TRAVERSAL )
font = wx.Font(12, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)
self.message = wx.StaticText( self, label=message )
self.message.SetFont( font )
if externalData is not None:
self.externalData = wx.StaticText( self, label=externalData )
self.externalData.SetFont( font )
else:
self.externalData = None
if t is not None:
self.entryTime = wx.CheckBox( self, label=u'{}: {}'.format(_('and Enter Last Lap Time at'), Utils.formatTime(t)) )
self.entryTime.SetValue( True )
self.entryTime.SetFont( font )
else:
self.entryTime = None
self.okBtn = wx.Button( self, wx.ID_OK )
self.Bind( wx.EVT_BUTTON, self.onOK, self.okBtn )
self.cancelBtn = wx.Button( self, wx.ID_CANCEL )
self.Bind( wx.EVT_BUTTON, self.onCancel, self.cancelBtn )
border = 16
vs = wx.BoxSizer( wx.VERTICAL )
vs.Add( self.message, flag=wx.ALL, border=border )
if self.externalData:
vs.Add( self.externalData, flag=wx.RIGHT|wx.LEFT|wx.BOTTOM, border=border )
if self.entryTime:
vs.Add( self.entryTime, flag=wx.RIGHT|wx.LEFT|wx.BOTTOM, border=border )
hs = wx.BoxSizer( wx.HORIZONTAL )
hs.Add( self.okBtn, flag=wx.ALL, border = border )
self.okBtn.SetDefault()
hs.AddStretchSpacer()
hs.Add( self.cancelBtn, flag=wx.ALL, border = border )
vs.Add( hs, flag=wx.EXPAND )
self.SetSizerAndFit( vs )
self.CentreOnParent(wx.BOTH)
wx.CallAfter( self.SetFocus )
def getSetEntryTime( self ):
return self.entryTime and self.entryTime.IsChecked()
def onOK( self, event ):
self.EndModal( wx.ID_OK )
def onCancel( self, event ):
self.EndModal( wx.ID_CANCEL )
def DoStatusChange( parent, num, message, title, newStatus, lapTime=None ):
if num is None:
return False
race = Model.race
externalData = []
try:
excelLink = race.excelLink
externalInfo = excelLink.read()
for f in ['LastName', 'FirstName', 'Team']:
try:
externalData.append( six.text_type(externalInfo[num][f] ) )
if f == 'Team':
externalData[-1] = u'({})'.format(externalData[-1])
except KeyError:
pass
if len(externalData) == 3: # Format the team name slightly differently.
externalData = u'{}: {}'.format( six.text_type(num), u', '.join(externalData[:-1]) ) + u' ' + externalData[-1]
else:
externalData = u'{}: {}'.format( six.text_type(num), u', '.join(externalData) ) if externalData else None
	except Exception:
		externalData = None
d = StatusChangeDialog(parent, message=message.format(num), title=title, externalData=externalData, t=lapTime)
ret = d.ShowModal()
lapTime = lapTime if d.getSetEntryTime() else None
d.Destroy()
if ret != wx.ID_OK:
return False
undo.pushState()
with Model.LockRace() as race:
if not race:
return False
if lapTime:
race.addTime( num, lapTime )
rider = race.getRider( num )
rider.setStatus( newStatus )
race.setChanged()
Utils.refresh()
Utils.refreshForecastHistory()
return True
def getActionMessage( actionName ):
return actionName + u' {}?'
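# e.g. getActionMessage(_('DNF')) yields u'DNF {}?', which DoStatusChange later
# formats with the rider's bib number.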
@logCall
def DoDNF( parent, num, lapTime=None ):
return DoStatusChange( parent, num, getActionMessage(_('DNF')), _('Confirm Did Not FINISH'), Model.Rider.DNF, lapTime )
@logCall
def DoPull( parent, num, lapTime=None ):
return DoStatusChange( parent, num, getActionMessage(_('Pull')), _('Confirm PULL Rider'), Model.Rider.Pulled, lapTime)
@logCall
def DoDNS( parent, num, lapTime=None ):
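	# Note: lapTime is not forwarded here, so no last-lap time is offered for DNS.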
return DoStatusChange( parent, num, getActionMessage(_('DNS')), _('Confirm Did Not START'), Model.Rider.DNS )
@logCall
def DoDQ( parent, num, lapTime=None ):
return DoStatusChange( parent, num, getActionMessage(_('DQ')), _('Confirm Disqualify'), Model.Rider.DQ )
@logCall
def AddLapSplits( num, lap, times, splits ):
undo.pushState()
with Model.LockRace() as race:
rider = race.riders[num]
try:
tLeft = times[lap-1]
tRight = times[lap]
# Split the first lap time to the same ratio as the distances.
category = race.getCategory( num )
if ( lap == 1 and
category is not None and
category.distanceType == category.DistanceByLap and
category.distance and category.firstLapDistance and
category.distance != category.firstLapDistance
):
flr = float(category.firstLapDistance) / float(category.distance)
splitTime = (tRight - tLeft) / (flr + (splits-1))
firstLapSplitTime = splitTime * flr
else:
splitTime = firstLapSplitTime = (tRight - tLeft) / float(splits)
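			# Worked example: a 2 km first lap with 1 km later laps gives flr = 2.0;
			# for tRight - tLeft = 960s and splits = 3, splitTime = 960/(2+2) = 240s
			# and the first inserted split lands at tLeft + 480s.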
newTime = tLeft
for i in range( 1, splits ):
newTime += (firstLapSplitTime if i == 1 else splitTime)
race.numTimeInfo.add( num, newTime, Model.NumTimeInfo.Split )
race.addTime( num, newTime + ((rider.firstTime or 0.0) if race.isTimeTrial else 0.0) )
return True
except (TypeError, KeyError, ValueError, IndexError) as e:
Utils.logException( e, sys.exc_info() )
return False
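# Usage sketch (hypothetical riderTimes list): AddLapSplits( num, 2, riderTimes, 3 )
# inserts two synthetic split times between riderTimes[1] and riderTimes[2].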
if __name__ == '__main__':
app = wx.App( False )
frame = wx.Frame( None )
d = CorrectNumberDialog( frame, Model.Entry( 110, 3, 60*4+7, False ) )
d.Show()
app.MainLoop()