ext | sha | content
---|---|---|
py | 1a2edd21b8b9d28d0c8c69efd0ead187dc60aed9 | # -*- coding: utf-8 -*-
from sqlalchemy import Column, ForeignKey, Integer, Text
from pokr.database import Base
class MeetingAttendee(Base):
__tablename__ = 'meeting_attendee'
id = Column(Integer, autoincrement=True, primary_key=True)
meeting_id = Column(ForeignKey('meeting.id'), nullable=False, index=True)
person_id = Column(ForeignKey('person.id'), nullable=False, index=True)
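# Minimal usage sketch (commented out): it assumes `pokr.database` also exposes
# an `engine`/`Session` bound to `Base`, and that matching Meeting/Person rows
# already exist -- both are assumptions, not guaranteed by this model alone.
#
#   from pokr.database import engine, Session
#   Base.metadata.create_all(engine)                       # create the table if missing
#   session = Session()
#   session.add(MeetingAttendee(meeting_id=1, person_id=2))
#   session.commit()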
|
py | 1a2edd30755631a304b9ffae0330b4db1f1274e8 | import numpy as np
from .._helpers import _writer_map, read, reader_map, write
def add_args(parser):
parser.add_argument("infile", type=str, help="mesh file to be read from")
parser.add_argument(
"--input-format",
"-i",
type=str,
choices=sorted(list(reader_map.keys())),
help="input file format",
default=None,
)
parser.add_argument(
"--output-format",
"-o",
type=str,
choices=sorted(list(_writer_map.keys())),
help="output file format",
default=None,
)
parser.add_argument(
"--ascii",
"-a",
action="store_true",
help="write in ASCII format variant (where applicable, default: binary)",
)
parser.add_argument("outfile", type=str, help="mesh file to be written to")
parser.add_argument(
"--float-format",
"-f",
type=str,
help="float format used in output ASCII files (default: .16e)",
)
parser.add_argument(
"--sets-to-int-data",
"-s",
action="store_true",
help="if possible, convert sets to integer data (useful if the output type does not support sets)",
)
parser.add_argument(
"--int-data-to-sets",
"-d",
action="store_true",
help="if possible, convert integer data to sets (useful if the output type does not support integer data)",
)
def convert(args):
# read mesh data
mesh = read(args.infile, file_format=args.input_format)
# Some converters (like VTK) require `points` to be contiguous.
mesh.points = np.ascontiguousarray(mesh.points)
if args.sets_to_int_data:
mesh.point_sets_to_data()
mesh.cell_sets_to_data()
if args.int_data_to_sets:
for key in mesh.point_data:
mesh.point_data_to_sets(key)
for key in mesh.cell_data:
mesh.cell_data_to_sets(key)
# write it out
kwargs = {"file_format": args.output_format}
if args.float_format is not None:
kwargs["float_fmt"] = args.float_format
if args.ascii:
kwargs["binary"] = False
write(args.outfile, mesh, **kwargs)
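# Hedged sketch of a CLI entry point: `add_args` and `convert` are used exactly
# as defined above, while the parser description and the main-guard wiring are
# illustrative assumptions.
if __name__ == "__main__":
    import argparse

    _parser = argparse.ArgumentParser(description="convert between mesh formats")
    add_args(_parser)
    convert(_parser.parse_args())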
|
py | 1a2ede8ccc24684c26563373e68bc1984c47623a | from sklearn.datasets import load_boston
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_predict, cross_val_score
import matplotlib.pyplot as plt
import pandas as pd
# load the data set we'll be working with; in this case the Boston housing data
boston = load_boston()
boston_df = pd.DataFrame(data=boston.data, columns=boston.feature_names) # get it into a pandas data frame
y = pd.DataFrame(data=boston.target, columns=['MEDV'])  # median home value, the regression target
X = boston_df[['LSTAT', 'AGE']]
# boston_df.describe() # take a look at the data
boston = None # help garbage collector
# Task 2) make a linear regression model with LSTAT+AGE to predict median value
lr1 = LinearRegression() # create the object
lr1.fit(X, y)
# cross_val_predict returns an array of the same size as `y` where each entry
# is a prediction obtained by cross validation:
predicted = cross_val_predict(lr1, X, y, cv=10)
scores = cross_val_score(lr1, X, y, cv=10)
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
fig, ax = plt.subplots()
ax.scatter(y, predicted, edgecolors=(0, 0, 0)) # predicted values
ax.plot([y.min(), y.max()], [y.min(), y.max()], 'k--', lw=4) # identity line (perfect prediction)
ax.set_xlabel('Measured')
ax.set_ylabel('Predicted')
# show the graph
plt.show()
|
py | 1a2edeb41300d9909b9d2b82d2967da99d2b7ad2 | ###########################################################################
### Estimation of Slope along the boundary using the buffer distance ###
### Author : Lakshmi E ###
### Last Edit: 13-April-2020 ###
###########################################################################
import arcpy
import os,glob
import numpy as np
from arcpy.sa import *
from arcpy import env
import dbf
import csv
# work in the current directory
env.workspace=(input("give the current directory:")) #'C:\Users\Laks\Desktop\REGSim module'
dirpath = os.getcwd()
#assign the buffer distance
buffer_dist = input('Buffer distance between the study area (meters):')
num_pts = input('no. of points considered across the boundary:')
# Load required toolboxes
arcpy.ImportToolbox(".\Module\CreatePointsLines.tbx")
arcpy.CheckOutExtension("spatial")
# create buffer in and out
def buffer(bound):
print('Creating buffer inside and outside the boundary area...')
arcpy.Buffer_analysis(bound, 'buffin{0}.shp'.format(buffer_dist),'-{0}'.format(buffer_dist),'FULL','ROUND','NONE','')
arcpy.Buffer_analysis(bound, 'bufout{0}.shp'.format(buffer_dist),'{0}'.format(buffer_dist),'FULL','ROUND','NONE','')
bound='bound_hmda.shp'
buffer(bound)
# create points to the feature class
print('Converting polygon to line feature class...')
def ext_pts(bound,boundin,boundout,bufin,bufout):
list=[bound,boundin,boundout,bufin,bufout]
for i in list:
print(i)
arcpy.FeatureToLine_management(i,'{0}_line.shp'.format(i[:-4]),'','ATTRIBUTES')
arcpy.AddField_management('{0}_line.shp'.format(i[:-4]),'Length','FLOAT','','','','','NULLABLE','NON_REQUIRED',"")
arcpy.CalculateField_management('{0}_line.shp'.format(i[:-4]), "Length", "!SHAPE.Length!", "PYTHON", "")
length = arcpy.da.SearchCursor('{0}_line.shp'.format(i[:-4]), "Length").next()[0]
dist_intv = length/num_pts #point_num
arcpy.CreatePointsLines_CreatePointsLines('{0}_line.shp'.format(i[:-4]),'INTERVAL BY DISTANCE', 'BEGINNING','NO','',dist_intv,'NO','{0}_pts.shp'.format(i[:-4]))
print('Created points to the feature class...')
bound = 'bound_hmda.shp'
boundin = 'bndin_hmda.shp'
boundout = 'bndou_hmda.shp'
bufin = 'buffin{0}.shp'.format(buffer_dist)
bufout = 'bufout{0}.shp'.format(buffer_dist)
ext_pts(bound,boundin,boundout,bufin,bufout)
# extract elevation value to the points
print('Extracting the elevation data from the raster to the point featureclass...')
def pts_value(raster,list):
for i in raster:
print(i)
ExtractValuesToPoints('bound_hmda_pts.shp','{0}'.format(i),'bound{1}_{0}_extrpts{2}_{3}.shp'.format(i[9:12],buffer_dist,num_pts,i[2:4]),'INTERPOLATE','VALUE_ONLY')
arcpy.AddField_management('bound{1}_{0}_extrpts{2}_{3}.shp'.format(i[9:12],buffer_dist,num_pts,i[2:4]),"Slope","DOUBLE","", "", "", "", "NULLABLE", "NON_REQUIRED", "")
for j,z in zip(list,list_bound):
print(j)
print(z)
ExtractValuesToPoints('{0}_pts.shp'.format(j[:-4]),'{0}'.format(i),'{0}_{1}_extrpts.shp'.format(j[0:5],i[9:12]),'INTERPOLATE','VALUE_ONLY')
ExtractValuesToPoints('{0}_pts.shp'.format(z[:-4]),'{0}'.format(i),'{0}_{1}_extrpts.shp'.format(z[0:5],i[9:12]),'INTERPOLATE','VALUE_ONLY')
for k,l in zip(list_bound,list):
arcpy.Near_analysis('{0}_{1}_extrpts.shp'.format(k[0:5],i[9:12]),'{0}_{1}_extrpts.shp'.format(l[0:5],i[9:12]),'','NO_LOCATION','NO_ANGLE')
arcpy.JoinField_management('{0}_{1}_extrpts.shp'.format(k[0:5],i[9:12]),'NEAR_FID','{0}_{1}_extrpts.shp'.format(l[0:5],i[9:12]),"FID","#")
arcpy.AddField_management('{0}_{1}_extrpts.shp'.format(k[0:5],i[9:12]), "Slope", "FLOAT", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
arcpy.AddField_management('{0}_{1}_extrpts.shp'.format(l[0:5],i[9:12]), "Slope", "FLOAT", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
arcpy.CalculateField_management('bndou_{0}_extrpts.shp'.format(i[9:12]), "Slope", "(!RASTERVALU!- !RASTERVA_1!) / !NEAR_DIST!", "PYTHON_9.3", "")
arcpy.CalculateField_management('bndin_{0}_extrpts.shp'.format(i[9:12]), "Slope", "(!RASTERVA_1!-!RASTERVALU!) / !NEAR_DIST!", "PYTHON_9.3", "")
raster=sorted(glob.glob("*_GWL_*.tif"))
list=['buffin{0}.shp'.format(buffer_dist),'bufout{0}.shp'.format(buffer_dist)]
list_bound = ['bndin_hmda.shp','bndou_hmda.shp']
pts_value(raster,list)
# estimate the average slope
print('Estimating slope in each point of the boundary area...')
filesav = []
def avg_sl(raster):
for i in raster:
list=sorted(glob.glob('bnd*{0}_extrpts.dbf'.format(i[9:12])))
print(list)
tabin=dbf.Table('{0}'.format(list[0]))
tabin.open()
tabout=dbf.Table('{0}'.format(list[1]))
tabout.open()
tabbou=dbf.Table('bound{1}_{0}_extrpts{2}_{3}.dbf'.format(i[9:12],buffer_dist,num_pts,i[2:4]))
tabbou.open(mode=dbf.READ_WRITE)
for l,j,k in zip(tabin,tabout,range(0,len(tabbou))):
mas=l[-1]
sla=j[-1]
res=((mas+sla)/2)
with tabbou[k] as record:
record.slope=res
tabin.close()
tabout.close()
tabbou.close()
print(tabbou)
f = 'bound{1}_{0}_extrpts{2}_{3}'.format(i[9:12],buffer_dist,num_pts,i[2:4])
filesav.append(f)
raster=sorted(glob.glob("*_GWL_*.tif"))
avg_sl(raster)
print(' Saving the output file')
with open('output.csv', 'wb') as output:
csvwriter = csv.writer(output,dialect='excel')
for row in filesav:
csvwriter.writerow([row])
output.close()
#end of the script
|
py | 1a2edef61dc501658997a39df402d79fc3b3143b | import leveldb
db = leveldb.LevelDB('./db')
# single put
db.Put(b'hello', b'hello world')
print(db.Get(b'hello').decode('utf-8'))
# multiple put/delete applied atomically, and committed to disk
batch = leveldb.WriteBatch()
batch.Put(b'hello', b'world')
batch.Put(b'hello again', b'world')
batch.Delete(b'hello')
db.Write(batch, sync = True)
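# Sketch (assuming py-leveldb's RangeIter iterator): list the keys left behind
# by the operations above -- only b'hello again' should remain after the batch.
for key, value in db.RangeIter():
    print(bytes(key), bytes(value))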
|
py | 1a2edfe6d1928a9d30ef2f2af1ae52daeae8cdd7 | import unittest
import io
import os.path
import tempfile
import littlecheck
class LittlecheckTest(unittest.TestCase):
@classmethod
def setUpClass(self):
""" Switch to test/files directory. """
test_dir = os.path.dirname(os.path.abspath(__file__))
os.chdir(os.path.join(test_dir, "files"))
def do_1_path_test(self, name, skip=False):
""" Run a single test. The name is the test name.
The input file is the name with .py extension, the expected
output of littlecheck is the name with .expected extension.
"""
test_path = name + ".py" if not "." in name else name
expected_output_path = name + ".expected"
subs = {"%": "%", "s": test_path}
conf = littlecheck.Config()
failures = []
success = littlecheck.check_path(test_path, subs, conf, failures.append)
failures_message = "\n".join([f.message() for f in failures]).strip()
with io.open(expected_output_path, "r", encoding="utf-8") as fd:
expect_text = fd.read().strip()
expect_success = not expect_text
self.assertEqual(failures_message, expect_text)
if skip:
self.assertEqual(success, littlecheck.SKIP)
else:
self.assertEqual(success, expect_success)
def test_py_ok(self):
self.do_1_path_test("python_ok")
def test_py_err1(self):
self.do_1_path_test("python_err1")
def test_py_middle_error(self):
self.do_1_path_test("python_middle_error")
def test_py_missing_output(self):
self.do_1_path_test("python_missing_output")
def test_py_multiple_errour_output(self):
self.do_1_path_test("python_multipe_error_annotation_lines")
def test_py_extra_output(self):
self.do_1_path_test("python_extra_output")
def test_py_out_vs_err(self):
self.do_1_path_test("python_out_vs_err")
def test_py_path(self):
self.do_1_path_test("python_path_cmd")
def test_py_shebang(self):
self.do_1_path_test("python_shebang")
def test_py_color(self):
self.do_1_path_test("python_color")
def test_inline_check(self):
self.do_1_path_test("inline-check")
def test_py_whitespace(self):
self.do_1_path_test("python_whitespace")
def test_py_replace(self):
self.do_1_path_test("python_doublereplace")
def test_skip(self):
self.do_1_path_test("skip", skip=True)
def test_require_succeeds(self):
self.do_1_path_test("no_skip", skip=False)
def test_require_succeeds(self):
self.do_1_path_test("no_skip", skip=False)
def test_exe_found(self):
self.do_1_path_test("exe_found")
def test_exe_not_found(self):
try:
self.do_1_path_test("exe_not_found")
except littlecheck.CheckerError:
return True
self.fail("expected littlecheck.CheckerError to be raised")
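# Convenience sketch: allow running this test module directly with the
# standard unittest runner.
if __name__ == "__main__":
    unittest.main()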
|
py | 1a2ee250cabe087344af5c065384497ec20f9b02 | from __future__ import absolute_import, unicode_literals
from datetime import date
from django.db import models
from modelcluster.contrib.taggit import ClusterTaggableManager
from modelcluster.fields import ParentalKey
from taggit.models import TaggedItemBase
from wagtail.utils.pagination import paginate
from wagtail.wagtailadmin.edit_handlers import (
FieldPanel, InlinePanel, MultiFieldPanel, PageChooserPanel)
from wagtail.wagtailcore.fields import RichTextField
from wagtail.wagtailcore.models import Orderable, Page
from wagtail.wagtaildocs.edit_handlers import DocumentChooserPanel
from wagtail.wagtailimages.edit_handlers import ImageChooserPanel
from wagtail.wagtailsearch import index
# ABSTRACT MODELS
# =============================
class AbstractLinkFields(models.Model):
link_external = models.URLField("External link", blank=True)
link_page = models.ForeignKey(
'wagtailcore.Page',
null=True,
blank=True,
related_name='+',
on_delete=models.CASCADE
)
link_document = models.ForeignKey(
'wagtaildocs.Document',
null=True,
blank=True,
related_name='+',
on_delete=models.CASCADE
)
@property
def link(self):
if self.link_page:
return self.link_page.url
elif self.link_document:
return self.link_document.url
else:
return self.link_external
api_fields = ('link', )
panels = [
FieldPanel('link_external'),
PageChooserPanel('link_page'),
DocumentChooserPanel('link_document'),
]
class Meta:
abstract = True
class AbstractRelatedLink(AbstractLinkFields):
title = models.CharField(max_length=255, help_text="Link title")
api_fields = ('title', ) + AbstractLinkFields.api_fields
panels = [
FieldPanel('title'),
MultiFieldPanel(AbstractLinkFields.panels, "Link"),
]
class Meta:
abstract = True
class AbstractCarouselItem(AbstractLinkFields):
image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
embed_url = models.URLField("Embed URL", blank=True)
caption = models.CharField(max_length=255, blank=True)
api_fields = (
'image',
'embed_url',
'caption',
) + AbstractLinkFields.api_fields
panels = [
ImageChooserPanel('image'),
FieldPanel('embed_url'),
FieldPanel('caption'),
MultiFieldPanel(AbstractLinkFields.panels, "Link"),
]
class Meta:
abstract = True
class ContactFieldsMixin(models.Model):
telephone = models.CharField(max_length=20, blank=True)
email = models.EmailField(blank=True)
address_1 = models.CharField(max_length=255, blank=True)
address_2 = models.CharField(max_length=255, blank=True)
city = models.CharField(max_length=255, blank=True)
country = models.CharField(max_length=255, blank=True)
post_code = models.CharField(max_length=10, blank=True)
api_fields = (
'telephone',
'email',
'address_1',
'address_2',
'city',
'country',
'post_code',
)
panels = [
FieldPanel('telephone'),
FieldPanel('email'),
FieldPanel('address_1'),
FieldPanel('address_2'),
FieldPanel('city'),
FieldPanel('country'),
FieldPanel('post_code'),
]
class Meta:
abstract = True
# PAGE MODELS
# =============================
# Home page
class HomePage(Page):
page_ptr = models.OneToOneField(Page, parent_link=True, related_name='+', on_delete=models.CASCADE)
body = RichTextField(blank=True)
api_fields = (
'body',
'carousel_items',
'related_links',
)
search_fields = Page.search_fields + [
index.SearchField('body'),
]
class Meta:
verbose_name = "homepage"
class HomePageCarouselItem(Orderable, AbstractCarouselItem):
page = ParentalKey('HomePage', related_name='carousel_items', on_delete=models.CASCADE)
class HomePageRelatedLink(Orderable, AbstractRelatedLink):
page = ParentalKey('HomePage', related_name='related_links', on_delete=models.CASCADE)
HomePage.content_panels = Page.content_panels + [
FieldPanel('body', classname="full"),
InlinePanel('carousel_items', label="Carousel items"),
InlinePanel('related_links', label="Related links"),
]
# Standard pages
class StandardPage(Page):
page_ptr = models.OneToOneField(Page, parent_link=True, related_name='+', on_delete=models.CASCADE)
intro = RichTextField(blank=True)
body = RichTextField(blank=True)
feed_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
api_fields = (
'intro',
'body',
'feed_image',
'carousel_items',
'related_links',
)
search_fields = Page.search_fields + [
index.SearchField('intro'),
index.SearchField('body'),
]
class StandardPageCarouselItem(Orderable, AbstractCarouselItem):
page = ParentalKey('StandardPage', related_name='carousel_items', on_delete=models.CASCADE)
class StandardPageRelatedLink(Orderable, AbstractRelatedLink):
page = ParentalKey('StandardPage', related_name='related_links', on_delete=models.CASCADE)
StandardPage.content_panels = Page.content_panels + [
FieldPanel('intro', classname="full"),
InlinePanel('carousel_items', label="Carousel items"),
FieldPanel('body', classname="full"),
InlinePanel('related_links', label="Related links"),
]
StandardPage.promote_panels = [
MultiFieldPanel(Page.promote_panels, "Common page configuration"),
ImageChooserPanel('feed_image'),
]
class StandardIndexPage(Page):
page_ptr = models.OneToOneField(Page, parent_link=True, related_name='+', on_delete=models.CASCADE)
intro = RichTextField(blank=True)
feed_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
api_fields = (
'intro',
'feed_image',
'related_links',
)
search_fields = Page.search_fields + [
index.SearchField('intro'),
]
class StandardIndexPageRelatedLink(Orderable, AbstractRelatedLink):
page = ParentalKey('StandardIndexPage', related_name='related_links', on_delete=models.CASCADE)
StandardIndexPage.content_panels = Page.content_panels + [
FieldPanel('intro', classname="full"),
InlinePanel('related_links', label="Related links"),
]
StandardIndexPage.promote_panels = [
MultiFieldPanel(Page.promote_panels, "Common page configuration"),
ImageChooserPanel('feed_image'),
]
# Blog pages
class BlogEntryPage(Page):
page_ptr = models.OneToOneField(Page, parent_link=True, related_name='+', on_delete=models.CASCADE)
body = RichTextField()
tags = ClusterTaggableManager(through='BlogEntryPageTag', blank=True)
date = models.DateField("Post date")
feed_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
api_fields = (
'body',
'tags',
'date',
'feed_image',
'carousel_items',
'related_links',
)
search_fields = Page.search_fields + [
index.SearchField('body'),
]
def get_blog_index(self):
# Find closest ancestor which is a blog index
return BlogIndexPage.ancestor_of(self).last()
class BlogEntryPageCarouselItem(Orderable, AbstractCarouselItem):
page = ParentalKey('BlogEntryPage', related_name='carousel_items', on_delete=models.CASCADE)
class BlogEntryPageRelatedLink(Orderable, AbstractRelatedLink):
page = ParentalKey('BlogEntryPage', related_name='related_links', on_delete=models.CASCADE)
class BlogEntryPageTag(TaggedItemBase):
content_object = ParentalKey('BlogEntryPage', related_name='tagged_items', on_delete=models.CASCADE)
BlogEntryPage.content_panels = Page.content_panels + [
FieldPanel('date'),
FieldPanel('body', classname="full"),
InlinePanel('carousel_items', label="Carousel items"),
InlinePanel('related_links', label="Related links"),
]
BlogEntryPage.promote_panels = [
MultiFieldPanel(Page.promote_panels, "Common page configuration"),
ImageChooserPanel('feed_image'),
FieldPanel('tags'),
]
class BlogIndexPage(Page):
page_ptr = models.OneToOneField(Page, parent_link=True, related_name='+', on_delete=models.CASCADE)
intro = RichTextField(blank=True)
api_fields = (
'intro',
'related_links',
)
search_fields = Page.search_fields + [
index.SearchField('intro'),
]
def get_blog_entries(self):
# Get list of live blog pages that are descendants of this page
entries = BlogEntryPage.objects.descendant_of(self).live()
# Order by most recent date first
entries = entries.order_by('-date')
return entries
def get_context(self, request):
# Get blog entries
entries = self.get_blog_entries()
# Filter by tag
tag = request.GET.get('tag')
if tag:
entries = entries.filter(tags__name=tag)
paginator, entries = paginate(request, entries, page_key='page', per_page=10)
# Update template context
context = super(BlogIndexPage, self).get_context(request)
context['entries'] = entries
return context
class BlogIndexPageRelatedLink(Orderable, AbstractRelatedLink):
page = ParentalKey('BlogIndexPage', related_name='related_links', on_delete=models.CASCADE)
BlogIndexPage.content_panels = Page.content_panels + [
FieldPanel('intro', classname="full"),
InlinePanel('related_links', label="Related links"),
]
# Events pages
class EventPage(Page):
page_ptr = models.OneToOneField(Page, parent_link=True, related_name='+', on_delete=models.CASCADE)
AUDIENCE_CHOICES = (
('public', "Public"),
('private', "Private"),
)
date_from = models.DateField("Start date")
date_to = models.DateField(
"End date",
null=True,
blank=True,
help_text="Not required if event is on a single day"
)
time_from = models.TimeField("Start time", null=True, blank=True)
time_to = models.TimeField("End time", null=True, blank=True)
audience = models.CharField(max_length=255, choices=AUDIENCE_CHOICES)
location = models.CharField(max_length=255)
body = RichTextField(blank=True)
cost = models.CharField(max_length=255)
signup_link = models.URLField(blank=True)
feed_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
api_fields = (
'date_from',
'date_to',
'time_from',
'time_to',
'audience',
'location',
'body',
'cost',
'signup_link',
'feed_image',
'carousel_items',
'related_links',
'speakers',
)
search_fields = Page.search_fields + [
index.SearchField('get_audience_display'),
index.SearchField('location'),
index.SearchField('body'),
]
def get_event_index(self):
# Find closest ancestor which is an event index
return EventIndexPage.objects.ancestor_of(self).last()
class EventPageCarouselItem(Orderable, AbstractCarouselItem):
page = ParentalKey('EventPage', related_name='carousel_items', on_delete=models.CASCADE)
class EventPageRelatedLink(Orderable, AbstractRelatedLink):
page = ParentalKey('EventPage', related_name='related_links', on_delete=models.CASCADE)
class EventPageSpeaker(Orderable, AbstractLinkFields):
page = ParentalKey('EventPage', related_name='speakers', on_delete=models.CASCADE)
first_name = models.CharField("Name", max_length=255, blank=True)
last_name = models.CharField("Surname", max_length=255, blank=True)
image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
api_fields = (
'first_name',
'last_name',
'image',
)
panels = [
FieldPanel('first_name'),
FieldPanel('last_name'),
ImageChooserPanel('image'),
MultiFieldPanel(AbstractLinkFields.panels, "Link"),
]
EventPage.content_panels = Page.content_panels + [
FieldPanel('date_from'),
FieldPanel('date_to'),
FieldPanel('time_from'),
FieldPanel('time_to'),
FieldPanel('location'),
FieldPanel('audience'),
FieldPanel('cost'),
FieldPanel('signup_link'),
InlinePanel('carousel_items', label="Carousel items"),
FieldPanel('body', classname="full"),
InlinePanel('speakers', label="Speakers"),
InlinePanel('related_links', label="Related links"),
]
EventPage.promote_panels = [
MultiFieldPanel(Page.promote_panels, "Common page configuration"),
ImageChooserPanel('feed_image'),
]
class EventIndexPage(Page):
page_ptr = models.OneToOneField(Page, parent_link=True, related_name='+', on_delete=models.CASCADE)
intro = RichTextField(blank=True)
api_fields = (
'intro',
'related_links',
)
search_fields = Page.search_fields + [
index.SearchField('intro'),
]
def get_events(self):
# Get list of live event pages that are descendants of this page
events = EventPage.objects.descendant_of(self).live()
# Filter events list to get ones that are either
# running now or start in the future
events = events.filter(date_from__gte=date.today())
# Order by date
events = events.order_by('date_from')
return events
class EventIndexPageRelatedLink(Orderable, AbstractRelatedLink):
page = ParentalKey('EventIndexPage', related_name='related_links', on_delete=models.CASCADE)
EventIndexPage.content_panels = Page.content_panels + [
FieldPanel('intro', classname="full"),
InlinePanel('related_links', label="Related links"),
]
# Person page
class PersonPage(Page, ContactFieldsMixin):
page_ptr = models.OneToOneField(Page, parent_link=True, related_name='+', on_delete=models.CASCADE)
first_name = models.CharField(max_length=255)
last_name = models.CharField(max_length=255)
intro = RichTextField(blank=True)
biography = RichTextField(blank=True)
image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
feed_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
api_fields = (
'first_name',
'last_name',
'intro',
'biography',
'image',
'feed_image',
'related_links',
) + ContactFieldsMixin.api_fields
search_fields = Page.search_fields + [
index.SearchField('first_name'),
index.SearchField('last_name'),
index.SearchField('intro'),
index.SearchField('biography'),
]
class PersonPageRelatedLink(Orderable, AbstractRelatedLink):
page = ParentalKey('PersonPage', related_name='related_links', on_delete=models.CASCADE)
PersonPage.content_panels = Page.content_panels + [
FieldPanel('first_name'),
FieldPanel('last_name'),
FieldPanel('intro', classname="full"),
FieldPanel('biography', classname="full"),
ImageChooserPanel('image'),
MultiFieldPanel(ContactFieldsMixin.panels, "Contact"),
InlinePanel('related_links', label="Related links"),
]
PersonPage.promote_panels = [
MultiFieldPanel(Page.promote_panels, "Common page configuration"),
ImageChooserPanel('feed_image'),
]
# Contact page
class ContactPage(Page, ContactFieldsMixin):
page_ptr = models.OneToOneField(Page, parent_link=True, related_name='+', on_delete=models.CASCADE)
body = RichTextField(blank=True)
feed_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
api_fields = (
'body',
'feed_image',
) + ContactFieldsMixin.api_fields
search_fields = Page.search_fields + [
index.SearchField('body'),
]
ContactPage.content_panels = Page.content_panels + [
FieldPanel('body', classname="full"),
MultiFieldPanel(ContactFieldsMixin.panels, "Contact"),
]
ContactPage.promote_panels = [
MultiFieldPanel(Page.promote_panels, "Common page configuration"),
ImageChooserPanel('feed_image'),
]
|
py | 1a2ee26475bd8974ad69e81f1b56eb9dc019880d | """Collection of tests for unified linear algebra functions."""
# global
import numpy as np
from hypothesis import given, strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
import ivy.functional.backends.numpy as ivy_np
# vector_to_skew_symmetric_matrix
@given(
input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_numeric_dtypes), 1),
as_variable=st.booleans(),
with_out=st.booleans(),
num_positional_args=st.integers(0, 1),
native_array=st.booleans(),
container=st.booleans(),
instance_method=st.booleans(),
a=st.integers(1, 50),
)
def test_vector_to_skew_symmetric_matrix(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
a,
):
if "float16" or "int8" in input_dtype:
return
helpers.test_array_function(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
"vector_to_skew_symmetric_matrix",
vector=np.random.uniform(size=(a, 3)).astype(input_dtype[0]),
)
# matrix_power
@given(
input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_float_dtypes), 1),
as_variable=st.booleans(),
with_out=st.booleans(),
num_positional_args=st.integers(0, 1),
native_array=st.booleans(),
container=st.booleans(),
instance_method=st.booleans(),
a=st.integers(1, 50),
n=st.integers(-10, 10),
)
def test_matrix_power(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
a,
n,
):
if fw == "torch" and input_dtype == "float16":
return
helpers.test_array_function(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
"matrix_power",
n=n,
x=np.random.uniform(size=(a, a)).astype(input_dtype[0]),
)
# matmul
@given(
input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_numeric_dtypes), 2),
as_variable=helpers.list_of_length(st.booleans(), 2),
with_out=st.booleans(),
num_positional_args=st.integers(0, 1),
native_array=helpers.list_of_length(st.booleans(), 2),
container=helpers.list_of_length(st.booleans(), 2),
instance_method=st.booleans(),
a=st.integers(1, 50),
b=st.integers(1, 50),
c=st.integers(1, 50),
seed=st.integers(0, 2**16 - 1),
)
def test_matmul(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
a,
b,
c,
seed,
):
np.random.seed(seed)
if "float16" or "int8" in input_dtype:
return
helpers.test_array_function(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
"matmul",
rtol=5e-02,
atol=5e-02,
x1=np.random.uniform(size=(a, b)).astype(input_dtype[0]),
x2=np.random.uniform(size=(b, c)).astype(input_dtype[1]),
)
# det
@given(
input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_float_dtypes), 1),
as_variable=st.booleans(),
with_out=st.booleans(),
num_positional_args=st.integers(0, 1),
native_array=st.booleans(),
container=st.booleans(),
instance_method=st.booleans(),
a=st.integers(1, 50),
b=st.integers(1, 50),
)
def test_det(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
a,
b,
):
if "float16" in input_dtype:
return
helpers.test_array_function(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
"det",
x=np.random.uniform(size=(b, a, a)).astype(input_dtype[0]),
)
# eigh
@given(
input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_float_dtypes), 1),
as_variable=st.booleans(),
with_out=st.booleans(),
num_positional_args=st.integers(0, 1),
native_array=st.booleans(),
container=st.booleans(),
instance_method=st.booleans(),
a=st.integers(1, 50),
b=st.integers(1, 50),
)
def test_eigh(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
a,
b,
):
if "float16" in input_dtype:
return
helpers.test_array_function(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
"eigh",
x=np.random.uniform(size=(b, a, a)).astype(input_dtype[0]),
)
# eigvalsh
@given(
input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_float_dtypes), 1),
as_variable=st.booleans(),
with_out=st.booleans(),
num_positional_args=st.integers(0, 1),
native_array=st.booleans(),
container=st.booleans(),
instance_method=st.booleans(),
a=st.integers(1, 50),
b=st.integers(1, 50),
)
def test_eigvalsh(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
a,
b,
):
if "float16" in input_dtype:
return
helpers.test_array_function(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
"eigvalsh",
x=np.random.uniform(size=(b, a, a)).astype(input_dtype[0]),
)
# inv
@given(
input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_float_dtypes), 1),
as_variable=st.booleans(),
with_out=st.booleans(),
num_positional_args=st.integers(0, 1),
native_array=st.booleans(),
container=st.booleans(),
instance_method=st.booleans(),
a=st.integers(1, 50),
b=st.integers(1, 50),
)
def test_inv(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
a,
b,
):
if "float16" in input_dtype:
return
helpers.test_array_function(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
"inv",
x=np.random.uniform(size=(b, a, a)).astype(input_dtype[0]),
)
# matrix_transpose
@given(
input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_numeric_dtypes), 1),
as_variable=st.booleans(),
with_out=st.booleans(),
num_positional_args=st.integers(0, 1),
native_array=st.booleans(),
container=st.booleans(),
instance_method=st.booleans(),
a=st.integers(1, 50),
b=st.integers(1, 50),
)
def test_matrix_transpose(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
a,
b,
):
if "float16" or "int8" in input_dtype:
return
helpers.test_array_function(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
"matrix_transpose",
x=np.random.uniform(size=(a, b)).astype(input_dtype[0]),
)
# outer
@given(
input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_numeric_dtypes), 2),
as_variable=helpers.list_of_length(st.booleans(), 2),
with_out=st.booleans(),
num_positional_args=st.integers(0, 1),
native_array=helpers.list_of_length(st.booleans(), 2),
container=helpers.list_of_length(st.booleans(), 2),
instance_method=st.booleans(),
a=st.integers(1, 50),
b=st.integers(1, 50),
)
def test_outer(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
a,
b,
):
if "float16" or "int8" in input_dtype:
return
helpers.test_array_function(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
"outer",
x1=np.random.uniform(size=a).astype(input_dtype[0]),
x2=np.random.uniform(size=b).astype(input_dtype[1]),
)
# slogdet
@given(
input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_float_dtypes), 1),
as_variable=st.booleans(),
with_out=st.booleans(),
num_positional_args=st.integers(0, 1),
native_array=st.booleans(),
container=st.booleans(),
instance_method=st.booleans(),
a=st.integers(1, 50),
)
def test_slogdet(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
a,
):
if "float16" in input_dtype:
return
helpers.test_array_function(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
"slogdet",
x=np.random.uniform(size=(a, a)).astype(input_dtype[0]),
)
# solve
@given(
input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_float_dtypes), 2),
as_variable=helpers.list_of_length(st.booleans(), 2),
with_out=st.booleans(),
num_positional_args=st.integers(0, 1),
native_array=helpers.list_of_length(st.booleans(), 2),
container=helpers.list_of_length(st.booleans(), 2),
instance_method=st.booleans(),
a=st.integers(1, 50),
)
def test_solve(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
a,
):
if "float16" in input_dtype:
return
helpers.test_array_function(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
"solve",
x1=np.random.uniform(size=(a, a)).astype(input_dtype[0]),
x2=np.random.uniform(size=(a, 1)).astype(input_dtype[1]),
)
# svdvals
@given(
input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_numeric_dtypes), 1),
as_variable=st.booleans(),
with_out=st.booleans(),
num_positional_args=st.integers(0, 1),
native_array=st.booleans(),
container=st.booleans(),
instance_method=st.booleans(),
a=st.integers(1, 50),
b=st.integers(1, 50),
)
def test_svdvals(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
a,
b,
):
if "float16" or "int8" in input_dtype:
return
helpers.test_array_function(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
"svdvals",
x=np.random.uniform(size=(a, b)).astype(input_dtype[0]),
)
# tensordot
@given(
input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_numeric_dtypes), 2),
as_variable=helpers.list_of_length(st.booleans(), 2),
with_out=st.booleans(),
num_positional_args=st.integers(0, 1),
native_array=helpers.list_of_length(st.booleans(), 2),
container=helpers.list_of_length(st.booleans(), 2),
instance_method=st.booleans(),
a=st.integers(1, 50) | st.tuples(st.lists(st.integers()), st.lists(st.integers())),
b=st.integers(1, 50),
c=st.integers(1, 50),
d=st.integers(1, 50),
)
def test_tensordot(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
a,
b,
c,
d,
):
if "float16" or "int8" in input_dtype:
return
helpers.test_array_function(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
"tensordot",
axes=a,
x1=np.random.uniform(size=(b, c)).astype(input_dtype[0]),
x2=np.random.uniform(size=(c, d)).astype(input_dtype[1]),
)
# trace
@given(
input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_numeric_dtypes), 1),
as_variable=st.booleans(),
with_out=st.booleans(),
num_positional_args=st.integers(0, 1),
native_array=st.booleans(),
container=st.booleans(),
instance_method=st.booleans(),
a=st.integers(1, 50),
b=st.integers(1, 50),
c=st.integers(1, 50),
offset=st.integers(-10, 10),
)
def test_trace(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
a,
b,
c,
offset,
):
if "float16" or "int8" in input_dtype:
return
helpers.test_array_function(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
"trace",
offset=offset,
x=np.random.uniform(size=(a, b, c)).astype(input_dtype[0]),
)
# vecdot
@given(
input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_numeric_dtypes), 2),
as_variable=helpers.list_of_length(st.booleans(), 2),
with_out=st.booleans(),
num_positional_args=st.integers(0, 1),
native_array=helpers.list_of_length(st.booleans(), 2),
container=helpers.list_of_length(st.booleans(), 2),
instance_method=st.booleans(),
a=st.integers(-1, 50),
b=st.integers(1, 50),
c=st.integers(1, 50),
)
def test_vecdot(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
a,
b,
c,
):
if "float16" or "int8" in input_dtype:
return
helpers.test_array_function(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
"vecdot",
axes=a,
x1=np.random.uniform(size=(b, c)).astype(input_dtype[0]),
x2=np.random.uniform(size=(b, b)).astype(input_dtype[1]),
)
# vector_norm
@given(
input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_float_dtypes), 1),
as_variable=st.booleans(),
with_out=st.booleans(),
num_positional_args=st.integers(0, 1),
native_array=st.booleans(),
container=st.booleans(),
instance_method=st.booleans(),
a=st.integers(1, 50),
b=st.integers(1, 50),
axis=st.integers(-10, 10) | st.tuples(st.lists(st.integers())),
kd=st.booleans(),
ord=st.integers() | st.floats(),
)
def test_vector_norm(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
a,
b,
axis,
kd,
ord,
):
if "float16" in input_dtype:
return
helpers.test_array_function(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
"vector_norm",
axis=axis,
keepdims=kd,
ord=ord,
x=np.random.uniform(size=(a, b)).astype(input_dtype[0]),
)
# pinv
@given(
input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_float_dtypes), 1),
as_variable=st.booleans(),
with_out=st.booleans(),
num_positional_args=st.integers(0, 1),
native_array=st.booleans(),
container=st.booleans(),
instance_method=st.booleans(),
a=st.integers(1, 50),
b=st.integers(1, 50),
c=st.integers(1, 50),
seed=st.integers(0, 2**4 - 1),
)
def test_pinv(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
a,
b,
c,
seed,
):
if "float16" in input_dtype:
return
np.random.seed(seed)
helpers.test_array_function(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
"pinv",
rtol=5e-02,
x=np.random.uniform(size=(a, b, c)).astype(input_dtype[0]),
)
# qr
@given(
input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_float_dtypes), 1),
as_variable=st.booleans(),
with_out=st.booleans(),
num_positional_args=st.integers(0, 1),
native_array=st.booleans(),
container=st.booleans(),
instance_method=st.booleans(),
a=st.integers(1, 50),
b=st.integers(1, 50),
c=st.integers(1, 50),
mode=st.sampled_from(("reduced", "complete")),
)
def test_qr(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
a,
b,
c,
mode,
):
if "float16" in input_dtype:
return
helpers.test_array_function(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
"qr",
mode=mode,
x=np.random.uniform(size=(a, b, c)).astype(input_dtype[0]),
)
# svd
@given(
input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_float_dtypes), 1),
as_variable=st.booleans(),
with_out=st.booleans(),
num_positional_args=st.integers(0, 1),
native_array=st.booleans(),
container=st.booleans(),
instance_method=st.booleans(),
a=st.integers(1, 50),
b=st.integers(1, 50),
c=st.integers(1, 50),
fm=st.booleans(),
)
def test_svd(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
a,
b,
c,
fm,
):
if "float16" in input_dtype:
return
helpers.test_array_function(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
"svd",
full_matrices=fm,
x=np.random.uniform(size=(a, b, c)).astype(input_dtype[0]),
)
# matrix_norm
@given(
input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_float_dtypes), 1),
as_variable=st.booleans(),
with_out=st.booleans(),
num_positional_args=st.integers(0, 1),
native_array=st.booleans(),
container=st.booleans(),
instance_method=st.booleans(),
a=st.integers(1, 50),
b=st.integers(1, 50),
c=st.integers(1, 50),
kd=st.booleans(),
ord=st.integers(1, 10)
| st.floats(1, 10)
| st.sampled_from(("fro", "nuc", "float('inf')", "-float('inf')")),
)
def test_matrix_norm(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
a,
b,
c,
kd,
ord,
):
if "float16" in input_dtype:
return
helpers.test_array_function(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
"matrix_norm",
keepdims=kd,
ord=ord,
x=np.random.uniform(size=(a, b, c)).astype(input_dtype[0]),
)
# matrix_rank
@given(
input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_float_dtypes), 1),
as_variable=st.booleans(),
with_out=st.booleans(),
num_positional_args=st.integers(0, 1),
native_array=st.booleans(),
container=st.booleans(),
instance_method=st.booleans(),
a=st.integers(1, 50),
b=st.integers(1, 50),
c=st.integers(1, 50),
)
def test_matrix_rank(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
a,
b,
c,
):
if "float16" in input_dtype:
return
helpers.test_array_function(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
"matrix_rank",
rtol=5e-02,
x=np.random.uniform(size=(a, b, c)).astype(input_dtype[0]),
)
# cholesky
@given(
input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_float_dtypes), 1),
as_variable=st.booleans(),
with_out=st.booleans(),
num_positional_args=st.integers(0, 1),
native_array=st.booleans(),
container=st.booleans(),
instance_method=st.booleans(),
a=st.integers(1, 50),
upper=st.booleans(),
)
def test_cholesky(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
a,
upper,
):
if "float16" in input_dtype:
return
helpers.test_array_function(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
"cholesky",
upper=upper,
x=np.random.uniform(size=(a, a)).astype(input_dtype[0]),
)
# cross
@given(
input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_numeric_dtypes), 2),
as_variable=helpers.list_of_length(st.booleans(), 2),
with_out=st.booleans(),
num_positional_args=st.integers(0, 1),
native_array=helpers.list_of_length(st.booleans(), 2),
container=helpers.list_of_length(st.booleans(), 2),
instance_method=st.booleans(),
a=st.integers(1, 50),
b=st.integers(1, 50),
axis=st.integers(-1, 50),
)
def test_cross(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
a,
b,
axis,
):
if "float16" or "int8" in input_dtype:
return
helpers.test_array_function(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
"cross",
axis=axis,
x1=np.random.uniform(size=(a, b)).astype(input_dtype[0]),
x2=np.random.uniform(size=(a, b)).astype(input_dtype[1]),
)
# diagonal
@given(
input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_numeric_dtypes), 1),
as_variable=st.booleans(),
with_out=st.booleans(),
num_positional_args=st.integers(0, 1),
native_array=st.booleans(),
container=st.booleans(),
instance_method=st.booleans(),
a=st.integers(1, 50),
b=st.integers(1, 50),
offset=st.integers(-10, 50),
axes=st.lists(st.integers(-2, 50), min_size=2, max_size=2, unique=True),
)
def test_diagonal(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
a,
b,
offset,
axes,
):
if "float16" or "int8" in input_dtype:
return
helpers.test_array_function(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
"diagonal",
offset=offset,
axis1=axes[0],
axis2=axes[1],
x=np.random.uniform(size=(a, b)).astype(input_dtype[0]),
)
|
py | 1a2ee2cbf78b119984f8f1cc2ecb08d56c921a85 | # Copyright 2014 National Research Foundation (South African Radio Astronomy Observatory)
# BSD license - see LICENSE for details
"""A high-level abstract interface to KATCP clients, sensors and requests."""
from __future__ import absolute_import, division, print_function
from future import standard_library
standard_library.install_aliases() # noqa: E402
import abc
import collections
import logging
import sys
from builtins import object
import tornado
from future.utils import with_metaclass, PY2
from past.builtins import basestring
from tornado.concurrent import Future
from tornado.gen import Return, with_timeout
from katcp import Message, Sensor
from katcp.core import hashable_identity
from katcp.compat import ensure_native_str
logger = logging.getLogger(__name__)
class KATCPResourceError(Exception):
"""Error raised for resource-related errors"""
class KATCPResourceInactive(KATCPResourceError):
"""Raised when a request is made to an inactive resource"""
class KATCPSensorError(KATCPResourceError):
"""Raised if a problem occurred dealing with as KATCPSensor operation"""
class SensorResultTuple(collections.namedtuple(
'SensorResultTuple',
'object name python_identifier description type units reading')):
"""Per-sensor result of list_sensors() method
Attributes
----------
object : KATCPSensor instance
name : str
KATCP (i.e. unescaped) name of the sensor
python_identifier : str
Python-identifier name of the sensor.
description : str
KATCP description of the sensor
type : str
KATCP type of the sensor
units : str
KATCP units of the sensor
reading : KATCPSensorReading instance
Most recently received sensor reading
"""
__slots__ = [] # Prevent dynamic attributes from being possible
def normalize_strategy_parameters(params):
"""Normalize strategy parameters to be a list of strings.
Parameters
----------
params : (space-delimited) string or sequence of strings/numbers Parameters
expected by :class:`SampleStrategy` object, in various forms, where the first
parameter is the name of the strategy.
Returns
-------
params : tuple of strings
Strategy parameters as a list of strings
"""
def fixup_numbers(val):
try:
# See if it is a number
return str(float(val))
except ValueError:
# ok, it is not a number we know of, perhaps a string
return str(val)
if isinstance(params, basestring):
params = params.split(' ')
# No number
return tuple(fixup_numbers(p) for p in params)
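# Worked examples of the normalisation above:
#   normalize_strategy_parameters('period 2.5')         -> ('period', '2.5')
#   normalize_strategy_parameters(['event-rate', 1, 5]) -> ('event-rate', '1.0', '5.0')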
def escape_name(name):
"""Escape sensor and request names to be valid Python identifiers."""
return name.replace('.', '_').replace('-', '_')
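# e.g. escape_name('rfe7.down-converter') -> 'rfe7_down_converter'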
class KATCPResource(with_metaclass(abc.ABCMeta, object)):
"""Base class to serve as the definition of the KATCPResource API.
A class `C` implementing the KATCPResource API should register itself using
KATCPResource.register(C) or subclass KATCPResource directly. A complication
involved with subclassing is that all the abstract properties must be
implemented as properties; normal instance attributes cannot be used.
Attributes
----------
Apart from the abstract properties described below
TODO Describe how hierarchies are implemented. Also all other descriptions
here so that the sphinx doc can be auto-generated from here.
"""
def __init__(self):
self._active = True
@abc.abstractproperty
def name(self):
"""Name of this KATCP resource."""
@abc.abstractproperty
def description(self):
"""Description of this KATCP resource."""
@abc.abstractproperty
def address(self):
"""Address of the underlying client/device.
Type: tuple(host, port) or None, with host a string and port an integer.
If this KATCPResource is not associated with a specific KATCP device
(e.g. it is only a top-level container for a hierarchy of KATCP
resources), the address should be None.
"""
@abc.abstractproperty
def is_connected(self):
"""Indicate whether the underlying client/device is connected or not."""
@abc.abstractproperty
def req(self):
"""Attribute root/container for all KATCP request wrappers.
Each KATCP request that is exposed on a KATCP device should have a
corresponding :class:`KATCPRequest` object so that calling
`resource.req.request_name(arg1, arg2, ...)`
sends a '?request-name arg1 arg2 ...' message to the KATCP device and
waits for the associated inform-reply and reply messages.
For a :class:`KATCPResource` object that exposes a hierarchical device
it can choose to include lower-level request handlers here such that
`resource.req.dev_request()` maps to `resource.dev.req.request()`.
"""
@abc.abstractproperty
def sensor(self):
"""Attribute root/container for all KATCP sensor wrappers.
Each KATCP sensor that is exposed on a KATCP device should have a
corresponding :class:`KATCPSensor` object so that
`resource.sensor.sensor_name`
corresponds to a sensor named e.g. 'sensor-name', where the object or
attribute name is an escaped/Pythonised version of the original sensor
name (see :func:`escape_name` for the escape mechanism). Hopefully the
device is not crazy enough to have multiple sensors that map to the
same Python identifier.
A :class:`KATCPResource` object that exposes a hierarchical device can
choose to include lower-level sensors here such that
`resource.sensor.dev_sensorname` maps to
`resource.dev.sensor.sensorname`.
"""
@abc.abstractproperty
def parent(self):
"""Parent KATCPResource object of this subordinate resource, or None."""
@abc.abstractproperty
def children(self):
"""AttrDict of subordinate KATCPResource objects keyed by their names."""
@tornado.gen.coroutine
def wait(self, sensor_name, condition_or_value, timeout=5):
"""Wait for a sensor in this resource to satisfy a condition.
Parameters
----------
sensor_name : string
The name of the sensor to check
condition_or_value : obj or callable, or seq of objs or callables
If obj, sensor.value is compared with obj. If callable,
condition_or_value(reading) is called, and must return True if its
condition is satisfied. Since the reading is passed in, the value,
status, timestamp or received_timestamp attributes can all be used
in the check.
timeout : float or None
The timeout in seconds (None means wait forever)
Returns
-------
This command returns a tornado Future that resolves with True when the
sensor value satisfies the condition, or False if the condition is
still not satisfied after a given timeout period.
Raises
------
:class:`KATCPSensorError`
If the sensor does not have a strategy set, or if the named sensor
is not present
"""
sensor_name = escape_name(sensor_name)
sensor = self.sensor[sensor_name]
try:
yield sensor.wait(condition_or_value, timeout)
except tornado.gen.TimeoutError:
raise tornado.gen.Return(False)
else:
raise tornado.gen.Return(True)
@abc.abstractmethod
def list_sensors(self, filter="", strategy=False, status="",
use_python_identifiers=True, tuple=False, refresh=False):
"""List sensors available on this resource matching certain criteria.
Parameters
----------
filter : string, optional
Filter each returned sensor's name against this regexp if specified.
To ease the dichotomy between Python identifier names and actual
sensor names, the default is to search on Python identifier names
rather than KATCP sensor names, unless `use_python_identifiers`
below is set to False. Note that the sensors of subordinate
KATCPResource instances may have inconsistent names and Python
identifiers, better to always search on Python identifiers in this
case.
strategy : {False, True}, optional
Only list sensors with a set strategy if True
status : string, optional
Filter each returned sensor's status against this regexp if given
use_python_identifiers : {True, False}, optional
Match on Python identifiers even if the KATCP name is available.
tuple : {True, False}, optional, Default: False
Return backwards compatible tuple instead of SensorResultTuples
refresh : {True, False}, optional, Default: False
If set the sensor values will be refreshed with get_value before
returning the results.
Returns
-------
sensors : list of SensorResultTuples, or list of tuples
List of matching sensors presented as named tuples. The `object`
field is the :class:`KATCPSensor` object associated with the sensor.
Note that the name of the object may not match `name` if it
originates from a subordinate device.
"""
@tornado.gen.coroutine
def set_sampling_strategies(self, filter, strategy_and_params):
"""Set a sampling strategy for all sensors that match the specified filter.
Parameters
----------
filter : string
The regular expression filter to use to select the sensors to which
to apply the specified strategy. Use "" to match all sensors. Is
matched using :meth:`list_sensors`.
strategy_and_params : seq of str or str
As tuple contains (<strat_name>, [<strat_parm1>, ...]) where the strategy
names and parameters are as defined by the KATCP spec. As str contains the
same elements in space-separated form.
**list_sensor_args : keyword arguments
Passed to the :meth:`list_sensors` call as kwargs
Returns
-------
sensors_strategies : tornado Future
resolves with a dict with the Python identifier names of the sensors
as keys and the value a tuple:
(success, info) with
success : bool
True if setting succeeded for this sensor, else False
info : tuple
normalised sensor strategy and parameters as tuple if success == True
else, sys.exc_info() tuple for the error that occurred.
"""
sensors_strategies = {}
sensor_results = yield self.list_sensors(filter)
for sensor_reslt in sensor_results:
norm_name = sensor_reslt.object.normalised_name
try:
sensor_strat = yield self.set_sampling_strategy(norm_name, strategy_and_params)
sensors_strategies[norm_name] = sensor_strat[norm_name]
except Exception:
sensors_strategies[norm_name] = (
False, sys.exc_info())
raise tornado.gen.Return(sensors_strategies)
@tornado.gen.coroutine
def set_sampling_strategy(self, sensor_name, strategy_and_params):
"""Set a sampling strategy for a specific sensor.
Parameters
----------
sensor_name : string
The specific sensor.
strategy_and_params : seq of str or str
As tuple contains (<strat_name>, [<strat_parm1>, ...]) where the strategy
names and parameters are as defined by the KATCP spec. As str contains the
same elements in space-separated form.
Returns
-------
sensors_strategies : tornado Future
resolves with a dict with the Python identifier names of the sensors
as keys and the value a tuple:
(success, info) with
success : bool
True if setting succeeded for this sensor, else False
info : tuple
normalised sensor strategy and parameters as tuple if success == True
else, sys.exc_info() tuple for the error that occurred.
"""
sensors_strategies = {}
try:
sensor_obj = self.sensor.get(sensor_name)
yield sensor_obj.set_sampling_strategy(strategy_and_params)
sensors_strategies[sensor_obj.normalised_name] = (
True, sensor_obj.sampling_strategy)
except Exception:
sensors_strategies[sensor_obj.normalised_name] = (
False, sys.exc_info())
raise tornado.gen.Return(sensors_strategies)
def set_active(self, active):
self._active = bool(active)
for child in dict.values(self.children):
child.set_active(active)
def is_active(self):
return self._active
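# Hedged usage sketch for a concrete KATCPResource implementation; `resource`,
# the request name and the sensor name below are hypothetical, but the access
# patterns follow the docstrings above.
#
#   @tornado.gen.coroutine
#   def example(resource):
#       # sends '?capture-start stream0' and waits for the reply
#       reply = yield resource.req.capture_start('stream0')
#       # block (up to 10 s) until the escaped sensor name reports the value 'ok'
#       ok = yield resource.wait('device_status', 'ok', timeout=10)
#       raise tornado.gen.Return((reply, ok))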
class KATCPRequest(with_metaclass(abc.ABCMeta, object)):
"""Abstract Base class to serve as the definition of the KATCPRequest API.
Wrapper around a specific KATCP request to a given KATCP device. Each
available KATCP request for a particular device has an associated
:class:`KATCPRequest` object in the object hierarchy. This wrapper is mainly
for interactive convenience. It provides the KATCP request help string as a
docstring and pretty-prints the result of the request.
"""
def __init__(self, request_description, is_active=lambda: True):
"""Initialize request with given description and network client
Parameters
----------
request_description : dict
name : str
KATCP name of the request
description : str
KATCP request description (as returned by ?help <name>)
timeout_hint : float or None
Request timeout suggested by device or None if not provided
is_active : callable, optional
Returns True if this request is active, else False
"""
for required_description_key in ('name', 'description', 'timeout_hint'):
if required_description_key not in request_description:
raise ValueError(
'Required request_description key {!r} not present'
.format(required_description_key))
self._request_description = dict(request_description)
self.__doc__ = '\n'.join(('KATCP Documentation',
'===================',
self.description,
'KATCPRequest Documentation',
'==========================',
self.__doc__ or ''))
self._is_active = is_active
@property
def name(self):
"""Name of the KATCP request."""
return self._request_description['name']
@property
def description(self):
"""Description of KATCP request as obtained from the ?help request."""
return self._request_description['description']
@property
def timeout_hint(self):
"""Request timeout suggested by device or None if not provided"""
return self._request_description['timeout_hint']
def __call__(self, *args, **kwargs):
"""Execute the KATCP request described by this object.
All positional arguments of this function are converted to KATCP string
representations and passed on as space-separated parameters to the KATCP
device.
Keyword Arguments
-----------------
timeout : None or float, optional
Timeout in seconds for the request. If None, use request timeout
hint received from server or default for the :class:`KATCPResource`
instance that contains the request if no hint is available.
mid : None or int, optional
Message identifier to use for the request message. If None, use
either auto-incrementing value or no mid depending on the KATCP
protocol version (mid's were only introduced with KATCP v5) and the
default of the containing :class:`KATCPResource` instance.
Returns
-------
reply : tornado future resolving with :class:`KATCPReply` object
KATCP request reply wrapped in KATCPReply object
Raises
------
:class:`ResourceInactive` if the resource is inactive when the request is made.
"""
if self.is_active():
return self.issue_request(*args, **kwargs)
else:
raise KATCPResourceInactive(
"Can't make ?{} request; resource is inactive".format(self.name))
@abc.abstractmethod
def issue_request(self, *args, **kwargs):
"""Signature as for __call__
Do the request immediately without checking active state.
"""
def is_active(self):
"""True if resource for this request is active"""
return self._is_active()
class KATCPDummyRequest(KATCPRequest):
"""Dummy counterpart to KATCPRequest that always returns a successful reply"""
def issue_request(self, *args, **kwargs):
reply_msg = Message.reply('fake', 'ok')
reply = KATCPReply(reply_msg, [])
fut = Future()
fut.set_result(reply)
return fut
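
# Illustrative sketch (not part of the original module): exercising the dummy
# request above. The description keys are the ones KATCPRequest.__init__
# requires; 'watchdog' is just a hypothetical request name.
def _example_dummy_request():
    req = KATCPDummyRequest(
        {'name': 'watchdog', 'description': 'Check the connection', 'timeout_hint': None})
    reply = req().result()  # the returned future is already resolved
    # Truthy because the fake reply's first argument is 'ok'
    return bool(reply), reply.reply.name
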
class KATCPSensorReading(collections.namedtuple(
'KATCPSensorReading', 'received_timestamp timestamp istatus value')):
"""Sensor reading as a (received_timestamp, timestamp, istatus, value) tuple.
Attributes
----------
received_timestamp : float
Time (in seconds since UTC epoch) at which the sensor value was received.
timestamp : float
Time (in seconds since UTC epoch) at which the sensor value was determined.
istatus : int Sensor status constant
        Whether the value represents an error condition or not, as in :class:`katcp.Sensor`.
The status is stored as an int, but output as a string, eg 'nominal'.
value : object
The value of the sensor (the type will be appropriate to the
sensor's type).
"""
__slots__ = [] # Prevent dynamic attributes
@property
def status(self):
" Returns the string representation of sensor status, eg 'nominal'"
try:
return Sensor.STATUSES[int(self.istatus)]
except TypeError:
return 'unknown'
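
# Illustrative sketch: constructing a reading by hand and reading its fields
# back. Sensor.NOMINAL and Sensor.STATUSES come from katcp.Sensor, as used in
# the status property above.
def _example_sensor_reading(now):
    reading = KATCPSensorReading(now, now, Sensor.NOMINAL, 21.5)
    # .status maps the integer istatus onto its name, e.g. 'nominal'
    return reading.value, reading.status
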
class KATCPSensorsManager(with_metaclass(abc.ABCMeta, object)):
"""Sensor management class used by KATCPSensor. Abstracts communications details.
This class should arrange:
1. A mechanism for setting sensor strategies
2. A mechanism for polling a sensor value
3. Keeping track of- and reapplying sensor strategies after reconnect, etc.
    4. Providing local time. This is done to avoid direct calls to time.time, allowing
accelerated time testing / simulation / dry-running
"""
@abc.abstractmethod
def time(self):
"""Returns the current time (in seconds since UTC epoch)"""
@abc.abstractmethod
def get_sampling_strategy(self, sensor_name):
"""Get the current sampling strategy for the named sensor
Parameters
----------
sensor_name : str
Name of the sensor (normal or escaped form)
Returns
-------
strategy : tornado Future that resolves with tuple of str
contains (<strat_name>, [<strat_parm1>, ...]) where the strategy names and
parameters are as defined by the KATCP spec
"""
@abc.abstractmethod
def set_sampling_strategy(self, sensor_name, strategy_and_parms):
"""Set the sampling strategy for the named sensor
Parameters
----------
sensor_name : str
Name of the sensor
strategy : seq of str or str
As tuple contains (<strat_name>, [<strat_parm1>, ...]) where the strategy
names and parameters are as defined by the KATCP spec. As str contains the
same elements in space-separated form.
Returns
-------
done : tornado Future that resolves when done or raises KATCPSensorError
Notes
-----
It is recommended that implementations use :func:`normalize_strategy_parameters`
to process the strategy_and_parms parameter, since it will deal with both string
and list versions and makes sure that numbers are represented as strings in a
consistent format.
This method should arrange for the strategy to be set on the underlying network
device or whatever other implementation is used. This strategy should also be
automatically re-set if the device is reconnected, etc. If a strategy is set for a
        non-existing sensor, it should still cache the strategy and ensure that it is
        applied whenever said sensor comes into existence. This allows an application to
        pre-set strategies for sensors before it is synced / connected to a device.
"""
@abc.abstractmethod
def drop_sampling_strategy(self, sensor_name):
"""Drop the sampling strategy for the named sensor from the cache
Calling :meth:`set_sampling_strategy` requires the sensor manager to
memorise the requested strategy so that it can automatically be reapplied.
If the client is no longer interested in the sensor, or knows the sensor
may be removed from the server, then it can use this method to ensure the
manager forgets about the strategy. This method will not change the current
strategy. No error is raised if there is no strategy to drop.
Parameters
----------
sensor_name : str
Name of the sensor (normal or escaped form)
"""
@abc.abstractmethod
def poll_sensor(self, sensor_name):
"""Poll sensor and arrange for sensor object to be updated
Returns
-------
done_future : tornado Future
Resolves when the poll is complete, or raises KATCPSensorError
"""
# TODO NM 2015-02-03 Might want to add a timeout parameter here, and to all the
# other code that calls this
@abc.abstractmethod
def reapply_sampling_strategies(self):
"""Reapply all sensor strategies using cached values
Would typically be called when a connection is re-established. Should
not raise errors when resetting strategies for sensors that no longer
exist on the KATCP resource.
"""
class KATCPSensor(with_metaclass(abc.ABCMeta, object)):
"""Wrapper around a specific KATCP sensor on a given KATCP device.
Each available KATCP sensor for a particular device has an associated
:class:`KATCPSensor` object in the object hierarchy. This wrapper is mainly
for interactive convenience. It provides the KATCP request help string as a
docstring and registers listeners. Subclasses need to call the base class
version of __init__().
"""
def __init__(self, sensor_description, sensor_manager):
"""Subclasses must arrange to call this in their __init__().
Parameters
----------
sensor_description : dict
Description of the KATCP sensor, with keys same as the parameters of
:class:`katcp.Sensor`
sensor_manager : :class:`KATCPSensorsManager` instance
Manages sensor strategies, allows sensor polling, and provides time
"""
self._manager = sensor_manager
self.clear_listeners()
self._reading = KATCPSensorReading(0, 0, Sensor.UNKNOWN, None)
# We'll be abusing a katcp.Sensor object slightly to make use of its
# parsing and formatting functionality
self._sensor = Sensor(**sensor_description)
self._name = self._sensor.name
        # Override the katcp.Sensor's set method with ours
self._sensor.set = self.set
        # Steal the katcp.Sensor's set_formatted method. Since we overrode
# its set() method with ours, calling set_formatted will result in this
# KATCPSensor object's value being set.
self.set_formatted = self._sensor.set_formatted
@property
def parent_name(self):
"""Name of the parent of this KATCPSensor"""
return self._manager.resource_name
@property
def name(self):
"""Name of this KATCPSensor"""
return self._name
@property
def normalised_name(self):
"""Normalised name of this KATCPSensor that can be used as a python identifier"""
return escape_name(self._name)
@property
def reading(self):
"""Most recently received sensor reading as KATCPSensorReading instance"""
return self._reading
@property
def value(self):
return self._reading.value
@property
def status(self):
return self._reading.status
@property
def sampling_strategy(self):
"""Current sampling strategy"""
return self._manager.get_sampling_strategy(self.name)
@property
def description(self):
return self._sensor.description
@property
def units(self):
return self._sensor.units
@property
def type(self):
return self._sensor.type
def parse_value(self, s_value):
"""Parse a value from a string.
Parameters
----------
s_value : str
A string value to attempt to convert to a value for
the sensor.
Returns
-------
value : object
A value of a type appropriate to the sensor.
"""
return self._sensor.parse_value(s_value)
def set_strategy(self, strategy, params=None):
"""Set current sampling strategy for sensor.
        This signature is kept for backwards compatibility.
Parameters
----------
strategy : seq of str or str
As tuple contains (<strat_name>, [<strat_parm1>, ...]) where the strategy
names and parameters are as defined by the KATCP spec. As str contains the
same elements in space-separated form.
params : seq of str or str
(<strat_name>, [<strat_parm1>, ...])
Returns
-------
done : tornado Future that resolves when done or raises KATCPSensorError
"""
if not params:
param_args = []
elif isinstance(params, basestring):
param_args = [str(p) for p in params.split(' ')]
else:
if not isinstance(params, collections.Iterable):
params = (params,)
param_args = [str(p) for p in params]
samp_strategy = " ".join([strategy] + param_args)
return self._manager.set_sampling_strategy(self.name, samp_strategy)
def set_sampling_strategy(self, strategy):
"""Set current sampling strategy for sensor
Parameters
----------
strategy : seq of str or str
As tuple contains (<strat_name>, [<strat_parm1>, ...]) where the strategy
names and parameters are as defined by the KATCP spec. As str contains the
same elements in space-separated form.
Returns
-------
done : tornado Future that resolves when done or raises KATCPSensorError
"""
return self._manager.set_sampling_strategy(self.name, strategy)
def drop_sampling_strategy(self):
"""Drop memorised sampling strategy for sensor, if any
Calling this method ensures that the sensor manager does not attempt
to reapply a sampling strategy. It will not raise an error if no strategy
has been set. Use :meth:`set_sampling_strategy` to memorise a strategy again.
"""
self._manager.drop_sampling_strategy(self.name)
def register_listener(self, listener, reading=False):
"""Add a callback function that is called when sensor value is updated.
        The default callback signature is (received_timestamp, timestamp, status, value).
Parameters
----------
listener : function
            If `reading` is True the callback is called as listener(katcp_sensor,
            reading), where `katcp_sensor` is this KATCPSensor instance and `reading`
            is an instance of :class:`KATCPSensorReading`. Otherwise (the default) it
            is called as listener(received_timestamp, timestamp, status, value).
"""
listener_id = hashable_identity(listener)
self._listeners[listener_id] = (listener, reading)
logger.debug(
'Register listener for {}'
.format(self.name))
def unregister_listener(self, listener):
"""Remove a listener callback added with register_listener().
Parameters
----------
listener : function
Reference to the callback function that should be removed
"""
listener_id = hashable_identity(listener)
self._listeners.pop(listener_id, None)
def is_listener(self, listener):
listener_id = hashable_identity(listener)
return listener_id in self._listeners
def clear_listeners(self):
"""Clear any registered listeners to updates from this sensor."""
self._listeners = {}
def call_listeners(self, reading):
logger.debug(
'Calling listeners {}'
.format(self.name))
for listener, use_reading in list(self._listeners.values()):
try:
if use_reading:
listener(self, reading)
else:
listener(reading.received_timestamp, reading.timestamp,
reading.status, reading.value)
except Exception:
logger.exception(
'Unhandled exception calling KATCPSensor callback {0!r}'
.format(listener))
def set(self, timestamp, status, value):
"""Set sensor with a given received value, matches :meth:`katcp.Sensor.set`"""
received_timestamp = self._manager.time()
reading = KATCPSensorReading(received_timestamp, timestamp, status, value)
self._reading = reading
self.call_listeners(reading)
def set_value(self, value, status=Sensor.NOMINAL, timestamp=None):
"""Set sensor value with optinal specification of status and timestamp"""
if timestamp is None:
timestamp = self._manager.time()
self.set(timestamp, status, value)
def set_formatted(self, raw_timestamp, raw_status, raw_value, major):
"""Set sensor using KATCP string formatted inputs
Mirrors :meth:`katcp.Sensor.set_formatted`.
This implementation is empty. Will, during instantiation, be overridden by the
set_formatted() method of a katcp.Sensor object.
"""
@tornado.gen.coroutine
def get_reading(self):
"""Get a fresh sensor reading from the KATCP resource
Returns
-------
reply : tornado Future resolving with :class:`KATCPSensorReading` object
Notes
-----
As a side-effect this will update the reading stored in this object, and result in
registered listeners being called.
"""
yield self._manager.poll_sensor(self._name)
# By now the sensor manager should have set the reading
raise Return(self._reading)
@tornado.gen.coroutine
def get_value(self):
"""Get a fresh sensor value from the KATCP resource
Returns
-------
reply : tornado Future resolving with :class:`KATCPSensorReading` object
Notes
-----
As a side-effect this will update the reading stored in this object, and result in
registered listeners being called.
"""
yield self._manager.poll_sensor(self._name)
# By now the sensor manager should have set the reading
raise Return(self._reading.value)
@tornado.gen.coroutine
def get_status(self):
"""Get a fresh sensor status from the KATCP resource
Returns
-------
reply : tornado Future resolving with :class:`KATCPSensorReading` object
Notes
-----
As a side-effect this will update the reading stored in this object, and result in
registered listeners being called.
"""
yield self._manager.poll_sensor(self._name)
# By now the sensor manager should have set the reading
raise Return(self._reading.status)
def wait(self, condition_or_value, timeout=None):
"""Wait for the sensor to satisfy a condition.
Parameters
----------
condition_or_value : obj or callable, or seq of objs or callables
If obj, sensor.value is compared with obj. If callable,
condition_or_value(reading) is called, and must return True if its
condition is satisfied. Since the reading is passed in, the value,
status, timestamp or received_timestamp attributes can all be used
in the check.
TODO: Sequences of conditions (use SensorTransitionWaiter thingum?)
timeout : float or None
The timeout in seconds (None means wait forever)
Returns
-------
        A tornado Future that resolves with True when the sensor value satisfies
        the condition. It never resolves with False; if a timeout is given, a
        TimeoutError is raised instead.
Raises
------
:class:`KATCPSensorError`
If the sensor does not have a strategy set
:class:`tornado.gen.TimeoutError`
If the sensor condition still fails after a stated timeout period
"""
if (isinstance(condition_or_value, collections.Sequence) and not
isinstance(condition_or_value, basestring)):
raise NotImplementedError(
'Currently only single conditions are supported')
condition_test = (condition_or_value if callable(condition_or_value)
else lambda s: s.value == condition_or_value)
ioloop = tornado.ioloop.IOLoop.current()
f = Future()
if self.sampling_strategy == ('none', ):
raise KATCPSensorError(
'Cannot wait on a sensor that does not have a strategy set')
def handle_update(sensor, reading):
# This handler is called whenever a sensor update is received
try:
assert sensor is self
if condition_test(reading):
self.unregister_listener(handle_update)
# Try and be idempotent if called multiple times after the
# condition is matched. This should not happen unless the
# sensor object is being updated in a thread outside of the
# ioloop.
if not f.done():
ioloop.add_callback(f.set_result, True)
except Exception:
f.set_exc_info(sys.exc_info())
self.unregister_listener(handle_update)
self.register_listener(handle_update, reading=True)
# Handle case where sensor is already at the desired value
ioloop.add_callback(handle_update, self, self._reading)
if timeout:
to = ioloop.time() + timeout
timeout_f = with_timeout(to, f)
# Make sure we stop listening if the wait times out to prevent a
# buildup of listeners
timeout_f.add_done_callback(
lambda f: self.unregister_listener(handle_update))
return timeout_f
else:
return f
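
# Illustrative coroutine sketch showing the intended calling pattern for wait()
# above; `sensor` is assumed to be a connected KATCPSensor receiving updates.
@tornado.gen.coroutine
def _example_wait_until_nominal(sensor):
    # A strategy must be set first, otherwise wait() raises KATCPSensorError
    yield sensor.set_sampling_strategy('event')
    # Either a plain value or a callable over the reading can be waited on
    yield sensor.wait(lambda reading: reading.status == 'nominal', timeout=30)
    raise tornado.gen.Return(sensor.value)
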
_KATCPReplyTuple = collections.namedtuple('_KATCPReplyTuple', 'reply informs')
class KATCPReply(_KATCPReplyTuple):
"""Container for return messages of KATCP request (reply and informs).
This is based on a named tuple with 'reply' and 'informs' fields so that
the :class:`KATCPReply` object can still be unpacked into a normal tuple.
Parameters
----------
reply : :class:`katcp.Message` object
Reply message returned by katcp request
informs : list of :class:`katcp.Message` objects
List of inform messages returned by KATCP request
Attributes
----------
messages: list of :class:`katcp.Message` objects
List of all messages returned by KATCP request, reply first
reply: :class:`katcp.Message` object
Reply message returned by KATCP request
informs: list of :class:`katcp.Message` objects
List of inform messages returned by KATCP request
The instance evaluates to nonzero (i.e. truthy) if the request succeeded.
"""
__slots__ = [] # Prevent dynamic attributes from being possible
def __repr__(self):
"""String representation for pretty-printing in IPython."""
return "\n".join(
"%s%s %s"
% (
ensure_native_str(Message.TYPE_SYMBOLS[m.mtype]),
m.name,
" ".join([ensure_native_str(arg) for arg in m.arguments]),
)
for m in self.messages
)
def __str__(self):
"""String representation using KATCP wire format"""
return '\n'.join(str(m) for m in self.messages)
def __bool__(self):
"""True if request succeeded (i.e. first reply argument is 'ok')."""
return self.messages[0].reply_ok()
if PY2:
__nonzero__ = __bool__
@property
def messages(self):
"""List of all messages returned by KATCP request, reply first."""
return [self.reply] + self.informs
@property
def succeeded(self):
"""True if request succeeded (i.e. first reply argument is 'ok')."""
return bool(self)
|
py | 1a2ee45e0d3505c7a62e3d84edae917cc0417cd5 | #!/usr/bin/env python
from os import path
import setuptools
def parse_requirements(filename):
""" load requirements from a pip requirements file """
lineiter = (line.strip() for line in open(filename))
return [line for line in lineiter if line and not line.startswith("#")]
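
# Illustrative sketch (hypothetical file name): a requirements file containing
# a pinned package, a comment and a blank line yields only the pinned package.
def _example_parse_requirements(path='example-requirements.txt'):
    with open(path, 'w') as handle:
        handle.write("requests==2.0\n# a comment\n\n")
    return parse_requirements(path)  # -> ['requests==2.0']
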
from metaappscriptsdk import info
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst')) as f:
long_description = f.read()
packages = [
'metaappscriptsdk',
'metaappscriptsdk.logger',
'metaappscriptsdk.services',
'metaappscriptsdk.schedule',
'metaappscriptsdk.feed',
]
install_reqs = parse_requirements('requirements.txt')
reqs = install_reqs
setuptools.setup(
name=info.__package_name__,
version=info.__version__,
description='Meta App Scripts SDK',
long_description=long_description,
url='https://github.com/rw-meta/meta-app-script-py-sdk',
author='Artur Geraschenko',
author_email='[email protected]',
license='MIT',
classifiers=[
'Programming Language :: Python :: 3'
],
install_requires=reqs,
packages=packages,
package_data={'': ['LICENSE']},
package_dir={'metaappscriptsdk': 'metaappscriptsdk'},
include_package_data=True,
)
|
py | 1a2ee540ef6781291803a80a80f59af0ef0156d2 | """
Module description:
"""
__version__ = '0.1'
__author__ = 'Vito Walter Anelli, Claudio Pomo'
__email__ = '[email protected], [email protected]'
import numpy as np
from ast import literal_eval as make_tuple
from tqdm import tqdm
from elliot.dataset.samplers import pointwise_pos_neg_sampler as pws
from elliot.recommender.neural.NeuMF.neural_matrix_factorization_model import NeuralMatrixFactorizationModel
from elliot.recommender.recommender_utils_mixin import RecMixin
from elliot.utils.write import store_recommendation
from elliot.recommender.base_recommender_model import BaseRecommenderModel
from elliot.recommender.base_recommender_model import init_charger
np.random.seed(42)
class NeuMF(RecMixin, BaseRecommenderModel):
r"""
Neural Collaborative Filtering
For further details, please refer to the `paper <https://arxiv.org/abs/1708.05031>`_
Args:
mf_factors: Number of MF latent factors
mlp_factors: Number of MLP latent factors
mlp_hidden_size: List of units for each layer
lr: Learning rate
dropout: Dropout rate
is_mf_train: Whether to train the MF embeddings
is_mlp_train: Whether to train the MLP layers
To include the recommendation model, add it to the config file adopting the following pattern:
.. code:: yaml
models:
NeuMF:
meta:
save_recs: True
epochs: 10
mf_factors: 10
mlp_factors: 10
mlp_hidden_size: (64,32)
lr: 0.001
dropout: 0.0
is_mf_train: True
is_mlp_train: True
"""
@init_charger
def __init__(self, data, config, params, *args, **kwargs):
self._random = np.random
self._sampler = pws.Sampler(self._data.i_train_dict)
self._params_list = [
("_learning_rate", "lr", "lr", 0.001, None, None),
("_mf_factors", "mf_factors", "mffactors", 10, int, None),
("_mlp_factors", "mlp_factors", "mlpfactors", 10, int, None),
("_mlp_hidden_size", "mlp_hidden_size", "mlpunits", "(64,32)", lambda x: list(make_tuple(str(x))), lambda x: self._batch_remove(str(x), " []").replace(",", "-")),
("_dropout", "dropout", "drop", 0, None, None),
("_is_mf_train", "is_mf_train", "mftrain", True, None, None),
("_is_mlp_train", "is_mlp_train", "mlptrain", True, None, None),
]
self.autoset_params()
if self._batch_size < 1:
self._batch_size = self._data.transactions
self._ratings = self._data.train_dict
self._sp_i_train = self._data.sp_i_train
self._i_items_set = list(range(self._num_items))
self._model = NeuralMatrixFactorizationModel(self._num_users, self._num_items, self._mf_factors,
self._mlp_factors, self._mlp_hidden_size,
self._dropout, self._is_mf_train, self._is_mlp_train,
self._learning_rate)
@property
def name(self):
return "NeuMF"\
+ "_e:" + str(self._epochs) \
+ "_bs:" + str(self._batch_size) \
+ f"_{self.get_params_shortcut()}"
def train(self):
if self._restore:
return self.restore_weights()
best_metric_value = 0
for it in range(self._epochs):
loss = 0
steps = 0
with tqdm(total=int(self._data.transactions // self._batch_size), disable=not self._verbose) as t:
for batch in self._sampler.step(self._data.transactions, self._batch_size):
steps += 1
loss += self._model.train_step(batch)
t.set_postfix({'loss': f'{loss.numpy() / steps:.5f}'})
t.update()
if not (it + 1) % self._validation_rate:
recs = self.get_recommendations(self.evaluator.get_needed_recommendations())
result_dict = self.evaluator.eval(recs)
self._results.append(result_dict)
print(f'Epoch {(it + 1)}/{self._epochs} loss {loss/steps:.5f}')
if self._results[-1][self._validation_k]["val_results"][self._validation_metric] > best_metric_value:
print("******************************************")
best_metric_value = self._results[-1][self._validation_k]["val_results"][self._validation_metric]
if self._save_weights:
self._model.save_weights(self._saving_filepath)
if self._save_recs:
store_recommendation(recs, self._config.path_output_rec_result + f"{self.name}-it:{it + 1}.tsv")
def get_recommendations(self, k: int = 100):
predictions_top_k = {}
for index, offset in enumerate(range(0, self._num_users, self._batch_size)):
offset_stop = min(offset + self._batch_size, self._num_users)
predictions = self._model.get_recs(
(
np.repeat(np.array(list(range(offset, offset_stop)))[:, None], repeats=self._num_items, axis=1),
np.array([self._i_items_set for _ in range(offset, offset_stop)])
)
)
v, i = self._model.get_top_k(predictions, self.get_train_mask(offset, offset_stop), k=k)
items_ratings_pair = [list(zip(map(self._data.private_items.get, u_list[0]), u_list[1]))
for u_list in list(zip(i.numpy(), v.numpy()))]
predictions_top_k.update(dict(zip(map(self._data.private_users.get,
range(offset, offset_stop)), items_ratings_pair)))
return predictions_top_k
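
# Illustrative sketch: each _params_list entry above is
# (attribute, config key, shortcut, default, reader, writer). The reader used
# for mlp_hidden_size turns the YAML string "(64,32)" into a Python list.
def _example_parse_hidden_size(raw="(64,32)"):
    return list(make_tuple(str(raw)))  # -> [64, 32]
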
|
py | 1a2ee5c384e1bc675aca57bff1eb30291c6e2ad1 | from django.apps import apps
def get_current_site(request):
"""
Check if contrib.sites is installed and return either the current
``Site`` object or a ``RequestSite`` object based on the request.
"""
# Imports are inside the function because its point is to avoid importing
# the Site models when django.contrib.sites isn't installed.
if apps.is_installed('django.contrib.sites'):
from .models import Site
return Site.objects.get_current(request)
else:
from .requests import RequestSite
return RequestSite(request)
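
# Illustrative sketch of typical use in a view: both Site and RequestSite
# expose .domain and .name, so callers need not care which one they received.
def _example_view(request):
    from django.http import HttpResponse  # local import, sketch only
    site = get_current_site(request)
    return HttpResponse("Current site: %s" % site.domain)
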
|
py | 1a2ee8806c7295fe5ca0987773269b476e6ccb94 | class BuySellStock:
# @param prices, a list of stock prices
# @return index of buy and sell price
def choiceStocks(self, prices):
n = len(prices)
if n == 0:
return None, None
if n == 1:
return 0, 0
        maxPrice = prices[n - 1]
        mpIndex = n - 1  # index of the highest price seen so far (scanning right to left)
        maxProfit = 0
        # Fall back to "buy and sell on the last day" (zero profit) so the
        # indices are always defined, even for strictly falling prices.
        bpIndex = spIndex = n - 1
        for price in range(n):
            currPrice = prices[n - price - 1]
            if currPrice > maxPrice:
                maxPrice = currPrice
                mpIndex = n - price - 1
            currProfit = maxPrice - currPrice
            if currProfit > maxProfit:
                maxProfit = currProfit
                bpIndex = n - price - 1
                spIndex = mpIndex  # commit the matching sell index with this buy index
        return bpIndex, spIndex
# Driver code to test the program
run = BuySellStock()
print(run.choiceStocks([5,6,7,8,10,3,8,7,11,1,2,11]))
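# Illustrative extra checks (relying on the fallback initialisation above):
# strictly falling prices give zero profit, so buy and sell indices coincide.
print(run.choiceStocks([9, 7, 4, 1]))  # expected: (3, 3)
print(run.choiceStocks([]))            # expected: (None, None)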
|
py | 1a2ee9b36c7eaf80f22187398afb4ca70cdb3239 | from typing import Union, List, Optional
from pyspark.sql.types import StructType, StructField, StringType, ArrayType, DataType
# This file is auto-generated by generate_schema so do not edit it manually
# noinspection PyPep8Naming
class SubstanceNucleicAcid_SugarSchema:
"""
Nucleic acids are defined by three distinct elements: the base, sugar and
linkage. Individual substance/moiety IDs will be created for each of these
elements. The nucleotide sequence will be always entered in the 5’-3’
direction.
"""
# noinspection PyDefaultArgument
@staticmethod
def get_schema(
max_nesting_depth: Optional[int] = 6,
nesting_depth: int = 0,
nesting_list: List[str] = [],
max_recursion_limit: Optional[int] = 2,
include_extension: Optional[bool] = False,
extension_fields: Optional[List[str]] = None,
extension_depth: int = 0,
max_extension_depth: Optional[int] = 2,
include_modifierExtension: Optional[bool] = False,
use_date_for: Optional[List[str]] = None,
parent_path: Optional[str] = "",
) -> Union[StructType, DataType]:
"""
Nucleic acids are defined by three distinct elements: the base, sugar and
linkage. Individual substance/moiety IDs will be created for each of these
elements. The nucleotide sequence will be always entered in the 5’-3’
direction.
id: Unique id for the element within a resource (for internal references). This
may be any string value that does not contain spaces.
extension: May be used to represent additional information that is not part of the basic
definition of the element. To make the use of extensions safe and manageable,
there is a strict set of governance applied to the definition and use of
extensions. Though any implementer can define an extension, there is a set of
requirements that SHALL be met as part of the definition of the extension.
modifierExtension: May be used to represent additional information that is not part of the basic
definition of the element and that modifies the understanding of the element
in which it is contained and/or the understanding of the containing element's
descendants. Usually modifier elements provide negation or qualification. To
make the use of extensions safe and manageable, there is a strict set of
governance applied to the definition and use of extensions. Though any
implementer can define an extension, there is a set of requirements that SHALL
be met as part of the definition of the extension. Applications processing a
resource are required to check for modifier extensions.
Modifier extensions SHALL NOT change the meaning of any elements on Resource
or DomainResource (including cannot change the meaning of modifierExtension
itself).
identifier: The Substance ID of the sugar or sugar-like component that make up the
nucleotide.
name: The name of the sugar or sugar-like component that make up the nucleotide.
residueSite: The residues that contain a given sugar will be captured. The order of given
residues will be captured in the 5‘-3‘direction consistent with the base
sequences listed above.
"""
if extension_fields is None:
extension_fields = [
"valueBoolean",
"valueCode",
"valueDate",
"valueDateTime",
"valueDecimal",
"valueId",
"valueInteger",
"valuePositiveInt",
"valueString",
"valueTime",
"valueUnsignedInt",
"valueUri",
"valueUrl",
"valueReference",
"valueCodeableConcept",
"valueAddress",
]
from spark_fhir_schemas.r4.complex_types.extension import ExtensionSchema
from spark_fhir_schemas.r4.complex_types.identifier import IdentifierSchema
if (
max_recursion_limit
and nesting_list.count("SubstanceNucleicAcid_Sugar") >= max_recursion_limit
) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
return StructType([StructField("id", StringType(), True)])
# add my name to recursion list for later
my_nesting_list: List[str] = nesting_list + ["SubstanceNucleicAcid_Sugar"]
my_parent_path = (
parent_path + ".substancenucleicacid_sugar"
if parent_path
else "substancenucleicacid_sugar"
)
schema = StructType(
[
# Unique id for the element within a resource (for internal references). This
# may be any string value that does not contain spaces.
StructField("id", StringType(), True),
# May be used to represent additional information that is not part of the basic
# definition of the element. To make the use of extensions safe and manageable,
# there is a strict set of governance applied to the definition and use of
# extensions. Though any implementer can define an extension, there is a set of
# requirements that SHALL be met as part of the definition of the extension.
StructField(
"extension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# May be used to represent additional information that is not part of the basic
# definition of the element and that modifies the understanding of the element
# in which it is contained and/or the understanding of the containing element's
# descendants. Usually modifier elements provide negation or qualification. To
# make the use of extensions safe and manageable, there is a strict set of
# governance applied to the definition and use of extensions. Though any
# implementer can define an extension, there is a set of requirements that SHALL
# be met as part of the definition of the extension. Applications processing a
# resource are required to check for modifier extensions.
#
# Modifier extensions SHALL NOT change the meaning of any elements on Resource
# or DomainResource (including cannot change the meaning of modifierExtension
# itself).
StructField(
"modifierExtension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# The Substance ID of the sugar or sugar-like component that make up the
# nucleotide.
StructField(
"identifier",
IdentifierSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# The name of the sugar or sugar-like component that make up the nucleotide.
StructField("name", StringType(), True),
# The residues that contain a given sugar will be captured. The order of given
# residues will be captured in the 5‘-3‘direction consistent with the base
# sequences listed above.
StructField("residueSite", StringType(), True),
]
)
if not include_extension:
schema.fields = [
c
if c.name != "extension"
else StructField("extension", StringType(), True)
for c in schema.fields
]
if not include_modifierExtension:
schema.fields = [
c
if c.name != "modifierExtension"
else StructField("modifierExtension", StringType(), True)
for c in schema.fields
]
return schema
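
# Illustrative sketch (assumes pyspark and the rest of spark_fhir_schemas are
# installed): materialise the schema and list its top-level columns.
def _example_print_columns():
    schema = SubstanceNucleicAcid_SugarSchema.get_schema()
    for field in schema.fields:
        print(field.name, field.dataType.simpleString())
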
|
py | 1a2eea72af7ac86b9529a084586cc260eef68da8 | m = h = mu = 0
while True:
print(25*'-')
    print(' REGISTER A PERSON')
print(25*'-')
    i = int(input('Age: '))
if i > 17:
m+=1
while True:
        s = input('Sex: [M/F] ').strip().upper()[0]
if s in 'MF':
break
print(25*'-')
if s == 'M':
h+=1
if s == 'F' and i < 21:
mu+=1
while True:
        q = input('Keep going? [Y/N] ').strip().upper()[0]
        if q in 'YN':
break
if q == 'N':
break
print(f'====== END OF PROGRAM ======\nTotal of people older than 18: {m}\nIn all, we have {h} men registered.\nAnd we have {mu} women younger than 20.') |
py | 1a2eeb918f387d630e9884de81cfb4ee96e47303 | # -*- coding: utf-8 -*-
"""
MTD Parser to sqlAlchemy model.
Creates a Python file side by side with the original MTD file.
Can be overloaded with a custom class to enhance/change available
functions. See pineboolib/pnobjectsfactory.py
"""
from pineboolib import application, logging
from pineboolib.application import file
import os
from typing import List, Union, TYPE_CHECKING
if TYPE_CHECKING:
from pineboolib.application.metadata import pnfieldmetadata, pntablemetadata # pragma: no cover
LOGGER = logging.get_logger(__name__)
RESERVER_WORDS = ["pass"]
def mtd_parse(
meta_or_name: Union[str, "pntablemetadata.PNTableMetaData"], path_mtd: str = ""
) -> str:
"""
Parse MTD into SqlAlchemy model.
"""
if application.PROJECT.conn_manager is None:
raise Exception("Project is not connected yet")
dest_file = "%s_model.py" % (
path_mtd
if isinstance(meta_or_name, str)
else "%s/cache/%s" % (application.PROJECT.tmpdir, meta_or_name.name())
)
if os.path.exists(dest_file):
return dest_file
if isinstance(meta_or_name, str):
metadata = application.PROJECT.conn_manager.manager().metadata(meta_or_name, True)
else:
metadata = meta_or_name
if metadata is None:
return ""
lines = _generate_model(metadata)
if not lines:
dest_file = ""
else:
_write_file(dest_file, lines)
return dest_file
def _write_file(file_name: str, lines: List[str]) -> None:
"""Write lines to a file."""
file_ = open(file_name, "w", encoding="UTF-8")
file_.writelines([line if line.endswith("\n") else "%s\n" % line for line in lines])
file_.close()
def _get_meta(file_mtd: "file.File") -> List[str]:
"""Return list with meta."""
mtd_data_list: List[str] = []
if os.path.exists(file_mtd.path()):
mtd_data = application.PROJECT.conn_manager.manager().metadata(file_mtd.filename, True)
if mtd_data is not None:
mtd_data_list = _generate_model(mtd_data, False)
return mtd_data_list
def _generate_model(mtd_table: "pntablemetadata.PNTableMetaData", header: bool = True) -> List[str]:
"""
Create a list of lines from a mtd_table (pntablemetadata.PNTableMetaData).
"""
return _create_declaration(mtd_table, header)
def generate_field(field: "pnfieldmetadata.PNFieldMetaData") -> str:
"""
Get text representation for sqlAlchemy of a field type given its pnfieldmetadata.PNFieldMetaData.
"""
data: List[str] = []
# TYPE
# = "String"
ret = ""
type_ = field.type()
if type_ in ("int, serial"):
ret = "sqlalchemy.Integer"
elif type_ in ("uint"):
ret = "sqlalchemy.BigInteger"
elif type_ in ("calculated"):
ret = "sqlalchemy.String"
# elif field.type() in ("double"):
# ret = "sqlalchemy.Numeric"
# ret += "(%s , %s)" % (field.partInteger(), field.partDecimal())
elif type_ == "double":
ret = "sqlalchemy.Float"
elif type_ in ("string", "stringlist", "pixmap"):
ret = "sqlalchemy.String"
if field.length():
ret += "(%s)" % field.length()
elif type_ in ("bool", "unlock"):
ret = "sqlalchemy.Boolean"
elif type_ == "timestamp":
ret = "sqlalchemy.DateTime"
elif type_ == "json":
ret = "sqlalchemy.types.JSON"
elif type_ == "time":
ret = "sqlalchemy.Time"
elif type_ == "date":
ret = "sqlalchemy.Date"
elif type_ in ("bytearray"):
ret = "sqlalchemy.LargeBinary"
else:
ret = "Desconocido %s" % type_
data.append(ret)
if field.isPrimaryKey():
data.append("primary_key = True")
return ", ".join(data)
def generate_field_metadata(field: "pnfieldmetadata.PNFieldMetaData") -> List[str]:
"""Generate field data from a PNFieldMetaData."""
field_data: List = []
# NAME
field_data.append("'name' : '%s'" % field.name())
# ALIAS
if field.alias():
field_data.append("'alias' : '%s'" % field.alias().replace("'", '"'))
# PK
if field.isPrimaryKey():
field_data.append("'pk' : True")
# CK
if field.isCompoundKey():
field_data.append("'ck' : True")
# TYPE
field_relation: List[str] = []
field_data.append("'type' : '%s'" % field.type())
# LENGTH
if field.length():
field_data.append("'length' : %s" % field.length())
# REGEXP
if field.regExpValidator():
field_data.append("'regexp' : '%s'" % field.regExpValidator())
rel_list: List[str]
# RELATIONS 1M
for rel in field.relationList():
rel_list = []
rel_list.append("'card' : '%s'" % rel.cardinality())
rel_list.append("'table' : '%s'" % rel.foreignTable())
rel_list.append("'field' : '%s'" % rel.foreignField())
if rel.deleteCascade():
rel_list.append("'delc' : True")
if rel.updateCascade():
rel_list.append("'updc' : True")
if not rel.checkIn():
rel_list.append("'checkin' : False")
field_relation.append("{%s}" % ", ".join(rel_list))
# if field_relation:
# field_data.append("'relations' : [%s]" % ", ".join(field_relation))
# RELATIONS M1
if field.private._relation_m1:
rel = field.private._relation_m1
rel_list = []
rel_list.append("'card' : '%s'" % rel.cardinality())
rel_list.append("'table' : '%s'" % rel.foreignTable())
rel_list.append("'field' : '%s'" % rel.foreignField())
if rel.deleteCascade():
rel_list.append("'delC' : True")
if rel.updateCascade():
rel_list.append("'updC' : True")
if not rel.checkIn():
rel_list.append("'checkIn' : False")
field_relation.append("{%s}" % ", ".join(rel_list))
if field_relation:
field_data.append("'relations' : [%s]" % ", ".join(field_relation))
# ASSOCIATED
if field.private.associated_field_name:
field_data.append(
"'associated':{'with' : '%s', 'by' : '%s' }"
% (field.private.associated_field_filter_to, field.private.associated_field_name)
)
# UNIQUE
if field.isUnique():
field_data.append("'isunique' : True")
# ALLOW_NULL
if not field.allowNull():
field_data.append("'null' : False")
# DEFAULT_VALUE
if field.defaultValue() is not None:
value = (
field.defaultValue()
if field.type() in ["bool", "unlock", "int", "uint", "double", "serial", "json"]
else "'%s'" % field.defaultValue()
)
field_data.append("'default' : %s" % value)
# OUT_TRANSACTION
if field.outTransaction():
field_data.append("'outtransaction' : True")
# COUNTER
if field.isCounter():
field_data.append("'counter' : True")
# CALCULATED
if field.calculated():
field_data.append("'calculated' : True")
# FULLY_CALCULATED
if field.fullyCalculated():
field_data.append("'fullycalculated' : True")
# TRIMMED
if field.trimmed():
field_data.append("'trimmed' : True")
# VISIBLE
if not field.visible():
field_data.append("'visible' : False")
# VISIBLE_GRID
if not field.visibleGrid():
field_data.append("'visiblegrid' : False")
# EDITABLE
if not field.editable():
field_data.append("'editable' : False")
if field.type() == "double":
# PARTI
if field.partInteger():
field_data.append("'partI' : %s" % field.partInteger())
# PARTD
if field.partDecimal():
field_data.append("'partD' : %s" % field.partDecimal())
# INDEX
if field.isIndex():
field_data.append("'index' : True")
# OPTIONS_LIST
if field.optionsList():
texto = ""
for item in field.optionsList():
texto += "'%s', " % item
field_data.append("'optionslist' : [%s]" % texto)
# SEARCH_OPTIONS
if field.searchOptions():
texto = ""
for item in field.searchOptions():
texto += "'%s', " % item
field_data.append("'searchoptions' : [%s]" % texto)
return field_data
def use_mtd_fields(path_model: str) -> bool:
"""Return if models use mtd fields."""
file_ = open(path_model, "r", encoding="UTF-8")
lines = file_.readlines()
file_.close()
for line in lines:
if line.find("legacy_metadata") > -1:
return False
return True
def populate_fields(dest_file_name: str, mtd_name: str) -> str:
"""Populate models fields with mtd field."""
new_file_path: str = ""
if mtd_name in application.PROJECT.files.keys():
file_mtd = application.PROJECT.files[mtd_name]
file_ = open(dest_file_name, "r")
lines = file_.readlines()
file_.close()
new_lines: List[str] = []
for number, line in enumerate(list(lines)):
if line.find("__tablename__") > -1:
new_lines = lines[0 : number + 1] + _get_meta(file_mtd) + lines[number + 1 :]
break
if new_lines:
new_key = "%s_model.py" % file_mtd.filename[:-4]
conn = application.PROJECT.conn_manager.mainConn()
db_name = conn.DBName()
application.PROJECT.files[new_key] = file.File(
file_mtd.module,
"%s_model.py" % file_mtd.path(),
basedir=file_mtd.basedir,
sha=file_mtd.sha,
db_name=db_name,
)
application.PROJECT.files[new_key].filekey = "%s_model.py" % file_mtd.filekey
new_file_path = application.PROJECT.files[new_key].path()
if os.path.exists(new_file_path):
os.remove(new_file_path)
_write_file(new_file_path, new_lines)
return new_file_path
def _create_declaration(
mtd_table: "pntablemetadata.PNTableMetaData", header: bool = True
) -> List[str]:
"""Create metadata section."""
data: List[str] = []
list_data_field: List[str] = []
validator_list: List[str] = []
metadata_table: List = []
metadata_table.append("'name' : '%s'" % mtd_table.name())
metadata_table.append("'alias' : '%s'" % mtd_table.alias())
if mtd_table.isQuery():
metadata_table.append("'query':'%s'" % mtd_table.query())
if mtd_table.concurWarn():
metadata_table.append("'concurwarn': True")
if mtd_table.detectLocks():
metadata_table.append("'detectlocks':True")
if mtd_table.FTSFunction():
metadata_table.append("'ftsfunction' :'%s'" % mtd_table.FTSFunction())
try:
mtd_table.primaryKey()
except Exception as error: # noqa: F841
pass
field_list: List[List[str]] = []
pk_found = False
for field in mtd_table.fieldList(): # Crea los campos
if field.isPrimaryKey():
pk_found = True
if field.name() in validator_list:
LOGGER.warning(
"Hay un campo %s duplicado en %s.mtd. Omitido", field.name(), mtd_table.name()
)
else:
field_data = []
field_data.append(" ")
if field.name() in RESERVER_WORDS:
field_data.append("%s_" % field.name())
else:
field_data.append(field.name())
field_data.append(" = sqlalchemy.Column('%s', " % field.name())
field_list.append(generate_field_metadata(field))
field_data.append(generate_field(field))
field_data.append(")")
validator_list.append(field.name())
if field.isPrimaryKey():
pk_found = True
list_data_field.append("".join(field_data))
meta_fields: List = []
for meta_field in field_list:
meta_fields.append("{%s}" % ", ".join(meta_field))
metadata_table.append(
"\n 'fields' : [\n %s\n ]" % ",\n ".join(meta_fields)
)
class_name = "%s%s" % (mtd_table.name()[0].upper(), mtd_table.name()[1:])
if header:
data.append("# -*- coding: utf-8 -*-")
data.append("# Translated with pineboolib %s" % application.PINEBOO_VER)
data.append(
'"""%s%s_model module."""' % (mtd_table.name()[0].upper(), mtd_table.name()[1:])
)
data.append("")
data.append("from pineboolib.application.database.orm import basemodel")
data.append("from pineboolib.qsa import qsa")
data.append("")
data.append("import sqlalchemy")
data.append("")
data.append("")
data.append("# @class_declaration Oficial")
data.append("class Oficial(basemodel.BaseModel): # type: ignore [misc] # noqa: F821")
data.append(' """Oficial class."""')
data.append(" __tablename__ = '%s'" % mtd_table.name()) # si query nombre query
data.append("")
else:
data.append("")
data.append("")
data.append(" # --- POPULATED WITH METADATA FIELDS ---")
data.append("")
data.append("")
data.append(" # --- Metadata --->")
data.append(" legacy_metadata = {%s}" % ", ".join(metadata_table))
data.append("\n")
data.append(" # <--- Metadata ---")
data.append("")
data.append("")
data.append(" # --- Fields --->")
data.append("")
for data_field in list_data_field:
data.append(data_field)
data.append("")
data.append(" # <--- Fields ---")
data.append("")
if header:
data.append("# @class_declaration %s" % class_name)
data.append(
"class %s(Oficial): # type: ignore [misc] # noqa: F821" % class_name
        )  # use the query name if it is a query
data.append(' """%s class."""' % class_name)
data.append(" pass")
if not pk_found and not mtd_table.isQuery():
LOGGER.warning(
"La tabla %s no tiene definida una clave primaria. No se generará el model."
% (mtd_table.name())
)
data = []
return data
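
# Illustrative sketch (assumes a connected pineboo project and an existing
# "clientes" table; the /tmp path is hypothetical). mtd_parse returns the path
# of the generated SQLAlchemy model, or "" when no metadata could be loaded.
def _example_generate_model():
    model_path = mtd_parse("clientes", "/tmp/clientes")
    if model_path:
        LOGGER.info("Model written to %s", model_path)
    return model_path
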
|
py | 1a2eebb7c19bacf5dae0ea0950d2f17e0427094b | #!/usr/bin/python
# Tests if the SS segment override prefix is not explicitly produced when unnecessary
# Github issue: #9
# Author: Duncan (mrexodia)
from keystone import *
import regress
class TestX86(regress.RegressTest):
def runTest(self):
# Initialize Keystone engine
ks = Ks(KS_ARCH_X86, KS_MODE_32)
# Assemble to get back insn encoding & statement count
encoding1, _ = ks.asm(b"MOV EAX,DWORD PTR SS:[ESP+8]")
encoding2, _ = ks.asm(b"MOV EAX,DWORD PTR SS:[EBP+8]")
# Assert the result
self.assertEqual(encoding1, [ 0x8B, 0x44, 0x24, 0x08 ])
self.assertEqual(encoding2, [ 0x8B, 0x45, 0x08 ])
encoding, _ = ks.asm(b"MOV DWORD PTR SS:[EBP-0xC],0x1994000")
self.assertEqual(encoding, [ 0xC7, 0x45, 0xF4, 0x00, 0x40, 0x99, 0x01 ])
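
# Illustrative sketch: the same kind of check without the regress harness.
# Ks.asm() returns the encoding bytes plus the number of statements assembled.
def example_direct_assembly():
    ks = Ks(KS_ARCH_X86, KS_MODE_32)
    encoding, count = ks.asm(b"MOV EAX,DWORD PTR SS:[ESP+8]")
    print("%s (%u statements)" % (encoding, count))
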
if __name__ == '__main__':
regress.main()
|
py | 1a2eebea61469c862ce664d315bdd9275bcab6f5 | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
The main training/evaluation loop
Modified from: https://github.com/facebookresearch/deit
"""
import argparse
import datetime
import numpy as np
import time
import torch
import torch.backends.cudnn as cudnn
import json
import os
from pathlib import Path
from timm.data import Mixup
from timm.models import create_model
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.scheduler import create_scheduler
from timm.optim import create_optimizer
from timm.utils import NativeScaler, get_state_dict, ModelEma
from datasets import build_dataset
from engine import train_one_epoch, evaluate
from losses import DistillationLoss
from samplers import RASampler
import utils
import xcit
def get_args_parser():
parser = argparse.ArgumentParser('XCiT training and evaluation script', add_help=False)
parser.add_argument('--batch-size', default=64, type=int)
parser.add_argument('--epochs', default=400, type=int)
# Model parameters
parser.add_argument('--model', default='xcit_s_12', type=str, metavar='MODEL',
help='Name of model to train')
parser.add_argument('--input-size', default=224, type=int, help='images input size')
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
help='Dropout rate (default: 0.)')
parser.add_argument('--drop-path', type=float, default=0.1, metavar='PCT',
help='Drop path rate (default: 0.1)')
parser.add_argument('--model-ema', action='store_true')
parser.add_argument('--no-model-ema', action='store_false', dest='model_ema')
parser.set_defaults(model_ema=True)
parser.add_argument('--model-ema-decay', type=float, default=0.99996, help='')
parser.add_argument('--model-ema-force-cpu', action='store_true', default=False, help='')
# Optimizer parameters
parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
help='Optimizer (default: "adamw"')
parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: 1e-8)')
parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=0.05,
help='weight decay (default: 0.05)')
# Learning rate schedule parameters
parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER',
help='LR scheduler (default: "cosine"')
parser.add_argument('--lr', type=float, default=5e-4, metavar='LR',
help='learning rate (default: 5e-4)')
parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',
help='learning rate noise on/off epoch percentages')
parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',
help='learning rate noise limit percent (default: 0.67)')
parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',
help='learning rate noise std-dev (default: 1.0)')
parser.add_argument('--warmup-lr', type=float, default=1e-6, metavar='LR',
help='warmup learning rate (default: 1e-6)')
parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
parser.add_argument('--decay-epochs', type=float, default=30, metavar='N',
help='epoch interval to decay LR')
parser.add_argument('--warmup-epochs', type=int, default=5, metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',
help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
parser.add_argument('--patience-epochs', type=int, default=10, metavar='N',
help='patience epochs for Plateau LR scheduler (default: 10')
parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
help='LR decay rate (default: 0.1)')
# Augmentation parameters
parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
help='Color jitter factor (default: 0.4)')
parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
                        help='Use AutoAugment policy. "v0" or "original" '
                             '(default: rand-m9-mstd0.5-inc1)')
parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)')
parser.add_argument('--train-interpolation', type=str, default='bicubic',
help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
parser.add_argument('--repeated-aug', action='store_true')
parser.add_argument('--no-repeated-aug', action='store_false', dest='repeated_aug')
parser.set_defaults(repeated_aug=True)
# * Random Erase params
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
# * Mixup params
parser.add_argument('--mixup', type=float, default=0.8,
help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
parser.add_argument('--cutmix', type=float, default=1.0,
help='cutmix alpha, cutmix enabled if > 0. (default: 1.0)')
parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup-prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup-mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
# Distillation parameters
parser.add_argument('--teacher-model', default='regnety_160', type=str, metavar='MODEL',
help='Name of teacher model to train (default: "regnety_160"')
parser.add_argument('--teacher-path', type=str, default='')
parser.add_argument('--distillation-type', default='none', choices=['none', 'soft', 'hard'], type=str, help="")
parser.add_argument('--distillation-alpha', default=0.5, type=float, help="")
parser.add_argument('--distillation-tau', default=1.0, type=float, help="")
# Dataset parameters
parser.add_argument('--data-path', default='/datasets01/imagenet_full_size/061417/', type=str,
help='dataset path')
parser.add_argument('--data-set', default='IMNET', choices=['CIFAR10', 'CIFAR100', 'IMNET',
'INAT', 'INAT19', 'CARS', 'FLOWERS',
'IMNET22k'],
type=str, help='Image Net dataset path')
parser.add_argument('--inat-category', default='name',
choices=['kingdom', 'phylum', 'class', 'order', 'supercategory', 'family', 'genus', 'name'],
type=str, help='semantic granularity')
parser.add_argument('--output_dir', default='',
help='path where to save, empty for no saving')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default='', help='resume from checkpoint')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
parser.add_argument('--dist-eval', action='store_true', default=False, help='Enabling distributed evaluation')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--pin-mem', action='store_true',
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no-pin-mem', action='store_false', dest='pin_mem',
help='')
parser.set_defaults(pin_mem=True)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
parser.add_argument('--test-freq', default=1, type=int, help='Number of epochs between \
validation runs.')
parser.add_argument('--full_crop', action='store_true', help='use crop_ratio=1.0 instead of the\
default 0.875 (Used by CaiT).')
parser.add_argument("--pretrained", default=None, type=str, help='Path to pre-trained checkpoint')
parser.add_argument('--surgery', default=None, type=str, help='Path to checkpoint to copy the \
patch projection from. \
Can improve stability for very \
large models.')
return parser
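
# Illustrative sketch: get_args_parser() uses add_help=False, so it is meant to
# be wrapped as a parent parser; parsing an empty argv exposes the defaults.
def _example_show_defaults():
    parser = argparse.ArgumentParser('XCiT', parents=[get_args_parser()])
    args = parser.parse_args([])
    print(args.model, args.batch_size, args.lr)
    return args
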
def main(args):
utils.init_distributed_mode(args)
print(args)
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
dataset_train, args.nb_classes = build_dataset(is_train=True, args=args)
dataset_val, _ = build_dataset(is_train=False, args=args)
if True: # args.distributed:
num_tasks = utils.get_world_size()
global_rank = utils.get_rank()
if args.repeated_aug:
sampler_train = RASampler(
dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
)
else:
sampler_train = torch.utils.data.DistributedSampler(
dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
)
if args.dist_eval:
if len(dataset_val) % num_tasks != 0:
print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
'This will slightly alter validation results as extra duplicate entries are added to achieve '
'equal num of samples per-process.')
sampler_val = torch.utils.data.DistributedSampler(
dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=False)
else:
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
else:
sampler_train = torch.utils.data.RandomSampler(dataset_train)
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
data_loader_train = torch.utils.data.DataLoader(
dataset_train, sampler=sampler_train,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=True,
)
data_loader_val = torch.utils.data.DataLoader(
dataset_val, sampler=sampler_val,
batch_size=int(1.5 * args.batch_size),
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=False
)
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
mixup_fn = Mixup(
mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
label_smoothing=args.smoothing, num_classes=args.nb_classes)
print(f"Creating model: {args.model}")
model = create_model(
args.model,
pretrained=False,
num_classes=args.nb_classes,
drop_rate=args.drop,
drop_path_rate=args.drop_path,
drop_block_rate=None
)
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
if args.pretrained:
if args.pretrained.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.pretrained, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.pretrained, map_location='cpu')
checkpoint_model = checkpoint['model']
state_dict = model.state_dict()
for k in ['head.weight', 'head.bias']:
if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
print(f"Removing key {k} from pretrained checkpoint")
del checkpoint_model[k]
model.load_state_dict(checkpoint_model, strict=True)
model.to(device)
if args.surgery:
checkpoint = torch.load(args.surgery, map_location='cpu')
checkpoint_model = checkpoint['model']
patch_embed_weights = {key.replace("patch_embed.", ""): value for key,
value in checkpoint['model'].items() if 'patch_embed' in key}
model.patch_embed.load_state_dict(patch_embed_weights)
for p in model.patch_embed.parameters():
p.requires_grad = False
model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
model_ema = ModelEma(
model,
decay=args.model_ema_decay,
device='cpu' if args.model_ema_force_cpu else '',
resume='')
model_without_ddp = model
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
model_without_ddp = model.module
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('number of params:', n_parameters)
linear_scaled_lr = args.lr * args.batch_size * utils.get_world_size() / 512.0
args.lr = linear_scaled_lr
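    # Hedged worked example (comment only, not original code): with --batch_size 128
    # on 8 processes and a base lr of 5e-4 (illustrative value), the scaled lr is
    # 5e-4 * 128 * 8 / 512 = 1e-3.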
optimizer = create_optimizer(args, model_without_ddp)
loss_scaler = NativeScaler()
lr_scheduler, _ = create_scheduler(args, optimizer)
criterion = LabelSmoothingCrossEntropy()
if args.mixup > 0.:
# smoothing is handled with mixup label transform
criterion = SoftTargetCrossEntropy()
elif args.smoothing:
criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
else:
criterion = torch.nn.CrossEntropyLoss()
teacher_model = None
if args.distillation_type != 'none':
assert args.teacher_path, 'need to specify teacher-path when using distillation'
print(f"Creating teacher model: {args.teacher_model}")
teacher_model = create_model(
args.teacher_model,
pretrained=False,
num_classes=args.nb_classes,
global_pool='avg',
)
if args.teacher_path.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.teacher_path, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.teacher_path, map_location='cpu')
teacher_model.load_state_dict(checkpoint['model'])
teacher_model.to(device)
teacher_model.eval()
# wrap the criterion in our custom DistillationLoss, which
# just dispatches to the original criterion if args.distillation_type is 'none'
criterion = DistillationLoss(
criterion, teacher_model, args.distillation_type, args.distillation_alpha, args.distillation_tau
)
output_dir = Path(args.output_dir)
if not os.path.exists(output_dir):
os.mkdir(output_dir)
resume_path = os.path.join(output_dir, 'checkpoint.pth')
if args.resume and os.path.exists(resume_path):
if args.resume.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.resume, map_location='cpu', check_hash=True)
else:
print("Loading from checkpoint ...")
checkpoint = torch.load(resume_path, map_location='cpu')
model_without_ddp.load_state_dict(checkpoint['model'])
if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
optimizer.load_state_dict(checkpoint['optimizer'])
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
args.start_epoch = checkpoint['epoch'] + 1
if args.model_ema:
utils._load_checkpoint_for_ema(model_ema, checkpoint['model_ema'])
if 'scaler' in checkpoint:
loss_scaler.load_state_dict(checkpoint['scaler'])
if args.eval:
test_stats = evaluate(data_loader_val, model, device)
print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
return
print(f"Start training for {args.epochs} epochs")
start_time = time.time()
max_accuracy = 0.0
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
data_loader_train.sampler.set_epoch(epoch)
train_stats = train_one_epoch(
model, criterion, data_loader_train,
optimizer, device, epoch, loss_scaler,
args.clip_grad, model_ema, mixup_fn,
surgery=args.surgery
)
lr_scheduler.step(epoch)
if args.output_dir:
checkpoint_paths = [output_dir / 'checkpoint.pth']
for checkpoint_path in checkpoint_paths:
utils.save_on_master({
'model': model_without_ddp.state_dict(),
'optimizer': optimizer.state_dict(),
'lr_scheduler': lr_scheduler.state_dict(),
'epoch': epoch,
'model_ema': get_state_dict(model_ema),
'scaler': loss_scaler.state_dict(),
'args': args,
}, checkpoint_path)
if (epoch % args.test_freq == 0) or (epoch == args.epochs - 1):
test_stats = evaluate(data_loader_val, model, device)
if test_stats["acc1"] >= max_accuracy:
utils.save_on_master({
'model': model_without_ddp.state_dict(),
'optimizer': optimizer.state_dict(),
'lr_scheduler': lr_scheduler.state_dict(),
'epoch': epoch,
'model_ema': get_state_dict(model_ema),
'args': args,
}, os.path.join(output_dir, 'best_model.pth'))
print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
max_accuracy = max(max_accuracy, test_stats["acc1"])
print(f'Max accuracy: {max_accuracy:.2f}%')
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
**{f'test_{k}': v for k, v in test_stats.items()},
'epoch': epoch,
'n_parameters': n_parameters}
if args.output_dir and utils.is_main_process():
with (output_dir / "log.txt").open("a") as f:
f.write(json.dumps(log_stats) + "\n")
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
parser = argparse.ArgumentParser('XCiT training and evaluation script', parents=[get_args_parser()])
args = parser.parse_args()
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
main(args)
|
py | 1a2eed3a1d54dddbdefb034dee20a0c99e513fe1 | ###################################
# File Name : exception_performance.py
###################################
#!/usr/bin/python3
import os
import time
TRY_TEST_FILE="performance_try_file"
TRY_ELSE_TEST_FILE="performance_try_else_file"
def write_file_only_try():
try:
f = open(TRY_TEST_FILE, "w")
for i in range(10000000):
f.write(str(i))
f.close()
except:
print ("File open error")
finally:
os.remove(TRY_TEST_FILE)
def write_file_try_else():
try:
f = open(TRY_ELSE_TEST_FILE, "w")
except:
print ("File open error")
else:
for i in range(10000000):
f.write(str(i))
f.close()
finally:
os.remove(TRY_ELSE_TEST_FILE)
def check_runtime(func):
accumulate_time = 0
for i in range(10):
start = time.time()
func()
accumulate_time += (time.time() - start)
print ("Run time summary : %s" % str(accumulate_time / 10))
if __name__ == "__main__":
print ("=== Try Performance Test ===")
check_runtime(write_file_only_try)
print ("=== Try/Else Performance Test ===")
check_runtime(write_file_try_else)
|
py | 1a2eed613faa08496c8905dc43f153e18b0df979 | #pragma out
#pragma repy
def foo(ip,port,mess, ch):
print ip,port,mess,ch
stopcomm(ch)
if callfunc == 'initialize':
ip = getmyip()
recvmess(ip,<messport>,foo)
sleep(.1)
sendmess(ip,<messport>,'hi')
|
py | 1a2eee7c782323d32fa6afe322d8b0b4f6a6c96d | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
###########################################################
Runner Validation Test Suite for Cross-language Transforms
###########################################################
As per Beams's Portability Framework design, Cross-language transforms
should work out of the box. In spite of this, there always exists a
possibility of rough edges existing. It could be caused due to unpolished
implementation of any part of the execution code path, for example:
- Transform expansion [SDK]
- Pipeline construction [SDK]
- Cross-language artifact staging [Runner]
- Language specific serialization/deserialization of PCollection (and
other data types) [Runner/SDK]
In an effort to improve developer visibility into potential problems,
this test suite validates correct execution of 5 Core Beam transforms when
used as cross-language transforms within the Python SDK from any foreign SDK:
- ParDo
(https://beam.apache.org/documentation/programming-guide/#pardo)
- GroupByKey
(https://beam.apache.org/documentation/programming-guide/#groupbykey)
- CoGroupByKey
(https://beam.apache.org/documentation/programming-guide/#cogroupbykey)
- Combine
(https://beam.apache.org/documentation/programming-guide/#combine)
- Flatten
(https://beam.apache.org/documentation/programming-guide/#flatten)
- Partition
(https://beam.apache.org/documentation/programming-guide/#partition)
See Runner Validation Test Plan for Cross-language transforms at
https://docs.google.com/document/d/1xQp0ElIV84b8OCVz8CD2hvbiWdR8w4BvWxPTZJZA6NA
for further details.
"""
import logging
import os
import typing
import unittest
from nose.plugins.attrib import attr
import apache_beam as beam
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.transforms.external import ImplicitSchemaPayloadBuilder
TEST_PREFIX_URN = "beam:transforms:xlang:test:prefix"
TEST_MULTI_URN = "beam:transforms:xlang:test:multi"
TEST_GBK_URN = "beam:transforms:xlang:test:gbk"
TEST_CGBK_URN = "beam:transforms:xlang:test:cgbk"
TEST_COMGL_URN = "beam:transforms:xlang:test:comgl"
TEST_COMPK_URN = "beam:transforms:xlang:test:compk"
TEST_FLATTEN_URN = "beam:transforms:xlang:test:flatten"
TEST_PARTITION_URN = "beam:transforms:xlang:test:partition"
class CrossLanguageTestPipelines(object):
def __init__(self, expansion_service=None):
self.expansion_service = expansion_service or (
'localhost:%s' % os.environ.get('EXPANSION_PORT'))
def run_prefix(self, pipeline):
"""
Target transform - ParDo
(https://beam.apache.org/documentation/programming-guide/#pardo)
Test scenario - Mapping elements from a single input collection to a
single output collection
Boundary conditions checked -
- PCollection<?> to external transforms
- PCollection<?> from external transforms
"""
with pipeline as p:
res = (
p
| beam.Create(['a', 'b']).with_output_types(str)
| beam.ExternalTransform(
TEST_PREFIX_URN,
ImplicitSchemaPayloadBuilder({'data': u'0'}),
self.expansion_service))
assert_that(res, equal_to(['0a', '0b']))
def run_multi_input_output_with_sideinput(self, pipeline):
"""
Target transform - ParDo
(https://beam.apache.org/documentation/programming-guide/#pardo)
Test scenario - Mapping elements from multiple input collections (main
and side) to multiple output collections (main and side)
Boundary conditions checked -
- PCollectionTuple to external transforms
- PCollectionTuple from external transforms
"""
with pipeline as p:
main1 = p | 'Main1' >> beam.Create(
['a', 'bb'], reshuffle=False).with_output_types(str)
main2 = p | 'Main2' >> beam.Create(
['x', 'yy', 'zzz'], reshuffle=False).with_output_types(str)
side = p | 'Side' >> beam.Create(['s']).with_output_types(str)
res = dict(
main1=main1, main2=main2, side=side) | beam.ExternalTransform(
TEST_MULTI_URN, None, self.expansion_service)
assert_that(res['main'], equal_to(['as', 'bbs', 'xs', 'yys', 'zzzs']))
assert_that(res['side'], equal_to(['ss']), label='CheckSide')
def run_group_by_key(self, pipeline):
"""
Target transform - GroupByKey
(https://beam.apache.org/documentation/programming-guide/#groupbykey)
Test scenario - Grouping a collection of KV<K,V> to a collection of
KV<K, Iterable<V>> by key
Boundary conditions checked -
- PCollection<KV<?, ?>> to external transforms
- PCollection<KV<?, Iterable<?>>> from external transforms
"""
with pipeline as p:
res = (
p
| beam.Create([(0, "1"), (0, "2"),
(1, "3")], reshuffle=False).with_output_types(
typing.Tuple[int, str])
| beam.ExternalTransform(TEST_GBK_URN, None, self.expansion_service)
| beam.Map(lambda x: "{}:{}".format(x[0], ','.join(sorted(x[1])))))
assert_that(res, equal_to(['0:1,2', '1:3']))
def run_cogroup_by_key(self, pipeline):
"""
Target transform - CoGroupByKey
(https://beam.apache.org/documentation/programming-guide/#cogroupbykey)
Test scenario - Grouping multiple input collections with keys to a
collection of KV<K, CoGbkResult> by key
Boundary conditions checked -
- KeyedPCollectionTuple<?> to external transforms
- PCollection<KV<?, Iterable<?>>> from external transforms
"""
with pipeline as p:
col1 = p | 'create_col1' >> beam.Create(
[(0, "1"), (0, "2"), (1, "3")], reshuffle=False).with_output_types(
typing.Tuple[int, str])
col2 = p | 'create_col2' >> beam.Create(
[(0, "4"), (1, "5"), (1, "6")], reshuffle=False).with_output_types(
typing.Tuple[int, str])
res = (
dict(col1=col1, col2=col2)
| beam.ExternalTransform(TEST_CGBK_URN, None, self.expansion_service)
| beam.Map(lambda x: "{}:{}".format(x[0], ','.join(sorted(x[1])))))
assert_that(res, equal_to(['0:1,2,4', '1:3,5,6']))
def run_combine_globally(self, pipeline):
"""
Target transform - Combine
(https://beam.apache.org/documentation/programming-guide/#combine)
Test scenario - Combining elements globally with a predefined simple
CombineFn
Boundary conditions checked -
- PCollection<?> to external transforms
- PCollection<?> from external transforms
"""
with pipeline as p:
res = (
p
| beam.Create([1, 2, 3]).with_output_types(int)
| beam.ExternalTransform(
TEST_COMGL_URN, None, self.expansion_service))
assert_that(res, equal_to([6]))
def run_combine_per_key(self, pipeline):
"""
Target transform - Combine
(https://beam.apache.org/documentation/programming-guide/#combine)
Test scenario - Combining elements per key with a predefined simple
merging function
Boundary conditions checked -
- PCollection<?> to external transforms
- PCollection<?> from external transforms
"""
with pipeline as p:
res = (
p
| beam.Create([('a', 1), ('a', 2),
('b', 3)]).with_output_types(typing.Tuple[str, int])
| beam.ExternalTransform(
TEST_COMPK_URN, None, self.expansion_service))
assert_that(res, equal_to([('a', 3), ('b', 3)]))
def run_flatten(self, pipeline):
"""
Target transform - Flatten
(https://beam.apache.org/documentation/programming-guide/#flatten)
Test scenario - Merging multiple collections into a single collection
Boundary conditions checked -
- PCollectionList<?> to external transforms
- PCollection<?> from external transforms
"""
with pipeline as p:
col1 = p | 'col1' >> beam.Create([1, 2, 3]).with_output_types(int)
col2 = p | 'col2' >> beam.Create([4, 5, 6]).with_output_types(int)
res = ((col1, col2)
| beam.ExternalTransform(
TEST_FLATTEN_URN, None, self.expansion_service))
assert_that(res, equal_to([1, 2, 3, 4, 5, 6]))
def run_partition(self, pipeline):
"""
Target transform - Partition
(https://beam.apache.org/documentation/programming-guide/#partition)
Test scenario - Splitting a single collection into multiple collections
with a predefined simple PartitionFn
Boundary conditions checked -
- PCollection<?> to external transforms
- PCollectionList<?> from external transforms
"""
with pipeline as p:
res = (
p
| beam.Create([1, 2, 3, 4, 5, 6]).with_output_types(int)
| beam.ExternalTransform(
TEST_PARTITION_URN, None, self.expansion_service))
assert_that(res['0'], equal_to([2, 4, 6]), label='check_even')
assert_that(res['1'], equal_to([1, 3, 5]), label='check_odd')
@attr('UsesCrossLanguageTransforms')
@unittest.skipUnless(
os.environ.get('EXPANSION_PORT'),
"EXPANSION_PORT environment var is not provided.")
class ValidateRunnerXlangTest(unittest.TestCase):
_multiprocess_can_split_ = True
def create_pipeline(self):
test_pipeline = TestPipeline()
test_pipeline.not_use_test_runner_api = True
return test_pipeline
def test_prefix(self, test_pipeline=None):
CrossLanguageTestPipelines().run_prefix(
test_pipeline or self.create_pipeline())
def test_multi_input_output_with_sideinput(self, test_pipeline=None):
CrossLanguageTestPipelines().run_multi_input_output_with_sideinput(
test_pipeline or self.create_pipeline())
def test_group_by_key(self, test_pipeline=None):
CrossLanguageTestPipelines().run_group_by_key(
test_pipeline or self.create_pipeline())
def test_cogroup_by_key(self, test_pipeline=None):
CrossLanguageTestPipelines().run_cogroup_by_key(
test_pipeline or self.create_pipeline())
def test_combine_globally(self, test_pipeline=None):
CrossLanguageTestPipelines().run_combine_globally(
test_pipeline or self.create_pipeline())
def test_combine_per_key(self, test_pipeline=None):
CrossLanguageTestPipelines().run_combine_per_key(
test_pipeline or self.create_pipeline())
def test_flatten(self, test_pipeline=None):
CrossLanguageTestPipelines().run_flatten(
test_pipeline or self.create_pipeline())
def test_partition(self, test_pipeline=None):
CrossLanguageTestPipelines().run_partition(
test_pipeline or self.create_pipeline())
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
|
py | 1a2eee8b4ab428996b7aa50d407ec5841bed9992 | # coding: utf-8
"""
EPIC API
REST API for interacting with EPIC (https://epic.zenotech.com) services. <br /> Please note this API is in BETA and does not yet contain all EPIC functionality. # noqa: E501
The version of the OpenAPI document: v2
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from epiccore.configuration import Configuration
class TeamDetails(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'int',
'name': 'str',
'number_of_members': 'int',
'user_role': 'str',
'members': 'str'
}
attribute_map = {
'id': 'id',
'name': 'name',
'number_of_members': 'number_of_members',
'user_role': 'user_role',
'members': 'members'
}
def __init__(self, id=None, name=None, number_of_members=None, user_role=None, members=None, local_vars_configuration=None): # noqa: E501
"""TeamDetails - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._id = None
self._name = None
self._number_of_members = None
self._user_role = None
self._members = None
self.discriminator = None
if id is not None:
self.id = id
if name is not None:
self.name = name
if number_of_members is not None:
self.number_of_members = number_of_members
if user_role is not None:
self.user_role = user_role
if members is not None:
self.members = members
@property
def id(self):
"""Gets the id of this TeamDetails. # noqa: E501
ID for this team # noqa: E501
:return: The id of this TeamDetails. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this TeamDetails.
ID for this team # noqa: E501
:param id: The id of this TeamDetails. # noqa: E501
:type id: int
"""
self._id = id
@property
def name(self):
"""Gets the name of this TeamDetails. # noqa: E501
Name of this team # noqa: E501
:return: The name of this TeamDetails. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this TeamDetails.
Name of this team # noqa: E501
:param name: The name of this TeamDetails. # noqa: E501
:type name: str
"""
if (self.local_vars_configuration.client_side_validation and
name is not None and len(name) < 1):
raise ValueError("Invalid value for `name`, length must be greater than or equal to `1`") # noqa: E501
self._name = name
@property
def number_of_members(self):
"""Gets the number_of_members of this TeamDetails. # noqa: E501
Number of members in this team # noqa: E501
:return: The number_of_members of this TeamDetails. # noqa: E501
:rtype: int
"""
return self._number_of_members
@number_of_members.setter
def number_of_members(self, number_of_members):
"""Sets the number_of_members of this TeamDetails.
Number of members in this team # noqa: E501
:param number_of_members: The number_of_members of this TeamDetails. # noqa: E501
:type number_of_members: int
"""
self._number_of_members = number_of_members
@property
def user_role(self):
"""Gets the user_role of this TeamDetails. # noqa: E501
Your role in this team # noqa: E501
:return: The user_role of this TeamDetails. # noqa: E501
:rtype: str
"""
return self._user_role
@user_role.setter
def user_role(self, user_role):
"""Sets the user_role of this TeamDetails.
Your role in this team # noqa: E501
:param user_role: The user_role of this TeamDetails. # noqa: E501
:type user_role: str
"""
self._user_role = user_role
@property
def members(self):
"""Gets the members of this TeamDetails. # noqa: E501
List of user ids and roles for members of this team # noqa: E501
:return: The members of this TeamDetails. # noqa: E501
:rtype: str
"""
return self._members
@members.setter
def members(self, members):
"""Sets the members of this TeamDetails.
List of user ids and roles for members of this team # noqa: E501
:param members: The members of this TeamDetails. # noqa: E501
:type members: str
"""
self._members = members
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TeamDetails):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, TeamDetails):
return True
return self.to_dict() != other.to_dict()
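# Hedged usage sketch (not part of the generated module); field values are illustrative:
#
#     team = TeamDetails(id=7, name="cfd-group", number_of_members=3, user_role="Admin")
#     team.to_dict()
#     # -> {'id': 7, 'name': 'cfd-group', 'number_of_members': 3,
#     #     'user_role': 'Admin', 'members': None}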
|
py | 1a2ef19fe5cde47f873d292aee2cd6eb153fe33c | from django.db import models
from datetime import datetime
class Activity(models.Model):
activity_id = models.AutoField(primary_key = True)
title = models.CharField(max_length = 30)
image = models.ImageField(upload_to = 'images', default = 'default/test_image.jpg')
    status = models.CharField(max_length = 20, default = '正在抢票')  # default means "ticket sales in progress"
remain = models.IntegerField(default = 100)
publisher = models.CharField(max_length = 30, default = 'unknown publisher')
    description = models.CharField(max_length = 1024, default = '哎呀,这个活动的介绍文字似乎走丢了...')  # default means "Oops, this activity's description seems to have gone missing..."
time = models.DateTimeField(default = datetime.now)
place = models.CharField(max_length = 30, default = 'none place')
price = models.FloatField(default = 0.0)
    heat = models.FloatField(default = 50.0) # activity heat (popularity score)
    scan_change = models.FloatField(default = 0.02) # heat added when the activity is viewed
    star_change = models.FloatField(default = 0.5) # heat change when starring/unstarring
    purchase_change = models.FloatField(default = 2.0) # heat change when buying/refunding a ticket
    # arrive_change = models.FloatField(default = 2.5) # heat change when attendees check in
    # max_heat = models.FloatField(default = 1000)
    min_heat = models.FloatField(default = 0) # heat is set to this minimum once the activity expires
    # this activity has the following ticket inspectors
# inspectors = models.ManyToManyField(User)
class Meta:
db_table = 'Activity'
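# Hedged illustration (not part of the original file): the *_change fields above are
# meant to be applied to `heat` by view code, e.g. when a user stars an activity:
#
#     activity.heat += activity.star_change
#     activity.save()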
class User(models.Model):
# basic info
user_id = models.AutoField(primary_key = True)
openid = models.CharField(max_length = 30)
username = models.CharField(max_length = 30)
password = models.CharField(max_length = 30)
student_id = models.CharField(max_length = 10, default=0, blank=True, null=True)
    # ManyToManyField has no null option; blank=True allows the field to be left empty in the admin
    # related_name is required when the same foreign model is referenced more than once
starred = models.ManyToManyField(Activity, related_name='starred', blank=True)
    # this user has submitted pending ticket-inspector applications for the following activities
inspector_apply_list = models.ManyToManyField(Activity, related_name='inspector_apply_list', blank=True)
    # this user is a ticket inspector for the following activities
inspector_list = models.ManyToManyField(Activity, related_name='inspector_list', blank=True)
    # verify info
    is_verified = models.BooleanField(default=False) # student-ID login verification hook
class Meta:
db_table = 'User'
class Ticket(models.Model):
ticket_id = models.AutoField(primary_key = True)
owner = models.ForeignKey(User, on_delete=models.DO_NOTHING, default = '')
activity = models.ForeignKey(Activity, on_delete=models.DO_NOTHING, default = '')
    is_valid = models.BooleanField(default=False) # only indicates whether the ticket has been refunded
    purchaseTime = models.DateTimeField('购票日期', auto_now_add = True) # NOTE: auto_now_add makes this read-only; to allow editing purchaseTime, use default = timezone.now here instead
    # WARNING: when modifying a field that uses auto_now or auto_now_add, first change it to default = 'xxxx-xx-xx xx:xx:xx' and complete one migration
QRCode = models.ImageField(upload_to = 'QR', default = 'QR/default.png')
is_checked = models.BooleanField(default=False)
class Meta:
db_table = 'Ticket'
|
py | 1a2ef246f1d7ab7917028ee9191f2a5562970364 | """InVEST specific code utils."""
import codecs
import math
import os
import contextlib
import logging
import tempfile
import shutil
from datetime import datetime
import time
import pandas
import numpy
from shapely.wkt import loads
from osgeo import gdal
from osgeo import osr
import pygeoprocessing
LOGGER = logging.getLogger(__name__)
LOG_FMT = (
"%(asctime)s "
"(%(name)s) "
"%(module)s.%(funcName)s(%(lineno)d) "
"%(levelname)s %(message)s")
# GDAL has 5 error levels, python's logging has 6. We skip logging.INFO.
# A dict clarifies the mapping between levels.
GDAL_ERROR_LEVELS = {
gdal.CE_None: logging.NOTSET,
gdal.CE_Debug: logging.DEBUG,
gdal.CE_Warning: logging.WARNING,
gdal.CE_Failure: logging.ERROR,
gdal.CE_Fatal: logging.CRITICAL,
}
# In GDAL 3.0 spatial references no longer ignore Geographic CRS Axis Order
# and conform to Lat first, Lon Second. Transforms expect (lat, lon) order
# as opposed to the GIS friendly (lon, lat). See
# https://trac.osgeo.org/gdal/wiki/rfc73_proj6_wkt2_srsbarn Axis order
# issues. SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER) swaps the
# axis order, which will use Lon,Lat order for Geographic CRS, but otherwise
# leaves Projected CRS alone
DEFAULT_OSR_AXIS_MAPPING_STRATEGY = osr.OAMS_TRADITIONAL_GIS_ORDER
@contextlib.contextmanager
def capture_gdal_logging():
"""Context manager for logging GDAL errors with python logging.
GDAL error messages are logged via python's logging system, at a severity
that corresponds to a log level in ``logging``. Error messages are logged
with the ``osgeo.gdal`` logger.
Args:
``None``
Returns:
``None``
"""
osgeo_logger = logging.getLogger('osgeo')
def _log_gdal_errors(err_level, err_no, err_msg):
"""Log error messages to osgeo.
All error messages are logged with reasonable ``logging`` levels based
on the GDAL error level.
Args:
err_level (int): The GDAL error level (e.g. ``gdal.CE_Failure``)
err_no (int): The GDAL error number. For a full listing of error
codes, see: http://www.gdal.org/cpl__error_8h.html
err_msg (string): The error string.
Returns:
``None``
"""
osgeo_logger.log(
level=GDAL_ERROR_LEVELS[err_level],
msg='[errno {err}] {msg}'.format(
err=err_no, msg=err_msg.replace('\n', ' ')))
gdal.PushErrorHandler(_log_gdal_errors)
try:
yield
finally:
gdal.PopErrorHandler()
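# Hedged usage sketch (not part of the original module); the path is illustrative:
#
#     with capture_gdal_logging():
#         dataset = gdal.OpenEx('example.tif', gdal.OF_RASTER)
#     # GDAL warnings/errors raised inside the block are emitted via the
#     # 'osgeo' python logger instead of being printed to stderr.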
def _format_time(seconds):
"""Render the integer number of seconds as a string. Returns a string."""
hours, remainder = divmod(seconds, 3600)
minutes, seconds = divmod(remainder, 60)
hours = int(hours)
minutes = int(minutes)
if hours > 0:
return "%sh %sm %ss" % (hours, minutes, seconds)
if minutes > 0:
return "%sm %ss" % (minutes, seconds)
return "%ss" % seconds
@contextlib.contextmanager
def prepare_workspace(
workspace, name, logging_level=logging.NOTSET, exclude_threads=None):
"""Prepare the workspace."""
if not os.path.exists(workspace):
os.makedirs(workspace)
logfile = os.path.join(
workspace,
'InVEST-{modelname}-log-{timestamp}.txt'.format(
modelname='-'.join(name.replace(':', '').split(' ')),
timestamp=datetime.now().strftime("%Y-%m-%d--%H_%M_%S")))
with capture_gdal_logging(), log_to_file(logfile,
exclude_threads=exclude_threads,
logging_level=logging_level):
with sandbox_tempdir(dir=workspace):
logging.captureWarnings(True)
LOGGER.info('Writing log messages to %s', logfile)
start_time = time.time()
try:
yield
finally:
LOGGER.info('Elapsed time: %s',
_format_time(round(time.time() - start_time, 2)))
logging.captureWarnings(False)
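# Hedged usage sketch (not part of the original module); `run_model` and the paths
# are illustrative stand-ins:
#
#     with prepare_workspace('./workspace', 'carbon storage'):
#         run_model(args)
#     # A logfile named InVEST-carbon-storage-log-<timestamp>.txt is written into
#     # the workspace and GDAL errors are captured into python logging.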
class ThreadFilter(logging.Filter):
"""Filters out log messages issued by the given thread.
Any log messages generated by a thread with the name matching the
threadname provided to the constructor will be excluded.
"""
def __init__(self, thread_name):
"""Construct a ThreadFilter.
Args:
thread_name (string): The thread name to filter on.
"""
logging.Filter.__init__(self)
self.thread_name = thread_name
def filter(self, record):
"""Filter the given log record.
Args:
record (log record): The log record to filter.
Returns:
True if the record should be included, false if not.
"""
if record.threadName == self.thread_name:
return False
return True
@contextlib.contextmanager
def log_to_file(logfile, exclude_threads=None, logging_level=logging.NOTSET,
log_fmt=LOG_FMT, date_fmt=None):
"""Log all messages within this context to a file.
Args:
logfile (string): The path to where the logfile will be written.
If there is already a file at this location, it will be
overwritten.
exclude_threads=None (list): If None, logging from all threads will be
included in the log. If a list, it must be a list of string thread
names that should be excluded from logging in this file.
logging_level=logging.NOTSET (int): The logging threshold. Log
messages with a level less than this will be automatically
excluded from the logfile. The default value (``logging.NOTSET``)
will cause all logging to be captured.
log_fmt=LOG_FMT (string): The logging format string to use. If not
provided, ``utils.LOG_FMT`` will be used.
date_fmt (string): The logging date format string to use.
If not provided, ISO8601 format will be used.
Yields:
``handler``: An instance of ``logging.FileHandler`` that
represents the file that is being written to.
Returns:
``None``
"""
try:
if os.path.exists(logfile):
LOGGER.warning('Logfile %s exists and will be overwritten',
logfile)
except SystemError:
# This started happening in Windows tests:
# SystemError: <built-in function stat> returned NULL without
# setting an error
# Looking at https://bugs.python.org/issue28040#msg276223, this might
# be a low-level python error.
pass
handler = logging.FileHandler(logfile, 'w', encoding='UTF-8')
formatter = logging.Formatter(log_fmt, date_fmt)
root_logger = logging.getLogger()
root_logger.setLevel(logging.NOTSET)
root_logger.addHandler(handler)
handler.setFormatter(formatter)
handler.setLevel(logging_level)
if exclude_threads is not None:
for threadname in exclude_threads:
thread_filter = ThreadFilter(threadname)
handler.addFilter(thread_filter)
try:
yield handler
finally:
handler.close()
root_logger.removeHandler(handler)
@contextlib.contextmanager
def sandbox_tempdir(suffix='', prefix='tmp', dir=None):
"""Create a temporary directory for this context and clean it up on exit.
Parameters are identical to those for :py:func:`tempfile.mkdtemp`.
When the context manager exits, the created temporary directory is
recursively removed.
Args:
suffix='' (string): a suffix for the name of the directory.
prefix='tmp' (string): the prefix to use for the directory name.
dir=None (string or None): If a string, a directory that should be
the parent directory of the new temporary directory. If None,
tempfile will determine the appropriate tempdir to use as the
parent folder.
Yields:
``sandbox`` (string): The path to the new folder on disk.
Returns:
``None``
"""
sandbox = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=dir)
try:
yield sandbox
finally:
try:
shutil.rmtree(sandbox)
except OSError:
LOGGER.exception('Could not remove sandbox %s', sandbox)
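# Hedged usage sketch (not part of the original module):
#
#     with sandbox_tempdir(dir='./workspace') as scratch_dir:
#         scratch_raster = os.path.join(scratch_dir, 'aligned_lulc.tif')
#     # scratch_dir (and anything written into it) is removed on exit.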
def make_suffix_string(args, suffix_key):
"""Make an InVEST appropriate suffix string.
Creates an InVEST appropriate suffix string given the args dictionary and
suffix key. In general, prepends an '_' when necessary and generates an
empty string when necessary.
Args:
args (dict): the classic InVEST model parameter dictionary that is
passed to `execute`.
suffix_key (string): the key used to index the base suffix.
Returns:
If `suffix_key` is not in `args`, or `args['suffix_key']` is ""
return "",
If `args['suffix_key']` starts with '_' return `args['suffix_key']`
else return '_'+`args['suffix_key']`
"""
try:
file_suffix = args[suffix_key]
if file_suffix != "" and not file_suffix.startswith('_'):
file_suffix = '_' + file_suffix
except KeyError:
file_suffix = ''
return file_suffix
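# Hedged usage sketch (not part of the original module):
#
#     make_suffix_string({'results_suffix': 'dry_season'}, 'results_suffix')  # '_dry_season'
#     make_suffix_string({'results_suffix': '_v2'}, 'results_suffix')         # '_v2'
#     make_suffix_string({}, 'results_suffix')                                # ''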
def exponential_decay_kernel_raster(expected_distance, kernel_filepath):
"""Create a raster-based exponential decay kernel.
The raster created will be a tiled GeoTiff, with 256x256 memory blocks.
Args:
expected_distance (int or float): The distance (in pixels) of the
kernel's radius, the distance at which the value of the decay
function is equal to `1/e`.
kernel_filepath (string): The path to the file on disk where this
kernel should be stored. If this file exists, it will be
overwritten.
Returns:
None
"""
max_distance = expected_distance * 5
kernel_size = int(numpy.round(max_distance * 2 + 1))
driver = gdal.GetDriverByName('GTiff')
kernel_dataset = driver.Create(
kernel_filepath.encode('utf-8'), kernel_size, kernel_size, 1,
gdal.GDT_Float32, options=[
'BIGTIFF=IF_SAFER', 'TILED=YES', 'BLOCKXSIZE=256',
'BLOCKYSIZE=256'])
# Make some kind of geotransform, it doesn't matter what but
# will make GIS libraries behave better if it's all defined
kernel_dataset.SetGeoTransform([0, 1, 0, 0, 0, -1])
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS('WGS84')
kernel_dataset.SetProjection(srs.ExportToWkt())
kernel_band = kernel_dataset.GetRasterBand(1)
kernel_band.SetNoDataValue(-9999)
cols_per_block, rows_per_block = kernel_band.GetBlockSize()
n_cols = kernel_dataset.RasterXSize
n_rows = kernel_dataset.RasterYSize
n_col_blocks = int(math.ceil(n_cols / float(cols_per_block)))
n_row_blocks = int(math.ceil(n_rows / float(rows_per_block)))
integration = 0.0
for row_block_index in range(n_row_blocks):
row_offset = row_block_index * rows_per_block
row_block_width = n_rows - row_offset
if row_block_width > rows_per_block:
row_block_width = rows_per_block
for col_block_index in range(n_col_blocks):
col_offset = col_block_index * cols_per_block
col_block_width = n_cols - col_offset
if col_block_width > cols_per_block:
col_block_width = cols_per_block
# Numpy creates index rasters as ints by default, which sometimes
# creates problems on 32-bit builds when we try to add Int32
# matrices to float64 matrices.
row_indices, col_indices = numpy.indices((row_block_width,
col_block_width),
dtype=float)
row_indices += float(row_offset - max_distance)
col_indices += float(col_offset - max_distance)
kernel_index_distances = numpy.hypot(
row_indices, col_indices)
kernel = numpy.where(
kernel_index_distances > max_distance, 0.0,
numpy.exp(-kernel_index_distances / expected_distance))
integration += numpy.sum(kernel)
kernel_band.WriteArray(kernel, xoff=col_offset,
yoff=row_offset)
# Need to flush the kernel's cache to disk before opening up a new Dataset
# object in interblocks()
kernel_band.FlushCache()
kernel_dataset.FlushCache()
for block_data in pygeoprocessing.iterblocks(
(kernel_filepath, 1), offset_only=True):
kernel_block = kernel_band.ReadAsArray(**block_data)
kernel_block /= integration
kernel_band.WriteArray(kernel_block, xoff=block_data['xoff'],
yoff=block_data['yoff'])
kernel_band.FlushCache()
kernel_dataset.FlushCache()
kernel_band = None
kernel_dataset = None
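# Hedged usage sketch (not part of the original module): write a normalized decay
# kernel whose value falls to 1/e of its peak at 10 pixels from the center; the
# output path is illustrative.
#
#     exponential_decay_kernel_raster(10, 'decay_kernel_10px.tif')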
def build_file_registry(base_file_path_list, file_suffix):
"""Combine file suffixes with key names, base filenames, and directories.
Args:
base_file_tuple_list (list): a list of (dict, path) tuples where
the dictionaries have a 'file_key': 'basefilename' pair, or
'file_key': list of 'basefilename's. 'path'
indicates the file directory path to prepend to the basefile name.
file_suffix (string): a string to append to every filename, can be
empty string
Returns:
dictionary of 'file_keys' from the dictionaries in
`base_file_tuple_list` mapping to full file paths with suffixes or
lists of file paths with suffixes depending on the original type of
the 'basefilename' pair.
Raises:
ValueError if there are duplicate file keys or duplicate file paths.
ValueError if a path is not a string or a list of strings.
"""
all_paths = set()
duplicate_keys = set()
duplicate_paths = set()
f_reg = {}
def _build_path(base_filename, path):
"""Internal helper to avoid code duplication."""
pre, post = os.path.splitext(base_filename)
full_path = os.path.join(path, pre+file_suffix+post)
# Check for duplicate keys or paths
if full_path in all_paths:
duplicate_paths.add(full_path)
else:
all_paths.add(full_path)
return full_path
for base_file_dict, path in base_file_path_list:
for file_key, file_payload in base_file_dict.items():
# check for duplicate keys
if file_key in f_reg:
duplicate_keys.add(file_key)
else:
# handle the case whether it's a filename or a list of strings
if isinstance(file_payload, str):
full_path = _build_path(file_payload, path)
f_reg[file_key] = full_path
elif isinstance(file_payload, list):
f_reg[file_key] = []
for filename in file_payload:
full_path = _build_path(filename, path)
f_reg[file_key].append(full_path)
else:
raise ValueError(
"Unknown type in base_file_dict[%s]=%s" % (
file_key, path))
if len(duplicate_paths) > 0 or len(duplicate_keys):
raise ValueError(
"Cannot consolidate because of duplicate paths or keys: "
"duplicate_keys: %s duplicate_paths: %s" % (
duplicate_keys, duplicate_paths))
return f_reg
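# Hedged usage sketch (not part of the original module); keys, filenames and the
# directory are illustrative:
#
#     build_file_registry(
#         [({'lulc': 'lulc.tif', 'reports': ['a.csv', 'b.csv']}, 'intermediate')],
#         '_scenario1')
#     # -> {'lulc': 'intermediate/lulc_scenario1.tif',
#     #     'reports': ['intermediate/a_scenario1.csv', 'intermediate/b_scenario1.csv']}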
def build_lookup_from_csv(
table_path, key_field, column_list=None, to_lower=True):
"""Read a CSV table into a dictionary indexed by ``key_field``.
Creates a dictionary from a CSV whose keys are unique entries in the CSV
table under the column named by ``key_field`` and values are dictionaries
indexed by the other columns in ``table_path`` including ``key_field``
whose values are the values on that row of the CSV table.
If an entire row is NA/NaN (including ``key_field``) then it is dropped
from the table and a warning is given of the dropped rows.
Args:
table_path (string): path to a CSV file containing at
least the header key_field
key_field: (string): a column in the CSV file at `table_path` that
can uniquely identify each row in the table and sets the row index.
column_list (list): a list of column names to subset from the CSV
file, default=None
to_lower (bool): if True, converts all unicode in the CSV,
including headers and values to lowercase, otherwise uses raw
string values. default=True.
Returns:
lookup_dict (dict): a dictionary of the form
{key_field_0: {csv_header_0: value0, csv_header_1: value1...},
key_field_1: {csv_header_0: valuea, csv_header_1: valueb...}}
if ``to_lower`` all strings including key_fields and values are
converted to lowercase unicode.
Raise:
ValueError
If ValueError occurs during conversion to dictionary.
KeyError
If ``key_field`` is not present during ``set_index`` call.
"""
# Reassign to avoid mutation
col_list = column_list
# if a list of columns are provided to use and return, make sure
# 'key_field' is one of them.
if col_list and key_field not in col_list:
col_list.append(key_field)
table = read_csv_to_dataframe(
table_path, to_lower=to_lower, sep=None, index_col=False,
engine='python')
# if 'to_lower`, case handling is done before trying to access the data.
# the columns are stripped of leading/trailing whitespace in
# ``read_csv_to_dataframe``, and also lowercased if ``to_lower`` so we only
# need to convert the rest of the table.
if to_lower:
key_field = key_field.lower()
# lowercase column names
if col_list:
col_list = [col.lower() for col in col_list]
# lowercase values
table = table.applymap(
lambda x: x.lower() if isinstance(x, str) else x)
# Set 'key_field' as the index of the dataframe
try:
table.set_index(key_field, drop=False, inplace=True)
except KeyError:
# If 'key_field' is not a column then KeyError is raised for using
# it as the index column
LOGGER.error(f"'key_field' : '{key_field}' could not be found as a"
f" column in the table. Table path: {table_path}.")
raise
# Subset dataframe by columns if desired
if col_list:
table = table.loc[:, col_list]
# look for NaN values and warn if any are found.
table_na = table.isna()
if table_na.values.any():
LOGGER.warning(
f"Empty or NaN values were found in the table: {table_path}.")
# look to see if an entire row is NA values
table_na_rows = table_na.all(axis=1)
na_rows = table_na_rows.index[table_na_rows].tolist()
# if a completely empty row, drop it
if na_rows:
LOGGER.warning(
"Encountered an entirely blank row on line(s)"
f" {[x+2 for x in na_rows]}. Dropping rows from table.")
table.dropna(how="all", inplace=True)
# fill the rest of empty or NaN values with empty string
table.fillna(value="", inplace=True)
try:
lookup_dict = table.to_dict(orient='index')
except ValueError:
# If 'key_field' is not unique then a value error is raised.
LOGGER.error(f"The 'key_field' : '{key_field}' column values are not"
f" unique: {table.index.tolist()}")
raise
return lookup_dict
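# Hedged usage sketch (not part of the original module), assuming a CSV
# 'biophysical.csv' with header 'lucode,usle_c' and rows '1,0.5' and '2,0.8':
#
#     build_lookup_from_csv('biophysical.csv', 'lucode')
#     # -> {1: {'lucode': 1, 'usle_c': 0.5}, 2: {'lucode': 2, 'usle_c': 0.8}}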
def read_csv_to_dataframe(
path, to_lower=False, sep=None, encoding=None, engine='python',
**kwargs):
"""Return a dataframe representation of the CSV.
Wrapper around ``pandas.read_csv`` that standardizes the column names by
stripping leading/trailing whitespace and optionally making all lowercase.
This helps avoid common errors caused by user-supplied CSV files with
column names that don't exactly match the specification.
Args:
path (string): path to a CSV file
to_lower (bool): if True, convert all column names to lowercase
sep: separator to pass to pandas.read_csv. Defaults to None, which
lets the Python engine infer the separator (if engine='python').
encoding (string): name of encoding codec to pass to `pandas.read_csv`.
Defaults to None. Setting engine='python' when encoding=None allows
a lot of non-UTF8 encodings to be read without raising an error.
Any special characters in other encodings may get replaced with the
replacement character.
If encoding=None, and the file begins with a BOM, the encoding gets
set to 'utf-8-sig'; otherwise the BOM causes an error.
engine (string): kwarg for pandas.read_csv: 'c', 'python', or None.
Defaults to 'python' (see note about encoding).
**kwargs: any kwargs that are valid for ``pandas.read_csv``
Returns:
pandas.DataFrame with the contents of the given CSV
"""
# Check if the file encoding is UTF-8 BOM first
# allow encoding kwarg to override this if it's provided
if not encoding and has_utf8_bom(path):
encoding = 'utf-8-sig'
try:
dataframe = pandas.read_csv(path, engine=engine, encoding=encoding,
sep=sep, **kwargs)
except UnicodeDecodeError as error:
LOGGER.error(
f'{path} must be encoded as utf-8 or ASCII')
raise error
# this won't work on integer types, which happens if you set header=None
# however, there's little reason to use this function if there's no header
dataframe.columns = dataframe.columns.str.strip()
if to_lower:
dataframe.columns = dataframe.columns.str.lower()
return dataframe
def make_directories(directory_list):
"""Create directories in `directory_list` if they do not already exist."""
if not isinstance(directory_list, list):
        raise ValueError(
            "Expected `directory_list` to be an instance of `list`, got type "
            "%s instead" % type(directory_list))
for path in directory_list:
# From http://stackoverflow.com/a/14364249/42897
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
def mean_pixel_size_and_area(pixel_size_tuple):
"""Convert to mean and raise Exception if they are not close.
Parameter:
pixel_size_tuple (tuple): a 2 tuple indicating the x/y size of a
pixel.
Returns:
tuple of (mean absolute average of pixel_size, area of pixel size)
Raises:
ValueError if the dimensions of pixel_size_tuple are not almost
square.
"""
x_size, y_size = abs(pixel_size_tuple[0]), abs(pixel_size_tuple[1])
if not numpy.isclose(x_size, y_size):
raise ValueError(
"pixel size is not square. dimensions: %s" % repr(
pixel_size_tuple))
return (x_size, x_size*y_size)
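# Hedged usage sketch: mean_pixel_size_and_area((30.0, -30.0)) returns (30.0, 900.0),
# while a clearly non-square pixel size such as (30.0, -29.0) raises ValueError.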
def create_coordinate_transformer(
base_ref, target_ref,
osr_axis_mapping_strategy=DEFAULT_OSR_AXIS_MAPPING_STRATEGY):
"""Create a spatial reference coordinate transformation function.
Args:
base_ref (osr spatial reference): A defined spatial reference to
transform FROM
target_ref (osr spatial reference): A defined spatial reference
to transform TO
osr_axis_mapping_strategy (int): OSR axis mapping strategy for
``SpatialReference`` objects. Defaults to
``utils.DEFAULT_OSR_AXIS_MAPPING_STRATEGY``. This parameter should
not be changed unless you know what you are doing.
Returns:
An OSR Coordinate Transformation object
"""
# Make a copy of the base and target spatial references to avoid side
# effects from mutation of setting the axis mapping strategy
base_ref_wkt = base_ref.ExportToWkt()
target_ref_wkt = target_ref.ExportToWkt()
base_ref_copy = osr.SpatialReference()
target_ref_copy = osr.SpatialReference()
base_ref_copy.ImportFromWkt(base_ref_wkt)
target_ref_copy.ImportFromWkt(target_ref_wkt)
base_ref_copy.SetAxisMappingStrategy(osr_axis_mapping_strategy)
target_ref_copy.SetAxisMappingStrategy(osr_axis_mapping_strategy)
transformer = osr.CreateCoordinateTransformation(
base_ref_copy, target_ref_copy)
return transformer
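# Hedged usage sketch (not part of the original module); the EPSG codes are
# illustrative (WGS84 geographic -> WGS 84 / UTM zone 10N):
#
#     base_srs = osr.SpatialReference()
#     base_srs.ImportFromEPSG(4326)
#     target_srs = osr.SpatialReference()
#     target_srs.ImportFromEPSG(32610)
#     transformer = create_coordinate_transformer(base_srs, target_srs)
#     x, y, _ = transformer.TransformPoint(-123.0, 45.0)  # lon, lat order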
def _assert_vectors_equal(
expected_vector_path, actual_vector_path, field_value_atol=1e-3):
"""Assert two vectors are equal.
Assert spatial reference, feature count, geometries, field names, and
values are equal with no respect to order of field names or geometries.
Args:
actual_vector_path (string): path on disk to a gdal Vector dataset.
expected_vector_path (string): path on disk to a gdal Vector dataset
to use as the ground truth.
field_value_atol (float): the absolute tolerance for comparing field
attribute values, default=1e-3.
Returns:
None on success
Raise:
AssertionError
If vector projections, feature counts, field names, or geometries
do not match.
"""
try:
# Open vectors
actual_vector = gdal.OpenEx(actual_vector_path, gdal.OF_VECTOR)
actual_layer = actual_vector.GetLayer()
expected_vector = gdal.OpenEx(expected_vector_path, gdal.OF_VECTOR)
expected_layer = expected_vector.GetLayer()
# Check projections
expected_projection = expected_layer.GetSpatialRef()
expected_projection_wkt = expected_projection.ExportToWkt()
actual_projection = actual_layer.GetSpatialRef()
actual_projection_wkt = actual_projection.ExportToWkt()
if expected_projection_wkt != actual_projection_wkt:
raise AssertionError(
"Vector projections are not the same. \n"
f"Expected projection wkt: {expected_projection_wkt}. \n"
f"Actual projection wkt: {actual_projection_wkt}. ")
# Check feature count
actual_feat_count = actual_layer.GetFeatureCount()
expected_feat_count = expected_layer.GetFeatureCount()
if expected_feat_count != actual_feat_count:
raise AssertionError(
"Vector feature counts are not the same. \n"
f"Expected feature count: {expected_feat_count}. \n"
f"Actual feature count: {actual_feat_count}. ")
# Check field names
expected_field_names = [field.name for field in expected_layer.schema]
actual_field_names = [field.name for field in actual_layer.schema]
if sorted(expected_field_names) != sorted(actual_field_names):
raise AssertionError(
"Vector field names are not the same. \n"
f"Expected field names: {sorted(expected_field_names)}. \n"
f"Actual field names: {sorted(actual_field_names)}. ")
# Check field values and geometries
for expected_feature in expected_layer:
fid = expected_feature.GetFID()
expected_values = [
expected_feature.GetField(field)
for field in expected_field_names]
actual_feature = actual_layer.GetFeature(fid)
actual_values = [
actual_feature.GetField(field)
for field in expected_field_names]
for av, ev in zip(actual_values, expected_values):
if av is not None:
# Number comparison
if isinstance(av, int) or isinstance(av, float):
if not numpy.allclose(numpy.array([av]),
numpy.array([ev]),
atol=field_value_atol):
raise AssertionError(
"Vector field values are not equal: \n"
f"Expected value: {ev}. \n"
f"Actual value: {av}. ")
# String and other comparison
else:
if av != ev:
raise AssertionError(
"Vector field values are not equal. \n"
f"Expected value : {ev}. \n"
f"Actual value : {av}. ")
else:
if ev is not None:
raise AssertionError(
"Vector field values are not equal: \n"
f"Expected value: {ev}. \n"
f"Actual value: {av}. ")
expected_geom = expected_feature.GetGeometryRef()
expected_geom_wkt = expected_geom.ExportToWkt()
actual_geom = actual_feature.GetGeometryRef()
actual_geom_wkt = actual_geom.ExportToWkt()
expected_geom_shapely = loads(expected_geom_wkt)
actual_geom_shapely = loads(actual_geom_wkt)
# Try comparing geoms exactly equal allowing for different
# geometry ordering
geoms_equal = expected_geom_shapely.equals(actual_geom_shapely)
if not geoms_equal:
# Try almost_equal allowing for precision differences
geoms_almost_eq = expected_geom_shapely.almost_equals(
actual_geom_shapely)
if not geoms_almost_eq:
raise AssertionError(
"Vector geometry assertion fail. \n"
f"Expected geometry: {expected_geom_wkt}. \n"
f"Actual geometry: {actual_geom_wkt}. ")
expected_feature = None
actual_feature = None
finally:
actual_layer = None
actual_vector = None
expected_layer = None
expected_vector = None
return None
def has_utf8_bom(textfile_path):
"""Determine if the text file has a UTF-8 byte-order marker.
Args:
textfile_path (str): The path to a file on disk.
Returns:
A bool indicating whether the textfile has a BOM. If ``True``, a BOM
is present.
"""
with open(textfile_path, 'rb') as file_obj:
first_line = file_obj.readline()
return first_line.startswith(codecs.BOM_UTF8)
def reclassify_raster(
raster_path_band, value_map, target_raster_path, target_datatype,
target_nodata, error_details):
"""A wrapper function for calling ``pygeoprocessing.reclassify_raster``.
This wrapper function is helpful when added as a ``TaskGraph.task`` so
a better error message can be provided to the users if a
``pygeoprocessing.ReclassificationMissingValuesError`` is raised.
Args:
raster_path_band (tuple): a tuple including file path to a raster
and the band index to operate over. ex: (path, band_index)
value_map (dictionary): a dictionary of values of
{source_value: dest_value, ...} where source_value's type is the
same as the values in ``base_raster_path`` at band ``band_index``.
Must contain at least one value.
target_raster_path (string): target raster output path; overwritten if
it exists
target_datatype (gdal type): the numerical type for the target raster
target_nodata (numerical type): the nodata value for the target raster
Must be the same type as target_datatype
error_details (dict): a dictionary with key value pairs that provide
more context for a raised
``pygeoprocessing.ReclassificationMissingValuesError``.
keys must be {'raster_name', 'column_name', 'table_name'}. Values
each key represent:
'raster_name' - string for the raster name being reclassified
'column_name' - name of the table column that ``value_map``
dictionary keys came from.
'table_name' - table name that ``value_map`` came from.
Returns:
None
Raises:
ValueError if ``values_required`` is ``True`` and a pixel value from
``raster_path_band`` is not a key in ``value_map``.
"""
# Error early if 'error_details' keys are invalid
raster_name = error_details['raster_name']
column_name = error_details['column_name']
table_name = error_details['table_name']
try:
pygeoprocessing.reclassify_raster(
raster_path_band, value_map, target_raster_path, target_datatype,
target_nodata, values_required=True)
except pygeoprocessing.ReclassificationMissingValuesError as err:
error_message = (
f"Values in the {raster_name} raster were found that are not"
f" represented under the '{column_name}' column of the"
f" {table_name} table. The missing values found in the"
f" {raster_name} raster but not the table are:"
f" {err.missing_values}.")
raise ValueError(error_message)
|
py | 1a2ef3fa7e4c4bad44a2303092c2343e9f7d4fe5 | from configparser import ConfigParser
class GetconfigData:
"""
Class used to access config.ini data
"""
def __init__(self):
# instantiate
self.config = ConfigParser()
# parse existing file
self.config.read('config.ini')
def GetClientId():
"""
Function used by main.py to get the Client ID from the config.ini file
Returns:
client_ID: The client ID used to get data
"""
# Create object
get_client_id = GetconfigData()
# read values from a section
client_id = get_client_id.config.get('config', 'client_id')
return client_id
def GetAccessToken():
"""
Function used by main.py to get the Access Token from the config.ini file
Returns:
access_token: The Access Token used to get data
"""
# Create object
get_access_token = GetconfigData()
# read values from a section
access_token = get_access_token.config.get('config', 'access_token')
return access_token
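# Hedged usage sketch (not part of the original file); assumes a config.ini in the
# working directory with a [config] section defining client_id and access_token:
#
#     client_id = GetconfigData.GetClientId()
#     access_token = GetconfigData.GetAccessToken()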
|
py | 1a2ef438f0bf0c43b3ee10f8857977d69781a6f3 | # -*- coding: utf-8 -*-
# python std lib
import random
# rediscluster imports
from .crc import crc16
from .exceptions import RedisClusterException, RedisClusterConfigError
# 3rd party imports
from redis import Redis
from redis._compat import unicode, long, basestring
from redis.connection import Encoder
from redis import ConnectionError, TimeoutError, ResponseError
class NodeManager(object):
"""
"""
RedisClusterHashSlots = 16384
def __init__(self, startup_nodes=None, reinitialize_steps=None, skip_full_coverage_check=False, nodemanager_follow_cluster=False,
host_port_remap=None, **connection_kwargs):
"""
:skip_full_coverage_check:
Skips the check of cluster-require-full-coverage config, useful for clusters
without the CONFIG command (like aws)
:nodemanager_follow_cluster:
The node manager will during initialization try the last set of nodes that
it was operating on. This will allow the client to drift along side the cluster
if the cluster nodes move around alot.
"""
self.connection_kwargs = connection_kwargs
self.nodes = {}
self.slots = {}
self.startup_nodes = [] if startup_nodes is None else startup_nodes
self.orig_startup_nodes = [node for node in self.startup_nodes]
self.reinitialize_counter = 0
self.reinitialize_steps = reinitialize_steps or 25
self._skip_full_coverage_check = skip_full_coverage_check
self.nodemanager_follow_cluster = nodemanager_follow_cluster
self.encoder = Encoder(
connection_kwargs.get('encoding', 'utf-8'),
connection_kwargs.get('encoding_errors', 'strict'),
connection_kwargs.get('decode_responses', False)
)
self._validate_host_port_remap(host_port_remap)
self.host_port_remap = host_port_remap
if not self.startup_nodes:
raise RedisClusterException("No startup nodes provided")
def _validate_host_port_remap(self, host_port_remap):
"""
Helper method that validates all entries in the host_port_remap config.
"""
if host_port_remap is None:
# Nothing to validate if config not set
return
if not isinstance(host_port_remap, list):
raise RedisClusterConfigError("host_port_remap must be a list")
for item in host_port_remap:
if not isinstance(item, dict):
raise RedisClusterConfigError("items inside host_port_remap list must be of dict type")
# If we have from_host, we must have a to_host option to allow for translation to work
if ('from_host' in item and 'to_host' not in item) or ('from_host' not in item and 'to_host' in item):
raise RedisClusterConfigError("Both from_host and to_host must be present in remap item if either is defined")
if ('from_port' in item and 'to_port' not in item) or ('from_port' not in item and 'to_port' in item):
raise RedisClusterConfigError("Both from_port and to_port must be present in remap item")
def keyslot(self, key):
"""
Calculate keyslot for a given key.
Tuned for compatibility with python 2.7.x
"""
k = self.encoder.encode(key)
start = k.find(b"{")
if start > -1:
end = k.find(b"}", start + 1)
if end > -1 and end != start + 1:
k = k[start + 1:end]
return crc16(k) % self.RedisClusterHashSlots
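    # Hash-tag sketch: keys sharing the same "{...}" tag hash only the tag, e.g.
    # keyslot("foo{user1}") == keyslot("bar{user1}") == crc16(b"user1") % 16384, which is
    # what keeps related keys in a single slot for multi-key commands.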
def node_from_slot(self, slot):
"""
"""
for node in self.slots[slot]:
if node['server_type'] == 'master':
return node
def all_nodes(self):
"""
"""
for node in self.nodes.values():
yield node
def all_masters(self):
"""
"""
for node in self.nodes.values():
if node["server_type"] == "master":
yield node
def random_startup_node(self):
"""
"""
random.shuffle(self.startup_nodes)
return self.startup_nodes[0]
def random_startup_node_ittr(self):
"""
        Generator that yields random startup nodes.
"""
while True:
yield random.choice(self.startup_nodes)
def random_node(self):
"""
"""
key = random.choice(list(self.nodes.keys()))
return self.nodes[key]
def get_redis_link(self, host, port, decode_responses=False):
"""
"""
allowed_keys = (
'host',
'port',
'db',
'username',
'password',
'socket_timeout',
'socket_connect_timeout',
'socket_keepalive',
'socket_keepalive_options',
'connection_pool',
'unix_socket_path',
'encoding',
'encoding_errors',
'charset',
'errors',
'decode_responses',
'retry_on_timeout',
'ssl',
'ssl_keyfile',
'ssl_certfile',
'ssl_cert_reqs',
'ssl_ca_certs',
'max_connections',
)
disabled_keys = (
'host',
'port',
'decode_responses',
)
connection_kwargs = {k: v for k, v in self.connection_kwargs.items() if k in set(allowed_keys) - set(disabled_keys)}
return Redis(host=host, port=port, decode_responses=decode_responses, **connection_kwargs)
def initialize(self):
"""
Init the slots cache by asking all startup nodes what the current cluster configuration is
"""
nodes_cache = {}
tmp_slots = {}
all_slots_covered = False
disagreements = []
startup_nodes_reachable = False
nodes = self.orig_startup_nodes
# With this option the client will attempt to connect to any of the previous set of nodes instead of the original set of nodes
if self.nodemanager_follow_cluster:
nodes = self.startup_nodes
for node in nodes:
try:
r = self.get_redis_link(host=node["host"], port=node["port"], decode_responses=True)
cluster_slots = r.execute_command("cluster", "slots")
startup_nodes_reachable = True
except (ConnectionError, TimeoutError):
continue
except ResponseError as e:
# Isn't a cluster connection, so it won't parse these exceptions automatically
message = e.__str__()
if 'CLUSTERDOWN' in message or 'MASTERDOWN' in message:
continue
else:
raise RedisClusterException("ERROR sending 'cluster slots' command to redis server: {0}".format(node))
except Exception:
raise RedisClusterException("ERROR sending 'cluster slots' command to redis server: {0}".format(node))
all_slots_covered = True
# If there's only one server in the cluster, its ``host`` is ''
# Fix it to the host in startup_nodes
if (len(cluster_slots) == 1 and len(cluster_slots[0][2][0]) == 0 and len(self.startup_nodes) == 1):
cluster_slots[0][2][0] = self.startup_nodes[0]['host']
# No need to decode response because Redis should handle that for us...
for slot in cluster_slots:
master_node = slot[2]
if master_node[0] == '':
master_node[0] = node['host']
master_node[1] = int(master_node[1])
master_node = self.remap_internal_node_object(master_node)
node, node_name = self.make_node_obj(master_node[0], master_node[1], 'master')
nodes_cache[node_name] = node
for i in range(int(slot[0]), int(slot[1]) + 1):
if i not in tmp_slots:
tmp_slots[i] = [node]
slave_nodes = [slot[j] for j in range(3, len(slot))]
for slave_node in slave_nodes:
slave_node = self.remap_internal_node_object(slave_node)
target_slave_node, slave_node_name = self.make_node_obj(slave_node[0], slave_node[1], 'slave')
nodes_cache[slave_node_name] = target_slave_node
tmp_slots[i].append(target_slave_node)
else:
# Validate that 2 nodes want to use the same slot cache setup
if tmp_slots[i][0]['name'] != node['name']:
disagreements.append("{0} vs {1} on slot: {2}".format(
tmp_slots[i][0]['name'], node['name'], i),
)
if len(disagreements) > 5:
raise RedisClusterException("startup_nodes could not agree on a valid slots cache. {0}".format(", ".join(disagreements)))
self.populate_startup_nodes()
self.refresh_table_asap = False
if self._skip_full_coverage_check:
need_full_slots_coverage = False
else:
need_full_slots_coverage = self.cluster_require_full_coverage(nodes_cache)
# Validate if all slots are covered or if we should try next startup node
for i in range(0, self.RedisClusterHashSlots):
if i not in tmp_slots and need_full_slots_coverage:
all_slots_covered = False
if all_slots_covered:
# All slots are covered and application can continue to execute
break
if not startup_nodes_reachable:
raise RedisClusterException("Redis Cluster cannot be connected. Please provide at least one reachable node.")
if not all_slots_covered:
raise RedisClusterException("All slots are not covered after query all startup_nodes. {0} of {1} covered...".format(
len(tmp_slots), self.RedisClusterHashSlots))
# Set the tmp variables to the real variables
self.slots = tmp_slots
self.nodes = nodes_cache
self.reinitialize_counter = 0
def remap_internal_node_object(self, node_obj):
if not self.host_port_remap:
# No remapping rule set, return object unmodified
return node_obj
for remap_rule in self.host_port_remap:
if 'from_host' in remap_rule and 'to_host' in remap_rule:
if remap_rule['from_host'] in node_obj[0]:
# print('remapping host', node_obj[0], remap_rule['to_host'])
node_obj[0] = remap_rule['to_host']
## The port value is always an integer
if 'from_port' in remap_rule and 'to_port' in remap_rule:
if remap_rule['from_port'] == node_obj[1]:
# print('remapping port', node_obj[1], remap_rule['to_port'])
node_obj[1] = remap_rule['to_port']
return node_obj
def increment_reinitialize_counter(self, ct=1):
for i in range(min(ct, self.reinitialize_steps)):
self.reinitialize_counter += 1
if self.reinitialize_counter % self.reinitialize_steps == 0:
self.initialize()
def cluster_require_full_coverage(self, nodes_cache):
"""
        If the 'cluster-require-full-coverage no' config is set on the redis servers,
        then the cluster will still be able to respond even when not all slots are
        covered.
"""
nodes = nodes_cache or self.nodes
def node_require_full_coverage(node):
try:
r_node = self.get_redis_link(host=node["host"], port=node["port"], decode_responses=True)
return "yes" in r_node.config_get("cluster-require-full-coverage").values()
except ConnectionError:
return False
except Exception:
raise RedisClusterException("ERROR sending 'config get cluster-require-full-coverage' command to redis server: {0}".format(node))
# at least one node should have cluster-require-full-coverage yes
return any(node_require_full_coverage(node) for node in nodes.values())
def set_node_name(self, n):
"""
Format the name for the given node object
        # TODO: This should not be constructed this way. It should update the name of the node in the node cache dict
"""
if "name" not in n:
n["name"] = "{0}:{1}".format(n["host"], n["port"])
def make_node_obj(self, host, port, server_type):
"""
Create a node datastructure.
Returns the node datastructure and the node name
"""
node_name = "{0}:{1}".format(host, port)
node = {
'host': host,
'port': port,
'name': node_name,
'server_type': server_type
}
return (node, node_name)
def set_node(self, host, port, server_type=None):
"""
Update data for a node.
"""
node, node_name = self.make_node_obj(host, port, server_type)
self.nodes[node_name] = node
return node
def populate_startup_nodes(self):
"""
        Populate the startup node list from all known nodes and filter out any duplicates
"""
for item in self.startup_nodes:
self.set_node_name(item)
for n in self.nodes.values():
if n not in self.startup_nodes:
self.startup_nodes.append(n)
# freeze it so we can set() it
uniq = {frozenset(node.items()) for node in self.startup_nodes}
# then thaw it back out into a list of dicts
self.startup_nodes = [dict(node) for node in uniq]
def reset(self):
"""
Drop all node data and start over from startup_nodes
"""
self.initialize()
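# Hedged construction sketch: the address below is a placeholder; initialize() needs at
# least one reachable cluster node that answers "CLUSTER SLOTS", so it only succeeds
# against a live cluster. keyslot() itself needs no connection.
if __name__ == '__main__':
    nm = NodeManager(
        startup_nodes=[{'host': '127.0.0.1', 'port': 7000}],
        skip_full_coverage_check=True,
    )
    print(nm.keyslot('foo{user1}'))  # slot for the hash tag "user1"
    nm.initialize()                  # populates nm.nodes and nm.slots
    print(len(nm.nodes), len(nm.slots))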
|
py | 1a2ef9c250f5b0592f93c3e716253062338b9f44 | # Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebook_business.adobjects.abstractobject import AbstractObject
from facebook_business.adobjects.abstractcrudobject import AbstractCrudObject
from facebook_business.adobjects.objectparser import ObjectParser
from facebook_business.api import FacebookRequest
from facebook_business.typechecker import TypeChecker
"""
This class is auto-generated.
For any issues or feature requests related to this class, please let us know on
github and we'll fix in our codegen framework. We'll not be able to accept
pull request for this class.
"""
class TargetingGeoLocationLocationCluster(
AbstractCrudObject,
):
def __init__(self, fbid=None, parent_id=None, api=None):
self._isTargetingGeoLocationLocationCluster = True
super(TargetingGeoLocationLocationCluster, self).__init__(fbid, parent_id, api)
class Field(AbstractObject.Field):
key = 'key'
id = 'id'
_field_types = {
'key': 'int',
'id': 'string',
}
@classmethod
def _get_field_enum_info(cls):
field_enum_info = {}
return field_enum_info
|
py | 1a2ef9dc52649c21ee5e5267a735b624d1e5db24 | # coding=utf-8
"""
Collect slony metrics from postgresql
#### Dependencies
* psycopg2
#### Example Configuration
```
enabled = True
host = localhost
port = 5432
slony_node_string = Node [0-9] - [_a-z0-9]*@(.*).example.com
[instances]
[[database1]]
slony_db = postgres
slony_schema = _slony
[[database2]]
user = postgres
password = postgres
slony_db = data_db
slony_node_string = Node [0-9] - [_a-z0-9]*@(.*).i.example.com
slony_schema = _data_db
```
"""
import diamond.collector
try:
import psycopg2
    import psycopg2.extensions
    import psycopg2.extras
    psycopg2  # workaround for pyflakes issue #13
except ImportError:
psycopg2 = None
class SlonyCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(SlonyCollector, self).get_default_config_help()
config_help.update({
'host': 'Hostname',
'user': 'Username',
'password': 'Password',
'port': 'Port number',
'slony_node_string': 'Regex for SQL SUBSTRING to extract ' +
'the hostname from sl_node.no_comment',
'instances': 'Subcategory of slony instances that includes the ' +
'slony database, and slony schema to be monitored. ' +
'Optionally, user, password and slony_node_string ' +
'maybe overridden per instance (see example).'
})
return config_help
def get_default_config(self):
"""
Return default config.
"""
config = super(SlonyCollector, self).get_default_config()
config.update({
'path': 'postgres',
'host': 'localhost',
'user': 'postgres',
'password': 'postgres',
'port': 5432,
'slony_node_string': 'Node [0-9]+ - postgres@localhost',
'method': 'Threaded',
'instances': {},
})
return config
def collect(self):
if psycopg2 is None:
self.log.error('Unable to import module psycopg2')
return {}
instances = self.config['instances']
# HACK: setting default with subcategory messes up merging of configs,
# so we only set the default if one wasn't provided.
if not instances:
instances = {
'default': {
'slony_db': 'postgres',
'slony_schema': '_postgres',
}
}
        for name, instance in instances.items():
host = self.config['host']
port = self.config['port']
user = instance.get('user') or self.config['user']
password = instance.get('password') or self.config['password']
slony_node_string = instance.get('slony_node_string') or \
self.config['slony_node_string']
slony_db = instance['slony_db']
slony_schema = instance['slony_schema']
stats = self._get_stats_by_database(
host, port, user, password, slony_db,
slony_schema, slony_node_string
)
[self.publish(metric, value) for metric, value in stats]
def _get_stats_by_database(self, host, port, user,
password, db, schema, node_string):
path = "slony.%(datname)s.%(metric)s.lag_events"
conn = psycopg2.connect(
host=host,
user=user,
password=password,
port=port,
database=db)
# Avoid using transactions, set isolation level to autocommit
conn.set_isolation_level(0)
query = """
SELECT SUBSTRING(sl.no_comment FROM %(node_extractor)s) AS node,
st.st_lag_num_events AS lag_events
FROM %(schema)s.sl_status AS st, %(schema)s.sl_node AS sl
WHERE sl.no_id = st.st_received
"""
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
cursor.execute(query, {
'node_extractor': node_string,
'schema': psycopg2.extensions.AsIs(schema),
})
metrics = []
for row in cursor.fetchall():
stats = row.copy()
metrics.append((
path % {'datname': db, 'metric': stats.get('node')},
stats.get('lag_events')
))
return metrics
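    # Naming sketch: with the docstring's example configuration, a node whose
    # sl_node.no_comment matches host "db1" and lags by 12 events would be published
    # under a path like "<prefix>.postgres.slony.data_db.db1.lag_events" with value 12;
    # the prefix comes from the Diamond handler configuration, so treat it as an
    # assumption.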
|
py | 1a2efa426e0f6d38cb8ae226501c722d38d52bed | from django.db import models
from accounts.models import User
class ExcelFile(models.Model):
# user = models.ForeignKey(User, on_delete=models.CASCADE)
excel_file = models.FileField(blank=True, default='')
timestamp = models.DateTimeField(auto_now_add=True)
|
py | 1a2efa4861e56582ea91b6f361894c41ffe65061 | from unittest import mock
from django.conf import settings
from django.test import TestCase, override_settings
from daiquiri.jobs.tests.mixins import AsyncTestMixin
from daiquiri.query.models import QueryJob, Example
@override_settings(QUERY_ANONYMOUS=True)
@mock.patch(settings.ADAPTER_DATABASE + '.submit_query', mock.Mock())
@mock.patch(settings.ADAPTER_DATABASE + '.fetch_nrows', mock.Mock(return_value=100))
@mock.patch(settings.ADAPTER_DATABASE + '.fetch_size', mock.Mock(return_value=100))
@mock.patch(settings.ADAPTER_DATABASE + '.count_rows', mock.Mock(return_value=100))
@mock.patch(settings.ADAPTER_DATABASE + '.rename_table', mock.Mock())
@mock.patch(settings.ADAPTER_DATABASE + '.drop_table', mock.Mock())
@mock.patch(settings.ADAPTER_DATABASE + '.create_user_schema_if_not_exists', mock.Mock())
class AsyncTestCase(AsyncTestMixin, TestCase):
databases = ('default', 'data', 'tap', 'oai')
fixtures = (
'auth.json',
'metadata.json',
'jobs.json',
'queryjobs.json',
'examples.json'
)
users = (
('admin', 'admin'),
('user', 'user'),
('evil', 'evil'),
('anonymous', None),
)
url_names = {
'list': 'tap:async-list',
'detail': 'tap:async-detail',
'results': 'tap:async-results',
'result': 'tap:async-result',
'parameters': 'tap:async-parameters',
'destruction': 'tap:async-destruction',
'executionduration': 'tap:async-executionduration',
'phase': 'tap:async-phase',
'error': 'tap:async-error',
'quote': 'tap:async-quote',
'owner': 'tap:async-owner'
}
jobs = QueryJob.objects.filter(owner__username='user')
def get_parameter_for_new_jobs(self, username):
return [{
'LANG': example.query_language,
'QUERY': example.query_string
} for example in Example.objects.filter(access_level='PUBLIC')]
def get_parameter_for_new_jobs_internal(self, username):
return [{
'LANG': example.query_language,
'QUERY': example.query_string
} for example in Example.objects.filter(access_level='INTERNAL')]
|
py | 1a2efac3154f2d441117018b9a8e276b2fdc06ab | import json
import traceback
from datetime import timedelta
from flask import request, g, current_app
from sqlalchemy import desc, func
from apps.auth.models.users import User
from apps.project.business.credit import CreditBusiness
from apps.project.models.assets import Phone, PhoneRecord, VirtualAsset, PhoneBorrow
from apps.project.models.credit import Credit
from apps.public.models.public import Config
from library.api.db import db
from library.api.transfer import transfer2json
from library.notification import notification
from library.trpc import Trpc
user_trpc = Trpc('auth')
class PhoneBusiness(object):
public_trpc = Trpc('public')
user_trpc = Trpc('auth')
message_trpc = Trpc('message')
@classmethod
def _query(cls):
return Phone.query.add_columns(
Phone.id.label('id'),
Phone.name.label('name'),
Phone.asset_id.label('asset_id'),
Phone.vendor.label('vendor'),
Phone.device_number.label('device_number'),
Phone.os.label('os'),
Phone.cpu.label('cpu'),
Phone.core.label('core'),
Phone.ram.label('ram'),
Phone.rom.label('rom'),
Phone.resolution.label('resolution'),
Phone.buy_date.label('buy_date'),
Phone.region.label('region'),
Phone.status.label('status'),
Phone.borrow_id.label('borrow_id'),
Phone.creator_id.label('creator_id'),
Phone.device_source.label('device_source'),
Phone.device_belong.label('device_belong'),
)
@classmethod
@transfer2json(
'?id|!name|!asset_id|!vendor|!device_number|!os|!cpu|!core|!ram|!rom|!resolution|!buy_date|!region|!status|'
'!borrow_id|!creator_id|!device_source|!device_belong'
)
def query_all_json(cls, page_size, page_index):
ret = cls._query().filter(
Phone.status == Phone.ACTIVE).order_by(
desc(Phone.id)).limit(int(page_size)).offset(
int(page_index - 1) * int(page_size)).all()
return ret
@classmethod
def query_all_count(cls):
count = cls._query().filter(Phone.status == Phone.ACTIVE).count()
return count
@classmethod
@transfer2json(
'?id|!name|!asset_id|!vendor|!device_number|!os|!cpu|!core|!ram|!rom|!resolution|!buy_date|!region|!status|'
'!borrow_id|!creator_id|!device_source|!device_belong'
)
def query_json_by_id(cls, pid):
return cls._query().filter(
Phone.id == pid, Phone.status == Phone.ACTIVE).all()
@classmethod
def get_phone_by_id(cls, pid):
users = user_trpc.requests(method='get', path='/user')
phone = cls.query_json_by_id(pid)
if len(phone) <= 0:
return 101, 'phone not exist!'
phone = phone[0]
for user in users:
if user.get('userid') == phone.get('creator_id'):
phone['creator_nickname'] = user.get('nickname')
if user.get('userid') == phone.get('borrow_id'):
phone['borrow_nickname'] = user.get('nickname')
return 0, [phone]
@classmethod
def send_message(cls, user_list, creator, text):
if cls.message_trpc.requests('post', '/message',
body={'send_id': creator, 'rec_id': user_list, 'content': text}):
current_app.logger.info('发送站内信成功')
else:
current_app.logger.info('发送站内信失败')
@classmethod
def get_phone_all(cls, page_size, page_index):
        # search by device name
name = request.args.get('name', '')
        # search by vendor
vendor = request.args.get('vendor', '')
        # search by OS
os = request.args.get('os', '')
        # search by resolution
resolution = request.args.get('resolution', '')
        # search by borrower
borrower_id = request.args.get('borrower_id')
        # search by holder
creator_id = request.args.get('creator_id')
        # by device ownership (device_belong)
device_belong = request.args.get('device_belong', '')
        # by device source (device_source)
device_source = request.args.get('device_source', '')
        # by owner
        # fetch the full list of phone devices
phones, count = cls.search_phone_all(name, vendor, os, resolution, borrower_id, device_belong,
device_source, creator_id, page_size, page_index)
        # fetch basic info for all users
users = {int(user.get('userid')): user
for user in user_trpc.requests(method='get', path='/user', query={'base_info': True})}
        # fetch all borrow relationships
phone_borrows = {phone_borrow.phone_id: phone_borrow for phone_borrow in PhoneBorrow.query.all()}
data = []
for phone in phones:
phone_borrow = phone_borrows.get(phone.get('id'))
if g.userid == phone.get('borrow_id'):
phone["move_status"] = 1
else:
phone["move_status"] = 0
if PhoneBusiness.in_confirm_status(phone_borrow):
phone["move_status"] = 2
if PhoneBusiness.need_confirm_status(phone_borrow):
phone["confirm_status"] = 0
else:
phone["confirm_status"] = 1
try:
borrower = users.get(phone.get('borrow_id')).get("nickname")
creator = users.get(phone.get('creator_id')).get('nickname')
phone['borrow_nickname'] = borrower
phone['creator_nickname'] = creator
                # a borrow record exists for this phone
if phone_borrow:
user_list = [int(uid) for uid in phone_borrow.user_list.split(',') if uid != '']
                    # there is a user whose receipt still needs confirmation
if phone_borrow.confirm_userid != 0:
confirm_user_nickname = users.get(phone_borrow.confirm_userid).get('nickname')
phone['borrow_status'] = f'[{confirm_user_nickname}] 待接收'
                    # list of users applying to borrow
elif user_list:
user_list_temp = [users.get(userid).get('nickname') for userid in user_list]
phone['borrow_status'] = f'[{",".join(user_list_temp)}] 申请借用'
phone['move_status'] = 3 if phone["move_status"] == 1 else 0
                    # no borrow, confirmation or return in progress
else:
phone['borrow_status'] = f'[{borrower}] 持有'
else:
phone['borrow_status'] = f'[{borrower}] 持有'
except Exception as e:
current_app.logger.error(e)
phone['borrow_status'] = '未知'
phone['borrow_nickname'] = '未知'
data.append(phone)
current_app.logger.info(data)
return data, count
@classmethod
@transfer2json(
'?id|!name|!asset_id|!vendor|!device_number|!os|!cpu|!core|!ram|!rom|!resolution|!buy_date|!region|!status|'
'!borrow_id|!creator_id|!device_source|!device_belong'
)
def search_phone_json(cls, data):
return data.all()
@classmethod
def search_phone_all(cls, name, vendor, os, resolution, borrower_id, device_belong, device_source, creator_id,
page_size, page_index):
try:
data_all = cls._query().filter(Phone.status == Phone.ACTIVE)
if name != '':
data_all = data_all.filter(Phone.name.like(f'%{name}%'))
if vendor != '':
data_all = data_all.filter(Phone.vendor.like(f'%{vendor}%'))
if os != '':
data_all = data_all.filter(Phone.os.like(f'%{os}%'))
if resolution != '':
data_all = data_all.filter(Phone.resolution.like(f'%{resolution}%'))
if device_belong != '':
data_all = data_all.filter(Phone.device_belong.like(f'%{device_belong}%'))
if device_source != '':
data_all = data_all.filter(Phone.device_source.like(f'%{device_source}%'))
if borrower_id:
data_all = data_all.filter(Phone.borrow_id == borrower_id)
if creator_id:
data_all = data_all.filter(Phone.creator_id == creator_id)
count = data_all.count()
data = cls.search_phone_json(
data_all.order_by(desc(Phone.id)).limit(int(page_size)).offset(int(page_index - 1) * int(page_size)))
return data, count
except Exception as e:
current_app.logger.error(e)
@classmethod
def get_holder_json(cls):
        # collect info on all current holders
try:
data_all = []
temp = []
phones = Phone.query.add_columns(Phone.borrow_id.label('borrow_id')).filter(
Phone.status == Phone.ACTIVE).all()
for phone in phones:
if phone.borrow_id not in temp:
temp.append(phone.borrow_id)
user = cls.user_trpc.requests('get', '/user/{}'.format(phone.borrow_id))[0]
data = {
'nickname': user.get('nickname'),
'id': user.get('userid')
}
data_all.append(data)
return data_all
except Exception as e:
current_app.logger.error(e)
@classmethod
def can_move_status(cls, phone_id):
        # check whether this device is currently held by the current user
phone = Phone.query.get(phone_id)
if phone and phone.borrow_id == g.userid:
return True
else:
return False
@classmethod
def need_confirm_status(cls, phone_borrow):
        # check whether this phone is waiting for the current user's confirmation
try:
if phone_borrow is not None:
if int(phone_borrow.confirm_userid) == g.userid:
return True
else:
return False
else:
return False
except Exception as e:
current_app.logger.error(e)
current_app.logger.error(traceback.format_exc())
return 101, str(e)
@classmethod
def in_confirm_status(cls, phone_borrow):
        # check whether this device is in the confirmation flow
try:
if phone_borrow is not None:
if int(phone_borrow.confirm_userid) != 0:
return True
return False
else:
return False
except Exception as e:
current_app.logger.error(e)
current_app.logger.error(traceback.format_exc())
return 101, str(e)
@classmethod
def qyweixin_email(cls, user_ids, text):
if not isinstance(user_ids, list):
user_ids = [user_ids]
notification.send_notification(user_ids, text, creator=0)
return 0, 'success'
@classmethod
def send_need_confirm_msg(cls, current_phone, phone_current_holder, phone_new_holder):
deadline = PhoneBusiness.deadline(current_phone)
new_holder_msg_text = """[TCloud] {} ({})
您有一台设备需要确认接收:
设备 : {},
资产编号 : {},
原持有人 : {} (微信号: {})
现持有人 : {} (微信号: {})
请及时到系统中确认接收!""".format(phone_new_holder.nickname, phone_new_holder.wx_userid,
current_phone.name, current_phone.asset_id, phone_current_holder.nickname,
phone_current_holder.wx_userid, phone_new_holder.nickname,
phone_new_holder.wx_userid)
        # phone_current_holder: original holder
        # phone_new_holder: user who must confirm
ret, msg = PhoneBusiness.qyweixin_email(phone_new_holder.id, new_holder_msg_text)
return ret, msg
@classmethod
def send_cancel_move_msg(cls, current_phone, phone_current_holder, phone_new_holder):
deadline = PhoneBusiness.deadline(current_phone)
new_holder_msg_text = """[TCloud] {} ({})
您有一台设备由于超过 3 天没有接收,已被系统退回:
设备 : {},
资产编号 : {},
现持有人 : {} (微信号: {})
""".format(phone_new_holder.nickname, phone_new_holder.wx_userid, current_phone.name, current_phone.asset_id,
phone_new_holder.nickname, phone_new_holder.wx_userid)
        # phone_current_holder: original holder
        # phone_new_holder: user who must confirm
ret, msg = PhoneBusiness.qyweixin_email(phone_current_holder.id, new_holder_msg_text)
return ret, msg
@classmethod
def send_need_move_msg(cls, current_phone, phone_current_holder):
new_holder_msg_text = """[TCloud] {} ({})
您有一条借用请求需要处理:
设备 : {}
资产编号 : {}
请及时到系统中处理!
请通过 TCloud->资产->流转 进行转出。""".format(phone_current_holder.nickname, phone_current_holder.wx_userid,
current_phone.name, current_phone.asset_id,
phone_current_holder.wx_userid)
        # phone_current_holder: current holder
ret, msg = PhoneBusiness.qyweixin_email(phone_current_holder.id, new_holder_msg_text)
return ret, msg
@classmethod
def send_create_msg_qywx(cls, current_phone, phone_holder):
msg_text = """[TCloud] {} ({})
您拥有了一台新的设备:
设备 : {},
资产编号 : {},
持有人 : {} (微信号: {})""".format(phone_holder.nickname, phone_holder.wx_userid,
current_phone.name, current_phone.asset_id, phone_holder.nickname,
phone_holder.wx_userid, )
ret, msg = PhoneBusiness.qyweixin_email(phone_holder.id, msg_text)
return ret, msg
@classmethod
def send_delay_msg_qywx(cls, current_phone, phone_holder):
deadline = PhoneBusiness.deadline(current_phone)
msg_text = """[TCloud] {} ({})
您拥有的一台设备需要归还:
设备 : {},
资产编号 : {},
持有人 : {} (微信号: {})
到期时间: {}
续借 : 请到系统中点击 续借 进行续借
归还 : 请到系统中点击 退回 进行归还
过期 2 天后会根据超时时间扣除信用分!请及时归还!""".format(phone_holder.nickname, phone_holder.wx_userid,
current_phone.name, current_phone.asset_id, phone_holder.nickname,
phone_holder.wx_userid, deadline)
return PhoneBusiness.qyweixin_email(phone_holder.id, msg_text)
@classmethod
def send_move_msg_qywx(cls, current_phone, phone_current_holder, phone_new_holder):
if phone_new_holder.id == phone_current_holder.id:
current_app.logger.info('[{}](资产编号:{}) 设备状态未发生状态变化'.format(current_phone.name, current_phone.asset_id))
return
current_holder_msg_text = """[TCloud] {} ({})
您的一台设备状态将要发生变化:
设备 : {},
资产编号 : {},
变化 : 持有人将 由 {} (微信号: {}) 变为 {} (微信号: {})
状态 : 等待接收人确认""".format(phone_current_holder.nickname, phone_current_holder.wx_userid,
current_phone.name, current_phone.asset_id, phone_current_holder.nickname,
phone_current_holder.wx_userid, phone_new_holder.nickname,
phone_new_holder.wx_userid)
ret, msg = PhoneBusiness.qyweixin_email(phone_current_holder.id, current_holder_msg_text)
deadline = PhoneBusiness.deadline(current_phone)
new_holder_msg_text = """[TCloud] {} ({})
您将拥有一台新的设备:
设备 : {},
资产编号 : {},
原持有人 : {} (微信号: {})
现持有人 : {} (微信号: {})
可持有时间: {} 天
到期时间: {}
请及时到系统中确认接收!""".format(phone_new_holder.nickname, phone_new_holder.wx_userid,
current_phone.name, current_phone.asset_id, phone_current_holder.nickname,
phone_current_holder.wx_userid,
phone_new_holder.nickname, phone_new_holder.wx_userid, Phone.HOLD_DATE, deadline)
        # phone_current_holder: original holder
        # phone_new_holder: new holder
ret, msg = PhoneBusiness.qyweixin_email(phone_new_holder.id, new_holder_msg_text)
return ret, msg
@classmethod
def send_move_confirm_msg_qywx(cls, current_phone, phone_current_holder, phone_new_holder):
if phone_new_holder.id == phone_current_holder.id:
current_app.logger.info('[{}](资产编号:{}) 设备状态未发生状态变化'.format(current_phone.name, current_phone.asset_id))
return
current_holder_msg_text = """[TCloud] {} ({})
您的一台设备状态发生了变化:
设备 : {},
资产编号 : {},
变化 : 持有人已 由 {} (微信号: {}) 变为 {} (微信号: {})
状态 : 已接收""".format(phone_current_holder.nickname, phone_current_holder.wx_userid,
current_phone.name, current_phone.asset_id, phone_current_holder.nickname,
phone_current_holder.wx_userid, phone_new_holder.nickname,
phone_new_holder.wx_userid)
ret, msg = PhoneBusiness.qyweixin_email(phone_current_holder.id, current_holder_msg_text)
deadline = PhoneBusiness.deadline(current_phone)
new_holder_msg_text = """[TCloud] {} ({})
您拥有了一台新的设备:
设备 : {},
资产编号 : {},
原持有人 : {} (微信号: {})
现持有人 : {} (微信号: {})
可持有时间: {} 天
到期时间: {}
状态: 已接收!""".format(phone_new_holder.nickname, phone_new_holder.wx_userid,
current_phone.name, current_phone.asset_id, phone_current_holder.nickname,
phone_current_holder.wx_userid,
phone_new_holder.nickname, phone_new_holder.wx_userid, Phone.HOLD_DATE, deadline)
        # phone_current_holder: original holder
        # phone_new_holder: new holder
ret, msg = PhoneBusiness.qyweixin_email(phone_new_holder.id, new_holder_msg_text)
return ret, msg
@classmethod
def send_return_msg_qywx(cls, current_phone, phone_current_holder, phone_new_holder):
if phone_new_holder.id == phone_current_holder.id:
current_app.logger.info('[{}](资产编号:{}) 设备状态未发生状态变化'.format(current_phone.name, current_phone.asset_id))
return
current_holder_msg_text = """[TCloud] {} ({})
您归还了一台设备:
设备 : {},
资产编号 : {},
变化 : 持有人将 由 {} (微信号: {}) 变为 {} (微信号: {})
状态 : 等待接收人确认""".format(phone_current_holder.nickname, phone_current_holder.wx_userid,
current_phone.name, current_phone.asset_id,
phone_current_holder.nickname,
phone_current_holder.wx_userid,
phone_new_holder.nickname, phone_new_holder.wx_userid)
PhoneBusiness.qyweixin_email(phone_current_holder.id, current_holder_msg_text)
new_holder_msg_text = """[TCloud] {} ({})
您收到别人归还的一台设备:
设备 : {},
资产编号 : {},
原持有人 : {} (微信号: {})
持有人 : {} (微信号: {})
状态 : 等待确认
请到系统中及时确认接收!""".format(phone_new_holder.nickname, phone_new_holder.wx_userid, current_phone.name,
current_phone.asset_id,
phone_current_holder.nickname, phone_current_holder.wx_userid,
phone_new_holder.nickname, phone_new_holder.wx_userid)
ret, msg = PhoneBusiness.qyweixin_email(phone_new_holder.id, new_holder_msg_text)
return ret, msg
@classmethod
def send_return_confirm_msg_qywx(cls, current_phone, phone_current_holder, phone_new_holder):
if phone_new_holder.id == phone_current_holder.id:
current_app.logger.info('[{}](资产编号:{}) 设备状态未发生状态变化'.format(current_phone.name, current_phone.asset_id))
return
current_holder_msg_text = """[TCloud] {} ({})
您成功归还了一台设备:
设备 : {},
资产编号 : {},
变化 : 持有人已 由 {} (微信号: {}) 变为 {} (微信号: {})
状态 : 接收人已接收""".format(phone_current_holder.nickname, phone_current_holder.wx_userid,
current_phone.name, current_phone.asset_id, phone_current_holder.nickname,
phone_current_holder.wx_userid,
phone_new_holder.nickname, phone_new_holder.wx_userid)
PhoneBusiness.qyweixin_email(phone_current_holder.id, current_holder_msg_text)
new_holder_msg_text = """[TCloud] {} ({})
您已接收别人归还的一台设备:
设备 : {},
资产编号 : {},
原持有人 : {} (微信号: {})
持有人 : {} (微信号: {})
状态 : 您已接收!""".format(phone_new_holder.nickname, phone_new_holder.wx_userid, current_phone.name, current_phone.asset_id,
phone_current_holder.nickname, phone_current_holder.wx_userid,
phone_new_holder.nickname, phone_new_holder.wx_userid)
ret, msg = PhoneBusiness.qyweixin_email(phone_new_holder.id, new_holder_msg_text)
return ret, msg
@classmethod
def deadline(cls, current_phone):
        # compute the due date from the phone's latest record
phone_recorder = PhoneRecord.query.filter(PhoneRecord.phone_id == current_phone.id).order_by(
PhoneRecord.id.desc()).first()
        deadline = phone_recorder.creation_time + timedelta(days=Phone.HOLD_DATE)  # due date
return deadline
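    # Worked example (numbers assumed, not taken from the model): if Phone.HOLD_DATE were
    # 30 and the latest PhoneRecord was created on 2021-06-01 10:00, the due date works
    # out to 2021-07-01 10:00, i.e. creation_time + timedelta(days=30).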
@classmethod
def create(cls, name, asset_id, vendor, device_number, os, cpu, core, ram, rom, resolution, buy_date, region,
borrow_id, device_source, device_belong, creator_id):
try:
t = Phone(
name=name,
asset_id=asset_id,
vendor=vendor,
device_number=device_number,
os=os,
cpu=cpu,
core=core,
ram=ram,
rom=rom,
resolution=resolution,
buy_date=buy_date,
region=region,
borrow_id=borrow_id or g.userid,
creator_id=creator_id or g.userid,
device_source=device_source,
device_belong=device_belong,
)
db.session.add(t)
db.session.flush()
PhoneRecordBusiness.create(t, g.userid)
db.session.commit()
phone_holder = User.query.get(t.creator_id)
            # send WeChat Work notification
PhoneBusiness.send_create_msg_qywx(t, phone_holder)
return 0, None
except Exception as e:
current_app.logger.error(str(e))
current_app.logger.error(traceback.format_exc())
return 102, str(e)
    # initiate a transfer
@classmethod
def move_to_user(cls, id, borrow_id):
try:
t = Phone.query.get(id)
phone_new_holder = User.query.get(borrow_id)
phone_current_holder = User.query.get(t.borrow_id)
            # clear the device's pending borrow-request list, keeping only the old holder's id, pending receipt
PhoneBorrowBusiness.clear_borrow_user_list(id, phone_current_holder.id)
            # flag the device as lent out, waiting for the receiver to confirm
PhoneBorrowBusiness.add_user_to_confirm(id, phone_new_holder.id)
            # send WeChat Work notification
PhoneBusiness.send_move_msg_qywx(t, phone_current_holder, phone_new_holder)
return 0, None
except Exception as e:
current_app.logger.error(e)
current_app.logger.error(traceback.format_exc())
return 102, str(e)
    # confirm the transfer
@classmethod
def move(cls, id, borrow_id):
try:
t = Phone.query.get(id)
phone_new_holder = User.query.get(borrow_id)
if not phone_new_holder:
return 101, '要转移的用户不存在,请检查用户信息'
t.borrow_id = borrow_id
db.session.add(t)
PhoneRecordBusiness.update(t, g.userid)
db.session.commit()
return 0, None
except Exception as e:
current_app.logger.error(e)
return 102, str(e)
    # return the device
@classmethod
def return_to_admin(cls, id):
try:
            # the device is returned to its creator here
current_phone = Phone.query.get(id)
admin_id = current_phone.creator_id
phone_current_holder = User.query.get(current_phone.borrow_id)
phone_new_holder = User.query.get(admin_id)
PhoneRecordBusiness.update(current_phone, g.userid)
            # send WeChat Work notification
PhoneBusiness.send_return_msg_qywx(current_phone, phone_current_holder, phone_new_holder)
            # clear the device's pending borrow-request list, keeping only the old holder's id, pending receipt
PhoneBorrowBusiness.clear_borrow_user_list(id, phone_current_holder.id)
            # add the admin to the confirmation list
PhoneBorrowBusiness.add_user_to_confirm(id, admin_id)
return 0, None
except Exception as e:
current_app.logger.error(e)
current_app.logger.error(traceback.format_exc())
return 102, str(e)
    # devices not received within 3 days are returned
@classmethod
def cancel_move_to(cls, id):
try:
            # simply clear the phone borrow record
current_phone = Phone.query.get(id)
phone_borrow = PhoneBorrowBusiness.get_borrow_by_phone_id(phone_id=id)
admin_id = current_phone.creator_id
phone_current_holder = User.query.get(phone_borrow.confirm_userid)
phone_new_holder = User.query.get(admin_id)
            # send WeChat Work notification
cls.send_cancel_move_msg(current_phone, phone_current_holder, phone_new_holder)
ret, msg = PhoneBorrowBusiness.update(phone_borrow.id, phone_borrow.phone_id, 0, '')
return ret, msg
except Exception as e:
current_app.logger.error(e)
current_app.logger.error(traceback.format_exc())
return 102, str(e)
@classmethod
def update(cls, id, name, asset_id, vendor, device_number, os, cpu, core, ram, rom, resolution, buy_date, region,
borrow_id, device_source, device_belong, creator_id):
try:
t = Phone.query.get(id)
t.name = name
t.asset_id = asset_id
t.vendor = vendor
t.device_number = device_number
t.os = os
t.cpu = cpu
t.core = core
t.ram = ram
t.rom = rom
t.resolution = resolution
t.buy_date = buy_date
t.region = region
t.borrow_id = borrow_id
t.device_source = device_source
t.device_belong = device_belong
t.creator_id = creator_id
db.session.add(t)
PhoneRecordBusiness.update(t, g.userid)
db.session.commit()
return 0, None
except Exception as e:
current_app.logger.error(str(e))
return 102, str(e)
@classmethod
def delete(cls, id):
try:
t = Phone.query.get(id)
if t is None:
return 0
t.status = Phone.DISABLE
db.session.add(t)
PhoneRecordBusiness.delete(t, g.userid)
db.session.commit()
return 0
except Exception as e:
current_app.logger.error(str(e))
return 105, str(e)
class PhoneRecordBusiness(object):
@classmethod
@transfer2json(
'?id|!phone_id|!name|!asset_id|!vendor|!creation_time|!modified_time|!device_number|!os|!cpu|!core|!ram|'
'!rom|!resolution|!buy_date|!region|!status|!borrow_id|!creator_id|!device_source|!device_belong|!editor_id'
)
def query_json_by_id(cls, id):
return cls._query().filter(
PhoneRecord.phone_id == id, Phone.status == Phone.ACTIVE).all()
@classmethod
@transfer2json(
'?id|!phone_id|!name|!asset_id|!vendor|!creation_time|!modified_time|!device_number|!os|!cpu|!core|!ram|!rom'
'|!resolution|!buy_date|!region|!status|!borrow_id|!creator_id|!device_source|!device_belong|!editor_id'
)
def query_record_json(cls, phone_id):
ret = cls._query().filter(PhoneRecord.phone_id == phone_id).order_by(PhoneRecord.id).all()
return ret
@classmethod
def _query(cls):
return PhoneRecord.query.add_columns(
PhoneRecord.id.label('id'),
PhoneRecord.phone_id.label('phone_id'),
PhoneRecord.name.label('name'),
PhoneRecord.asset_id.label('asset_id'),
PhoneRecord.vendor.label('vendor'),
func.date_format(PhoneRecord.creation_time, "%Y-%m-%d %H:%i:%s").label('creation_time'),
func.date_format(PhoneRecord.modified_time, "%Y-%m-%d %H:%i:%s").label('modified_time'),
PhoneRecord.device_number.label('device_number'),
PhoneRecord.os.label('os'),
PhoneRecord.cpu.label('cpu'),
PhoneRecord.core.label('core'),
PhoneRecord.ram.label('ram'),
PhoneRecord.rom.label('rom'),
PhoneRecord.resolution.label('resolution'),
PhoneRecord.buy_date.label('buy_date'),
PhoneRecord.region.label('region'),
PhoneRecord.status.label('status'),
PhoneRecord.borrow_id.label('borrow_id'),
PhoneRecord.creator_id.label('creator_id'),
PhoneRecord.device_source.label('device_source'),
PhoneRecord.device_belong.label('device_belong'),
PhoneRecord.editor_id.label('editor_id'),
)
@classmethod
def create(cls, t, editor_id):
t_record = PhoneRecord(
phone_id=t.id,
name=t.name,
asset_id=t.asset_id,
vendor=t.vendor,
device_number=t.device_number,
os=t.os,
cpu=t.cpu,
core=t.core,
ram=t.ram,
rom=t.rom,
resolution=t.resolution,
buy_date=t.buy_date,
region=t.region,
borrow_id=t.borrow_id,
creator_id=t.creator_id,
device_source=t.device_source,
device_belong=t.device_belong,
editor_id=editor_id,
)
db.session.add(t_record)
@classmethod
def update(cls, t, editor_id):
t_record = PhoneRecord(
phone_id=t.id,
name=t.name,
asset_id=t.asset_id,
vendor=t.vendor,
device_number=t.device_number,
os=t.os,
cpu=t.cpu,
core=t.core,
ram=t.ram,
rom=t.rom,
resolution=t.resolution,
buy_date=t.buy_date,
region=t.region,
borrow_id=t.borrow_id,
creator_id=t.creator_id,
device_source=t.device_source,
device_belong=t.device_belong,
editor_id=editor_id,
)
db.session.add(t_record)
@classmethod
def delete(cls, t, editor_id):
t_record = PhoneRecord(
phone_id=t.id,
name=t.name,
asset_id=t.asset_id,
vendor=t.vendor,
device_number=t.device_number,
os=t.os,
cpu=t.cpu,
core=t.core,
ram=t.ram,
rom=t.rom,
resolution=t.resolution,
buy_date=t.buy_date,
region=t.region,
borrow_id=t.borrow_id,
creator_id=t.creator_id,
device_source=t.device_source,
device_belong=t.device_belong,
editor_id=editor_id,
)
db.session.add(t_record)
@classmethod
def query_record_detail(cls, phone_id):
ret = cls.query_record_json(phone_id)
if not ret:
return []
ret_list = []
asset_config = Config.query.add_columns(Config.content.label('content')).filter(Config.module == 'asset',
Config.module_type == 1).first()
content = json.loads(asset_config.content)
operation_dict = content['operation_dict']
# name = operation_dict.get('name')
# asset_id = operation_dict.get('asset_id')
# status = operation_dict.get('status')
# borrow_id = operation_dict.get('borrow_id')
ret_dict = {}
user_creater = User.query.get(int(ret[0]['editor_id']))
ret_dict['modified_time'] = ret[0]['creation_time']
ret_dict['operation'] = "[{}({})] : 增加新的资产 {}".format(user_creater.nickname, user_creater.wx_userid,
ret[0]['name'])
ret_list.append(ret_dict)
current_app.logger.info(ret)
for r in range(1, len(ret)):
for asset_key, asset_value in ret[r - 1].items():
if asset_key in operation_dict.keys():
current_app.logger.info(
"修改的字段:" + str(asset_key) + ", 字段值:" + str(asset_value) + "-->" + str(ret[r][asset_key]))
user_editor = User.query.get(int(ret[r]['editor_id']))
ret_dict = None
if asset_key in ('borrow_id',):
ret_dict = {'modified_time': ret[r]['modified_time']}
if asset_value != ret[r][asset_key]:
user_from = User.query.filter(User.id == int(asset_value)).first()
user_to = User.query.filter(User.id == int(ret[r][asset_key])).first()
ret_dict['operation'] = "[{}({})] : {} 由 {}({}) 变更为 {}({})".format(user_editor.nickname,
user_editor.wx_userid,
operation_dict[
asset_key],
user_from.nickname,
user_from.wx_userid,
user_to.nickname,
user_to.wx_userid)
else:
# user_from = User.query.filter(User.id == int(asset_value)).first()
user_to = User.query.filter(User.id == int(ret[r][asset_key])).first()
ret_dict['operation'] = "[{}({})] : 续借了设备,{} 为 {}({})".format(user_editor.nickname,
user_editor.wx_userid,
operation_dict[asset_key],
user_to.nickname,
user_to.wx_userid)
else:
if asset_value != ret[r][asset_key]:
ret_dict = {
'modified_time': ret[r]['modified_time'],
'operation': "[{}({})] : 修改了{} {} 为 {}".format(user_editor.nickname,
user_editor.wx_userid,
operation_dict[asset_key],
asset_value,
ret[r][asset_key])
}
if ret_dict is not None:
ret_list.append(ret_dict)
ret_list = ret_list[::-1]
return ret_list
class VirtualAssetBusiness(object):
@classmethod
def _query(cls):
return VirtualAsset.query.add_columns(
VirtualAsset.id.label('id'),
VirtualAsset.asset_id.label('asset_id'),
VirtualAsset.passwd.label('passwd'),
VirtualAsset.administrator.label('administrator'),
VirtualAsset.bind_tel.label('bind_tel'),
VirtualAsset.idcard.label('idcard'),
VirtualAsset.status.label('status'),
VirtualAsset.asset_type.label('asset_type'),
VirtualAsset.operator.label('operator')
)
@classmethod
@transfer2json(
'?id|!asset_id|!passwd|!administrator|!idcard|!bind_tel|!status|!asset_type|!operator'
)
def query_json_by_id(cls, id):
return cls._query().filter(VirtualAsset.id == id,
VirtualAsset.status != VirtualAsset.DISABLE).all()
@classmethod
def create(cls, asset_id, passwd, administrator, bind_tel, idcard, asset_type, operator):
try:
va = VirtualAsset(
asset_id=asset_id,
passwd=passwd,
administrator=administrator,
bind_tel=bind_tel,
idcard=idcard,
asset_type=asset_type,
operator=operator,
)
db.session.add(va)
db.session.commit()
return 0, None
except Exception as e:
current_app.logger.error(str(e))
return 102, str(e)
@classmethod
def update(cls, id, asset_id, passwd, administrator, bind_tel, idcard, asset_type, operator):
try:
va = VirtualAsset.query.get(id)
va.asset_id = asset_id
va.passwd = passwd
va.administrator = administrator
va.bind_tel = bind_tel
va.idcard = idcard
va.asset_type = asset_type
va.operator = operator
db.session.add(va)
db.session.commit()
return 0, None
except Exception as e:
current_app.logger.error(str(e))
return 102, str(e)
@classmethod
def delete(cls, id):
try:
va = VirtualAsset.query.get(id)
if va is None:
return 0
va.status = VirtualAsset.DISABLE
db.session.add(va)
db.session.commit()
return 0
except Exception as e:
current_app.logger.error(str(e))
return 105, str(e)
@classmethod
@transfer2json(
'?id|!asset_id|!passwd|!administrator|!idcard|!bind_tel|!status|!asset_type|!operator',
ispagination=True
)
def paginate_data(cls, page_size, page_index):
asset_type = request.args.get('type')
query = cls._query().filter(VirtualAsset.status != VirtualAsset.DISABLE)
if asset_type:
query = query.filter(VirtualAsset.asset_type == int(asset_type))
count = query.count()
data = query.order_by(desc(VirtualAsset.id)).limit(
int(page_size)).offset(int(page_index - 1) * int(page_size)).all()
return data, count
class PhoneBorrowBusiness(object):
user_trpc = Trpc('auth')
@classmethod
def _query(cls):
return PhoneBorrow.query.add_columns(
PhoneBorrow.id.label('id'),
PhoneBorrow.phone_id.label('phone_id'),
PhoneBorrow.user_list.label('user_list'),
PhoneBorrow.confirm_userid.label('confirm_userid'),
func.date_format(PhoneBorrow.creation_time, "%Y-%m-%d %H:%i:%s").label('creation_time'),
func.date_format(PhoneBorrow.modified_time, "%Y-%m-%d %H:%i:%s").label('modified_time'),
)
@classmethod
@transfer2json('?id|!phone_id|!user_list|!confirm_userid|!creation_time|!modified_time')
def get_borrow_all(cls):
phone_borrows = cls._query().all()
return phone_borrows
@classmethod
def get_borrow_by_phone_id(cls, phone_id):
phone_borrow = cls._query().filter(PhoneBorrow.phone_id == phone_id).first()
return phone_borrow
@classmethod
def create(cls, phone_id, confirm_userid=0, user_list=''):
try:
phone_borrow = PhoneBorrow(
phone_id=phone_id,
user_list=user_list,
confirm_userid=confirm_userid,
)
db.session.add(phone_borrow)
db.session.commit()
return 0, None
except Exception as e:
current_app.logger.error(str(e))
return 102, str(e)
@classmethod
def update(cls, id, phone_id, confirm_userid, user_list):
try:
phone_borrow = PhoneBorrow.query.get(id)
            if not phone_borrow:
                # falling through with phone_borrow still None would crash below
                return cls.create(phone_id, confirm_userid, user_list)
phone_borrow.user_list = user_list
phone_borrow.confirm_userid = confirm_userid
db.session.add(phone_borrow)
db.session.commit()
return 0, None
except Exception as e:
current_app.logger.error(e)
return 102, str(e)
@classmethod
def clear_borrow_user_list(cls, phone_id, old_holder_id):
        # clear the borrow-request user list,
        # leaving only the original holder's id
try:
old_holder_id = str(old_holder_id)
phone_borrow = cls.get_borrow_by_phone_id(phone_id)
if not phone_borrow:
ret, msg = cls.create(phone_id, 0, old_holder_id)
else:
ret, msg = cls.update(phone_borrow.id, phone_borrow.phone_id, 0, old_holder_id)
return ret, msg
except Exception as e:
current_app.logger.error(e)
current_app.logger.error(traceback.format_exc())
return 102, str(e)
@classmethod
def add_user_to_confirm(cls, phone_id, user_id):
        # add the user id to this device's receipt-confirmation list
try:
phone_borrow = cls.get_borrow_by_phone_id(phone_id)
if not phone_borrow:
ret, msg = cls.create(phone_id, user_id)
else:
ret, msg = cls.update(phone_borrow.id, phone_borrow.phone_id, user_id, phone_borrow.user_list)
return ret, msg
except Exception as e:
current_app.logger.error(e)
current_app.logger.error(traceback.format_exc())
return 102, str(e)
@classmethod
def add_user_to_userlist(cls, phone_id, user_id):
        # append the applicant's user id to the request list
try:
phone_borrow = cls.get_borrow_by_phone_id(phone_id)
if not phone_borrow:
cls.create(phone_id)
phone_borrow = cls.get_borrow_by_phone_id(phone_id)
old_user_list = [id for id in phone_borrow.user_list.split(',')]
user_id = str(user_id)
if user_id not in old_user_list:
old_user_list.append(user_id)
else:
return 103, "不能重复借用"
new_user_list = ','.join(old_user_list)
cls.update(phone_borrow.id, phone_id, 0, new_user_list)
return 0, None
except Exception as e:
current_app.logger.error(e)
current_app.logger.error(traceback.format_exc())
return 102, str(e)
@classmethod
@transfer2json(
'?id|!nickname'
)
def get_user_list_by_phone_id(cls, phone_id):
try:
phone_borrow = cls.get_borrow_by_phone_id(phone_id)
if not phone_borrow:
return []
user_list = [id for id in phone_borrow.user_list.split(',')]
users = []
for user_id in user_list:
if len(user_id) > 0:
user = User.query.get(int(user_id))
if user:
users.append(user)
return users
except Exception as e:
current_app.logger.error(str(e))
current_app.logger.error(traceback.format_exc())
return 102, str(e)
@classmethod
def send_borrow_msg_qywx(cls, current_phone, phone_holder, current_user):
current_user_nickname = current_user.nickname
current_user_wx_userid = current_user.wx_userid
receiver_id = phone_holder.wx_userid
msg_text = """[TCloud] {}({})
您收到一个设备借用请求:
借用的设备 : {},
资产编号 : {},
借用人 : {} (微信号: {}),
请通过企业微信沟通,如借出,请通过 TCloud->资产->流转 进行转出。""".format(phone_holder.nickname, phone_holder.wx_userid,
current_phone.name, current_phone.asset_id, current_user_nickname,
current_user_wx_userid)
PhoneBusiness.qyweixin_email(phone_holder.id, msg_text)
@classmethod
def send_borrow_continue_msg_qywx(cls, current_phone, phone_holder, current_user):
deadline = PhoneBusiness.deadline(current_phone)
current_user_nickname = current_user.nickname
current_user_wx_userid = current_user.wx_userid
receiver_id = phone_holder.wx_userid
msg_text = """[TCloud] {} ({})
您续借了一台设备:
借用的设备 : {},
资产编号 : {},
借用人 : {} (微信号: {})
可持有时间: {} 天
到期时间: {}""".format(phone_holder.nickname, phone_holder.wx_userid,
current_phone.name, current_phone.asset_id, current_user_nickname, current_user_wx_userid,
Phone.HOLD_DATE, deadline)
PhoneBusiness.qyweixin_email(phone_holder.id, msg_text)
@classmethod
def borrow(cls, phone_id):
        # initiate a borrow request
try:
ret, msg = 0, None
current_phone = Phone.query.get(phone_id)
if current_phone:
current_user = User.query.get(g.userid)
phone_holder = User.query.get(current_phone.borrow_id)
if current_phone.borrow_id == g.userid:
ret, msg = PhoneBusiness.move(phone_id, phone_holder.id)
PhoneBorrowBusiness.send_borrow_continue_msg_qywx(current_phone, phone_holder, current_user)
else:
ret, msg = PhoneBorrowBusiness.add_user_to_userlist(phone_id, g.userid)
if ret == 103:
return ret, msg
PhoneBorrowBusiness.send_borrow_msg_qywx(current_phone, phone_holder, current_user)
else:
return 101, '设备无效'
return ret, msg
except Exception as e:
current_app.logger.error(traceback.format_exc())
current_app.logger.error(e)
return 101, e
@classmethod
def confirm_borrow(cls, phone_id):
        # confirm the borrow: the admin/receiver confirms receipt
try:
current_phone = Phone.query.get(phone_id)
phone_borrow = cls.get_borrow_by_phone_id(phone_id)
if int(phone_borrow.confirm_userid) != g.userid:
return 403, '只有接收人可以确认'
phone_current_holder = User.query.get(current_phone.borrow_id)
phone_new_holder = User.query.get(phone_borrow.confirm_userid)
ret, msg = PhoneBusiness.move(phone_id, int(phone_borrow.confirm_userid))
admins = cls.user_trpc.requests('get', '/user/admin')
current_app.logger.info('{} 确认接收设备'.format(int(phone_borrow.confirm_userid)))
if (int(phone_borrow.confirm_userid) in admins or
int(phone_borrow.confirm_userid) == current_phone.creator_id):
try:
PhoneBusiness.send_return_confirm_msg_qywx(current_phone, phone_current_holder, phone_new_holder)
reason = '成功归还了设备 {}({}) '.format(current_phone.name, current_phone.asset_id)
current_app.logger.info(reason)
user_old_id = int(phone_borrow.user_list)
ret, msg = CreditBusiness.add_sub_score(user_old_id, Credit.CREDIT_ADD_ONCE, reason)
except Exception as e:
current_app.logger.error(e)
else:
PhoneBusiness.send_move_confirm_msg_qywx(current_phone, phone_current_holder, phone_new_holder)
ret, msg = cls.update(phone_borrow.id, phone_borrow.phone_id, 0, '')
return ret, msg
except Exception as e:
current_app.logger.error(str(e))
current_app.logger.error(traceback.format_exc())
return 102, e
|
py | 1a2efaf4a3af509ee3dcb5e4b2027c195f95038e | import ConfigParser
import importlib
from pkg_resources import resource_string
import StringIO
from landmarkrest.util.util import Util
class FieldPredictor(object):
def __init__(self):
# TODO: use the configuration file determine if we are using InferLink's field typer, or ISI's
self.__predictor_impl = 'InferLink'
if self.__predictor_impl == 'InferLink':
self.__section_name = 'Models'
self.__field_models = self.__load_field_models()
"""
End-point function that predicts the type of column that has been extracted (e.g., values extracted
from multiple pages)
@param preceding_stripes: These are the stripes preceding each of the field values.
@param field_values: A set of values from multiple pages for the same slot
@param following_stripes: These are the stripes coming right after the slot values
@param confidence_threshold: Any column type that is not assigned with at least this level of confidence is
not returned
@note: The field_values and preceding_stripes should be ordered so they are aligned (e.g., 1st is 1st for both)
@retun: A tuple of (field_name, confidence)
"""
def predict(self, preceding_stripes, slot_values, following_stripes, confidence_threshold=0.0):
if self.__predictor_impl == 'ISI':
return {} # TODO: this is where we call ISI's code, however they do it (service, etc.)
else:
return self.__inferlink_predict(preceding_stripes, slot_values, following_stripes, confidence_threshold)
def __inferlink_predict(self, preceding_stripes, slot_values, following_stripes, confidence_threshold):
preds = {}
for col_type in self.__field_models:
model = self.__field_models[col_type]
conf = model.generate_confidence(preceding_stripes, slot_values, following_stripes)
if conf >= confidence_threshold:
preds[col_type] = conf
top_x = Util.top_x_from_dict(preds, top_x=1)
argmax = None
if top_x:
            argmax = top_x[0]  # the first (and only) item in a one-item list
return argmax
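    # Usage sketch (hypothetical inputs): the three lists must be index-aligned across
    # pages, and the call returns the best-scoring column type above the threshold, or
    # None when nothing clears it.
    #
    #   predictor = FieldPredictor()
    #   best = predictor.predict(['Price: ', 'Price: '], ['$12', '$9'],
    #                            [' per night', ' per night'],
    #                            confidence_threshold=0.5)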
def __load_field_models(self):
self.__field_models = {}
config = ConfigParser.ConfigParser()
config_buffer = resource_string(__name__, 'config/field_model_configs.cfg')
buf = StringIO.StringIO(config_buffer)
config.readfp(buf)
for (attr, value) in config.items(self.__section_name):
curr_class = importlib.import_module("landmarkrest.field_predictor.field_models.%s" % value)
instance = getattr(curr_class, value)() # reflection... booya!
self.__field_models[attr] = instance
return self.__field_models |
py | 1a2efb6deda8d3f8fe78a1c12cc9f64c492364fd | from blue_ui import app
if __name__ == "__main__":
app.run() |
py | 1a2efbb64cbd9e877bddc00983df58326fc5f37a | from example_system.db_connection_wrapper import db_connection_wrapper
def process_data() -> None:
pass
def process_data_with_error() -> None:
raise Exception("Something went wrong")
def run_example() -> None:
with db_connection_wrapper() as db_connection:
db_connection.execute("SELECT * FROM USERS;")
db_connection.execute("SELECT * FROM CUSTOMERS;")
db_connection.execute("INSERT ...")
process_data()
with db_connection_wrapper() as db_connection:
db_connection.execute("SELECT * FROM USERS;")
db_connection.execute("SELECT * FROM CUSTOMERS;")
db_connection.execute("INSERT ...")
process_data_with_error()
if __name__ == "__main__":
run_example()
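# The wrapper itself is not shown in this example; a minimal sketch of what
# ``example_system.db_connection_wrapper`` could look like (an assumption, not the real
# implementation) is a contextmanager that commits on success and rolls back on error:
#
#   from contextlib import contextmanager
#
#   @contextmanager
#   def db_connection_wrapper():
#       connection = open_db_connection()  # assumed helper
#       try:
#           yield connection
#           connection.commit()
#       except Exception:
#           connection.rollback()
#           raise
#       finally:
#           connection.close()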
|
py | 1a2efce8dbbc7ae3c26f3dadb75d2b8ab84a63e0 | """
sde
==========
License: BSD, see LICENSE for more details.
"""
__author__ = "Danila Vershinin"
import logging
from .__about__ import (
__version__,
)
from .sde import main
from .sde import edit_file, read_file
# https://realpython.com/python-logging-source-code/#library-vs-application-logging-what-is-nullhandler
# when used as a library, we default to an opt-in approach, so the library user has to enable logging
# from lastversion
logging.getLogger(__name__).addHandler(logging.NullHandler())
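# Application-side sketch: because only a NullHandler is attached here, a consumer that
# wants to see this library's logs opts in with something like the following ("sde" is
# this package's import name):
#
#   import logging
#   logging.basicConfig(level=logging.DEBUG)
#   logging.getLogger("sde").setLevel(logging.DEBUG)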
|
py | 1a2efd8e13205172124ac138799462ec35f7e1be | from functools import reduce
from jinja2 import Markup
import json
import logging
import os
import shutil
from sigal import signals
from sigal.utils import url_from_path
from sigal.writer import AbstractWriter
logger = logging.getLogger(__name__)
ASSETS_PATH = os.path.normpath(
os.path.join(os.path.abspath(os.path.dirname(__file__)), 'static', 'js'))
class PageWriter(AbstractWriter):
'''A writer for writing media pages, based on writer'''
template_file = "search.html"
def write(self, album):
''' Generate the media page and save it '''
from sigal import __url__ as sigal_link
page = self.template.render({
'album': album,
'index_title': self.index_title,
'settings': self.settings,
'sigal_link': sigal_link,
'theme': {'name': os.path.basename(self.theme),
'url': url_from_path(os.path.relpath(self.theme_path,
album.dst_path))},
})
output_file = os.path.join(album.dst_path, 'search.html')
with open(output_file, 'w', encoding='utf-8') as f:
f.write(page)
def generate_search(gallery):
id = 1
output_file = os.path.join(gallery.albums['.'].dst_path, 'static/js/search-content.js')
store = {}
for album in gallery.albums.values():
album_titles = " , ".join([*map(lambda x: x[1], album.breadcrumb)])
for item in album.medias:
data = {}
data['title'] = item.title
if 'author' in item.meta:
data['author'] = item.meta['author'][0]
data['url'] = "/" + item.path + "/" + item.url
data['thumbnail'] = item.thumbnail
data['mime'] = item.mime
if 'slides' in item.meta:
data['slides'] = item.meta['slides'][0]
data['album'] = album_titles;
store[str(id)] = data
id = id + 1
with open(output_file, 'w', encoding='utf8') as f:
f.write("window.store = ")
f.write(json.dumps(store))
writer = PageWriter(gallery.settings, index_title="Search Results")
writer.write(gallery.albums['.'])
shutil.copyfile(os.path.join(ASSETS_PATH, 'lunr.js'),
os.path.join(gallery.albums['.'].dst_path, 'static', 'js', 'lunr.js'))
def register(settings):
signals.gallery_build.connect(generate_search)
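# Hypothetical usage sketch (the plugin name below is an assumption, not defined here):
# in sigal.conf.py this plugin would be enabled with something like
#     plugins = ['search']
# after which each build writes static/js/search-content.js for lunr.js to index client-side.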
|
py | 1a2efe67762125121390d438c4b5fa8951f4f55d | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 12 11:52:04 2021
@author: Sarah
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 7 11:30:49 2021
@author: Sarah
"""
import pandas as pd
import pandasql
import dash
from dash.dependencies import Input, Output
import dash_core_components as dcc #this had to be changed
#from dash import dcc
import dash_html_components as html #this as well
#from dash import html
import plotly.express as px
from urllib.request import urlopen
import json
pd.options.mode.chained_assignment = None # default='warn'
# get vaccination data from rki vaccination github repo:
# (https://github.com/robert-koch-institut/COVID-19-Impfungen_in_Deutschland)
url_vacc_data = "https://raw.githubusercontent.com/robert-koch-institut/COVID-19-Impfungen_in_Deutschland/master/Aktuell_Deutschland_Impfquoten_COVID-19.csv"
# read-in data from csv-file (filter out Deutschland & Bundesressorts)
vacc_data = pd.read_csv(url_vacc_data, skiprows=[1, 18])
# Open Germany map as GeoJSON
with urlopen("https://raw.githubusercontent.com/isellsoap/deutschlandGeoJSON/main/2_bundeslaender/2_hoch.geo.json") as file:
germany_states = json.load(file)
# Read-in Covid-Data (States)
with urlopen("https://services7.arcgis.com/mOBPykOjAyBO2ZKk/arcgis/rest/services/Coronaf%C3%A4lle_in_den_Bundesl%C3%A4ndern/FeatureServer/0/query?where=1%3D1&outFields=LAN_ew_AGS,LAN_ew_GEN,Aktualisierung,cases7_bl_per_100k,death7_bl,cases7_bl_per_100k_txt,cases7_bl&outSR=4326&f=json") as cases_states:
covid_states = json.load(cases_states)
covid_data = pd.json_normalize(covid_states, record_path=['features'])
## Read in Voting-Results
with urlopen("https://raw.githubusercontent.com/julianrosenberger/VisualizationSDU/main/data/kerg2.csv?token=AQYCHUSY2GHUHR23UV3RZU3BYGNO2") as f:
data = pd.read_csv(f, delimiter=';', skiprows=9, usecols=['Gebietsnummer', 'Gebietsname', 'UegGebietsnummer', 'Gruppenart', 'Gruppenname', 'Gruppenreihenfolge', 'Stimme', 'Prozent'])
# #Deleting where Gruppenart!=Partei
df_clear=data[data.Gruppenart=="Partei"]
# deleting Stimme==2:
df_clear2 = df_clear[df_clear.Stimme==1]
# Grouped dataframe with only the states 1-16 (both incl.)
df_clear3 = df_clear2[df_clear2.Gebietsnummer < 17]
# Make sure Gebietsnummer belongs to state
df_clear4 = df_clear3[df_clear3.UegGebietsnummer == 99]
df_clear = df_clear4
# cleaning
print(df_clear['Prozent'].dtype) # stored as text (e.g. "12,3") --> convert to float
#(nan --> 0
df_clear['Prozent'] = df_clear['Prozent'].fillna(0)
# , --> .
df_clear['Prozent'] = (df_clear['Prozent'].replace(',', '.', regex=True).astype(float))
# string --> int
df_clear['Prozent'] = pd.to_numeric(df_clear['Prozent'])
#print(df_clear.to_string())
# Gruping by state:
df_group = df_clear.groupby('Gebietsnummer')
print(df_group)
#print(df_group['Gebietsnummer'] == 11)
for key, item in df_group:
print(df_group.get_group(key))
# Get the indices of the original dataframe to find out which party etc. it belongs to:
#idx = df_group(['Gebietsnummer'])['Prozent'].transform(max) == df_clear['Prozent']
#print(idx.head())
maximums = df_group['Prozent'].max()
#print(maximums.to_string())
#print(df_clear.loc[df_clear.groupby(['Gebietsnummer'])['Prozent'].idxmax()].reset_index(drop=True))
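# for every state (Gebietsnummer), keep the row of the party with the highest vote share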
winners = df_clear.loc[df_clear.groupby(['Gebietsnummer'])['Prozent'].idxmax()].reset_index(drop=True)
print(winners.to_string())
## Plot Vaccination Map
vacc = px.choropleth_mapbox(
mapbox_style='white-bg',
data_frame=vacc_data,
geojson=germany_states,
locations='Bundesland',
featureidkey='properties.name',
hover_name='Bundesland',
hover_data={'Bundesland': False,
'Impfquote_gesamt_voll': ':.2f%',
'Datum': True},
color='Impfquote_gesamt_voll',
color_continuous_scale=px.colors.sequential.Blues,
labels={'Impfquote_gesamt_voll': 'Fully vaccinated', 'Bundesland': 'State', 'Datum': 'Date'}
)
vacc.update_mapboxes(
center_lat=51.5,
center_lon=10.25,
zoom=4.6
)
vacc.update_layout(
margin={"r": 0, "t": 0, "l": 0, "b": 0})
## Plot Covid-Map
cov = px.choropleth_mapbox(
mapbox_style='white-bg',
data_frame=covid_data,
geojson=germany_states,
locations='attributes.LAN_ew_GEN',
featureidkey='properties.name',
hover_name='attributes.LAN_ew_GEN',
hover_data={'attributes.LAN_ew_GEN': False,
'attributes.cases7_bl_per_100k': ':.2f',
'attributes.death7_bl': True},
color='attributes.cases7_bl_per_100k',
color_continuous_scale=px.colors.sequential.YlOrRd,
labels={'attributes.cases7_bl_per_100k': '7-day incidence', 'attributes.LAN_ew_GEN': 'State', 'attributes.death7_bl': '7-day deaths'}
)
cov.update_layout(
margin={"r": 0, "t": 0, "l": 0, "b": 0})
cov.update_mapboxes(
center_lat=51.5,
center_lon=10.25,
zoom=4.6
)
## Plot Voting-results
vote = px.choropleth_mapbox(
mapbox_style='white-bg',
data_frame=winners,
geojson=germany_states,
locations='Gebietsname',
featureidkey='properties.name',
hover_name='Gebietsname',
hover_data={'Gebietsname': False,
'Gruppenname': True,
'Prozent': ':.2f%'},
color='Gruppenname',
color_discrete_map={'SPD': "#E3000F",
"CDU": "#32302e",
"CSU": "#32302e",
"AfD": "#009ee0"},
labels={'Gebietsname': 'State', 'Gruppenname': 'Party', 'Prozent': 'Result'}
)
vote.update_layout(
margin={"r": 0, "t": 0, "l": 0, "b": 0})
vote.update_mapboxes(
center_lat=51.5,
center_lon=10.25,
zoom=4.6
)
## Plot Voting-results in form of pie chart:
# want for entire Germany, instead of states:
vote_germ=data[data.Gebietsnummer==99]
vote_germ = vote_germ[vote_germ.Stimme==1]
vote_germ=vote_germ[vote_germ.Gruppenart=="Partei"]
vote_germ=vote_germ[vote_germ.Gebietsname=="Bundesgebiet"]
# cleaning
#(nan --> 0
vote_germ['Prozent'] = vote_germ['Prozent'].fillna(0)
# , --> .
vote_germ['Prozent'] = (vote_germ['Prozent'].replace(',', '.', regex=True).astype(float))
# string --> int
vote_germ['Prozent'] = pd.to_numeric(vote_germ['Prozent'])
#print(vote_germ.to_string())
# 47 different parties. Dividing into: SPD, CDU/CSU, AfD, and "Other":
#vote_germ.loc[vote_germ['Gruppenname'] == "CDU", 'Gruppenname'] = "CDU/CSU"
#vote_germ.loc[vote_germ['Gruppenname'] == "CSU", 'Gruppenname'] = "CDU/CSU"
vote_germ.loc[vote_germ['Prozent'] < 6, 'Gruppenname'] = "Other"
vote_germ.loc[vote_germ['Gruppenname'] == "FDP", 'Gruppenname'] = "Other"
vote_germ.loc[vote_germ['Gruppenname'] == "GRÜNE", 'Gruppenname'] = "Other"
vote_chart = px.pie(vote_germ, values='Prozent', names='Gruppenname', color='Gruppenname',
color_discrete_map={'SPD':'#E3000F',
                                        'CDU':'#32302e',
                                        'CSU':'#0080c8',
                                        'AfD':'#009ee0',
'Other':'grey'})
#vote_chart.show()
## Build web app with dash
app = dash.Dash(__name__)
app.layout = lambda: html.Div([
# H1-Header
html.H1(children="Does voting against vaccinations mean voting for COVID?",
style={'textAlign': 'center', 'fontFamily': 'Helvetica, Arial, sans-serif'}),
html.Div([
html.Div([
dcc.Graph(figure=vacc)
], style={'width': '33%', 'float': 'left'}),
html.Div([
dcc.Graph(figure=cov)
], style={'width': '33%', 'float': 'left'}),
html.Div([
dcc.Graph(figure=vote)
], style={'width': '33%', 'float': 'left'})
]),
html.Div([
html.Div([
dcc.Graph(figure=vacc)
], style={'width': '33%', 'float': 'left'}),
html.Div([
dcc.Graph(figure=cov)
], style={'width': '33%', 'float': 'left'}),
html.Div([
dcc.Graph(figure=vote)
], style={'width': '33%', 'float': 'left'})
])
])
if __name__ == '__main__':
app.run_server(debug=True, port=8080)
|
py | 1a2efe784b6d2430e080b7c557c24993e4843e6d | # -*- coding: utf-8 -*-
__version__ = '19.9.0.dev1'
PROJECT_NAME = "galaxy-data"
PROJECT_OWNER = PROJECT_USERAME = "galaxyproject"
PROJECT_URL = "https://github.com/galaxyproject/galaxy"
PROJECT_AUTHOR = 'Galaxy Project and Community'
PROJECT_DESCRIPTION = 'Galaxy Datatype Framework and Datatypes'
PROJECT_EMAIL = '[email protected]'
RAW_CONTENT_URL = "https://raw.github.com/%s/%s/master/" % (
PROJECT_USERAME, PROJECT_NAME
)
|
py | 1a2efed9ef854913b615cd765d6fefecf05ed99b | from __future__ import unicode_literals
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Field, Layout, Submit
from django import forms
from django.contrib.auth import get_user_model
from . import models
User = get_user_model()
class UserForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(UserForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.layout = Layout(
Field('name'),
)
class Meta:
model = User
fields = ['name']
class ProfileForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(ProfileForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.layout = Layout(
Field('picture'),
Field('bio'),
Submit('update', 'Update', css_class="btn-success"),
)
class Meta:
model = models.Profile
fields = ['picture', 'bio']
|
py | 1a2eff72245f196576db87587bf0ecc1b0819cce | # SPDX-FileCopyrightText: 2019 Scott Shawcroft for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
`adafruit_bitmap_font.bdf`
====================================================
Loads BDF format fonts.
* Author(s): Scott Shawcroft
Implementation Notes
--------------------
**Hardware:**
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
import gc
from fontio import Glyph
from .glyph_cache import GlyphCache
__version__ = "1.3.4"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_Bitmap_Font.git"
class BDF(GlyphCache):
"""Loads glyphs from a BDF file in the given bitmap_class."""
def __init__(self, f, bitmap_class):
super().__init__()
self.file = f
self.name = f
self.file.seek(0)
self.bitmap_class = bitmap_class
line = self.file.readline()
line = str(line, "utf-8")
if not line or not line.startswith("STARTFONT 2.1"):
raise ValueError("Unsupported file version")
self.point_size = None
self.x_resolution = None
self.y_resolution = None
self._ascent = None
self._descent = None
@property
def descent(self):
"""The number of pixels below the baseline of a typical descender"""
if self._descent is None:
self.file.seek(0)
while True:
line = self.file.readline()
if not line:
break
if line.startswith(b"FONT_DESCENT "):
self._descent = int(line.split()[1])
break
return self._descent
@property
def ascent(self):
"""The number of pixels above the baseline of a typical ascender"""
if self._ascent is None:
self.file.seek(0)
while True:
line = self.file.readline()
line = str(line, "utf-8")
if not line:
break
if line.startswith("FONT_ASCENT "):
self._ascent = int(line.split()[1])
break
return self._ascent
def get_bounding_box(self):
"""Return the maximum glyph size as a 4-tuple of: width, height, x_offset, y_offset"""
self.file.seek(0)
while True:
line = self.file.readline()
line = str(line, "utf-8")
if not line:
break
if line.startswith("FONTBOUNDINGBOX "):
_, x, y, x_offset, y_offset = line.split()
return (int(x), int(y), int(x_offset), int(y_offset))
return None
def load_glyphs(self, code_points):
# pylint: disable=too-many-statements,too-many-branches,too-many-nested-blocks,too-many-locals
metadata = True
character = False
code_point = None
bytes_per_row = 1
desired_character = False
current_info = {}
current_y = 0
rounded_x = 1
if isinstance(code_points, int):
remaining = set()
remaining.add(code_points)
elif isinstance(code_points, str):
remaining = set(ord(c) for c in code_points)
elif isinstance(code_points, set):
remaining = code_points
else:
remaining = set(code_points)
for code_point in remaining.copy():
if code_point in self._glyphs and self._glyphs[code_point]:
remaining.remove(code_point)
if not remaining:
return
x, _, _, _ = self.get_bounding_box()
self.file.seek(0)
while True:
line = self.file.readline()
if not line:
break
if line.startswith(b"CHARS "):
metadata = False
elif line.startswith(b"SIZE"):
_, self.point_size, self.x_resolution, self.y_resolution = line.split()
elif line.startswith(b"COMMENT"):
pass
elif line.startswith(b"STARTCHAR"):
# print(lineno, line.strip())
# _, character_name = line.split()
character = True
elif line.startswith(b"ENDCHAR"):
character = False
if desired_character:
bounds = current_info["bounds"]
shift = current_info["shift"]
gc.collect()
self._glyphs[code_point] = Glyph(
current_info["bitmap"],
0,
bounds[0],
bounds[1],
bounds[2],
bounds[3],
shift[0],
shift[1],
)
remaining.remove(code_point)
if not remaining:
return
desired_character = False
elif line.startswith(b"BBX"):
if desired_character:
_, x, y, x_offset, y_offset = line.split()
x = int(x)
y = int(y)
x_offset = int(x_offset)
y_offset = int(y_offset)
current_info["bounds"] = (x, y, x_offset, y_offset)
current_info["bitmap"] = self.bitmap_class(x, y, 2)
elif line.startswith(b"BITMAP"):
if desired_character:
rounded_x = x // 8
if x % 8 > 0:
rounded_x += 1
bytes_per_row = rounded_x
if bytes_per_row % 4 > 0:
bytes_per_row += 4 - bytes_per_row % 4
current_y = 0
elif line.startswith(b"ENCODING"):
_, code_point = line.split()
code_point = int(code_point)
if code_point in remaining:
desired_character = True
current_info = {"bitmap": None, "bounds": None, "shift": None}
elif line.startswith(b"DWIDTH"):
if desired_character:
_, shift_x, shift_y = line.split()
shift_x = int(shift_x)
shift_y = int(shift_y)
current_info["shift"] = (shift_x, shift_y)
elif line.startswith(b"SWIDTH"):
pass
elif character:
if desired_character:
bits = int(line.strip(), 16)
width = current_info["bounds"][0]
start = current_y * width
x = 0
for i in range(rounded_x):
val = (bits >> ((rounded_x - i - 1) * 8)) & 0xFF
for j in range(7, -1, -1):
if x >= width:
break
bit = 0
if val & (1 << j) != 0:
bit = 1
current_info["bitmap"][start + x] = bit
x += 1
current_y += 1
elif metadata:
# print(lineno, line.strip())
pass
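# Hypothetical usage sketch (assumes a CircuitPython/Blinka environment; the file path
# is illustrative only):
#
#     from adafruit_bitmap_font import bitmap_font
#     font = bitmap_font.load_font("/fonts/helvB12.bdf")   # returns a BDF instance
#     print(font.get_bounding_box())                        # (width, height, x_offset, y_offset)
#     font.load_glyphs("Hello")                             # cache glyphs before rendering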
|
py | 1a2eff76347edd7feca6555985906f25b58cef71 | from django.apps import AppConfig
class AliasConfig(AppConfig):
name = 'alias'
def ready(self):
import alias.signals
|
py | 1a2f00607b5285c4e74d5722267267021df2950b | # -*- coding: utf-8 -*-
"""
Microsoft-Windows-DeviceUx
GUID : ded165cf-485d-4770-a3e7-9c5f0320e80c
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("ded165cf-485d-4770-a3e7-9c5f0320e80c"), event_id=51, version=0)
class Microsoft_Windows_DeviceUx_51_0(Etw):
pattern = Struct(
"String1" / WString,
"Integer2" / Int32ul
)
@declare(guid=guid("ded165cf-485d-4770-a3e7-9c5f0320e80c"), event_id=55, version=0)
class Microsoft_Windows_DeviceUx_55_0(Etw):
pattern = Struct(
"String1" / WString,
"Integer2" / Int32ul
)
@declare(guid=guid("ded165cf-485d-4770-a3e7-9c5f0320e80c"), event_id=80, version=0)
class Microsoft_Windows_DeviceUx_80_0(Etw):
pattern = Struct(
"querycookie" / Int64ul,
"EnumerationTime" / Int32ul,
"CountOfItems" / Int32ul
)
@declare(guid=guid("ded165cf-485d-4770-a3e7-9c5f0320e80c"), event_id=81, version=0)
class Microsoft_Windows_DeviceUx_81_0(Etw):
pattern = Struct(
"querycookie" / Int64ul,
"EnumerationTime" / Int32ul,
"CountOfItems" / Int32ul
)
@declare(guid=guid("ded165cf-485d-4770-a3e7-9c5f0320e80c"), event_id=82, version=0)
class Microsoft_Windows_DeviceUx_82_0(Etw):
pattern = Struct(
"querycookie" / Int64ul
)
@declare(guid=guid("ded165cf-485d-4770-a3e7-9c5f0320e80c"), event_id=83, version=0)
class Microsoft_Windows_DeviceUx_83_0(Etw):
pattern = Struct(
"querycookie" / Int64ul
)
@declare(guid=guid("ded165cf-485d-4770-a3e7-9c5f0320e80c"), event_id=1001, version=0)
class Microsoft_Windows_DeviceUx_1001_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul
)
|
py | 1a2f007c466894ea379b8ab52aff0595035ddc51 | import torch
from torch import nn
from torch.nn import functional as F
class DetLoss(nn.Module):
def __init__(self):
super().__init__()
self.hm_criterion = nn.BCEWithLogitsLoss(reduction='none')
self.ori_criterion = nn.SmoothL1Loss(reduction='none')
self.box_criterion = nn.SmoothL1Loss(reduction='none')
def forward(self,
pred_heatmaps, heatmaps,
pred_sizemaps, sizemaps,
pred_orimaps , orimaps,
):
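        # size_w weights the regression terms toward cells that actually contain an object
        # (the per-class heatmaps peak at object centres), while p_det acts as a focal-style
        # weight that emphasises hard positives/negatives in the detection loss.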
size_w, _ = heatmaps.max(dim=1, keepdim=True)
p_det = torch.sigmoid(pred_heatmaps * (1-2*heatmaps))
det_loss = (self.hm_criterion(pred_heatmaps, heatmaps)*p_det).mean() / p_det.mean()
box_loss = (size_w * self.box_criterion(pred_sizemaps, sizemaps)).mean() / size_w.mean()
ori_loss = (size_w * self.ori_criterion(pred_orimaps, orimaps)).mean() / size_w.mean()
return det_loss, box_loss, ori_loss
class SegLoss(nn.Module):
def __init__(self):
super().__init__()
self.criterion = nn.BCEWithLogitsLoss(reduction='none')
def forward(self, pred_bev, bev):
return self.criterion(pred_bev, bev).mean()
class MotLoss(nn.Module):
def __init__(self, distill, smooth):
super().__init__()
self.bc_criterion = nn.L1Loss(reduction='none')
self.cmd_criterion = nn.BCELoss()
self.distill = distill
self.smooth = smooth
def forward(self, plan_locs, cast_locs, locs, pred_cmds, expert_locs, expert_cmds, cmds, idxs=None):
T = locs.size(1)
N = pred_cmds.size(1)
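        # gather() selects, for each sample, the predicted trajectory branch that matches
        # its high-level command, collapsing the per-command predictions to one (T, 2)
        # trajectory per sample.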
plan_locs = plan_locs.gather(1, cmds.expand(T,2,1,-1).permute(3,2,0,1)).squeeze(1)
plan_losses = self.bc_criterion(plan_locs, locs).mean(dim=[1,2])
if self.distill:
cast_loss = self.bc_criterion(cast_locs, expert_locs.detach()).mean()
cmd_loss = self.cmd_criterion(pred_cmds, expert_cmds.detach())
else:
cast_locs = cast_locs.gather(1, cmds.expand(T,2,1,-1).permute(3,2,0,1)).squeeze(1)
cast_loss = self.bc_criterion(cast_locs, locs).mean()
cmds_label = (1.-self.smooth) * F.one_hot(cmds, N) + self.smooth / N
cmd_loss = self.cmd_criterion(pred_cmds, cmds_label)
if idxs is None:
plan_loss = plan_losses.mean()
else:
plan_loss = plan_losses[idxs].mean()
return (plan_loss + cast_loss) / 2, cmd_loss
def others_forward(self, cast_locs, expert_locs, locs):
if self.distill:
return self.bc_criterion(cast_locs, expert_locs).mean()
else:
other_bc_losses = self.bc_criterion(cast_locs, locs).mean(dim=[2,3])
return other_bc_losses.min(1)[0].mean()
def bev_forward(self, plan_locs, cast_locs, locs, pred_cmds, cmds, idxs=None):
T = locs.size(1)
N = pred_cmds.size(1)
plan_locs = plan_locs.gather(1, cmds.expand(T,2,1,-1).permute(3,2,0,1)).squeeze(1)
plan_losses = self.bc_criterion(plan_locs, locs).mean(dim=[1,2])
cast_locs = cast_locs.gather(1, cmds.expand(T,2,1,-1).permute(3,2,0,1)).squeeze(1)
cast_loss = self.bc_criterion(cast_locs, locs).mean()
cmd_loss = self.cmd_criterion(pred_cmds, F.one_hot(cmds, N).float())
if idxs is None:
plan_loss = plan_losses.mean()
else:
plan_loss = plan_losses[idxs].mean()
return (plan_loss + cast_loss) / 2, cmd_loss
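if __name__ == "__main__":
    # Hypothetical smoke test; the tensor shapes below are assumptions, not taken from
    # the original training code.
    hm = torch.rand(2, 3, 64, 64)          # class heatmaps in [0, 1]
    sm = torch.rand(2, 2, 64, 64)          # size targets
    om = torch.rand(2, 2, 64, 64)          # orientation targets
    det_loss, box_loss, ori_loss = DetLoss()(
        torch.randn_like(hm), hm,
        torch.randn_like(sm), sm,
        torch.randn_like(om), om,
    )
    print(float(det_loss), float(box_loss), float(ori_loss))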
|
py | 1a2f02c74aa43cde7ade9c2f613bb4934f8f09fb | from django.db.models.signals import pre_save
from django.dispatch import receiver
from order.models import Order
from order.tpaga import revertedPaid
@receiver(pre_save, sender=Order)
def changeReverted(sender, instance, **kwargs):
try:
old = sender.objects.get(id=instance.id)
status = old.status
    except Exception:  # typically a brand-new Order with no saved row yet
status = 'created'
if instance.status == 'reverted':
isSuccess = revertedPaid(instance.token)
if not isSuccess:
instance.status = status
instance.save()
|
py | 1a2f0367777a5cca73e502e5988c7d1e28d94acf | import bing_face_api as bfa
if __name__ == '__main__':
'''
    To use a command-line argument instead, pass the directory of images
    to run face recognition on:
    search_dir = sys.argv[1]
'''
    # directory of images to run face recognition on
search_dir = "./image/original/"
    # get the file paths of the images to recognize
img_path_list = bfa.get_image_path_list(search_dir)
    # run face recognition
bfa.detect_image(img_path_list)
|
py | 1a2f04aac3c89598aba4363fc45a22d6066611ed | import pytest
from trackash.users.models import User
pytestmark = pytest.mark.django_db
def test_user_get_absolute_url(user: User):
assert user.get_absolute_url() == f"/users/{user.username}/"
|
py | 1a2f04fc687ba5d8f741dfab722afddcc43a26c3 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2017-05-16 13:47
from __future__ import unicode_literals
from django.db import migrations
def PopulateLevelCreatedDateField(apps, schema_editor):
level_model = apps.get_model("indicators", "Level")
levels = level_model.objects.all()
for l in levels:
if l.name == 'Goal':
l.create_date = '2015-10-03 19:03:50'
elif l.name == 'Output':
l.create_date = '2015-10-03 19:03:52'
elif l.name == 'Outcome':
l.create_date = '2015-10-03 19:03:51'
elif l.name == 'Activity':
l.create_date = '2015-10-03 19:03:53'
elif l.name == 'Impact':
l.create_date = '2015-10-03 19:03:54'
l.save()
class Migration(migrations.Migration):
dependencies = [
('indicators', '0007_auto_20170510_0749'),
]
operations = [
migrations.RunPython(PopulateLevelCreatedDateField),
]
|
py | 1a2f067ba59df834f1618b66a58564196c665557 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
def get_package_data():
return {'astropy.nddata.tests': ['data/*.fits']}
|
py | 1a2f0859309356f1b27d59011374a656f9780441 | # -*- coding: utf-8 -*-
import asyncio
from datetime import datetime
from cmyui import log, Ansi
from cmyui.osu import Mods
from discord import Embed
from discord.ext import commands
from discord.threads import Thread
from tinydb.operations import set as dbset
from tinydb.queries import Query
from objects.sakuro import Sakuro, ContextWrap
from osu.calculator import Calculator
from objects import glob, config
from utils.misc import convert_status_str, convert_str_status, make_safe_name, convert_grade_emoji, sakuru_only
from objects.user import UserHelper
from utils.wrappers import sakuroCommand
from utils.misc import BEATMAP_REGEX
QUEUE_EMOJIS = (
'1️⃣',
'2️⃣',
'3️⃣',
'4️⃣',
'5️⃣'
)
class AdminCog(commands.Cog, name='Admin'):
"""Utilities for admins."""
def __init__(self, bot: Sakuro):
self.bot = bot
self.hide = True
@sakuroCommand(name='reload', hidden=True)
@commands.is_owner()
async def _reload(self, ctx: ContextWrap, module: str):
"""Reloads a module."""
try:
self.bot.unload_extension(module)
self.bot.load_extension(module)
except Exception as e:
await ctx.send('\N{PISTOL}')
await ctx.send('{}: {}'.format(type(e).__name__, e))
else:
await ctx.send('\N{OK HAND SIGN}')
@sakuroCommand(hidden=True)
@commands.is_owner()
async def shutdown(self, ctx: ContextWrap) -> None:
await ctx.send('Night night..')
await self.bot.close()
@sakuroCommand(hidden=True)
@commands.check(sakuru_only)
@commands.has_permissions(ban_members=True)
async def replay(self, ctx: ContextWrap, nickname: str, mods: str, map_id: int):
player = await UserHelper.getOsuUserByName(make_safe_name(nickname), 'info')
description = ""
if not player:
async with glob.http.get("https://sakuru.pw/api/search", params={
"q": nickname
}) as resp:
if resp.status == 200:
data = await resp.json()
if data['length'] == 0:
return await ctx.send(f"Nothing matched with {nickname} not found, check your spelling.")
embed = Embed(
color=ctx.author.color,
timestamp=datetime.now()
)
embed.set_author(name=f"Search queue for {nickname}")
for idx, row in enumerate(data['matches']):
description += f"**{idx + 1}.** [{row['name']}](https://sakuru.pw/u/{row['id']})\n"
embed.description = description
description = ""
message = await ctx.send(embed=embed)
for emoji in QUEUE_EMOJIS[:data['length']]:
await message.add_reaction(emoji)
try:
reaction, user = await self.bot.wait_for('reaction_add',
check=lambda r, u: r.message.id == message.id \
and r.emoji in QUEUE_EMOJIS \
and u == ctx.author,
timeout=60.0)
except asyncio.TimeoutError:
await ctx.send('Time is out!')
else:
player = await UserHelper.getOsuUserByName(
make_safe_name(
data['matches'][QUEUE_EMOJIS.index(reaction.emoji)]['name']),
'info'
)
await message.delete()
else:
return await ctx.send("Error! Try again.")
scores = await UserHelper.getUserScores(player['id'], 0, mods, 5, 'best', map_id)
if len(scores) == 0:
return await ctx.send(f"This player has no scores on `{map_id}`!")
map_fullname = ""
for idx, score in enumerate(scores):
calc = await Calculator.calculate(
score['beatmap']['id'],
0,
score['mods'],
score['acc'],
None
)
map_fullname = calc['map_fullname']
description += f"""** {idx + 1}. {f' +{Mods(score["mods"])!r}' if score['mods'] != 0 else ''}** [{calc['stars']:.2f}★]\n""" \
f"▸ {convert_grade_emoji(score['grade'])} ▸ **{score['pp']:.2f}PP**" \
f"""{f' *({calc["pp"]:.2f}PP for {score["acc"]:.2f}% FC)*' if score['grade'] not in ('S', 'SS', 'X', 'SH') else ''} """ \
f"▸ {score['acc']:.2f}%\n▸ {score['score']} ▸ x{score['max_combo']}/{score['beatmap']['max_combo']} " \
f"▸ [{score['n300']}/{score['n100']}/{score['n50']}/{score['nmiss']}]\n" \
f"▸ [Score Set <t:{datetime.fromisoformat(score['play_time']).timestamp().__int__()}:R>]" \
f"(https://osu.sakuru.pw/api/get_replay?id={score['id']})\n"
embed = Embed(color=ctx.author.color, description=description)
embed.set_author(name=f"Top {len(scores)} Plays for {player['name']} on {map_fullname}",
url=f"https://sakuru.pw/u/{player['id']}",
icon_url=f"https://sakuru.pw/static/flags/{player['country'].upper()}.png")
embed.set_footer(text="Click on Score Set to download replay.",
icon_url="https://sakuru.pw/static/ingame.png")
await ctx.send(embed=embed)
@sakuroCommand(hidden=True)
@commands.check(sakuru_only)
@commands.has_permissions(ban_members=True)
async def restrict(self, ctx: ContextWrap, nickname: str, *reason: str):
if not await UserHelper.getOsuUserByName(make_safe_name(nickname), 'info'):
return await ctx.send(f"Player with nickname {nickname} not found.")
admin = await UserHelper.getDiscordUser(ctx.message.author.id)
async with glob.http.get("https://osu.sakuru.pw/api/handle_admin",
params={
"secret": config.API_SECRET,
"action": "restrict",
"nickname": make_safe_name(nickname),
"reason": ' '.join(reason),
"admin": admin['safe_name']
}) as resp:
if resp.status == 200:
await ctx.message.add_reaction('\N{OK HAND SIGN}')
else:
return await ctx.send("Error occurred.")
@sakuroCommand(hidden=True)
@commands.check(sakuru_only)
@commands.has_permissions(ban_members=True)
async def unrestrict(self, ctx: ContextWrap, nickname: str, *reason: str):
if not await UserHelper.getOsuUserByName(make_safe_name(nickname), 'info'):
return await ctx.send(f"Player with nickname {nickname} not found.")
admin = await UserHelper.getDiscordUser(ctx.message.author.id)
async with glob.http.get("https://osu.sakuru.pw/api/handle_admin",
params={
"secret": config.API_SECRET,
"action": "unrestrict",
"nickname": make_safe_name(nickname),
"reason": ' '.join(reason),
"admin": admin['safe_name']
}) as resp:
if resp.status == 200:
await ctx.message.add_reaction('\N{OK HAND SIGN}')
else:
return await ctx.send("Error occurred.")
@sakuroCommand(hidden=True)
@commands.check(sakuru_only)
@commands.has_permissions(ban_members=True)
async def rqmap(self, ctx: ContextWrap, status: str, type: str):
if (
not isinstance(ctx.message.channel, Thread) or
not ctx.message.channel.parent_id == config.MAP_REQS
):
return
if ctx.message.channel.archived:
return
req_table = glob.db.table('map_reqs')
Requests = Query()
req = req_table.get((Requests.thread_id == ctx.message.channel.id) & (Requests.active == True))
admin = await UserHelper.getDiscordUser(ctx.message.author.id)
if not admin:
            return await ctx.send('who are you?')
if type not in ('map', 'set'):
msg = await ctx.reply('Invalid type! (map, set)')
await msg.delete(delay=15)
await ctx.message.delete(delay=15)
return
if status not in ('love', 'rank', 'unrank'):
msg = await ctx.reply('Invalid status! (love, rank, unrank)')
await msg.delete(delay=15)
await ctx.message.delete(delay=15)
return
if type == "map":
params = {
"set_id": req['beatmap']['set_id']
}
async with glob.http.get("https://osu.sakuru.pw/api/get_map_info", params=params) as resp:
if (
resp and resp.status == 200 and
resp.content.total_bytes != 2 # b'[]'
):
bmaps = await resp.json()
embed = Embed(
title=f"Pick a map to edit status on.",
timestamp=datetime.now(),
color=0xeaff00
)
description = ""
for idx, bmap in enumerate(bmaps['set']):
description += f"`#{idx + 1}.` **[{bmap['version']}]** - {convert_status_str(int(bmap['status']))}\n"
embed.description = description
emb_mess = await ctx.send("**Send position in chat to pick a map.**", embed=embed)
valid = False
while valid is False:
try:
reply = await self.bot.wait_for('message', check=lambda x: x.channel == ctx.channel and x.author == ctx.author and x.content.isdecimal(),
timeout=60.0)
except asyncio.TimeoutError:
msg = await ctx.send('Time is up!')
await msg.delete(delay=15)
await emb_mess.delete(delay=15)
return
else:
reply.content = int(reply.content)
if reply.content > len(bmaps) or reply.content <= 0:
msg = await ctx.send('Specified position is out of range.')
await reply.delete(delay=15)
await msg.delete(delay=15)
else:
if (bm_status := bmaps['set'][reply.content - 1]['status']) == convert_str_status(status):
msg = await ctx.send(f"This map is already {convert_status_str(bm_status)}")
await msg.delete(delay=15)
await reply.delete(delay=15)
else:
await reply.delete()
await emb_mess.delete()
valid = True
params = {
"secret": config.API_SECRET,
"action": "status_map",
"admin": admin['safe_name'],
"map_id": bmaps['set'][reply.content - 1]['id'],
"status": status
}
async with glob.http.get("https://osu.sakuru.pw/api/handle_admin", params=params) as resp:
if resp.status == 200:
await ctx.message.add_reaction('\N{OK HAND SIGN}')
else:
pass
elif type =="set":
params = {
"set_id": req['beatmap']['set_id']
}
async with glob.http.get("https://osu.sakuru.pw/api/get_map_info", params=params) as resp:
if (
resp and resp.status == 200 and
resp.content.total_bytes != 2 # b'[]'
):
bmaps = await resp.json()
if all([x['status'] == convert_str_status(status) for x in bmaps['set']]):
msg = await ctx.send(f"This set is already {convert_status_str(bmaps['set'][0]['status'])}")
await ctx.message.delete(delay=15)
await msg.delete(delay=15)
return
params = {
"secret": config.API_SECRET,
"action": "status_set",
"admin": admin['safe_name'],
"set_id": req['beatmap']['set_id'],
"status": status
}
async with glob.http.get("https://osu.sakuru.pw/api/handle_admin", params=params) as resp:
if resp.status == 200:
await ctx.message.add_reaction('\N{OK HAND SIGN}')
else:
pass
@sakuroCommand(hidden=True)
@commands.check(sakuru_only)
@commands.has_permissions(ban_members=True)
async def rqclose(self, ctx: ContextWrap):
if (
not isinstance(ctx.message.channel, Thread) or
not ctx.message.channel.parent_id == config.MAP_REQS
):
return
if ctx.message.channel.archived:
return
req_table = glob.db.table('map_reqs')
Requests = Query()
req = req_table.get((Requests.thread_id == ctx.message.channel.id) & (Requests.active == True))
req_table.update(
dbset('active', False),
doc_ids=[req.doc_id]
)
first_message = await ctx.message.channel.parent.fetch_message(req['original_id'])
await first_message.delete()
await ctx.channel.delete()
@sakuroCommand(hidden=True)
@commands.check(sakuru_only)
@commands.has_permissions(ban_members=True)
async def rqreject(self, ctx: ContextWrap, *reason: str):
if (
not isinstance(ctx.message.channel, Thread) or
not ctx.message.channel.parent_id == config.MAP_REQS
):
return
if ctx.message.channel.archived:
return
req_table = glob.db.table('map_reqs')
Requests = Query()
req = req_table.get((Requests.thread_id == ctx.message.channel.id) & (Requests.active == True))
first_message = await ctx.message.channel.parent.fetch_message(req['original_id'])
requester = ctx.guild.get_member(req['requester'])
params = {
"id": req['beatmap']['id']
}
async with glob.http.get("https://osu.sakuru.pw/api/get_map_info", params=params) as resp:
if (
resp and resp.status == 200 and
resp.content.total_bytes != 2 # b'[]'
):
bmap = (await resp.json())['map']
embed = Embed(
title=f"Map Request: {bmap['artist']} - {bmap['title']}",
color=ctx.author.color,
description=f"Your request has been rejected!\n**Reason:** `{' '.join(reason)}`\n\n**Nominator:** {ctx.author.mention}",
timestamp=datetime.now()
)
embed.set_footer(text="Sakuru.pw osu! Private Server.")
embed.set_thumbnail(url=ctx.author.avatar.url)
await requester.send(embed=embed)
req_table.update(
dbset('active', False),
doc_ids=[req.doc_id]
)
await first_message.delete()
await ctx.channel.delete()
def setup(bot):
log(f"Initiated {__name__} cog!", Ansi.CYAN)
bot.add_cog(AdminCog(bot))
|
py | 1a2f0ac319a265e0b76169a4682ed41ed56851e5 | #
# Archives, to the specified folder, the logged output generated by a benchmark
# run.
#
# @author A. Shawn Bandy
import os
import zipfile
import datetime
import requests
# Follows closely from:
# http://stackoverflow.com/a/34153816
#
# Paths to the log folders generated by TFB and to the folder where those
# files should be archived.
#
path_in = os.path.abspath(os.path.normpath(os.path.expanduser(os.path.join( \
os.environ['TFB_REPOPARENT'], os.environ['TFB_REPONAME'], \
'results'))))
date_time = datetime.datetime.now()
dt_folder = date_time.strftime('%Y%m%d%H%M%S')
path_out = os.path.abspath(os.path.join(os.environ['TFB_LOGSFOLDER'], \
dt_folder))
if not os.path.exists(path_out):
os.makedirs(path_out)
zip_path = path_out + '/results.zip'
zip_file = zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED)
for root, dirs, files in os.walk(path_in):
for file in files:
zip_file.write(os.path.join(root, file))
zip_file.close()
results_upload_uri = os.environ.get('TFB_UPLOADURI')
if results_upload_uri is not None:
with open(zip_path, 'rb') as file_to_upload:
requests.post(
results_upload_uri,
headers={'Content-Type': 'application/zip'},
data=file_to_upload)
|
py | 1a2f0b32b85926219cf94d50683571fd5d476b22 | from django.contrib import admin
from .models import Project, Course, Message, Demos
from django.contrib.auth.models import Group
class CourseAdmin(admin.ModelAdmin):
list_display = ('title', 'category', 'date_created', 'link')
class ProjectAdmin(admin.ModelAdmin):
list_display = ('title', 'date_created', 'link')
class MessageAdmin(admin.ModelAdmin):
list_display = ('email', 'name', 'text', 'received', 'replied', 'date_created')
class DemosAdmin(admin.ModelAdmin):
list_display = ('title', 'demo_url')
admin.site.register(Course, CourseAdmin)
admin.site.register(Project, ProjectAdmin)
admin.site.register(Message, MessageAdmin)
admin.site.register(Demos, DemosAdmin)
admin.site.unregister(Group) |
py | 1a2f0caa5c880e7e202fdd47b38ad89340290b44 | from gpflow.kernels import Kernel
from gpflow.utilities import positive
from gpflow import Parameter
import tensorflow as tf
from tensorflow_probability import bijectors as tfb
class Batch_simple_SSK(Kernel):
"""
with hyperparameters:
1) match_decay float
decrease the contribution of long subsequences
    2) max_subsequence_length int
largest subsequence considered
"""
def __init__(self,active_dims=[0],decay=0.1,max_subsequence_length=3,
alphabet = [], maxlen=0, batch_size=100):
super().__init__(active_dims=active_dims)
# constrain decay kernel params to between 0 and 1
self.logistic = tfb.Chain([tfb.Shift(tf.cast(0,tf.float64))(tfb.Scale(tf.cast(1,tf.float64))),tfb.Sigmoid()])
self.decay_param= Parameter(decay, transform=self.logistic ,name="decay")
        # we will use copies of the kernel params to stop building an expensive computation graph
        # we instead efficiently calculate gradients using dynamic programming
# These params are updated at every call to K and K_diag (to check if parameters have been updated)
self.decay = self.decay_param.numpy()
self.decay_unconstrained = self.decay_param.unconstrained_variable.numpy()
self.order_coefs=tf.ones(max_subsequence_length,dtype=tf.float64)
# store additional kernel parameters
self.max_subsequence_length = tf.constant(max_subsequence_length)
self.alphabet = tf.constant(alphabet)
self.alphabet_size=tf.shape(self.alphabet)[0]
self.maxlen = tf.constant(maxlen)
self.batch_size = tf.constant(batch_size)
# build a lookup table of the alphabet to encode input strings
self.table = tf.lookup.StaticHashTable(
initializer=tf.lookup.KeyValueTensorInitializer(
keys=tf.constant(["PAD"]+alphabet),
values=tf.constant(range(0,len(alphabet)+1)),),default_value=0)
        # initialize helpful construction matrices to be lazily computed once needed
self.D = None
self.dD_dgap = None
def K_diag(self, X):
r"""
The diagonal elements of the string kernel are always unity (due to normalisation)
"""
return tf.ones(tf.shape(X)[:-1],dtype=tf.float64)
def K(self, X1, X2=None):
r"""
Vectorized kernel calc.
Following notation from Beck (2017), i.e have tensors S,D,Kpp,Kp
Input is two tensors of shape (# strings , # characters)
and we calc the pair-wise kernel calcs between the elements (i.e n kern calcs for two lists of length n)
D is the tensor than unrolls the recursion and allows vecotrizaiton
"""
# Turn our inputs into lists of integers using one-hot embedding
# first split up strings and pad to fixed length and prep for gpu
# pad until all have length of self.maxlen
# turn into one-hot i.e. shape (# strings, #characters+1, alphabet size)
X1 = tf.strings.split(tf.squeeze(X1,1)).to_tensor("PAD",shape=[None,self.maxlen])
X1 = self.table.lookup(X1)
# keep track of original input sizes
X1_shape = tf.shape(X1)[0]
X1 = tf.one_hot(X1,self.alphabet_size+1,dtype=tf.float64)
if X2 is None:
X2 = X1
X2_shape = X1_shape
self.symmetric = True
else:
self.symmetric = False
X2 = tf.strings.split(tf.squeeze(X2,1)).to_tensor("PAD",shape=[None,self.maxlen])
X2 = self.table.lookup(X2)
X2_shape = tf.shape(X2)[0]
X2 = tf.one_hot(X2,self.alphabet_size+1,dtype=tf.float64)
# prep the decay tensors
self._precalc()
        # combine all target strings and remove the ones in the first column that encode the padding (i.e. we don't want them to count as a match)
X_full = tf.concat([X1,X2],0)[:,:,1:]
        # get indices of all possible pairings from X and X2
# this way allows maximum number of kernel calcs to be squished onto the GPU (rather than just doing individual rows of gram)
indicies_2, indicies_1 = tf.meshgrid(tf.range(0, X1_shape ),tf.range(X1_shape , tf.shape(X_full)[0]))
indicies = tf.concat([tf.reshape(indicies_1,(-1,1)),tf.reshape(indicies_2,(-1,1))],axis=1)
if self.symmetric:
# if symmetric then only calc upper matrix (fill in rest later)
indicies = tf.boolean_mask(indicies,tf.greater_equal(indicies[:,1]+ X1_shape ,indicies[:,0]))
else:
# if not symmetric need to calculate some extra kernel evals for the normalization later on
indicies = tf.concat([indicies,tf.tile(tf.expand_dims(tf.range(tf.shape(X_full)[0]),1),(1,2))],0)
# make kernel calcs in batches
num_batches = tf.cast(tf.math.ceil(tf.shape(indicies)[0]/self.batch_size),dtype=tf.int32)
k_split = tf.TensorArray(tf.float64, size=num_batches,clear_after_read=False,infer_shape=False)
# iterate through batches
for j in tf.range(num_batches):
# collect strings for this batch
indicies_batch = indicies[self.batch_size*j:self.batch_size*(j+1)]
X_batch = tf.gather(X_full,indicies_batch[:,0],axis=0)
X2_batch = tf.gather(X_full,indicies_batch[:,1],axis=0)
# Make S: the similarity tensor of shape (# strings, #characters, # characters)
#S = tf.matmul( tf.matmul(X_batch,self.sim),tf.transpose(X2_batch,perm=(0,2,1)))
S = tf.matmul(X_batch,tf.transpose(X2_batch,perm=(0,2,1)))
# collect results for the batch
result = self.kernel_calc(S)
k_split = k_split.write(j,result)
# combine batch results
k = tf.expand_dims(k_split.concat(),1)
k_split.close()
# put results into the right places in the gram matrix and normalize
if self.symmetric:
# if symmetric then only put in top triangle (inc diag)
mask = tf.linalg.band_part(tf.ones((X1_shape,X2_shape),dtype=tf.int64), 0, -1)
non_zero = tf.not_equal(mask, tf.constant(0, dtype=tf.int64))
# Extracting the indices of upper triangle elements
indices = tf.where(non_zero)
out = tf.SparseTensor(indices,tf.squeeze(k),dense_shape=tf.cast((X1_shape,X2_shape),dtype=tf.int64))
k_results = tf.sparse.to_dense(out)
            # add in missing elements (lower diagonal)
k_results = k_results + tf.linalg.set_diag(tf.transpose(k_results),tf.zeros(X1_shape,dtype=tf.float64))
# normalise
X_diag_Ks = tf.linalg.diag_part(k_results)
norm = tf.tensordot(X_diag_Ks, X_diag_Ks,axes=0)
k_results = tf.divide(k_results, tf.sqrt(norm))
else:
# otherwise can just reshape into gram matrix
# but first take extra kernel calcs off end of k and use them to normalise
X_diag_Ks = tf.reshape(k[X1_shape*X2_shape:X1_shape*X2_shape+X1_shape],(-1,))
X2_diag_Ks = tf.reshape(k[-X2_shape:],(-1,))
k = k[0:X1_shape*X2_shape]
k_results = tf.transpose(tf.reshape(k,[X2_shape,X1_shape]))
# normalise
norm = tf.tensordot(X_diag_Ks, X2_diag_Ks,axes=0)
k_results = tf.divide(k_results, tf.sqrt(norm))
return k_results
def _precalc(self):
r"""
Update stored kernel params (incase they have changed)
and precalc D and dD_dgap as required for kernel calcs
following notation from Beck (2017)
"""
self.decay = self.decay_param.numpy()
self.decay_unconstrained = self.decay_param.unconstrained_variable.numpy()
tril = tf.linalg.band_part(tf.ones((self.maxlen,self.maxlen),dtype=tf.float64), -1, 0)
        # get upper triangle matrix of increasing integers
values = tf.TensorArray(tf.int32, size= self.maxlen)
for i in tf.range(self.maxlen):
values = values.write(i,tf.range(-i-1,self.maxlen-1-i))
power = tf.cast(values.stack(),tf.float64)
values.close()
power = tf.linalg.band_part(power, 0, -1) - tf.linalg.band_part(power, 0, 0) + tril
tril = tf.transpose(tf.linalg.band_part(tf.ones((self.maxlen,self.maxlen),dtype=tf.float64), -1, 0))-tf.eye(self.maxlen,dtype=tf.float64)
gaps = tf.fill([self.maxlen, self.maxlen],self.decay)
self.D = tf.pow(gaps*tril, power)
self.dD_dgap = tf.pow((tril * gaps), (power - 1.0)) * tril * power
@tf.custom_gradient
def kernel_calc(self,S):
# fake computations to ensure we take the custom gradients for these two params
a = tf.square(self.decay_param)
if self.symmetric:
k, dk_dgap = tf.stop_gradient(self.kernel_calc_with_grads(S))
else:
k = tf.stop_gradient(self.kernel_calc_without_grads(S))
def grad(dy, variables=None):
# get gradients of unconstrained params
grads= {}
if self.symmetric:
grads['decay:0'] = tf.reduce_sum(tf.multiply(dy,dk_dgap*tf.math.exp(self.logistic.forward_log_det_jacobian(self.decay_unconstrained,0))))
gradient = [grads[v.name] for v in variables]
else:
gradient = [None for v in variables]
return ((None),gradient)
return k, grad
def kernel_calc_without_grads(self,S):
# store squared match coef for easier calc later
match_sq = tf.square(self.decay)
# calc subkernels for each subsequence length (See Moss et al. 2020 for notation)
Kp = tf.TensorArray(tf.float64,size=self.max_subsequence_length,clear_after_read=False)
# fill in first entries
Kp = Kp.write(0, tf.ones(shape=tf.stack([tf.shape(S)[0], self.maxlen,self.maxlen]), dtype=tf.float64))
# calculate dynamic programs
for i in tf.range(self.max_subsequence_length-1):
Kp_temp = tf.multiply(S, Kp.read(i))
Kp_temp0 = match_sq * Kp_temp
Kp_temp1 = tf.matmul(Kp_temp0,self.D)
Kp_temp2 = tf.matmul(self.D,Kp_temp1,transpose_a=True)
Kp = Kp.write(i+1,Kp_temp2)
# Final calculation. We gather all Kps
Kp_stacked = Kp.stack()
Kp.close()
# combine and get overall kernel
aux = tf.multiply(S, Kp_stacked)
aux = tf.reduce_sum(aux, -1)
sum2 = tf.reduce_sum(aux, -1)
Ki = sum2 * match_sq
k = tf.linalg.matvec(tf.transpose(Ki),self.order_coefs)
return k
def kernel_calc_with_grads(self,S):
# store squared match coef for easier calc later
match_sq = tf.square(self.decay)
# calc subkernels for each subsequence length (See Moss et al. 2020 for notation)
Kp = tf.TensorArray(tf.float64,size=self.max_subsequence_length,clear_after_read=False)
dKp_dgap = tf.TensorArray(tf.float64, size=self.max_subsequence_length, clear_after_read=False)
# fill in first entries
Kp = Kp.write(0, tf.ones(shape=tf.stack([tf.shape(S)[0], self.maxlen,self.maxlen]), dtype=tf.float64))
dKp_dgap = dKp_dgap.write(0, tf.zeros(shape=tf.stack([tf.shape(S)[0], self.maxlen,self.maxlen]), dtype=tf.float64))
# calculate dynamic programs
for i in tf.range(self.max_subsequence_length-1):
Kp_temp = tf.multiply(S, Kp.read(i))
Kp_temp0 = match_sq * Kp_temp
Kp_temp1 = tf.matmul(Kp_temp0,self.D)
Kp_temp2 = tf.matmul(self.D,Kp_temp1,transpose_a=True)
Kp = Kp.write(i+1,Kp_temp2)
dKp_dgap_temp_1 = tf.matmul(self.dD_dgap,Kp_temp1,transpose_a=True)
dKp_dgap_temp_2 = tf.multiply(S, dKp_dgap.read(i))
dKp_dgap_temp_2 = dKp_dgap_temp_2 * match_sq
dKp_dgap_temp_2 = tf.matmul(dKp_dgap_temp_2,self.D)
dKp_dgap_temp_2 = dKp_dgap_temp_2 + tf.matmul(Kp_temp0,self.dD_dgap)
dKp_dgap_temp_2 = tf.matmul(self.D,dKp_dgap_temp_2,transpose_a=True)
dKp_dgap = dKp_dgap.write(i+1,dKp_dgap_temp_1 + dKp_dgap_temp_2)
# Final calculation. We gather all Kps
Kp_stacked = Kp.stack()
Kp.close()
dKp_dgap_stacked = dKp_dgap.stack()
dKp_dgap.close()
# combine and get overall kernel
# get k
aux = tf.multiply(S, Kp_stacked)
aux = tf.reduce_sum(aux, -1)
sum2 = tf.reduce_sum(aux, -1)
Ki = sum2 * match_sq
k = tf.linalg.matvec(tf.transpose(Ki),self.order_coefs)
# get gap decay grads
temp = tf.multiply(S, dKp_dgap_stacked)
temp = tf.reduce_sum(temp, -1)
temp = tf.reduce_sum(temp, -1)
temp = temp * match_sq
dk_dgap = tf.linalg.matvec(tf.transpose(temp),self.order_coefs)
return k, dk_dgap
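    # Hypothetical usage sketch (alphabet, maxlen and the input strings below are
    # illustrative assumptions, not taken from the original experiments):
    #
    #     import numpy as np
    #     k = Batch_simple_SSK(alphabet=list("ACGT"), maxlen=5, max_subsequence_length=3)
    #     X = np.array([["A C G T"], ["A C C T"]])   # space-separated characters, shape (n, 1)
    #     gram = k.K(X)                              # (n, n) normalised string-kernel matrix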
|
py | 1a2f0cb42c5e281612c2dd86f9dc9461dd6f0c31 | """
Compat. layer between LWR and Galaxy.
"""
|
py | 1a2f0d7d97d9d67d1ba8cf782e6bc6fb923479de | import unittest
import swift_idl as IDL
test_structure = {
"key.kind" : "source.lang.swift.decl.struct",
"key.offset" : 19,
"key.nameoffset" : 26,
"key.namelength" : 3,
"key.inheritedtypes" : [
{
"key.name" : "JSONDecodable"
}
],
"key.bodylength" : 110,
"key.accessibility" : "source.lang.swift.accessibility.internal",
"key.substructure" : [
{
"key.kind" : "source.lang.swift.decl.var.instance",
"key.offset" : 72,
"key.attributes" : [
{
"key.attribute" : "source.decl.attribute.__raw_doc_comment"
}
],
"key.nameoffset" : 76,
"key.namelength" : 2,
"key.length" : 15,
"key.accessibility" : "source.lang.swift.accessibility.internal",
"key.substructure" : [
],
"key.typename" : "Int",
"key.name" : "id"
},
{
"key.kind" : "source.lang.swift.decl.var.instance",
"key.offset" : 92,
"key.nameoffset" : 96,
"key.namelength" : 5,
"key.length" : 17,
"key.accessibility" : "source.lang.swift.accessibility.internal",
"key.substructure" : [
],
"key.typename" : "String",
"key.name" : "query"
},
{
"key.kind" : "source.lang.swift.decl.var.instance",
"key.offset" : 126,
"key.attributes" : [
{
"key.attribute" : "source.decl.attribute.__raw_doc_comment"
}
],
"key.nameoffset" : 130,
"key.namelength" : 1,
"key.length" : 13,
"key.accessibility" : "source.lang.swift.accessibility.internal",
"key.substructure" : [
],
"key.typename" : "String",
"key.name" : "z"
}
],
"key.name" : "Foo",
"key.length" : 138,
"key.bodyoffset" : 46
}
test_syntax = [
{ "offset" : 0, "length" : 6, "type" : "source.lang.swift.syntaxtype.keyword" },
{ "offset" : 7, "length" : 10, "type" : "source.lang.swift.syntaxtype.identifier" },
{ "offset" : 19, "length" : 6, "type" : "source.lang.swift.syntaxtype.keyword" },
{ "offset" : 26, "length" : 3, "type" : "source.lang.swift.syntaxtype.identifier" },
{ "offset" : 31, "length" : 13, "type" : "source.lang.swift.syntaxtype.typeidentifier" },
{ "offset" : 47, "length" : 21, "type" : "source.lang.swift.syntaxtype.comment" },
{ "offset" : 72, "length" : 3, "type" : "source.lang.swift.syntaxtype.keyword" },
{ "offset" : 76, "length" : 2, "type" : "source.lang.swift.syntaxtype.identifier" },
{ "offset" : 80, "length" : 3, "type" : "source.lang.swift.syntaxtype.typeidentifier" },
{ "offset" : 86, "length" : 1, "type" : "source.lang.swift.syntaxtype.number" },
{ "offset" : 92, "length" : 3, "type" : "source.lang.swift.syntaxtype.keyword" },
{ "offset" : 96, "length" : 5, "type" : "source.lang.swift.syntaxtype.identifier" },
{ "offset" : 103, "length" : 6, "type" : "source.lang.swift.syntaxtype.typeidentifier" },
{ "offset" : 110, "length" : 12, "type" : "source.lang.swift.syntaxtype.comment" },
{ "offset" : 126, "length" : 3, "type" : "source.lang.swift.syntaxtype.keyword" },
{ "offset" : 130, "length" : 1, "type" : "source.lang.swift.syntaxtype.identifier" },
{ "offset" : 133, "length" : 6, "type" : "source.lang.swift.syntaxtype.typeidentifier" },
{ "offset" : 144, "length" : 12, "type" : "source.lang.swift.syntaxtype.comment" }
]
test_source = '''import Foundation
struct Foo: JSONDecodable { // sample:"foo,,bar"
let id: Int = 3
let query: String // json:"q"
let z: String // json:"-"
}
'''
class SampleStructTest(unittest.TestCase):
def test_getSwiftTokens(self):
tk = IDL.getSwiftTokens(test_syntax, test_source)
self.assertEqual('import', tk[0].content)
self.assertEqual(1, tk[0].line)
self.assertEqual('source.lang.swift.syntaxtype.keyword', tk[0].tokenType)
self.assertEqual('}\n', tk[-1].content)
self.assertEqual(7, tk[-1].line)
self.assertEqual('omittedtoken', tk[-1].tokenType)
if __name__ == '__main__':
unittest.main()
|
py | 1a2f0e73329d5f6f22c7b9c749947d10b973a0fe | from os import getcwd
import sys
sys.path.append(getcwd() + '/..') # Add src/ dir to import path
import traceback
import logging
from os.path import join
from itertools import combinations
import networkx as nx
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import libs.osLib as ol
def removeDiagonal(A):
m = A.shape[0]
strided = np.lib.stride_tricks.as_strided
s0,s1 = A.strides
return strided(A.ravel()[1:], shape=(m-1, m), strides=(s0+s1, s1)).reshape(m, -1)
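# Worked example: removeDiagonal(np.arange(9).reshape(3, 3)) returns
# [[1, 2], [3, 5], [6, 7]] -- each row keeps its m-1 off-diagonal entries.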
if __name__ == '__main__':
root = logging.getLogger()
root.setLevel(logging.DEBUG)
baseDir, outputDir = '../../data/adjacencyMatrices', '../../data/plots'
loadNodeMappings, loadAdjacencies = True, False
numClusters = 2
classMapping = {
'time': 'T',
'content': 'C',
'tag': 'G',
'location': 'L',
}
try:
# metapaths = [['time', 'content', 'time'], ['tag', 'content', 'tag'], ['location', 'content', 'location'] ] # ['time', 'content', 'time'] # #
metapaths = [['time', 'content', 'time']]
metapaths = [[classMapping[t] for t in metapath] for metapath in metapaths]
for metapath in metapaths:
nodeMapping = ol.loadPickle(join(baseDir, f'nodeMapping.pickle'))
# PathSim load
similarityM = ol.loadSparce(join(baseDir, f'similarity-{"".join(metapath)}.npz')).toarray()
# Sclump load
# similarityM = ol.loadNumpy(join(baseDir, f'SClump-similarity.npy'))
similarityM = removeDiagonal(similarityM) # Carefull here - we're removing the relation with itself but breaking the mapping from nodeMapping
# Remove all zeros
print(f'Orig shape: {similarityM.shape}')
similarityM = similarityM[~np.all(similarityM == 0, axis=1)]
similarityM = similarityM[:, ~np.all(similarityM == 0, axis=0)]
print(f'Without zeros shape: {similarityM.shape}')
# Plot simple value histogram
flattenSim = pd.Series(similarityM.flatten())
g = sns.distplot(flattenSim, kde=False, bins=10)
g.set_yscale('log')
            plt.title('Value count in Similarity Matrix')
            plt.savefig(join(outputDir, f'similarityValueDistribution-{"".join(metapath)}.png'))
print(similarityM.max())
# Count non-zeros per row
rowCountNonZero = np.count_nonzero(similarityM, axis=1)
# Count max value per row
rowCountMax = np.amax(similarityM, 1)
# Count min value (that's not a zero) per row
rowCountMinNonZero = np.where(similarityM > 0, similarityM, similarityM.max()).min(1)
# Count mean value (that's not a zero) per row
rowCountMeanNonZero = np.true_divide(similarityM.sum(1), (similarityM!=0).sum(1))
plotDf = None
for k, x in {
'Non zeros per row': rowCountNonZero,
'Max per row': rowCountMax,
'Mean per row (no zeros)': rowCountMeanNonZero,
'Min per row (no zeros)': rowCountMinNonZero,
}.items():
auxDf = pd.Series(x, name='Count').to_frame()
auxDf['Measure'] = k
plotDf = auxDf if plotDf is None else pd.concat([plotDf, auxDf], ignore_index=False)
# Make boxplot
fig, ax = plt.subplots(figsize=(15, 15))
g = sns.boxplot(ax=ax, data=plotDf, x='Measure', y='Count', palette="Set2", showfliers=True, showmeans=True)
g.set_yscale('log')
g.set_yticklabels(g.get_yticks(), size=16)
# g.set_xticklabels(g.get_xticks(), size=16)
plt.savefig(join(outputDir, f'statsPerRow-log-{"".join(metapath)}.png'))
plt.close()
# Make boxplot
fig, ax = plt.subplots(figsize=(15, 15))
g = sns.boxplot(ax=ax, data=plotDf, x='Measure', y='Count', palette="Set2", showfliers=False, showmeans=True)
g.set_yticklabels(g.get_yticks(), size=16)
# g.set_xticklabels(g.get_xticks(), size=16)
plt.savefig(join(outputDir, f'statsPerRow-{"".join(metapath)}.png'))
plt.close()
# Make violin plots
fig = plt.figure(figsize=(12, 12))
gs = fig.add_gridspec(3, 2)
ax = fig.add_subplot(gs[0, 0])
sns.violinplot(data=similarityM.flatten())
ax.set_xlabel("Similarity as is")
ax = fig.add_subplot(gs[0, 1])
sns.violinplot(data=rowCountNonZero)
ax.set_xlabel("Non zeros per row")
ax = fig.add_subplot(gs[1, 0])
sns.violinplot(rowCountMeanNonZero)
ax.set_xlabel("Mean per row (no zeros)")
ax = fig.add_subplot(gs[1, 1])
sns.violinplot(rowCountMinNonZero)
ax.set_xlabel("Min per row (no zeros)")
ax = fig.add_subplot(gs[2, 0])
sns.violinplot(data=rowCountMax)
ax.set_xlabel("Max per row")
fig.tight_layout()
plt.savefig(join(outputDir, f'statsViolinPerRow-{"".join(metapath)}.png'))
plt.close()
# Plot as matrix
"""
fig = plt.figure(figsize=(15, 15))
ax = plt.axes()
plt.spy(similarityM, precision=0.1, marker='.', markersize=0.05)
plt.savefig(join(outputDir, f'similarityMatrixPlot-{"".join(metapath)}.png'))
plt.close()
"""
# Select top k most similiar or wtv
# Pick their similarity vectors
# Plot them
except Exception as ex:
print(traceback.format_exc()) |
py | 1a2f0e93aaa88f132ac65f9112e7d1e015d40235 | import asyncio
import datetime
import logging
import json
import functools
import re
import csv
from io import StringIO, BytesIO
from pathlib import Path
from tabulate import tabulate
from typing import List, Literal, Optional, Union
import discord
from redbot.core import Config, checks, commands
from redbot.core.i18n import Translator, cog_i18n
from redbot.core.utils.chat_formatting import pagify, humanize_timedelta, humanize_list, box
from redbot.core.utils.menus import start_adding_reactions
from redbot.core.utils.predicates import ReactionPredicate
from .api import DestinyAPI
from .converter import DestinyActivity, StatsPage, SearchInfo, DestinyEververseItemType
from .errors import Destiny2APIError, Destiny2MissingManifest
from .menus import BaseMenu, BasePages
DEV_BOTS = [552261846951002112]
# If you want parsing the manifest data to be easier, add your bot's ID to this
# list; otherwise leaving it out should help performance on bots that are just
# running the cog like normal.
BASE_URL = "https://www.bungie.net/Platform"
IMAGE_URL = "https://www.bungie.net"
AUTH_URL = "https://www.bungie.net/en/oauth/authorize"
TOKEN_URL = "https://www.bungie.net/platform/app/oauth/token/"
_ = Translator("Destiny", __file__)
log = logging.getLogger("red.trusty-cogs.Destiny")
@cog_i18n(_)
class Destiny(DestinyAPI, commands.Cog):
"""
Get information from the Destiny 2 API
"""
__version__ = "1.5.5"
__author__ = "TrustyJAID"
def __init__(self, bot):
self.bot = bot
default_global = {
"api_token": {"api_key": "", "client_id": "", "client_secret": ""},
"manifest_version": "",
}
default_user = {"oauth": {}, "account": {}}
self.config = Config.get_conf(self, 35689771456)
self.config.register_global(**default_global)
self.config.register_user(**default_user)
self.config.register_guild(clan_id=None)
self.throttle: float = 0
def format_help_for_context(self, ctx: commands.Context) -> str:
"""
Thanks Sinbad!
"""
pre_processed = super().format_help_for_context(ctx)
return f"{pre_processed}\n\nCog Version: {self.__version__}"
async def red_delete_data_for_user(
self,
*,
requester: Literal["discord_deleted_user", "owner", "user", "user_strict"],
user_id: int,
):
"""
Method for finding a user's data inside the cog and deleting it.
"""
await self.config.user_from_id(user_id).clear()
@commands.group()
async def destiny(self, ctx: commands.Context) -> None:
"""Get information from the Destiny 2 API"""
pass
@destiny.command()
async def forgetme(self, ctx: commands.Context) -> None:
"""
Remove your authorization to the destiny API on the bot
"""
await self.red_delete_data_for_user(requester="user", user_id=ctx.author.id)
await ctx.send(_("Your authorization has been reset."))
@destiny.group(aliases=["s"])
async def search(self, ctx: commands.Context) -> None:
"""
Search for a destiny item, vendor, record, etc.
"""
pass
async def get_weapon_possible_perks(self, weapon: dict) -> dict:
perks = {}
slot_counter = 1
count = 2
for socket in weapon["sockets"]["socketEntries"]:
if socket["singleInitialItemHash"] in [
4248210736,
2323986101,
0,
2285418970,
1282012138,
2993594586,
]:
continue
if socket["socketTypeHash"] in [2218962841, 1282012138, 1456031260]:
continue
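            # Random-rolled weapons list their possible perks in a randomized plug set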
if "randomizedPlugSetHash" in socket:
pool = (
await self.get_definition(
"DestinyPlugSetDefinition", [socket["randomizedPlugSetHash"]]
)
)[str(socket["randomizedPlugSetHash"])]
pool_perks = [v["plugItemHash"] for v in pool["reusablePlugItems"]]
all_perks = await self.get_definition(
"DestinyInventoryItemLiteDefinition", pool_perks
)
try:
# https://stackoverflow.com/questions/44914727/get-first-and-second-values-in-dictionary-in-cpython-3-6
it = iter(all_perks.values())
key_hash = next(it)["itemCategoryHashes"][0]
key_data = (
await self.get_definition("DestinyItemCategoryDefinition", [key_hash])
)[str(key_hash)]
key = key_data["displayProperties"]["name"]
if key in perks:
key = f"{key} {count}"
count += 1
except IndexError:
key = _("Perk {count}").format(count=slot_counter)
perks[key] = "\n".join(
[p["displayProperties"]["name"] for h, p in all_perks.items()]
)
slot_counter += 1
continue
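            # Curated/static rolls expose their perk pool through a reusable plug set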
if "reusablePlugSetHash" in socket:
pool = (
await self.get_definition(
"DestinyPlugSetDefinition", [socket["reusablePlugSetHash"]]
)
)[str(socket["reusablePlugSetHash"])]
pool_perks = [v["plugItemHash"] for v in pool["reusablePlugItems"]]
all_perks = await self.get_definition(
"DestinyInventoryItemLiteDefinition", pool_perks
)
try:
it = iter(all_perks.values())
key_hash = next(it)["itemCategoryHashes"][0]
key_data = (
await self.get_definition("DestinyItemCategoryDefinition", [key_hash])
)[str(key_hash)]
key = key_data["displayProperties"]["name"]
if key in perks:
key = f"{key} {count}"
count += 1
except IndexError:
key = _("Perk {count}").format(count=slot_counter)
perks[key] = "\n".join(
[p["displayProperties"]["name"] for h, p in all_perks.items()]
)
slot_counter += 1
continue
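            # Otherwise fall back to the single fixed perk plugged into this socket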
perk_hash = socket["singleInitialItemHash"]
perk = (await self.get_definition("DestinyInventoryItemLiteDefinition", [perk_hash]))[
str(perk_hash)
]
            try:
                key_hash = perk["itemCategoryHashes"][0]
                key_data = (
                    await self.get_definition("DestinyItemCategoryDefinition", [key_hash])
                )[str(key_hash)]
                key = key_data["displayProperties"]["name"]
if key in perks:
key = f"{key} {count}"
count += 1
except (IndexError, KeyError):
key = _("Perk {count}").format(count=slot_counter)
perks[key] = perk["displayProperties"]["name"]
slot_counter += 1
return perks
@search.command(aliases=["item"])
@commands.bot_has_permissions(embed_links=True)
@commands.max_concurrency(1, commands.BucketType.default)
async def items(
self, ctx: commands.Context, details_or_lore: Optional[SearchInfo] = None, *, search: str
) -> None:
"""
Search for a specific item in Destiny 2
`[details_or_lore]` signify what information to display for the item
        by default this command will show all available perks on weapons.
        Using `details`, `true`, or `stats` will show the weapon's stat bars.
        Using `lore` here will instead display the weapon's lore card if it exists.
"""
        show_lore = details_or_lore is False
if search.startswith("lore "):
search = search.replace("lore ", "")
async with ctx.typing():
try:
items = await self.search_definition("DestinyInventoryItemDefinition", search)
except Destiny2MissingManifest as e:
await ctx.send(e)
return
if not items:
await ctx.send(_("`{search}` could not be found.").format(search=search))
return
embeds = []
# log.debug(items[0])
for item_hash, item in items.items():
if not (item["equippable"]):
continue
embed = discord.Embed()
description = item["flavorText"] + "\n\n"
damage_type = ""
try:
damage_data = (
await self.get_definition(
"DestinyDamageTypeDefinition", [item["defaultDamageTypeHash"]]
)
)[str(item["defaultDamageTypeHash"])]
damage_type = damage_data["displayProperties"]["name"]
except KeyError:
pass
if item["itemType"] in [3] and not show_lore:
stats_str = ""
rpm = ""
recoil = ""
magazine = ""
for stat_hash, value in item["stats"]["stats"].items():
if stat_hash in ["1935470627", "1480404414", "1885944937"]:
continue
stat_info = (
await self.get_definition("DestinyStatDefinition", [stat_hash])
)[str(stat_hash)]
stat_name = stat_info["displayProperties"]["name"]
if not stat_name:
continue
prog = "█" * int(value["value"] / 10)
empty = "░" * int((100 - value["value"]) / 10)
bar = f"{prog}{empty}"
if stat_hash == "4284893193":
rpm = f"{stat_name}: **{value['value']}**\n"
continue
if stat_hash == "3871231066":
recoil = f"{stat_name}: **{value['value']}**\n"
continue
if stat_hash == "2715839340":
magazine = f"{stat_name}: **{value['value']}**\n"
continue
if details_or_lore:
stats_str += f"{stat_name}: **{value['value']}** \n{bar}\n"
stats_str += rpm + recoil + magazine
description += stats_str
embed.description = description
perks = await self.get_weapon_possible_perks(item)
for key, value in perks.items():
embed.add_field(name=key, value=value[:1024])
if "loreHash" in item and (show_lore or item["itemType"] in [2]):
lore = (
await self.get_definition("DestinyLoreDefinition", [item["loreHash"]])
)[str(item["loreHash"])]
description += _("Lore: \n\n") + lore["displayProperties"]["description"]
if len(description) > 2048:
count = 0
for page in pagify(description, page_length=1024):
if count == 0:
embed.description = page
else:
embed.add_field(name=_("Lore Continued"), value=page)
count += 1
else:
embed.description = description
embed.title = damage_type + " " + item["itemTypeAndTierDisplayName"]
name = item["displayProperties"]["name"]
icon_url = IMAGE_URL + item["displayProperties"]["icon"]
embed.set_author(name=name, icon_url=icon_url)
embed.set_thumbnail(url=icon_url)
if item.get("screenshot", False):
embed.set_image(url=IMAGE_URL + item["screenshot"])
embeds.append(embed)
await BaseMenu(
source=BasePages(
pages=embeds,
),
delete_message_after=False,
clear_reactions_after=True,
timeout=60,
cog=self,
page_start=0,
).start(ctx=ctx)
async def check_gilded_title(self, chars: dict, title: dict) -> bool:
"""
Checks a players records for a completed gilded title
"""
gilding_hash = title["titleInfo"].get("gildingTrackingRecordHash", None)
records = chars["profileRecords"]["data"]["records"]
if str(gilding_hash) in records:
for objective in records[str(gilding_hash)]["objectives"]:
if objective["complete"]:
return True
return False
@destiny.command(name="joinme")
@commands.bot_has_permissions(embed_links=True)
async def destiny_join_command(self, ctx: commands.Context) -> None:
"""
Get your Steam ID to give people to join your in-game fireteam
"""
async with ctx.typing():
if not await self.has_oauth(ctx):
msg = _(
"You need to authenticate your Bungie.net account before this command will work."
)
return await ctx.send(msg)
bungie_id = await self.config.user(ctx.author).oauth.membership_id()
creds = await self.get_bnet_user(ctx.author, bungie_id)
steam_id = ""
for cred in creds:
if "credentialAsString" in cred:
steam_id = cred["credentialAsString"]
join_code = f"\n```py\n/join {steam_id}\n```"
msg = _(
"Use the following code in game to join {author}'s Fireteam:{join_code}"
).format(author=ctx.author.display_name, join_code=join_code)
join_code = f"\n```py\n/join {steam_id}\n```"
await ctx.send(msg)
@destiny.group()
@commands.bot_has_permissions(embed_links=True)
async def clan(self, ctx: commands.Context) -> None:
"""
Clan settings
"""
return
@clan.command(name="info")
@commands.bot_has_permissions(embed_links=True)
async def show_clan_info(self, ctx: commands.Context, clan_id: Optional[str]):
"""
Display basic information about the clan set in this server
"""
async with ctx.typing():
if not await self.has_oauth(ctx):
msg = _(
"You need to authenticate your Bungie.net account before this command will work."
)
return await ctx.send(msg)
if clan_id:
clan_re = re.compile(
r"(https:\/\/)?(www\.)?bungie\.net\/.*(groupid=(\d+))", flags=re.I
)
clan_invite = clan_re.search(clan_id)
if clan_invite:
clan_id = clan_invite.group(4)
else:
clan_id = await self.config.guild(ctx.guild).clan_id()
if not clan_id:
return await ctx.send(
_(
"No clan ID has been setup for this server. "
"Use `{prefix}destiny clan set` to set one."
).format(prefix=ctx.clean_prefix)
)
try:
clan_info = await self.get_clan_info(ctx.author, clan_id)
embed = await self.make_clan_embed(clan_info)
except Exception:
log.exception("Error getting clan info")
return await ctx.send(
_("I could not find any information about this servers clan.")
)
else:
await ctx.send(embed=embed)
async def make_clan_embed(self, clan_info: dict) -> discord.Embed:
clan_id = clan_info["detail"]["groupId"]
clan_name = clan_info["detail"]["name"]
clan_about = clan_info["detail"]["about"]
clan_motto = clan_info["detail"]["motto"]
clan_callsign = clan_info["detail"]["clanInfo"]["clanCallsign"]
clan_xp_data = clan_info["detail"]["clanInfo"]["d2ClanProgressions"]["584850370"]
weekly_progress = clan_xp_data["weeklyProgress"]
weekly_limit = clan_xp_data["weeklyLimit"]
level = clan_xp_data["level"]
level_cap = clan_xp_data["levelCap"]
members = clan_info["detail"]["memberCount"]
max_members = clan_info["detail"]["features"]["maximumMembers"]
clan_creation_date = datetime.datetime.strptime(
clan_info["detail"]["creationDate"], "%Y-%m-%dT%H:%M:%S.%fZ"
)
clan_create_str = clan_creation_date.strftime("%I:%M %p %Y-%m-%d")
clan_xp_str = _(
"Level: {level}/{level_cap}\nWeekly Progress: " "{weekly_progress}/{weekly_limit}"
).format(
level=level,
level_cap=level_cap,
weekly_progress=weekly_progress,
weekly_limit=weekly_limit,
)
join_link = f"https://www.bungie.net/en/ClanV2?groupid={clan_id}"
embed = discord.Embed(
title=f"{clan_name} [{clan_callsign}]", description=clan_about, url=join_link
)
embed.add_field(name=_("Motto"), value=clan_motto, inline=False)
embed.add_field(name=_("Clan XP"), value=clan_xp_str)
embed.add_field(name=_("Members"), value=f"{members}/{max_members}")
embed.add_field(name=_("Clan Founded"), value=clan_create_str)
return embed
@clan.command(name="set")
@commands.bot_has_permissions(embed_links=True)
@commands.admin_or_permissions(manage_guild=True)
async def set_clan_id(self, ctx: commands.Context, clan_id: str) -> None:
"""
Set the clan ID for this server
`<clan_id>` Must be either the clan's ID or you can provide
the clan invite link at the `clan profile` setting on bungie.net
example link: `https://www.bungie.net/en/ClanV2?groupid=1234567`
the numbers after `groupid=` is the clan ID.
"""
async with ctx.typing():
if not await self.has_oauth(ctx):
msg = _(
"You need to authenticate your Bungie.net account before this command will work."
)
return await ctx.send(msg)
clan_re = re.compile(
r"(https:\/\/)?(www\.)?bungie\.net\/.*(groupid=(\d+))", flags=re.I
)
clan_invite = clan_re.search(clan_id)
if clan_invite:
clan_id = clan_invite.group(4)
try:
clan_info = await self.get_clan_info(ctx.author, clan_id)
embed = await self.make_clan_embed(clan_info)
except Exception:
log.exception("Error getting clan info")
return await ctx.send(_("I could not find a clan with that ID."))
else:
await self.config.guild(ctx.guild).clan_id.set(clan_id)
await ctx.send(_("Server's clan set to"), embed=embed)
async def destiny_pick_profile(
self, ctx: commands.Context, pending_users: dict
) -> Optional[dict]:
"""
Allows a clan admin to pick the user they want to approve in the clan
"""
users = pending_users["results"][:9]
embed = discord.Embed(
title=_("Pending Clan Members"),
description=_("React with the user you would like to approve into the clan."),
)
for index, user in enumerate(pending_users["results"]):
destiny_name = user["destinyUserInfo"]["LastSeenDisplayName"]
bungie_name = user["bungieNetUserInfo"]["displayName"]
msg = _("Destiny/Steam Name: {destiny_name}\nBungie.net Name: {bungie_name}").format(
destiny_name=destiny_name, bungie_name=bungie_name
)
embed.add_field(name=_("User {count}").format(count=index + 1), value=msg)
msg = await ctx.send(embed=embed)
emojis = ReactionPredicate.NUMBER_EMOJIS[1 : len(users) + 1]
start_adding_reactions(msg, emojis)
pred = ReactionPredicate.with_emojis(emojis, msg)
try:
await ctx.bot.wait_for("reaction_add", check=pred)
except asyncio.TimeoutError:
return None
else:
return users[pred.result]
@clan.command(name="pending")
@commands.bot_has_permissions(embed_links=True)
@commands.admin_or_permissions(manage_guild=True)
async def clan_pending(self, ctx: commands.Context) -> None:
"""
Display pending clan members.
Clan admin can further approve specified clan members
by reacting to the resulting message.
"""
async with ctx.typing():
if not await self.has_oauth(ctx):
msg = _(
"You need to authenticate your Bungie.net account before this command will work."
)
return await ctx.send(msg)
clan_id = await self.config.guild(ctx.guild).clan_id()
if not clan_id:
return await ctx.send(
_(
"No clan ID has been setup for this server. "
"Use `{prefix}destiny clan set` to set one."
).format(prefix=ctx.clean_prefix)
)
clan_pending = await self.get_clan_pending(ctx.author, clan_id)
if not clan_pending["results"]:
return await ctx.send(_("There is no one pending clan approval."))
approved = await self.destiny_pick_profile(ctx, clan_pending)
if not approved:
return await ctx.send(_("No one will be approved into the clan."))
try:
destiny_name = approved["destinyUserInfo"]["LastSeenDisplayName"]
bungie_name = approved["bungieNetUserInfo"]["displayName"]
membership_id = approved["destinyUserInfo"]["membershipId"]
membership_type = approved["destinyUserInfo"]["membershipType"]
await self.approve_clan_pending(
ctx.author, clan_id, membership_type, membership_id, approved
)
except Destiny2APIError as e:
log.exception("error approving clan member.")
await ctx.send(str(e))
else:
await ctx.send(
_("{destiny_name} AKA {bungie_name} has been approved into the clan.").format(
destiny_name=destiny_name, bungie_name=bungie_name
)
)
@clan.command(name="roster")
@commands.bot_has_permissions(embed_links=True)
@commands.mod_or_permissions(manage_messages=True)
async def get_clan_roster(self, ctx: commands.Context, output_format: Optional[str]) -> None:
"""
Get the full clan roster
`[output_format]` if `csv` is provided this will upload a csv file of
the clan roster instead of displaying the output.
"""
async with ctx.typing():
if not await self.has_oauth(ctx):
msg = _(
"You need to authenticate your Bungie.net account before this command will work."
)
return await ctx.send(msg)
clan_id = await self.config.guild(ctx.guild).clan_id()
if not clan_id:
return await ctx.send(
_(
"No clan ID has been setup for this server. "
"Use `{prefix}destiny clan set` to set one."
).format(prefix=ctx.clean_prefix)
)
clan = await self.get_clan_members(ctx.author, clan_id)
headers = [
"Discord Name",
"Discord ID",
"Destiny Name",
"Destiny ID",
"Bungie.net Name",
"Bungie.net ID",
"Last Seen Destiny",
"Steam ID",
"Join Date",
]
clan_mems = ""
rows = []
saved_users = await self.config.all_users()
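            # Try to match each clan member back to a Discord user who has linked their Bungie account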
for member in clan["results"]:
last_online = datetime.datetime.utcfromtimestamp(
int(member["lastOnlineStatusChange"])
)
join_date = datetime.datetime.strptime(member["joinDate"], "%Y-%m-%dT%H:%M:%SZ")
destiny_name = member["destinyUserInfo"]["LastSeenDisplayName"]
destiny_id = member["destinyUserInfo"]["membershipId"]
clan_mems += destiny_name + "\n"
discord_id = None
discord_name = None
bungie_id = None
bungie_name = None
steam_id = None
try:
bungie_id = member["bungieNetUserInfo"]["membershipId"]
bungie_name = member["bungieNetUserInfo"]["displayName"]
creds = await self.get_bnet_user(ctx.author, bungie_id)
steam_id = ""
for cred in creds:
if "credentialAsString" in cred:
steam_id = cred["credentialAsString"]
except Exception:
pass
for user_id, data in saved_users.items():
if data["oauth"]["membership_id"] == bungie_id:
discord_user = ctx.guild.get_member(int(user_id))
if discord_user:
discord_name = str(discord_user)
discord_id = discord_user.id
user_info = [
discord_name,
f"'{discord_id}" if discord_id else None,
destiny_name,
f"'{destiny_id}" if destiny_id else None,
bungie_name,
f"'{bungie_id}" if bungie_id else None,
last_online,
f"'{steam_id}" if steam_id else None,
str(join_date),
]
rows.append(user_info)
if output_format == "csv":
outfile = StringIO()
employee_writer = csv.writer(
outfile, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL
)
employee_writer.writerow(headers)
for row in rows:
employee_writer.writerow(row)
outfile.seek(0)
file = discord.File(outfile, filename="clan_roster.csv")
await ctx.send(file=file)
elif output_format == "md":
data = tabulate(rows, headers=headers, tablefmt="github")
file = discord.File(BytesIO(data.encode()), filename="clan_roster.md")
await ctx.send(file=file)
else:
data = tabulate(rows, headers=headers, tablefmt="pretty")
for page in pagify(data, page_length=1990):
await ctx.send(box(page, lang="css"))
@destiny.command()
@commands.bot_has_permissions(embed_links=True)
async def user(self, ctx: commands.Context, user: discord.Member = None) -> None:
"""
Display a menu of your basic character's info
`[user]` A member on the server who has setup their account on this bot.
"""
async with ctx.typing():
if not await self.has_oauth(ctx, user):
msg = _(
"You need to authenticate your Bungie.net account before this command will work."
)
return await ctx.send(msg)
if not user:
user = ctx.author
try:
chars = await self.get_characters(user)
# await self.save(chars, "character.json")
except Destiny2APIError as e:
log.error(e, exc_info=True)
msg = _("I can't seem to find your Destiny profile.")
await ctx.send(msg)
return
embeds = []
currency_datas = await self.get_definition(
"DestinyInventoryItemLiteDefinition",
[v["itemHash"] for v in chars["profileCurrencies"]["data"]["items"]],
)
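            # Build an account-wide currency summary once; it is shown on every character's embed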
player_currency = ""
for item in chars["profileCurrencies"]["data"]["items"]:
quantity = item["quantity"]
name = currency_datas[str(item["itemHash"])]["displayProperties"]["name"]
player_currency += f"{name}: **{quantity}**\n"
for char_id, char in chars["characters"]["data"].items():
info = ""
race = (await self.get_definition("DestinyRaceDefinition", [char["raceHash"]]))[
str(char["raceHash"])
]
gender = (
await self.get_definition("DestinyGenderDefinition", [char["genderHash"]])
)[str(char["genderHash"])]
char_class = (
await self.get_definition("DestinyClassDefinition", [char["classHash"]])
)[str(char["classHash"])]
info += "{race} {gender} {char_class} ".format(
race=race["displayProperties"]["name"],
gender=gender["displayProperties"]["name"],
char_class=char_class["displayProperties"]["name"],
)
titles = ""
if "titleRecordHash" in char:
# TODO: Add fetch for Destiny.Definitions.Records.DestinyRecordDefinition
char_title = (
await self.get_definition(
"DestinyRecordDefinition", [char["titleRecordHash"]]
)
)[str(char["titleRecordHash"])]
title_info = "**{title_name}**\n{title_desc}\n"
try:
gilded = ""
if await self.check_gilded_title(chars, char_title):
gilded = _("Gilded ")
title_name = (
f"{gilded}"
+ char_title["titleInfo"]["titlesByGenderHash"][
str(char["genderHash"])
]
)
title_desc = char_title["displayProperties"]["description"]
titles += title_info.format(title_name=title_name, title_desc=title_desc)
except KeyError:
pass
embed = discord.Embed(title=info)
embed.set_author(name=user.display_name, icon_url=user.avatar_url)
if "emblemPath" in char:
embed.set_thumbnail(url=IMAGE_URL + char["emblemPath"])
if titles:
# embed.add_field(name=_("Titles"), value=titles)
embed.set_author(
name=f"{user.display_name} ({title_name})", icon_url=user.avatar_url
)
# log.debug(data)
stats_str = ""
time_played = humanize_timedelta(seconds=int(char["minutesPlayedTotal"]) * 60)
for stat_hash, value in char["stats"].items():
stat_info = (await self.get_definition("DestinyStatDefinition", [stat_hash]))[
str(stat_hash)
]
stat_name = stat_info["displayProperties"]["name"]
prog = "█" * int(value / 10)
empty = "░" * int((100 - value) / 10)
bar = f"{prog}{empty}"
if stat_hash == "1935470627":
bar = ""
stats_str += f"{stat_name}: **{value}** \n{bar}\n"
stats_str += _("Time Played Total: **{time}**").format(time=time_played)
embed.description = stats_str
embed = await self.get_char_colour(embed, char)
if titles:
embed.add_field(name=_("Titles"), value=titles)
embed.add_field(name=_("Current Currencies"), value=player_currency)
embeds.append(embed)
await BaseMenu(
source=BasePages(
pages=embeds,
),
delete_message_after=False,
clear_reactions_after=True,
timeout=60,
cog=self,
page_start=0,
).start(ctx=ctx)
@search.command()
@commands.bot_has_permissions(embed_links=True)
async def lore(self, ctx: commands.Context, entry: str = None) -> None:
"""
Find Destiny Lore
"""
try:
# the below is to prevent blocking reading the large
# ~130mb manifest files and save on API calls
task = functools.partial(self.get_entities, entity="DestinyLoreDefinition")
task = self.bot.loop.run_in_executor(None, task)
data: dict = await asyncio.wait_for(task, timeout=60)
except Exception:
return await ctx.send(_("The manifest needs to be downloaded for this to work."))
lore = []
for entry_hash, entries in data.items():
em = discord.Embed(title=entries["displayProperties"]["name"])
description = entries["displayProperties"]["description"]
if len(description) < 2048:
em.description = entries["displayProperties"]["description"]
elif len(description) > 2048 and len(description) < 6000:
em.description = description[:2048]
                new_desc = description[2048:]
parts = [new_desc[i : i + 1024] for i in range(0, len(new_desc), 1024)]
for i in parts:
em.add_field(name=_("Continued"), value=i)
if entries["displayProperties"]["hasIcon"]:
icon = entries["displayProperties"]["icon"]
em.set_thumbnail(url=f"{IMAGE_URL}{icon}")
lore.append(em)
if entry:
for t in lore:
if entry.lower() in str(t.title).lower():
print(t.title)
lore.insert(0, lore.pop(lore.index(t)))
await BaseMenu(
source=BasePages(
                pages=lore,
),
delete_message_after=False,
clear_reactions_after=True,
timeout=60,
cog=self,
page_start=0,
).start(ctx=ctx)
async def save(self, data: dict, loc: str = "sample.json"):
if self.bot.user.id not in DEV_BOTS:
return
base_path = Path(__file__).parent
path = base_path / loc
with path.open(encoding="utf-8", mode="w") as f:
json.dump(data, f, indent=4, sort_keys=False, separators=(",", " : "))
@destiny.command(aliases=["xûr"])
@commands.bot_has_permissions(embed_links=True)
async def xur(self, ctx: commands.Context, full: bool = False) -> None:
"""
Display a menu of Xûr's current wares
`[full=False]` Show perk definition on Xûr's current wares
"""
async with ctx.typing():
if not await self.has_oauth(ctx):
msg = _(
"You need to authenticate your Bungie.net account before this command will work."
)
return await ctx.send(msg)
try:
chars = await self.get_characters(ctx.author)
# await self.save(chars, "characters.json")
except Destiny2APIError:
# log.debug(e)
msg = _("I can't seem to find your Destiny profile.")
await ctx.send(msg)
return
for char_id, char in chars["characters"]["data"].items():
# log.debug(char)
try:
xur = await self.get_vendor(ctx.author, char_id, "2190858386")
xur_def = (
await self.get_definition("DestinyVendorDefinition", ["2190858386"])
)["2190858386"]
except Destiny2APIError:
log.error("I can't seem to see Xûr at the moment")
today = datetime.datetime.utcnow()
friday = today.replace(hour=17, minute=0, second=0) + datetime.timedelta(
(4 - today.weekday()) % 7
)
                    next_xur = humanize_timedelta(timedelta=(friday - today))
await ctx.send(
_("Xûr's not around, come back in {next_xur}.").format(next_xur=next_xur)
)
return
break
# items = [v["itemHash"] for k, v in xur["sales"]["data"].items()]
embeds: List[discord.Embed] = []
# data = await self.get_definition("DestinyInventoryItemDefinition", items)
embed = discord.Embed(
colour=discord.Colour.red(),
description=xur_def["displayProperties"]["description"],
)
embed.set_thumbnail(
url=IMAGE_URL + xur_def["displayProperties"]["largeTransparentIcon"]
)
embed.set_author(name="Xûr's current wares")
# location = xur_def["locations"][0]["destinationHash"]
# log.debug(await self.get_definition("DestinyDestinationDefinition", [location]))
for index, item_base in xur["sales"]["data"].items():
item = (
await self.get_definition(
"DestinyInventoryItemDefinition", [item_base["itemHash"]]
)
)[str(item_base["itemHash"])]
if not (item["equippable"]):
continue
perk_hashes = [
str(p["singleInitialItemHash"]) for p in item["sockets"]["socketEntries"]
]
perk_data = await self.get_definition(
"DestinyInventoryItemDefinition", perk_hashes
)
perks = ""
item_embed = discord.Embed(title=item["displayProperties"]["name"])
item_embed.set_thumbnail(url=IMAGE_URL + item["displayProperties"]["icon"])
item_embed.set_image(url=IMAGE_URL + item["screenshot"])
for perk_hash, perk in perk_data.items():
properties = perk["displayProperties"]
if "Common" in perk["itemTypeAndTierDisplayName"]:
continue
if (
properties["name"] == "Empty Mod Socket"
or properties["name"] == "Default Ornament"
or properties["name"] == "Change Energy Type"
or properties["name"] == "Empty Catalyst Socket"
):
continue
if "name" in properties and "description" in properties:
if not properties["name"]:
continue
# await self.save(perk, properties["name"] + ".json")
if full:
perks += "**{0}** - {1}\n".format(
properties["name"], properties["description"]
)
else:
perks += "- **{0}**\n".format(properties["name"])
stats_str = ""
if "armor" in item["equippingBlock"]["uniqueLabel"]:
total = 0
for stat_hash, stat_data in xur["itemComponents"]["stats"]["data"][index][
"stats"
].items():
stat_info = (
await self.get_definition("DestinyStatDefinition", [stat_hash])
)[str(stat_hash)]
stat_name = stat_info["displayProperties"]["name"]
stat_value = stat_data["value"]
prog = "█" * int(stat_value / 6)
empty = "░" * int((42 - stat_value) / 6)
bar = f"{prog}{empty}"
stats_str += f"{stat_name}: \n{bar} **{stat_value}**\n"
total += stat_value
stats_str += _("Total: **{total}**\n").format(total=total)
msg = (
item["itemTypeAndTierDisplayName"]
+ "\n"
+ stats_str
+ (item["displayProperties"]["description"] + "\n" if full else "")
+ perks
)
item_embed.description = msg
embed.insert_field_at(
0, name="**__" + item["displayProperties"]["name"] + "__**\n", value=msg
)
embeds.insert(0, item_embed)
embeds.insert(0, embed)
# await ctx.send(embed=embed)
# await ctx.tick()
await BaseMenu(
source=BasePages(
pages=embeds,
),
delete_message_after=False,
clear_reactions_after=True,
timeout=60,
cog=self,
page_start=0,
).start(ctx=ctx)
@destiny.command()
@commands.bot_has_permissions(embed_links=True)
async def eververse(
self, ctx: commands.Context, *, item_types: Optional[DestinyEververseItemType]
) -> None:
"""
Display items currently available on the Eververse in a menu
`[item_types]` can be one of `ghosts`, `ships`, `sparrows`,
`shaders`, `ornaments` and `finishers` to filter specific items.
"""
async with ctx.typing():
if not await self.has_oauth(ctx):
msg = _(
"You need to authenticate your Bungie.net account before this command will work."
)
return await ctx.send(msg)
if not item_types:
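                # No filter given: default to the broad set of Eververse item type/sub-type IDs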
item_types = {"item_types": [9, 19, 21, 22, 24, 29], "item_sub_types": [21, 20]}
try:
chars = await self.get_characters(ctx.author)
except Destiny2APIError:
# log.debug(e)
msg = _("I can't seem to find your Destiny profile.")
await ctx.send(msg)
return
embeds: List[discord.Embed] = []
eververse_sales = {}
for char_id, char in chars["characters"]["data"].items():
try:
ev = await self.get_vendor(ctx.author, char_id, "3361454721")
eververse_sales.update(ev["sales"]["data"])
except Destiny2APIError:
log.error("I can't seem to see the eververse at the moment", exc_info=True)
await ctx.send(_("I can't access the eververse at the moment."))
return
await self.save(eververse_sales, "eververse.json")
embeds = []
item_hashes = [i["itemHash"] for k, i in eververse_sales.items()]
item_defs = await self.get_definition("DestinyInventoryItemDefinition", item_hashes)
item_costs = [c["itemHash"] for k, i in eververse_sales.items() for c in i["costs"]]
item_cost_defs = await self.get_definition(
"DestinyInventoryItemDefinition", item_costs
)
for item_hash, vendor_item in eververse_sales.items():
item = item_defs[str(vendor_item["itemHash"])]
if (
item["itemType"] not in item_types["item_types"]
and item_types["item_types"] != []
):
# log.debug("ignoring item from type %s" % item["itemType"])
continue
if (
item["itemSubType"] not in item_types["item_sub_types"]
and item_types["item_sub_types"] != []
):
# log.debug("ignoring item from sub type %s" % item["itemSubType"])
continue
embed = discord.Embed()
embed.description = item["displayProperties"]["description"]
embed.title = item["itemTypeAndTierDisplayName"]
name = item["displayProperties"]["name"]
icon_url = IMAGE_URL + item["displayProperties"]["icon"]
embed.set_author(name=name, icon_url=icon_url)
embed.set_thumbnail(url=icon_url)
cost_str = ""
for costs in vendor_item["costs"]:
cost = costs["quantity"]
cost_name = item_cost_defs[str(costs["itemHash"])]["displayProperties"]["name"]
cost_str += f"{cost_name}: **{cost}**\n"
embed.add_field(name=_("Cost"), value=cost_str)
if "screenshot" in item:
embed.set_image(url=IMAGE_URL + item["screenshot"])
embeds.append(embed)
if embeds == []:
return await ctx.send(_("I can't access the eververse at the moment."))
# await ctx.tick()
await BaseMenu(
source=BasePages(
pages=embeds,
),
delete_message_after=False,
clear_reactions_after=True,
timeout=60,
cog=self,
page_start=0,
).start(ctx=ctx)
@destiny.command()
@commands.bot_has_permissions(embed_links=True)
async def spider(self, ctx: commands.Context) -> None:
"""
Display Spiders wares
"""
async with ctx.typing():
if not await self.has_oauth(ctx):
msg = _(
"You need to authenticate your Bungie.net account before this command will work."
)
return await ctx.send(msg)
try:
chars = await self.get_characters(ctx.author)
except Destiny2APIError:
# log.debug(e)
msg = _("I can't seem to find your Destiny profile.")
await ctx.send(msg)
return
for char_id, char in chars["characters"]["data"].items():
try:
spider = await self.get_vendor(ctx.author, char_id, "863940356")
spider_def = (
await self.get_definition("DestinyVendorDefinition", ["863940356"])
)["863940356"]
except Destiny2APIError:
log.error("I can't seem to see the Spider at the moment", exc_info=True)
await ctx.send(_("I can't access the Spider at the moment."))
return
break
# await self.save(spider, "spider.json")
currency_datas = await self.get_definition(
"DestinyInventoryItemLiteDefinition",
[v["itemHash"] for v in chars["profileCurrencies"]["data"]["items"]],
)
embed = discord.Embed(description=spider_def["displayProperties"]["description"])
embed.set_thumbnail(
url=IMAGE_URL + spider_def["displayProperties"]["largeTransparentIcon"]
)
embed.set_author(
name=spider_def["displayProperties"]["name"]
+ ", "
+ spider_def["displayProperties"]["subtitle"]
)
item_hashes = [i["itemHash"] for k, i in spider["sales"]["data"].items()]
item_defs = await self.get_definition(
"DestinyInventoryItemLiteDefinition", item_hashes
)
item_costs = [
c["itemHash"] for k, i in spider["sales"]["data"].items() for c in i["costs"]
]
item_cost_defs = await self.get_definition(
"DestinyInventoryItemLiteDefinition", item_costs
)
for key, data in spider["sales"]["data"].items():
item_hash = data["itemHash"]
item = item_defs[str(item_hash)]
if item["itemType"] in [0, 26]:
continue
try:
costs = data["costs"][0]
cost = item_cost_defs[str(costs["itemHash"])]
cost_str = str(costs["quantity"]) + " " + cost["displayProperties"]["name"]
except IndexError:
cost_str = "None"
embed.add_field(name=item["displayProperties"]["name"], value=cost_str)
await asyncio.sleep(0)
player_currency = ""
for item in chars["profileCurrencies"]["data"]["items"]:
quantity = item["quantity"]
name = currency_datas[str(item["itemHash"])]["displayProperties"]["name"]
player_currency += f"{name}: **{quantity}**\n"
embed.add_field(name=_("Current Currencies"), value=player_currency)
await ctx.send(embed=embed)
@destiny.command(aliases=["banshee-44"])
@commands.bot_has_permissions(embed_links=True)
async def banshee(self, ctx: commands.Context) -> None:
"""
Display Banshee-44's wares
"""
async with ctx.typing():
if not await self.has_oauth(ctx):
msg = _(
"You need to authenticate your Bungie.net account before this command will work."
)
return await ctx.send(msg)
try:
chars = await self.get_characters(ctx.author)
except Destiny2APIError:
# log.debug(e)
msg = _("I can't seem to find your Destiny profile.")
await ctx.send(msg)
return
for char_id, char in chars["characters"]["data"].items():
try:
banshee = await self.get_vendor(ctx.author, char_id, "672118013")
banshee_def = (
await self.get_definition("DestinyVendorDefinition", ["672118013"])
)["672118013"]
await self.save(banshee, "banshee.json")
except Destiny2APIError:
log.error(
"I can't seem to see the Banshee-44's wares at the moment", exc_info=True
)
await ctx.send(_("I can't access the Banshee-44 at the moment."))
return
break
# await self.save(spider, "spider.json")
embed = discord.Embed(description=banshee_def["displayProperties"]["description"])
embed.set_thumbnail(
url=IMAGE_URL + banshee_def["displayProperties"]["largeTransparentIcon"]
)
embed.set_author(
name=banshee_def["displayProperties"]["name"]
+ ", "
+ banshee_def["displayProperties"]["subtitle"]
)
item_hashes = [i["itemHash"] for k, i in banshee["sales"]["data"].items()]
item_defs = await self.get_definition(
"DestinyInventoryItemLiteDefinition", item_hashes
)
item_costs = [
c["itemHash"] for k, i in banshee["sales"]["data"].items() for c in i["costs"]
]
item_cost_defs = await self.get_definition(
"DestinyInventoryItemLiteDefinition", item_costs
)
for key, data in banshee["sales"]["data"].items():
item_hash = data["itemHash"]
item = item_defs[str(item_hash)]
if item["itemType"] in [0]:
continue
try:
costs = data["costs"][0]
cost = item_cost_defs[str(costs["itemHash"])]
cost_str = str(costs["quantity"]) + " " + cost["displayProperties"]["name"]
except IndexError:
cost_str = "None"
embed.add_field(name=item["displayProperties"]["name"], value=cost_str)
await asyncio.sleep(0)
await ctx.send(embed=embed)
@destiny.command()
@commands.bot_has_permissions(embed_links=True, add_reactions=True)
async def loadout(
self, ctx: commands.Context, full: Optional[bool] = False, user: discord.Member = None
) -> None:
"""
Display a menu of each character's equipped weapons and their info
`[full=False]` Display full information about weapons equipped.
`[user]` A member on the server who has setup their account on this bot.
"""
async with ctx.typing():
if not await self.has_oauth(ctx, user):
msg = _(
"You need to authenticate your Bungie.net account before this command will work."
)
return await ctx.send(msg)
if not user:
user = ctx.author
try:
chars = await self.get_characters(user)
except Destiny2APIError:
# log.debug(e)
msg = _("I can't seem to find your Destiny profile.")
await ctx.send(msg)
return
embeds = []
for char_id, char in chars["characters"]["data"].items():
info = ""
race = (await self.get_definition("DestinyRaceDefinition", [char["raceHash"]]))[
str(char["raceHash"])
]
gender = (
await self.get_definition("DestinyGenderDefinition", [char["genderHash"]])
)[str(char["genderHash"])]
char_class = (
await self.get_definition("DestinyClassDefinition", [char["classHash"]])
)[str(char["classHash"])]
info += "{race} {gender} {char_class} ".format(
race=race["displayProperties"]["name"],
gender=gender["displayProperties"]["name"],
char_class=char_class["displayProperties"]["name"],
)
titles = ""
if "titleRecordHash" in char:
# TODO: Add fetch for Destiny.Definitions.Records.DestinyRecordDefinition
char_title = (
await self.get_definition(
"DestinyRecordDefinition", [char["titleRecordHash"]]
)
)[str(char["titleRecordHash"])]
title_info = "**{title_name}**\n{title_desc}\n"
try:
gilded = ""
if await self.check_gilded_title(chars, char_title):
gilded = _("Gilded ")
title_name = (
f"{gilded}"
+ char_title["titleInfo"]["titlesByGenderHash"][
str(char["genderHash"])
]
)
title_desc = char_title["displayProperties"]["description"]
titles += title_info.format(title_name=title_name, title_desc=title_desc)
except KeyError:
pass
embed = discord.Embed(title=info)
embed.set_author(name=user.display_name, icon_url=user.avatar_url)
if "emblemPath" in char:
embed.set_thumbnail(url=IMAGE_URL + char["emblemPath"])
if titles:
# embed.add_field(name=_("Titles"), value=titles)
embed.set_author(
name=f"{user.display_name} ({title_name})", icon_url=user.avatar_url
)
char_items = chars["characterEquipment"]["data"][char_id]["items"]
item_list = [i["itemHash"] for i in char_items]
# log.debug(item_list)
items = await self.get_definition("DestinyInventoryItemDefinition", item_list)
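                # Cross-reference each equipped item instance with its static definition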
# log.debug(items)
for item_hash, data in items.items():
# log.debug(data)
for item in char_items:
# log.debug(item)
if data["hash"] == item["itemHash"]:
instance_id = item["itemInstanceId"]
item_instance = chars["itemComponents"]["instances"]["data"][instance_id]
if not item_instance["isEquipped"]:
continue
if not (data["equippable"] and data["itemType"] == 3):
continue
name = data["displayProperties"]["name"]
desc = data["displayProperties"]["description"]
item_type = data["itemTypeAndTierDisplayName"]
try:
light = item_instance["primaryStat"]["value"]
except KeyError:
light = ""
perk_list = chars["itemComponents"]["perks"]["data"][instance_id]["perks"]
perk_hashes = [p["perkHash"] for p in perk_list]
perk_data = await self.get_definition(
"DestinySandboxPerkDefinition", perk_hashes
)
perks = ""
for perk_hash, perk in perk_data.items():
properties = perk["displayProperties"]
if "name" in properties and "description" in properties:
if full:
perks += "**{0}** - {1}\n".format(
properties["name"], properties["description"]
)
else:
perks += "- **{0}**\n".format(properties["name"])
value = f"**{light}** {item_type}\n{perks}"
embed.add_field(name=name, value=value, inline=True)
# log.debug(data)
stats_str = ""
for stat_hash, value in char["stats"].items():
stat_info = (await self.get_definition("DestinyStatDefinition", [stat_hash]))[
str(stat_hash)
]
stat_name = stat_info["displayProperties"]["name"]
prog = "█" * int(value / 10)
empty = "░" * int((100 - value) / 10)
bar = f"{prog}{empty}"
if stat_hash == "1935470627":
bar = ""
stats_str += f"{stat_name}: **{value}** \n{bar}\n"
embed.description = stats_str
embed = await self.get_char_colour(embed, char)
embeds.append(embed)
await BaseMenu(
source=BasePages(
pages=embeds,
),
delete_message_after=False,
clear_reactions_after=True,
timeout=60,
cog=self,
page_start=0,
).start(ctx=ctx)
@destiny.command()
@commands.bot_has_permissions(embed_links=True, add_reactions=True)
async def gambit(self, ctx: commands.Context) -> None:
"""
Display a menu of each characters gambit stats
"""
await ctx.invoke(self.stats, "allPvECompetitive")
@destiny.command()
@commands.bot_has_permissions(embed_links=True, add_reactions=True)
async def pvp(self, ctx: commands.Context) -> None:
"""
Display a menu of each character's pvp stats
"""
await ctx.invoke(self.stats, "allPvP")
@destiny.command(aliases=["raids"])
@commands.bot_has_permissions(embed_links=True, add_reactions=True)
async def raid(self, ctx: commands.Context) -> None:
"""
Display a menu for each character's RAID stats
"""
await ctx.invoke(self.stats, "raid")
@destiny.command(aliases=["qp"])
@commands.bot_has_permissions(embed_links=True, add_reactions=True)
async def quickplay(self, ctx: commands.Context) -> None:
"""
Display a menu of past quickplay matches
"""
await ctx.invoke(self.history, 70)
@destiny.command()
@commands.bot_has_permissions(embed_links=True, add_reactions=True)
async def history(self, ctx: commands.Context, activity: DestinyActivity) -> None:
"""
Display a menu of each character's last 5 activities
`<activity>` The activity type to display stats on available types include:
all, story, strike, raid, allpvp, patrol, allpve, control, clash,
crimsondoubles, nightfall, heroicnightfall, allstrikes, ironbanner, allmayhem,
supremacy, privatematchesall, survival, countdown, trialsofthenine, social,
trialscountdown, trialssurvival, ironbannercontrol, ironbannerclash,
ironbannersupremacy, scorednightfall, scoredheroicnightfall, rumble, alldoubles,
doubles, privatematchesclash, privatematchescontrol, privatematchessupremacy,
privatematchescountdown, privatematchessurvival, privatematchesmayhem,
privatematchesrumble, heroicadventure, showdown, lockdown, scorched,
scorchedteam, gambit, allpvecompetitive, breakthrough, blackarmoryrun,
salvage, ironbannersalvage, pvpcompetitive, pvpquickplay, clashquickplay,
clashcompetitive, controlquickplay, controlcompetitive, gambitprime,
reckoning, menagerie, vexoffensive, nightmarehunt, elimination, momentum,
dungeon, sundial, trialsofosiris
"""
async with ctx.typing():
if not await self.has_oauth(ctx):
msg = _(
"You need to authenticate your Bungie.net account before this command will work."
)
return await ctx.send(msg)
user = ctx.author
try:
chars = await self.get_characters(user)
except Destiny2APIError:
# log.debug(e)
msg = _("I can't seem to find your Destiny profile.")
await ctx.send(msg)
return
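            # Human-readable labels for the per-activity stat fields displayed below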
RAID = {
"assists": _("Assists"),
"kills": _("Kills"),
"deaths": _("Deaths"),
"opponentsDefeated": _("Opponents Defeated"),
"efficiency": _("Efficiency"),
"killsDeathsRatio": _("KDR"),
"killsDeathsAssists": _("KDA"),
"score": _("Score"),
"activityDurationSeconds": _("Duration"),
"playerCount": _("Player Count"),
"teamScore": _("Team Score"),
"completed": _("Completed"),
}
embeds = []
for char_id, char in chars["characters"]["data"].items():
# log.debug(char)
char_info = ""
race = (await self.get_definition("DestinyRaceDefinition", [char["raceHash"]]))[
str(char["raceHash"])
]
gender = (
await self.get_definition("DestinyGenderDefinition", [char["genderHash"]])
)[str(char["genderHash"])]
log.debug(gender)
char_class = (
await self.get_definition("DestinyClassDefinition", [char["classHash"]])
)[str(char["classHash"])]
char_info += "{user} - {race} {gender} {char_class} ".format(
user=user.display_name,
race=race["displayProperties"]["name"],
gender=gender["displayProperties"]["name"],
char_class=char_class["displayProperties"]["name"],
)
try:
data = await self.get_activity_history(user, char_id, activity)
except Exception:
log.error(
_(
"Something went wrong I couldn't get info on character {char_id} for activity {activity}"
).format(char_id=char_id, activity=activity)
)
continue
if not data:
continue
for activities in data["activities"]:
activity_hash = str(activities["activityDetails"]["directorActivityHash"])
activity_data = (
await self.get_definition("DestinyActivityDefinition", [activity_hash])
)[str(activity_hash)]
embed = discord.Embed(
title=activity_data["displayProperties"]["name"],
description=activity_data["displayProperties"]["description"],
)
date = datetime.datetime.strptime(activities["period"], "%Y-%m-%dT%H:%M:%SZ")
embed.timestamp = date
if activity_data["displayProperties"]["hasIcon"]:
embed.set_thumbnail(
url=IMAGE_URL + activity_data["displayProperties"]["icon"]
)
elif (
activity_data["pgcrImage"] != "/img/misc/missing_icon_d2.png"
and "emblemPath" in char
):
embed.set_thumbnail(url=IMAGE_URL + char["emblemPath"])
embed.set_author(name=char_info, icon_url=user.avatar_url)
for attr, name in RAID.items():
if activities["values"][attr]["basic"]["value"] < 0:
continue
embed.add_field(
name=name,
value=str(activities["values"][attr]["basic"]["displayValue"]),
)
embed = await self.get_char_colour(embed, char)
embeds.append(embed)
await BaseMenu(
source=BasePages(
pages=embeds,
),
delete_message_after=False,
clear_reactions_after=True,
timeout=60,
cog=self,
page_start=0,
).start(ctx=ctx)
@staticmethod
async def get_extra_attrs(stat_type: str, attrs: dict) -> dict:
"""Helper function to receive the total attributes we care about"""
EXTRA_ATTRS = {}
if stat_type == "allPvECompetitive":
EXTRA_ATTRS = {
"winLossRatio": _("Win Loss Ratio"),
"invasions": _("Invasions"),
"invasionKills": _("Invasion Kills"),
"invasionDeaths": _("Invasion Deaths"),
"invaderDeaths": _("Invader Deaths"),
"invaderKills": _("Invader Kills"),
"primevalKills": _("Primeval Kills"),
"blockerKills": _("Blocker Kills"),
"mobKills": _("Mob Kills"),
"highValueKills": _("High Value Targets Killed"),
"motesPickedUp": _("Motes Picked Up"),
"motesDeposited": _("Motes Deposited"),
"motesDenied": _("Motes Denied"),
"motesLost": _("Motes Lost"),
}
if stat_type == "allPvP":
EXTRA_ATTRS = {"winLossRatio": _("Win Loss Ratio")}
for k, v in EXTRA_ATTRS.items():
attrs[k] = v
return attrs
async def build_character_stats(
self, user: discord.Member, chars: dict, stat_type: str
) -> List[discord.Embed]:
embeds: List[discord.Embed] = []
for char_id, char in chars["characters"]["data"].items():
# log.debug(char)
try:
data = await self.get_historical_stats(user, char_id, 0)
except Exception:
log.error(
_("Something went wrong I couldn't get info on character {char_id}").format(
char_id=char_id
)
)
continue
if not data:
continue
try:
if stat_type != "allPvECompetitive":
embed = await self.build_stat_embed_char_basic(user, char, data, stat_type)
embeds.append(embed)
else:
data = data[stat_type]["allTime"]
embed = await self.build_stat_embed_char_gambit(user, char, data, stat_type)
embeds.append(embed)
except Exception:
log.error(
f"User {user.id} had an issue generating stats for character {char_id}",
exc_info=True,
)
continue
return embeds
async def build_stat_embed_char_basic(
self, user: discord.Member, char: dict, data: dict, stat_type: str
) -> discord.Embed:
char_info = ""
race = (await self.get_definition("DestinyRaceDefinition", [char["raceHash"]]))[
str(char["raceHash"])
]
gender = (await self.get_definition("DestinyGenderDefinition", [char["genderHash"]]))[
str(char["genderHash"])
]
char_class = (await self.get_definition("DestinyClassDefinition", [char["classHash"]]))[
str(char["classHash"])
]
char_info += "{user} - {race} {gender} {char_class} ".format(
user=user.display_name,
race=race["displayProperties"]["name"],
gender=gender["displayProperties"]["name"],
char_class=char_class["displayProperties"]["name"],
)
ATTRS = {
"opponentsDefeated": _("Opponents Defeated"),
"efficiency": _("Efficiency"),
"bestSingleGameKills": _("Best Single Game Kills"),
"bestSingleGameScore": _("Best Single Game Score"),
"precisionKills": _("Precision Kills"),
"longestKillSpree": _("Longest Killing Spree"),
"longestSingleLife": _("Longest Single Life"),
"totalActivityDurationSeconds": _("Total time playing"),
"averageLifespan": _("Average Life Span"),
"weaponBestType": _("Best Weapon Type"),
}
embed = discord.Embed(title=stat_type.title())
embed.set_author(name=char_info, icon_url=user.avatar_url)
kills = data[stat_type]["allTime"]["kills"]["basic"]["displayValue"]
deaths = data[stat_type]["allTime"]["deaths"]["basic"]["displayValue"]
assists = data[stat_type]["allTime"]["assists"]["basic"]["displayValue"]
kda = f"{kills} | {deaths} | {assists}"
embed.add_field(name=_("Kills | Deaths | Assists"), value=kda)
if "emblemPath" in char:
embed.set_thumbnail(url=IMAGE_URL + char["emblemPath"])
for stat, values in data[stat_type]["allTime"].items():
if values["basic"]["value"] < 0 or stat not in ATTRS:
continue
embed.add_field(name=ATTRS[stat], value=str(values["basic"]["displayValue"]))
if "killsDeathsRatio" in data[stat_type] and "killsDeathsAssists" in data[stat_type]:
kdr = data[stat_type]["killsDeathsRatio"]
kda = data[stat_type]["killsDeathsAssists"]
if kdr or kda:
embed.add_field(name=_("KDR/KDA"), value=f"{kdr}/{kda}")
if (
"resurrectionsPerformed" in data[stat_type]
and "resurrectionsReceived" in data[stat_type]
):
res = data[stat_type]["resurrectionsPerformed"]
resur = data[stat_type]["resurrectionsReceived"]
if res or resur:
embed.add_field(name=_("Resurrections/Received"), value=f"{res}/{resur}")
return await self.get_char_colour(embed, char)
async def build_stat_embed_char_gambit(
self, user: discord.Member, char: dict, data: dict, stat_type: str
) -> discord.Embed:
char_info = ""
race = (await self.get_definition("DestinyRaceDefinition", [char["raceHash"]]))[
str(char["raceHash"])
]
gender = (await self.get_definition("DestinyGenderDefinition", [char["genderHash"]]))[
str(char["genderHash"])
]
char_class = (await self.get_definition("DestinyClassDefinition", [char["classHash"]]))[
str(char["classHash"])
]
char_info += "{user} - {race} {gender} {char_class} ".format(
user=user.display_name,
race=race["displayProperties"]["name"],
gender=gender["displayProperties"]["name"],
char_class=char_class["displayProperties"]["name"],
)
ATTRS = {
"opponentsDefeated": _("Opponents Defeated"),
"efficiency": _("Efficiency"),
"bestSingleGameKills": _("Best Single Game Kills"),
"bestSingleGameScore": _("Best Single Game Score"),
"precisionKills": _("Precision Kills"),
"longestKillSpree": _("Longest Killing Spree"),
"longestSingleLife": _("Longest Single Life"),
"totalActivityDurationSeconds": _("Total time playing"),
"averageLifespan": _("Average Life Span"),
"weaponBestType": _("Best Weapon Type"),
"winLossRatio": _("Win Loss Ratio"),
}
embed = discord.Embed(title="Gambit")
embed.set_author(name=char_info, icon_url=user.avatar_url)
kills = data["kills"]["basic"]["displayValue"]
deaths = data["deaths"]["basic"]["displayValue"]
assists = data["assists"]["basic"]["displayValue"]
kda = f"{kills} | {deaths} | {assists}"
embed.add_field(name=_("Kills | Deaths | Assists"), value=kda)
small_blocker = data["smallBlockersSent"]["basic"]["displayValue"]
med_blocker = data["mediumBlockersSent"]["basic"]["displayValue"]
large_blocker = data["largeBlockersSent"]["basic"]["displayValue"]
blockers = f"S {small_blocker}, M {med_blocker}, L {large_blocker}"
embed.add_field(name=_("Blockers"), value=blockers)
invasions = _("Invasions: {invasions}").format(
invasions=data["invasions"]["basic"]["displayValue"]
)
invasion_kills = _("Kills: {kills}\nDeaths: {deaths}").format(
kills=data["invasionKills"]["basic"]["displayValue"],
deaths=data["invasionDeaths"]["basic"]["displayValue"],
)
embed.add_field(name=invasions, value=invasion_kills)
invaders = _("Killed: {killed}\nKilled By: {by}").format(
killed=data["invaderKills"]["basic"]["displayValue"],
by=data["invaderDeaths"]["basic"]["displayValue"],
)
embed.add_field(name=_("Invaders"), value=invaders)
motes_dep = data["motesDeposited"]["basic"]["value"]
try:
lost = 1 - (motes_dep / data["motesPickedUp"]["basic"]["value"])
motes_lost = "{:.2%}".format(lost)
except ZeroDivisionError:
motes_lost = "0%"
motes = _("{motes:,} ({lost} Lost)").format(motes=motes_dep, lost=motes_lost)
embed.add_field(name=_("Motes Deposited"), value=motes)
motes_denied = data["motesDenied"]["basic"]["value"]
embed.add_field(name=_("Motes Denied"), value="{:,}".format(motes_denied))
mob_kills = data["mobKills"]["basic"]["value"]
primeval_kills = data["primevalKills"]["basic"]["value"]
high_kills = data["highValueKills"]["basic"]["value"]
kills_msg = _("Primevals: {prime:,}\nHigh Value Targets: {high:,}\nMobs: {mobs:,}").format(
prime=primeval_kills, high=high_kills, mobs=mob_kills
)
embed.add_field(name=_("Kill Stats"), value=kills_msg)
if "killsDeathsRatio" in data and "killsDeathsAssists" in data:
kdr = data["killsDeathsRatio"]["basic"]["displayValue"]
kda = data["killsDeathsAssists"]["basic"]["displayValue"]
if kdr or kda:
embed.add_field(name=_("KDR/KDA"), value=f"{kdr}/{kda}")
if "resurrectionsPerformed" in data and "resurrectionsReceived" in data:
res = data["resurrectionsPerformed"]["basic"]["displayValue"]
resur = data["resurrectionsReceived"]["basic"]["displayValue"]
if res or resur:
embed.add_field(name=_("Resurrections/Received"), value=f"{res}/{resur}")
if "emblemPath" in char:
embed.set_thumbnail(url=IMAGE_URL + char["emblemPath"])
for stat, values in data.items():
if values["basic"]["value"] < 0 or stat not in ATTRS:
continue
embed.add_field(name=ATTRS[stat], value=str(values["basic"]["displayValue"]))
return await self.get_char_colour(embed, char)
@destiny.command()
@commands.bot_has_permissions(embed_links=True, add_reactions=True)
async def stats(self, ctx: commands.Context, stat_type: StatsPage, all: bool = True) -> None:
"""
Display each character's stats for a specific activity
        `<stat_type>` The type of stats to display, available options are:
        `raid`, `pvp`, `pve`, `patrol`, `story`, `gambit`, and `strikes`
"""
async with ctx.typing():
if not await self.has_oauth(ctx):
msg = _(
"You need to authenticate your Bungie.net account before this command will work."
)
return await ctx.send(msg)
user = ctx.author
try:
chars = await self.get_characters(user)
except Destiny2APIError:
# log.debug(e)
msg = _("I can't seem to find your Destiny profile.")
await ctx.send(msg)
return
# base stats should be available for all stat types
embeds = await self.build_character_stats(user, chars, stat_type)
if not embeds:
return await ctx.send(
_("No stats could be found for that activity and character.")
)
await BaseMenu(
source=BasePages(
pages=embeds,
),
delete_message_after=False,
clear_reactions_after=True,
timeout=60,
cog=self,
page_start=0,
).start(ctx=ctx)
@destiny.command()
@checks.is_owner()
@commands.bot_has_permissions(add_reactions=True)
async def manifest(self, ctx: commands.Context, d1: bool = False) -> None:
"""
See the current manifest version and optionally re-download it
"""
if not d1:
try:
headers = await self.build_headers()
except Exception:
return await ctx.send(
_(
"You need to set your API authentication tokens with `[p]destiny token` first."
)
)
manifest_data = await self.request_url(
f"{BASE_URL}/Destiny2/Manifest/", headers=headers
)
version = await self.config.manifest_version()
if not version:
version = _("Not Downloaded")
msg = _("Current manifest version is {version}.").format(version=version)
redownload = _("re-download")
if manifest_data["version"] != version:
msg += _("\n\nThere is an update available to version {version}").format(
version=manifest_data["version"]
)
redownload = _("download")
await ctx.send(msg)
await ctx.trigger_typing()
msg = await ctx.send(
_("Would you like to {redownload} the manifest?").format(redownload=redownload)
)
start_adding_reactions(msg, ReactionPredicate.YES_OR_NO_EMOJIS)
pred = ReactionPredicate.yes_or_no(msg, ctx.author)
try:
react, user = await self.bot.wait_for("reaction_add", check=pred, timeout=15)
except asyncio.TimeoutError:
                await msg.delete()
                return
if pred.result:
try:
version = await self.get_manifest()
except Exception:
log.exception("Error getting destiny manifest")
return await ctx.send(_("There was an issue downloading the manifest."))
await msg.delete()
await ctx.send(f"Manifest {version} was downloaded.")
else:
await msg.delete()
else:
try:
version = await self.get_manifest(d1)
except Exception:
log.exception("Error getting D1 manifest")
return await ctx.send(_("There was an issue downloading the manifest."))
@destiny.command()
@checks.is_owner()
async def token(
self, ctx: commands.Context, api_key: str, client_id: str, client_secret: str
) -> None:
"""
Set the API tokens for Destiny 2's API
Required information is found at:
https://www.bungie.net/en/Application
select **Create New App**
Choose **Confidential** OAuth Client type
Select the scope you would like the bot to have access to
Set the redirect URL to https://localhost/
NOTE: It is strongly recommended to use this command in DM
"""
await self.config.api_token.api_key.set(api_key)
await self.config.api_token.client_id.set(client_id)
await self.config.api_token.client_secret.set(client_secret)
if ctx.channel.permissions_for(ctx.me).manage_messages:
await ctx.message.delete()
await ctx.send("Destiny 2 API credentials set!")
|
py | 1a2f0ebacc99295885853ad29a470b3803afd2c8 | #!/usr/bin/env python3
# @generated AUTOGENERATED file. Do not Change!
from dataclasses import dataclass
from datetime import datetime
from gql.gql.datetime_utils import DATETIME_FIELD
from gql.gql.graphql_client import GraphqlClient
from functools import partial
from numbers import Number
from typing import Any, Callable, List, Mapping, Optional
from dataclasses_json import DataClassJsonMixin
from .equipment_port_type_fragment import EquipmentPortTypeFragment, QUERY as EquipmentPortTypeFragmentQuery
from .edit_equipment_port_type_input import EditEquipmentPortTypeInput
QUERY: List[str] = EquipmentPortTypeFragmentQuery + ["""
mutation EditEquipmentPortTypeMutation($input: EditEquipmentPortTypeInput!) {
editEquipmentPortType(input: $input) {
...EquipmentPortTypeFragment
}
}
"""]
@dataclass
class EditEquipmentPortTypeMutation(DataClassJsonMixin):
@dataclass
class EditEquipmentPortTypeMutationData(DataClassJsonMixin):
@dataclass
class EquipmentPortType(EquipmentPortTypeFragment):
pass
editEquipmentPortType: EquipmentPortType
data: EditEquipmentPortTypeMutationData
@classmethod
# fmt: off
def execute(cls, client: GraphqlClient, input: EditEquipmentPortTypeInput) -> EditEquipmentPortTypeMutationData:
# fmt: off
variables = {"input": input}
response_text = client.call(''.join(set(QUERY)), variables=variables)
return cls.from_json(response_text).data
|
py | 1a2f0f61afd76a87fe73e5a154f969f50391dd65 | from api.api_error import APIError
from api.api_message import APIMessage
from api.json_connector import JSONConnector
from api.api_config import APIConfig
from api.ptp_connector import PTPConnector
class BotMethods:
@staticmethod
def start_bot(req):
"""
Starts a PTP Bot object.
        :param req: request body containing 'bot_name' and 'action_name'.
        :return: APIMessage on success, APIError otherwise.
"""
keys = req.keys()
if 'bot_name' not in keys or not req['bot_name']:
return APIError.create(message='No bot name in the request body.', code=400)
elif 'action_name' not in keys or not req['action_name']:
return APIError.create(message='No action name in the request body.', code=400)
bots = JSONConnector.get_json_file_content(
directory=APIConfig.json_save_path,
name=APIConfig.json_bots_file_name
)
bot_actions = JSONConnector.get_json_file_content(
directory=APIConfig.json_save_path,
name=APIConfig.json_bot_actions_file_name
)
found_action = {}
found_bot = {}
for item in bot_actions['bot_actions']:
print(req, item)
if req['bot_name'] == item['bot_name'] and req['action_name'] == item['action_name']:
found_action = item
for item in bots['bots']:
if req['bot_name'] == item['bot_name']:
found_bot = item
if found_action and found_bot:
access_info = {
'access_token': found_bot['access_token'],
'access_token_secret': found_bot['access_token_secret'],
'consumer_key': found_bot['consumer_key'],
'consumer_secret': found_bot['consumer_secret']
}
PTPConnector.start_bot(access_info, found_action['method'], {'actions': found_action['actions']})
return APIMessage.create(message='Bot successfully started.', code=200)
|
py | 1a2f1067e877603262ee3997806d4aac8f9c8561 | from fastapi import APIRouter, BackgroundTasks, Depends, File, UploadFile
from typing import List
from sqlalchemy.orm import Session
from api.utils.auth import get_db
from api.auth.auth import auth_check
from api.db.crud import templates as crud
from api.db.crud import settings as scrud
from api.db.schemas import templates as schemas
from api.db.models import containers
from api.db.database import engine
from api.actions import resources
from api.actions.apps import _update_self, check_self_update
from api.settings import Settings
from fastapi_jwt_auth import AuthJWT
containers.Base.metadata.create_all(bind=engine)
settings = Settings()
router = APIRouter()
@router.get(
"/variables",
response_model=List[schemas.TemplateVariables],
operation_id="authorize",
)
def read_template_variables(
db: Session = Depends(get_db), Authorize: AuthJWT = Depends()
):
auth_check(Authorize)
return crud.read_template_variables(db=db)
@router.post(
"/variables",
response_model=List[schemas.TemplateVariables],
)
def set_template_variables(
new_variables: List[schemas.TemplateVariables],
db: Session = Depends(get_db),
Authorize: AuthJWT = Depends(),
):
auth_check(Authorize)
return crud.set_template_variables(new_variables=new_variables, db=db)
@router.get(
"/export",
response_model=schemas.Import_Export,
)
def export_settings(db: Session = Depends(get_db), Authorize: AuthJWT = Depends()):
auth_check(Authorize)
return scrud.export_settings(db=db)
@router.post(
"/export",
)
def import_settings(
db: Session = Depends(get_db),
upload: UploadFile = File(...),
Authorize: AuthJWT = Depends(),
):
auth_check(Authorize)
return scrud.import_settings(db=db, upload=upload)
@router.get(
"/prune/{resource}",
)
def prune_resources(resource: str, Authorize: AuthJWT = Depends()):
auth_check(Authorize)
return resources.prune_resources(resource)
@router.get(
"/update",
)
def update_self(background_tasks: BackgroundTasks, Authorize: AuthJWT = Depends()):
auth_check(Authorize)
return _update_self(background_tasks)
@router.get(
"/check/update",
)
def _check_self_update(Authorize: AuthJWT = Depends()):
auth_check(Authorize)
return check_self_update()
|
py | 1a2f108938ac1fd13e7ad508326814a45b6c54b5 | #! /usr/bin/env python
# -*- coding: UTF-8 -*-
from base.basic_config_if import BasicConfig
from commons.configurator import Configurator
from cpp.incl_deps.include_deps_if import FileIncludeDepsSupply
from cpp.incl_deps.include_rule_checker_if import IncludeRulesFactory
from cpp.incl_deps.include_rule_checker_util import (IncludeRuleCheckerProcessor,
IncludeRuleCheckerOutputter)
import csv
import logging
import sys
config_basic = BasicConfig()
config_checker = IncludeRulesFactory()
config_file_include_deps_supply = FileIncludeDepsSupply()
def main():
    logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
Configurator().default()
if len(sys.argv) > 1:
file_links = csv.reader(open(sys.argv[1]), delimiter=',')
else:
file_links = config_file_include_deps_supply.get_file_include_deps()
illegal_links, total_count, rule_violations = IncludeRuleCheckerProcessor().check_links(file_links, config_checker.get_include_rules())
IncludeRuleCheckerOutputter().output(sys.stdout, illegal_links, total_count, rule_violations)
if __name__ == "__main__":
main()
|
py | 1a2f1094bfd1df43995d93f1d87fef39c68f3a48 | """
This is how to enable `language()` for one Autocomplete::
import autocomplete_light.shortcuts as al
from al.contrib.hvad import AutocompleteModelBase
al.register(YourModel, AutocompleteModelBase)
Or, enable it globally by updating your `autodiscover()` call like this::
import autocomplete_light.shortcuts as al
from al.contrib.hvad import AutocompleteModelBase
al.registry.autocomplete_model_base = AutocompleteModelBase
al.autodiscover()
In that case, you can just register as usual::
al.register(YourTranslatableModel)
"""
import autocomplete_light.shortcuts as al
class AutocompleteModel(al.AutocompleteModel):
""" Ensure that `.language()` is called. """
def __init__(self, request=None, values=None):
"""
Overridden init to call .language(). Note: this will replace the
base `choices`.
"""
if getattr(self.choices.model.objects, 'language', False):
self.choices = self.choices.model.objects.language()
super(AutocompleteModel, self).__init__(request, values)
class AutocompleteModelBase(AutocompleteModel, al.AutocompleteBase):
""" Drop-in replacement for AutocompleteModelBase """
pass
class AutocompleteModelTemplate(AutocompleteModel, al.AutocompleteTemplate):
""" Drop-in replacement for AutocompleteModelTemplate """
pass
|
py | 1a2f10ba8d33361465de1e4248a22b9c1099a771 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define statistical functions of a tensor
import numpy as np
from ..fluid.framework import Variable
from ..fluid.layer_helper import LayerHelper
from ..fluid.framework import core, in_dygraph_mode
from ..fluid import layers
from .search import where
from ..fluid.data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
import paddle
from paddle import _C_ops
__all__ = []
def mean(x, axis=None, keepdim=False, name=None):
"""
Computes the mean of the input tensor's elements along ``axis``.
Args:
x (Tensor): The input Tensor with data type float32, float64.
axis (int|list|tuple, optional): The axis along which to perform mean
calculations. ``axis`` should be int, list(int) or tuple(int). If
``axis`` is a list/tuple of dimension(s), mean is calculated along
all element(s) of ``axis`` . ``axis`` or element(s) of ``axis``
should be in range [-D, D), where D is the dimensions of ``x`` . If
``axis`` or element(s) of ``axis`` is less than 0, it works the
same way as :math:`axis + D` . If ``axis`` is None, mean is
calculated over all elements of ``x``. Default is None.
keepdim (bool, optional): Whether to reserve the reduced dimension(s)
in the output Tensor. If ``keepdim`` is True, the dimensions of
the output Tensor is the same as ``x`` except in the reduced
dimensions(it is of size 1 in this case). Otherwise, the shape of
the output Tensor is squeezed in ``axis`` . Default is False.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, results of average along ``axis`` of ``x``, with the same data
type as ``x``.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.]],
[[13., 14., 15., 16.],
[17., 18., 19., 20.],
[21., 22., 23., 24.]]])
out1 = paddle.mean(x)
# [12.5]
out2 = paddle.mean(x, axis=-1)
# [[ 2.5 6.5 10.5]
# [14.5 18.5 22.5]]
out3 = paddle.mean(x, axis=-1, keepdim=True)
# [[[ 2.5]
# [ 6.5]
# [10.5]]
# [[14.5]
# [18.5]
# [22.5]]]
out4 = paddle.mean(x, axis=[0, 2])
# [ 8.5 12.5 16.5]
"""
if isinstance(axis, int):
axis = [axis]
reduce_all = True if axis is None \
or len(axis)==0 \
or len(axis) == len(x.shape) else False
if axis is None or len(axis) == 0:
axis = [0]
if in_dygraph_mode():
return _C_ops.reduce_mean(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all)
check_variable_and_dtype(x, 'x/input', ['float32', 'float64'],
'mean/reduce_mean')
check_type(axis, 'axis/dim', (int, list, tuple), 'mean/reduce_mean')
if isinstance(axis, (list, tuple)):
for item in axis:
check_type(item, 'elements of axis/dim', (int), 'mean/reduce_mean')
helper = LayerHelper('mean', **locals())
attrs = {'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='reduce_mean', inputs={'X': x}, outputs={'Out': out}, attrs=attrs)
return out
def var(x, axis=None, unbiased=True, keepdim=False, name=None):
"""
Computes the variance of ``x`` along ``axis`` .
Args:
x (Tensor): The input Tensor with data type float32, float64.
axis (int|list|tuple, optional): The axis along which to perform
variance calculations. ``axis`` should be int, list(int) or
tuple(int). If ``axis`` is a list/tuple of dimension(s), variance
is calculated along all element(s) of ``axis`` . ``axis`` or
element(s) of ``axis`` should be in range [-D, D), where D is the
dimensions of ``x`` . If ``axis`` or element(s) of ``axis`` is less
than 0, it works the same way as :math:`axis + D` . If ``axis`` is
None, variance is calculated over all elements of ``x``. Default
is None.
unbiased (bool, optional): Whether to use the unbiased estimation. If
``unbiased`` is True, the divisor used in the computation is
:math:`N - 1`, where :math:`N` represents the number of elements
along ``axis`` , otherwise the divisor is :math:`N`. Default is True.
keepdim (bool, optional): Whether to reserve the reduced dimension(s)
in the output Tensor. If ``keepdim`` is True, the dimensions of
the output Tensor is the same as ``x`` except in the reduced
dimensions(it is of size 1 in this case). Otherwise, the shape of
the output Tensor is squeezed in ``axis`` . Default is False.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, results of variance along ``axis`` of ``x``, with the same data
type as ``x``.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[1.0, 2.0, 3.0], [1.0, 4.0, 5.0]])
out1 = paddle.var(x)
# [2.66666667]
out2 = paddle.var(x, axis=1)
# [1. 4.33333333]
"""
if not in_dygraph_mode():
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'var')
u = mean(x, axis, True, name)
out = paddle.sum((x - u)**2, axis, keepdim=keepdim, name=name)
n = paddle.cast(paddle.numel(x), x.dtype) \
/ paddle.cast(paddle.numel(out), x.dtype)
if unbiased:
one_const = paddle.ones([1], x.dtype)
n = where(n > one_const, n - 1., one_const)
out /= n
return out
def std(x, axis=None, unbiased=True, keepdim=False, name=None):
"""
Computes the standard-deviation of ``x`` along ``axis`` .
Args:
x (Tensor): The input Tensor with data type float32, float64.
axis (int|list|tuple, optional): The axis along which to perform
standard-deviation calculations. ``axis`` should be int, list(int)
or tuple(int). If ``axis`` is a list/tuple of dimension(s),
standard-deviation is calculated along all element(s) of ``axis`` .
``axis`` or element(s) of ``axis`` should be in range [-D, D),
where D is the dimensions of ``x`` . If ``axis`` or element(s) of
``axis`` is less than 0, it works the same way as :math:`axis + D` .
If ``axis`` is None, standard-deviation is calculated over all
elements of ``x``. Default is None.
unbiased (bool, optional): Whether to use the unbiased estimation. If
``unbiased`` is True, the standard-deviation is calculated via the
unbiased estimator. If ``unbiased`` is True, the divisor used in
the computation is :math:`N - 1`, where :math:`N` represents the
number of elements along ``axis`` , otherwise the divisor is
:math:`N`. Default is True.
keepdim (bool, optional): Whether to reserve the reduced dimension(s)
in the output Tensor. If ``keepdim`` is True, the dimensions of
the output Tensor is the same as ``x`` except in the reduced
dimensions(it is of size 1 in this case). Otherwise, the shape of
the output Tensor is squeezed in ``axis`` . Default is False.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, results of standard-deviation along ``axis`` of ``x``, with the
same data type as ``x``.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[1.0, 2.0, 3.0], [1.0, 4.0, 5.0]])
out1 = paddle.std(x)
# [1.63299316]
out2 = paddle.std(x, axis=1)
# [1. 2.081666]
"""
if not in_dygraph_mode():
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'std')
out = var(**locals())
return paddle.sqrt(out)
def numel(x, name=None):
"""
    Returns the number of elements for a tensor, which is an int64 Tensor with shape [1] in static mode
or a scalar value in imperative mode
Args:
x (Tensor): The input Tensor, it's data type can be bool, float16, float32, float64, int32, int64.
Returns:
Tensor: The number of elements for the input Tensor.
Examples:
.. code-block:: python
import paddle
x = paddle.full(shape=[4, 5, 7], fill_value=0, dtype='int32')
numel = paddle.numel(x) # 140
"""
if in_dygraph_mode():
return _C_ops.size(x)
if not isinstance(x, Variable):
raise TypeError("x must be a Tensor in numel")
helper = LayerHelper('numel', **locals())
out = helper.create_variable_for_type_inference(
dtype=core.VarDesc.VarType.INT64)
helper.append_op(type='size', inputs={'Input': x}, outputs={'Out': out})
return out
def median(x, axis=None, keepdim=False, name=None):
"""
Compute the median along the specified axis.
Args:
x (Tensor): The input Tensor, it's data type can be bool, float16, float32, float64, int32, int64.
axis (int, optional): The axis along which to perform median calculations ``axis`` should be int.
``axis`` should be in range [-D, D), where D is the dimensions of ``x`` .
If ``axis`` is less than 0, it works the same way as :math:`axis + D`.
If ``axis`` is None, median is calculated over all elements of ``x``. Default is None.
keepdim (bool, optional): Whether to reserve the reduced dimension(s)
in the output Tensor. If ``keepdim`` is True, the dimensions of
the output Tensor is the same as ``x`` except in the reduced
dimensions(it is of size 1 in this case). Otherwise, the shape of
the output Tensor is squeezed in ``axis`` . Default is False.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, results of median along ``axis`` of ``x``. If data type of ``x`` is float64, data type of results will be float64, otherwise data type will be float32.
Examples:
.. code-block:: python
import paddle
x = paddle.arange(12).reshape([3, 4])
# x is [[0 , 1 , 2 , 3 ],
# [4 , 5 , 6 , 7 ],
# [8 , 9 , 10, 11]]
y1 = paddle.median(x)
# y1 is [5.5]
y2 = paddle.median(x, axis=0)
# y2 is [4., 5., 6., 7.]
y3 = paddle.median(x, axis=1)
# y3 is [1.5, 5.5, 9.5]
y4 = paddle.median(x, axis=0, keepdim=True)
# y4 is [[4., 5., 6., 7.]]
"""
if not isinstance(x, Variable):
raise TypeError("In median, the input x should be a Tensor.")
is_flatten = axis is None
dims = len(x.shape)
if is_flatten:
x = paddle.flatten(x)
axis = 0
else:
if not isinstance(axis, int) or not (axis < dims and axis >= -dims):
raise ValueError(
"In median, axis should be none or an integer in range [-rank(x), rank(x))."
)
if axis < 0:
axis += dims
sz = x.shape[axis]
kth = sz >> 1
tensor_topk, idx = paddle.topk(x, kth + 1, axis=axis, largest=False)
dtype = 'float64' if x.dtype == core.VarDesc.VarType.FP64 else 'float32'
if sz & 1 == 0:
out_tensor = paddle.slice(
tensor_topk, axes=[axis], starts=[kth - 1],
ends=[kth]) + paddle.slice(
tensor_topk, axes=[axis], starts=[kth], ends=[kth + 1])
out_tensor = paddle.cast(out_tensor, dtype=dtype) / 2
else:
out_tensor = paddle.cast(
paddle.slice(
tensor_topk, axes=[axis], starts=[kth], ends=[kth + 1]),
dtype=dtype)
if not keepdim or is_flatten:
if not is_flatten:
newshape = x.shape[:axis] + x.shape[axis + 1:]
elif not keepdim:
newshape = [1]
else:
newshape = [1] * dims
else:
newshape = out_tensor.shape
out_tensor = out_tensor.reshape(newshape, name=name)
return out_tensor
|
py | 1a2f10fd6a45707e43e946a6846e81991cb09110 | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
from scrapy.item import Item, Field
class XiaobaiheItem(Item):
# define the fields for your item here like:
# name = scrapy.Field()
username = Field()
text = Field()
url = Field()
|
py | 1a2f11bfbdbd20d17ab6370d274d33ac02747150 | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 12, transform = "None", sigma = 0.0, exog_count = 20, ar_order = 0); |
py | 1a2f11e2e7df60a0c636e06ecccbea6464b55fb8 | """ Tests for the various cli programs """
from pyontutils.integration_test_helper import _TestCliBase, Folders
class TestCli(Folders, _TestCliBase):
commands = (
['googapis', '--help'],
['graphml-to-ttl', '--help'],
['necromancy', '--help'],
['ontload', '--help'],
['overlaps', '--help'],
['qnamefix', '--help'],
['scigraph-codegen', '--help'],
['scig', '--help'],
['ttlfmt', '--help'],
)
|
py | 1a2f11ff27e35d8d4438e0a97443a7435d568591 | #!/usr/bin/python
import math
import matplotlib.pyplot as plt
from graphtheory.structures.edges import Edge
from graphtheory.structures.graphs import Graph
from graphtheory.structures.factory import GraphFactory
from graphtheory.structures.points import Point
from graphtheory.forests.treeplot import TreePlot
from graphtheory.forests.treeplot import TreePlotRadiusAngle
V = 20
gf = GraphFactory(Graph)
G = gf.make_tree(V)
#G.show()
assert G.e() == V-1
algorithm = TreePlotRadiusAngle(G)
algorithm.run()
#print ( algorithm.point_dict ) # (radius, angle)
D = dict() # node ---> point on the plane
for node in algorithm.point_dict:
(radius, angle) = algorithm.point_dict[node]
D[node] = Point(radius * math.cos(angle), radius * math.sin(angle))
#print ( D )
for edge in G.iteredges():
x = [D[edge.source].x, D[edge.target].x]
y = [D[edge.source].y, D[edge.target].y]
plt.plot(x, y, 'k-') # black line
x = [D[node].x for node in G.iternodes()]
y = [D[node].y for node in G.iternodes()]
plt.plot(x, y, 'bo') # blue circle
plt.title("Random tree")
plt.xlabel("x")
plt.ylabel("y")
plt.show()
# EOF
|
py | 1a2f12775a7a02c804df13765fdebe726aaa3ea6 | import pandas as pd
import numpy as np
import joblib
import Levenshtein
import argparse
import ast
from scipy import stats
from src import nlp_preprocessing
def preprocess_txt(txt: str):
"""Executa preprocessamento textual padrão"""
cleaned_txt = nlp_preprocessing.clean_text(txt)
token_txt = nlp_preprocessing.custom_tokenizer(cleaned_txt)
return " ".join(token_txt)
def get_top_docs(query, cleaned_doc_list, doc_titles, get_titles=True):
"""Gera as recomedações a partir de uma query e listas de referência"""
cleaned_query = preprocess_txt(query)
dists = [Levenshtein.distance(cleaned_query, doc)
for doc in cleaned_doc_list]
mask = np.array(dists).argsort()[:10]
if get_titles:
return doc_titles.iloc[mask].tolist()
else:
return doc_titles.iloc[mask].index.tolist()
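# Illustrative usage sketch (the series below is hypothetical): ``get_top_docs``
# ranks documents by Levenshtein distance on the preprocessed text and returns
# the closest titles, or their ids when ``get_titles`` is False.
#
#     titles = pd.Series(["tapete de croche", "kit berco azul"],
#                        index=[101, 102], name="title")
#     cleaned = [preprocess_txt(t) for t in titles]
#     get_top_docs("tapete croche", cleaned, titles)         # -> list of titles
#     get_top_docs("tapete croche", cleaned, titles, False)  # -> list of ids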
def load_data():
"""Carrega os dados"""
df = pd.concat([
pd.read_pickle("data/train_query.pickle"),
pd.read_pickle("data/test_query.pickle")
])
return df
def series_mode(serie: pd.Series):
"""Calcula a moda de uma série"""
return stats.mode(serie)[0][0]
def remove_duplicates(df, group="product_id",
num_cols=["price", "weight", "minimum_quantity"],
cat_cols=["title", "concatenated_tags"]) -> pd.DataFrame:
"""Função que remove os registros duplicados juntando os por média e moda
a depender dos tipos de coluna"""
mode_stats = {col: series_mode for col in cat_cols}
mean_stats = {col: "mean" for col in num_cols}
agg_stats = dict(**mode_stats, **mean_stats)
return df.groupby(group).agg(agg_stats)
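# Illustrative note: with the default arguments ``remove_duplicates`` builds an
# aggregation spec equivalent to
#
#     {"title": series_mode, "concatenated_tags": series_mode,
#      "price": "mean", "weight": "mean", "minimum_quantity": "mean"}
#
# so each product_id keeps the modal text fields and the mean numeric fields.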
def make_predictions(query, clf_model):
"""Função que realiza as recomendações com predição de categoria
majoritária"""
df = load_data()
prod_titles = (df[["product_id", "title"]].
drop_duplicates().set_index("product_id")["title"])
cleaned_prod_titles = [preprocess_txt(txt) for txt in prod_titles]
prod_id_select = get_top_docs(query,
cleaned_prod_titles,
prod_titles,
False)
selected_df = df.loc[df["product_id"].isin(prod_id_select)]
selected_df = remove_duplicates(selected_df)
predicted_cats = clf_model.predict(selected_df)
major_cat = stats.mode(predicted_cats)[0][0]
print(major_cat)
for _id, title in selected_df["title"].iteritems():
print(f"{_id} - {title}")
# helper function for the classification model
def select_txt(X: pd.DataFrame, col: str):
return X[col]
# helper function for the classification model
def select_base_features(X: pd.DataFrame):
return X[["price", "weight", "minimum_quantity"]]
def load_args() -> pd.DataFrame:
"""Função de carregamento de configurações.
Returns:
pd.DataFrame: resultado a ser categorizado.
"""
    # create the expected command-line arguments and parse them
parser = argparse.ArgumentParser()
parser.add_argument("-c",
"--category",
help="Texto de registro a ser categorizado",
type=str)
parser.add_argument("-r",
"--recommendation",
help="Sistema de recomendação de produtos",
type=str)
args = parser.parse_args()
    # extract the record to be categorized from the received arguments
    # and shape it for prediction
if args.category is not None:
product_dict = ast.literal_eval(args.category)
product_df = pd.Series(product_dict).to_frame().T
else:
product_df = None
return product_df, args.recommendation
def predict_single_category(df, clf_model):
product_category = clf_model.predict(df)[0]
print(product_category)
def main():
# carregando o modelo
rf_clf_pipeline = joblib.load("assets/category_rf_clf_pipeline.joblib")
# carregando o registro a ser categorizado
product_df, query = load_args()
# fazendo a previsão da categoria
if product_df is not None:
predict_single_category(product_df, rf_clf_pipeline)
if query is not None:
make_predictions(query, rf_clf_pipeline)
if __name__ == "__main__":
main() |
py | 1a2f129fc6e9152c4f93a72acd1cfc2e585a8c32 | import re
import os
import struct
import argparse
import collections
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from tensorflow.core.example import example_pb2
from utils import is_num
parser = argparse.ArgumentParser()
# Path
parser.add_argument('--dataset', type=str, choices=['restaurant', 'beer'], default='restaurant')
parser.add_argument('--vocab_fname', type=str, default='./data/vocab.txt')
parser.add_argument('--vocab_size', type=int, default=30000)
args = parser.parse_args()
"""
Preprocessing script file
"""
def tokenize_sent(sent, lemtzr, stopword):
tokens = tokenize(sent.strip().lower())
tokens = re.sub(r'[^A-Za-z0-9]+',' ', ' '.join(tokens))
tokens = [tok for tok in tokens.split() if tok not in stopword]
tokens = [tok if not is_num(tok) else '<NUM>' for tok in tokens]
tokens = [lemtzr.lemmatize(tok) for tok in tokens]
return tokens
def tokenize_train_file(fname):
"""
    Tokenize the raw train data (unlabeled).
"""
split_fname = fname.split('/')
new_fname = '/'.join([el if idx != len(split_fname) - 1 else 'parsed_' + el for idx, el in enumerate(split_fname)])
if os.path.exists(new_fname): return new_fname
with open(fname, 'r', encoding='utf8') as f:
ls = f.readlines()
parsed_data = []
lemtzr = WordNetLemmatizer()
stopword = stopwords.words('english')
for line in ls:
tokens = tokenize_sent(line, lemtzr, stopword)
parsed_data.append(tokens)
save_file(parsed_data, new_fname)
return new_fname
def tokenize_labeled_test_file(fname, label_fname):
"""
Tokenize the raw test data (labelled).
"""
split_fname = fname.split('/')
new_fname = '/'.join([el if idx != len(split_fname) - 1 else 'parsed_' + el for idx, el in enumerate(split_fname)])
label_map_fname = '/'.join([el if idx != len(split_fname) - 1 else 'label_map.txt' for idx, el in enumerate(split_fname)])
if os.path.exists(new_fname): return label_map_fname, new_fname
with open(fname, 'r', encoding='utf8') as f1, open(label_fname, 'r', encoding='utf8') as f2:
ls1, ls2 = f1.readlines(), f2.readlines()
parsed_data = []
lemtzr = WordNetLemmatizer()
stopword = stopwords.words('english')
for line in ls1:
tokens = tokenize_sent(line, lemtzr, stopword)
parsed_data.append(tokens)
assert len(ls1) == len(ls2) == len(parsed_data)
new_parsed, new_ls2 = [], []
for parsed, label in zip(parsed_data, ls2):
if 'Positive' in label or 'Neutral' in label:
continue
new_parsed.append(parsed)
new_ls2.append(label)
assert len(new_parsed) == len(new_ls2)
parsed_data, ls2 = new_parsed, new_ls2
label_text = list(set([tok for line in ls2 for tok in line.strip().split()]))
label_map = dict()
print("Label for this dataset with assigned index is as follows.")
for idx, label in enumerate(label_text):
print('{}: {}'.format(label, idx))
label_map[label] = idx
with open(label_map_fname, 'w') as f:
for key,val in label_map.items():
f.write("{} {} ||| ".format(key, val))
for idx, data in enumerate(parsed_data):
labels = ls2[idx].strip().split()
assert all([label in list(label_map.keys()) for label in labels])
parsed_data[idx].insert(0, '|||')
for label in labels:
parsed_data[idx].insert(0, str(label_map[label]))
save_file(parsed_data, new_fname)
return label_map_fname, new_fname
def build_vocab(parsed_train_fname, vocab_file, vocab_size=30000):
"""
Build vocab based on frequency of each word in train set.
Save vocab file and return vocab list.
"""
if os.path.exists(vocab_file):
with open(vocab_file, 'r', encoding='utf8') as f:
ls = f.readlines()
assert len(ls) == vocab_size
vocab = [line.strip() for line in ls]
return vocab
with open(parsed_train_fname, 'r', encoding='utf8') as f:
ls = f.readlines()
tokens = [tok for line in ls for tok in line.strip().split()]
counts = dict(collections.Counter(tokens))
import operator
vocab = sorted(counts.items(), key=operator.itemgetter(1), reverse=True)
print("TOTAL VOCAB SIZE: {}".format(len(vocab)))
for idx,tok in enumerate(vocab):
if tok[1] <= 10:
print("WORDS MORE THAN 10: {}".format(idx))
break
vocab = [tok[0] for tok in vocab][:idx]
vocab.append('<UNK>')
vocab.append('<PAD>')
assert all([isinstance(tok, str) for tok in vocab])
with open(vocab_file, 'w') as f:
f.write('\n'.join([tok for tok in vocab]))
return vocab
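# Illustrative usage sketch (paths are hypothetical): ``build_vocab`` sorts
# tokens by frequency, truncates at the first token seen 10 times or fewer and
# appends the special symbols, so the tail of the vocabulary is fixed.
#
#     vocab = build_vocab('./data/datasets/restaurant/parsed_train.txt',
#                         './data/vocab.txt', vocab_size=30000)
#     assert vocab[-2:] == ['<UNK>', '<PAD>']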
def save_file(data, new_fname):
"""
Change the "raw_fname" into parsed fname, then save "data" into parsed fname.
"""
assert isinstance(data, list)
with open(new_fname, 'w') as f: f.write('\n'.join([" ".join(one_sample) for one_sample in data]))
def tokenize(sent):
assert isinstance(sent, str)
return word_tokenize(sent)
def make_binary_dataset(fname, is_label=False):
"""
Make a binary data file for learning.
"""
binary_fname = fname.replace('.txt', '.bin')
if os.path.exists(binary_fname): return
with open(fname, 'r', encoding='utf8') as f:
ls = f.readlines()
data = [line.strip() for line in ls]
assert all(['|||' in dat for dat in data]) if is_label else all(['|||' not in dat for dat in data])
with open(binary_fname, 'wb') as f:
for line in data:
if is_label:
split_line = line.split('|||')
assert len(split_line) == 2
label, text = split_line[0].strip(), split_line[1].strip()
else:
text = line
example = example_pb2.Example()
example.features.feature['text'].bytes_list.value.extend([text.encode()])
if is_label:
example.features.feature['label'].bytes_list.value.extend([label.encode()])
example_str = example.SerializeToString()
str_len = len(example_str)
f.write(struct.pack('q', str_len))
f.write(struct.pack('%ds' % str_len, example_str))
return
def main():
train_fname = './data/datasets/{}/train.txt'.format(args.dataset)
test_fname = './data/datasets/{}/test.txt'.format(args.dataset)
test_label_fname = './data/datasets/{}/test_label.txt'.format(args.dataset)
    vocab_fname = args.vocab_fname
vocab_size = args.vocab_size
parsed_train_fname = tokenize_train_file(train_fname)
label_map, parsed_test_fname = tokenize_labeled_test_file(test_fname, test_label_fname)
build_vocab(parsed_train_fname, vocab_fname, vocab_size=vocab_size)
make_binary_dataset(parsed_train_fname, False)
make_binary_dataset(parsed_test_fname, True)
if __name__ == '__main__':
main() |
py | 1a2f13906d5d70ee00a1c331f3fdf608d4b11f45 | from pykit import logutil
print logutil.get_root_log_fn()
|
py | 1a2f14470add748350681a749085c0b0bb7f4168 | #!/usr/bin/env python3
from os import environ
from curio import Channel, run
syncword = environ.get('RRIDBOT_SYNC')
chan = ('localhost', 12345)
async def consumer():
ch = Channel(chan)
c = await ch.accept(authkey=syncword.encode())
myset = set()
while True:
try:
msg = await c.recv()
except (EOFError, ConnectionResetError) as e: # in the event that the client closes
print('resetting')
myset = set()
c = await ch.accept(authkey=syncword.encode())
continue
if msg is None: # explicit reset
myset = set()
else:
op, uri = msg.split(' ', 1)
print(op, uri)
if op == 'add':
if uri in myset:
await c.send(True)
else:
myset.add(uri)
await c.send(False)
elif op == 'del':
myset.discard(uri)
await c.send(False)
else:
await c.send('ERROR')
print(myset)
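# Illustrative client sketch (not defined in this script): the consumer expects
# 'add <uri>' / 'del <uri>' messages and replies True only when an added uri
# was already in the set.
#
#     async def producer():
#         ch = Channel(chan)
#         c = await ch.connect(authkey=syncword.encode())
#         await c.send('add https://example.org/paper-1')
#         print(await c.recv())  # False on the first add, True afterwards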
if __name__ == '__main__':
run(consumer)
|
py | 1a2f1469fdb17a381028170642ef12b2ee49d969 | from setuptools import setup
setup(
name = 'PyXiaomiGateway',
packages = ['xiaomi_gateway'],
install_requires=['cryptography>=2.1.1'],
version = '0.11.1',
description = 'A library to communicate with the Xiaomi Gateway',
author='Daniel Hoyer Iversen',
url='https://github.com/Danielhiversen/PyXiaomiGateway/',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Other Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Home Automation',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
|
py | 1a2f146b723483bb72f845343fd511e3a7fafc90 | from nose.tools import * # noqa
from framework.auth.core import Auth
from tests.base import OsfTestCase
from tests.factories import AuthUserFactory, ProjectFactory, BookmarkCollectionFactory
from scripts.analytics.addon_snapshot import AddonSnapshot
from website.models import Node
from framework.auth.core import User
from website.settings import ADDONS_AVAILABLE
from addons.github.tests.factories import GitHubAccountFactory
from addons.github.model import GitHubNodeSettings, GitHubUserSettings
from addons.googledrive.tests.factories import GoogleDriveAccountFactory
from addons.googledrive.model import GoogleDriveNodeSettings, GoogleDriveUserSettings
class TestAddonCount(OsfTestCase):
def setUp(self):
super(TestAddonCount, self).setUp()
self.user = AuthUserFactory()
self.node = ProjectFactory(creator=self.user)
self.user.add_addon('github')
self.user_addon = self.user.get_addon('github')
self.external_account = GitHubAccountFactory(display_name='hmoco1')
self.user_settings = self.user.get_or_add_addon('github')
self.user_settings.save()
self.user.external_accounts.append(self.external_account)
self.user.save()
self.node.add_addon('github', Auth(self.user))
self.node_addon = self.node.get_addon('github')
self.node_addon.user = self.user.fullname
self.node_addon.repo = '29 #Strafford APTS'
self.node_addon.user_settings = self.user_addon
self.node_addon.external_account = self.external_account
self.node_addon.save()
self.user_settings.grant_oauth_access(
node=self.node,
external_account=self.external_account,
)
def tearDown(self):
GitHubNodeSettings.remove()
GitHubUserSettings.remove()
GoogleDriveNodeSettings.remove()
GoogleDriveUserSettings.remove()
def test_run_for_all_addon(self):
results = AddonSnapshot().get_events()
names = [res['provider']['name'] for res in results]
for addon in ADDONS_AVAILABLE:
assert_in(addon.short_name, names)
def test_one_user_one_node_one_addon(self):
results = AddonSnapshot().get_events()
github_res = [res for res in results if res['provider']['name'] == 'github'][0]
assert_equal(github_res['users']['enabled'], 1)
assert_equal(github_res['nodes']['total'], 1)
def test_one_user_one_node_one_addon_one_node_linked(self):
results = AddonSnapshot().get_events()
github_res = [res for res in results if res['provider']['name'] == 'github'][0]
assert_equal(github_res['users']['enabled'], 1)
assert_equal(github_res['nodes']['total'], 1)
def test_one_user_with_multiple_githubs(self):
oauth_settings2 = GitHubAccountFactory(display_name='hmoco2')
oauth_settings2.save()
self.user.external_accounts.append(oauth_settings2)
self.user.save()
results = AddonSnapshot().get_events()
github_res = [res for res in results if res['provider']['name'] == 'github'][0]
assert_equal(github_res['users']['enabled'], 1)
def test_one_user_with_multiple_addons(self):
results = AddonSnapshot().get_events()
github_res = [res for res in results if res['provider']['name'] == 'github'][0]
googledrive_res = [res for res in results if res['provider']['name'] == 'googledrive'][0]
assert_equal(github_res['users']['enabled'], 1)
assert_equal(googledrive_res['users']['enabled'], 0)
self.user.add_addon('googledrive')
oauth_settings = GoogleDriveAccountFactory()
oauth_settings.save()
self.user.external_accounts.append(oauth_settings)
self.user.save()
results = AddonSnapshot().get_events()
github_res = [res for res in results if res['provider']['name'] == 'github'][0]
googledrive_res = [res for res in results if res['provider']['name'] == 'googledrive'][0]
assert_equal(github_res['users']['enabled'], 1)
assert_equal(googledrive_res['users']['enabled'], 1)
def test_many_users_each_with_a_different_github(self):
user = AuthUserFactory()
user.add_addon('github')
oauth_settings2 = GitHubAccountFactory(display_name='hmoco2')
oauth_settings2.save()
user.external_accounts.append(oauth_settings2)
user.save()
results = AddonSnapshot().get_events()
github_res = [res for res in results if res['provider']['name'] == 'github'][0]
assert_equal(github_res['users']['enabled'], 2)
assert_equal(github_res['users']['authorized'], 1)
assert_equal(github_res['users']['linked'], 1)
def test_many_users_each_with_the_same_github_enabled(self):
user = AuthUserFactory()
user.add_addon('github')
user.external_accounts.append(self.external_account)
user.save()
results = AddonSnapshot().get_events()
github_res = [res for res in results if res['provider']['name'] == 'github'][0]
assert_equal(github_res['users']['enabled'], 2)
def test_github_enabled_not_linked_or_authorized(self):
user = AuthUserFactory()
user.add_addon('github')
user.external_accounts.append(self.external_account)
user.save()
results = AddonSnapshot().get_events()
github_res = [res for res in results if res['provider']['name'] == 'github'][0]
assert_equal(github_res['users']['enabled'], 2)
assert_equal(github_res['users']['authorized'], 1)
assert_equal(github_res['users']['linked'], 1)
def test_one_node_with_multiple_addons(self):
results = AddonSnapshot().get_events()
github_res = [res for res in results if res['provider']['name'] == 'github'][0]
googledrive_res = [res for res in results if res['provider']['name'] == 'googledrive'][0]
assert_equal(github_res['nodes']['total'], 1)
assert_equal(googledrive_res['nodes']['total'], 0)
self.user.add_addon('googledrive')
user_addon = self.user.get_addon('googledrive')
oauth_settings = GoogleDriveAccountFactory()
oauth_settings.save()
self.user.external_accounts.append(oauth_settings)
self.user.save()
self.node.add_addon('googledrive', Auth(self.user))
node_addon = self.node.get_addon('googledrive')
node_addon.user = self.user.fullname
node_addon.user_settings = user_addon
node_addon.external_account = oauth_settings
node_addon.save()
results = AddonSnapshot().get_events()
github_res = [res for res in results if res['provider']['name'] == 'github'][0]
googledrive_res = [res for res in results if res['provider']['name'] == 'googledrive'][0]
assert_equal(github_res['nodes']['total'], 1)
assert_equal(googledrive_res['nodes']['total'], 1)
def test_many_nodes_with_one_addon(self):
results = AddonSnapshot().get_events()
github_res = [res for res in results if res['provider']['name'] == 'github'][0]
assert_equal(github_res['nodes']['total'], 1)
node = ProjectFactory(creator=self.user)
node.add_addon('github', Auth(self.user))
node_addon = node.get_addon('github')
node_addon.user = self.user.fullname
node_addon.repo = '8 (circle)'
node_addon.user_settings = self.user_addon
node_addon.external_account = self.external_account
node_addon.save()
node.save()
results = AddonSnapshot().get_events()
github_res = [res for res in results if res['provider']['name'] == 'github'][0]
assert_equal(github_res['nodes']['total'], 2)
def test_node_count_deleted_addon(self):
results = AddonSnapshot().get_events()
github_res = [res for res in results if res['provider']['name'] == 'github'][0]
assert_equal(github_res['nodes']['deleted'], 0)
node = ProjectFactory(creator=self.user)
node.add_addon('github', Auth(self.user))
node_addon = node.get_addon('github')
node_addon.delete()
results = AddonSnapshot().get_events()
github_res = [res for res in results if res['provider']['name'] == 'github'][0]
assert_equal(github_res['nodes']['deleted'], 1)
def test_node_count_disconected_addon(self):
results = AddonSnapshot().get_events()
github_res = [res for res in results if res['provider']['name'] == 'github'][0]
assert_equal(github_res['nodes']['disconnected'], 0)
node = ProjectFactory(creator=self.user)
node.add_addon('github', Auth(self.user))
node_addon = node.get_addon('github')
node_addon.external_account = None
node_addon.save()
results = AddonSnapshot().get_events()
github_res = [res for res in results if res['provider']['name'] == 'github'][0]
assert_equal(github_res['nodes']['disconnected'], 1)
def test_all_users_have_wiki_osfstorage_enabled(self):
all_user_count = User.find().count()
results = AddonSnapshot().get_events()
osfstorage_res = [res for res in results if res['provider']['name'] == 'osfstorage'][0]
wiki_res = [res for res in results if res['provider']['name'] == 'osfstorage'][0]
assert_equal(osfstorage_res['users']['enabled'], all_user_count)
assert_equal(wiki_res['users']['enabled'], all_user_count)
def test_wiki_deleted_shows_as_deleted(self):
node = ProjectFactory(creator=self.user)
node.delete_addon('wiki', auth=Auth(self.user))
results = AddonSnapshot().get_events()
wiki_res = [res for res in results if res['provider']['name'] == 'wiki'][0]
assert_equal(wiki_res['nodes']['deleted'], 1)
def test_node_settings_has_no_owner_not_connected(self):
self.node_addon.owner = None
self.node_addon.save()
results = AddonSnapshot().get_events()
storage_res = [res for res in results if res['provider']['name'] == 'github'][0]
assert_equal(storage_res['nodes']['connected'], 0)
def test_bookmark_collection_not_counted(self):
BookmarkCollectionFactory(creator=self.user)
all_node_count = Node.find().count()
results = AddonSnapshot().get_events()
storage_res = [res for res in results if res['provider']['name'] == 'osfstorage'][0]
assert_equal(storage_res['nodes']['connected'], all_node_count - 1)
|
py | 1a2f168617074cd9c152689e717fcdbd2d133936 | import aiohttp_csrf
import pytest
from aiohttp import web
SESSION_NAME = COOKIE_NAME = 'csrf_token'
FORM_FIELD_NAME = HEADER_NAME = 'X-CSRF-TOKEN'
@pytest.yield_fixture
def init_app():
def go(
loop,
policy,
storage,
handlers,
error_renderer=None,
):
app = web.Application()
kwargs = {
'policy': policy,
'storage': storage,
}
if error_renderer is not None:
kwargs['error_renderer'] = error_renderer
aiohttp_csrf.setup(app, **kwargs)
for method, url, handler in handlers:
app.router.add_route(
method,
url,
handler,
)
return app
yield go
@pytest.fixture(params=[
(aiohttp_csrf.policy.FormPolicy, (FORM_FIELD_NAME,)),
(aiohttp_csrf.policy.FormAndHeaderPolicy, (HEADER_NAME, FORM_FIELD_NAME)),
])
def csrf_form_policy(request):
_class, args = request.param
return _class(*args)
@pytest.fixture(params=[
(aiohttp_csrf.policy.HeaderPolicy, (HEADER_NAME,)),
(aiohttp_csrf.policy.FormAndHeaderPolicy, (HEADER_NAME, FORM_FIELD_NAME)),
])
def csrf_header_policy(request):
_class, args = request.param
return _class(*args)
@pytest.fixture(params=[
(aiohttp_csrf.storage.SessionStorage, (SESSION_NAME,)),
(aiohttp_csrf.storage.CookieStorage, (COOKIE_NAME,)),
])
def csrf_storage(request):
_class, args = request.param
return _class(*args)
|
py | 1a2f19cc0f4e0c144822e2b2571cdbfe170f1ec3 | #!/home/access/Downloads/Desktop/Django_IPs/week4_IP/neighbourhood/virtual/bin/python3.8
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
|
py | 1a2f1a7039bf078ca8ad698605d42a9ba5d862d2 | """
sphinxcontrib.openapi.openapi30
-------------------------------
The OpenAPI 3.0.0 spec renderer. Based on ``sphinxcontrib-httpdomain``.
:copyright: (c) 2016, Ihor Kalnytskyi.
:license: BSD, see LICENSE for details.
"""
import copy
import collections
import collections.abc
from datetime import datetime
import itertools
import json
import re
from urllib import parse
from http.client import responses as http_status_codes
from sphinx.util import logging
from sphinxcontrib.openapi import utils
LOG = logging.getLogger(__name__)
# https://github.com/OAI/OpenAPI-Specification/blob/3.0.2/versions/3.0.0.md#data-types
_TYPE_MAPPING = {
('integer', 'int32'): 1, # integer
('integer', 'int64'): 1, # long
('number', 'float'): 1.0, # float
('number', 'double'): 1.0, # double
('boolean', None): True, # boolean
('string', None): 'string', # string
('string', 'byte'): 'c3RyaW5n', # b'string' encoded in base64, # byte
('string', 'binary'): '01010101', # binary
('string', 'date'): datetime.now().date().isoformat(), # date
('string', 'date-time'): datetime.now().isoformat(), # dateTime
('string', 'password'): '********', # password
# custom extensions to handle common formats
('string', 'email'): '[email protected]',
('string', 'zip-code'): '90210',
('string', 'uri'): 'https://example.com',
# additional fallthrough cases
('integer', None): 1, # integer
('number', None): 1.0, # <fallthrough>
}
_READONLY_PROPERTY = object() # sentinel for values not included in requests
def _dict_merge(dct, merge_dct):
"""Recursive dict merge.
Inspired by :meth:``dict.update()``, instead of updating only top-level
keys, dict_merge recurses down into dicts nested to an arbitrary depth,
updating keys. The ``merge_dct`` is merged into ``dct``.
From https://gist.github.com/angstwad/bf22d1822c38a92ec0a9
Arguments:
dct: dict onto which the merge is executed
merge_dct: dct merged into dct
"""
for k in merge_dct.keys():
if (k in dct and isinstance(dct[k], dict)
and isinstance(merge_dct[k], collections.abc.Mapping)):
_dict_merge(dct[k], merge_dct[k])
else:
dct[k] = merge_dct[k]
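# Illustrative note: unlike ``dict.update()``, ``_dict_merge`` recurses into
# nested dicts instead of replacing them wholesale.
#
#     base = {'a': {'x': 1}, 'b': 2}
#     _dict_merge(base, {'a': {'y': 3}, 'b': 4})
#     # base == {'a': {'x': 1, 'y': 3}, 'b': 4}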
def _parse_schema(schema, method):
"""
Convert a Schema Object to a Python object.
Args:
schema: An ``OrderedDict`` representing the schema object.
"""
if method and schema.get('readOnly', False):
return _READONLY_PROPERTY
# allOf: Must be valid against all of the subschemas
if 'allOf' in schema:
schema_ = copy.deepcopy(schema['allOf'][0])
for x in schema['allOf'][1:]:
_dict_merge(schema_, x)
return _parse_schema(schema_, method)
# anyOf: Must be valid against any of the subschemas
# TODO(stephenfin): Handle anyOf
# oneOf: Must be valid against exactly one of the subschemas
if 'oneOf' in schema:
# we only show the first one since we can't show everything
return _parse_schema(schema['oneOf'][0], method)
if 'enum' in schema:
# we only show the first one since we can't show everything
return schema['enum'][0]
schema_type = schema.get('type', 'object')
if schema_type == 'array':
# special case oneOf and anyOf so that we can show examples for all
# possible combinations
if 'oneOf' in schema['items']:
return [
_parse_schema(x, method) for x in schema['items']['oneOf']
]
if 'anyOf' in schema['items']:
return [
_parse_schema(x, method) for x in schema['items']['anyOf']
]
return [_parse_schema(schema['items'], method)]
if schema_type == 'object':
if method and 'properties' in schema and \
all(v.get('readOnly', False)
for v in schema['properties'].values()):
return _READONLY_PROPERTY
results = []
for name, prop in schema.get('properties', {}).items():
result = _parse_schema(prop, method)
if result != _READONLY_PROPERTY:
results.append((name, result))
return collections.OrderedDict(results)
if (schema_type, schema.get('format')) in _TYPE_MAPPING:
return _TYPE_MAPPING[(schema_type, schema.get('format'))]
return _TYPE_MAPPING[(schema_type, None)] # unrecognized format
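# Illustrative note: ``_parse_schema`` maps a Schema Object to placeholder data
# that is later rendered as a request/response sample, e.g.
#
#     _parse_schema({'type': 'object',
#                    'properties': {'id': {'type': 'integer'},
#                                   'name': {'type': 'string'}}},
#                   method=None)
#     # -> OrderedDict([('id', 1), ('name', 'string')])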
def _example(media_type_objects, method=None, endpoint=None, status=None,
nb_indent=0):
"""
Format examples in `Media Type Object` openapi v3 to HTTP request or
HTTP response example.
If method and endpoint is provided, this function prints a request example
else status should be provided to print a response example.
Arguments:
media_type_objects (Dict[str, Dict]): Dict containing
Media Type Objects.
method: The HTTP method to use in example.
endpoint: The HTTP route to use in example.
status: The HTTP status to use in example.
"""
indent = ' '
extra_indent = indent * nb_indent
if method is not None:
method = method.upper()
else:
try:
# one of possible values for status might be 'default'.
# in the case, just fallback to '-'
status_text = http_status_codes[int(status)]
except (ValueError, KeyError):
status_text = '-'
# Provide request samples for GET requests
if method == 'GET':
media_type_objects[''] = {
'examples': {'Example request': {'value': ''}}}
for content_type, content in media_type_objects.items():
examples = content.get('examples')
example = content.get('example')
# Try to get the example from the schema
if example is None and 'schema' in content:
example = content['schema'].get('example')
if examples is None:
examples = {}
if not example:
if re.match(r"application/[a-zA-Z\+]*json", content_type) is \
None:
LOG.info('skipping non-JSON example generation.')
continue
example = _parse_schema(content['schema'], method=method)
if method is None:
examples['Example response'] = {
'value': example,
}
else:
examples['Example request'] = {
'value': example,
}
for example in examples.values():
# According to OpenAPI v3 specs, string examples should be left unchanged
if not isinstance(example['value'], str):
example['value'] = json.dumps(
example['value'], indent=4, separators=(',', ': '))
for example_name, example in examples.items():
if 'summary' in example:
example_title = '{example_name} - {example[summary]}'.format(
**locals())
else:
example_title = example_name
yield ''
yield '{extra_indent}**{example_title}:**'.format(**locals())
yield ''
yield '{extra_indent}.. sourcecode:: http'.format(**locals())
yield ''
# Print http request example
if method:
yield '{extra_indent}{indent}{method} {endpoint} HTTP/1.1' \
.format(**locals())
yield '{extra_indent}{indent}Host: example.com' \
.format(**locals())
if content_type:
yield '{extra_indent}{indent}Content-Type: {content_type}'\
.format(**locals())
# Print http response example
else:
yield '{extra_indent}{indent}HTTP/1.1 {status} {status_text}' \
.format(**locals())
yield '{extra_indent}{indent}Content-Type: {content_type}' \
.format(**locals())
yield ''
for example_line in example['value'].splitlines():
yield '{extra_indent}{indent}{example_line}'.format(**locals())
if example['value'].splitlines():
yield ''
def convert_json_schema(schema, directive=':<json'):
"""
Convert json schema to `:<json` sphinx httpdomain.
"""
output = []
def _convert(schema, name='', required=False):
"""
Fill the output list, with 2-tuple (name, template)
i.e: ('user.age', 'str user.age: the age of user')
This allow to sort output by field name
"""
type_ = schema.get('type', 'any')
required_properties = schema.get('required', ())
if type_ == 'object' and schema.get('properties'):
for prop, next_schema in schema.get('properties', {}).items():
_convert(
next_schema, '{name}.{prop}'.format(**locals()),
(prop in required_properties))
elif type_ == 'array':
_convert(schema['items'], name + '[]')
else:
if name:
name = name.lstrip('.')
constraints = []
if required:
constraints.append('required')
if schema.get('readOnly', False):
constraints.append('read only')
if constraints:
constraints = '({})'.format(', '.join(constraints))
else:
constraints = ''
if schema.get('description', ''):
if constraints:
output.append((
name,
'{type_} {name}:'
' {schema[description]}'
' {constraints}'.format(**locals())))
else:
output.append((
name,
'{type_} {name}:'
' {schema[description]}'.format(**locals())))
else:
if constraints:
output.append(
(name,
'{type_} {name}:'
' {constraints}'.format(**locals())))
else:
output.append(
(name,
'{type_} {name}:'.format(**locals())))
_convert(schema)
for _, render in sorted(output):
yield '{} {}'.format(directive, render)
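# Illustrative note: ``convert_json_schema`` yields one directive line per
# flattened property, sorted by field name, e.g.
#
#     schema = {'type': 'object', 'required': ['name'],
#               'properties': {'name': {'type': 'string',
#                                       'description': 'User name'}}}
#     list(convert_json_schema(schema))
#     # -> [':<json string name: User name (required)']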
def _httpresource(endpoint, method, properties, convert, render_examples,
render_request):
# https://github.com/OAI/OpenAPI-Specification/blob/3.0.2/versions/3.0.0.md#operation-object
parameters = properties.get('parameters', [])
responses = properties['responses']
query_param_examples = []
indent = ' '
yield '.. http:{0}:: {1}'.format(method, endpoint)
yield ' :synopsis: {0}'.format(properties.get('summary', 'null'))
yield ''
if 'summary' in properties:
for line in properties['summary'].splitlines():
yield '{indent}**{line}**'.format(**locals())
yield ''
if 'description' in properties:
for line in convert(properties['description']).splitlines():
yield '{indent}{line}'.format(**locals())
yield ''
# print request's path params
for param in filter(lambda p: p['in'] == 'path', parameters):
yield indent + ':param {type} {name}:'.format(
type=param['schema']['type'],
name=param['name'])
for line in convert(param.get('description', '')).splitlines():
yield '{indent}{indent}{line}'.format(**locals())
# print request's query params
for param in filter(lambda p: p['in'] == 'query', parameters):
yield indent + ':query {type} {name}:'.format(
type=param['schema']['type'],
name=param['name'])
for line in convert(param.get('description', '')).splitlines():
yield '{indent}{indent}{line}'.format(**locals())
if param.get('required', False):
yield '{indent}{indent}(Required)'.format(**locals())
example = _parse_schema(param['schema'], method)
example = param.get('example', example)
if param.get('explode', False) and isinstance(example, list):
for v in example:
query_param_examples.append((param['name'], v))
elif param.get('explode', False) and isinstance(example, dict):
for k, v in example.items():
query_param_examples.append((k, v))
else:
query_param_examples.append((param['name'], example))
# print request content
if render_request:
request_content = properties.get('requestBody', {}).get('content', {})
if request_content and 'application/json' in request_content:
schema = request_content['application/json']['schema']
yield ''
for line in convert_json_schema(schema):
yield '{indent}{line}'.format(**locals())
yield ''
req_properties = json.dumps(schema.get('properties', {}), indent=2,
separators=(',', ':'))
yield '{indent}**Request body:**'.format(**locals())
yield ''
yield '{indent}.. sourcecode:: json'.format(**locals())
yield ''
for line in req_properties.splitlines():
# yield indent + line
yield '{indent}{indent}{line}'.format(**locals())
# yield ''
# print request example
if render_examples:
endpoint_examples = endpoint
if query_param_examples:
endpoint_examples = endpoint + "?" + \
parse.urlencode(query_param_examples)
# print request example
request_content = properties.get('requestBody', {}).get('content', {})
for line in _example(
request_content,
method,
endpoint=endpoint_examples,
nb_indent=1):
yield line
# print response status codes
for status, response in responses.items():
yield '{indent}:status {status}:'.format(**locals())
for line in convert(response['description']).splitlines():
yield '{indent}{indent}{line}'.format(**locals())
# print response example
if render_examples:
for line in _example(
response.get('content', {}), status=status, nb_indent=2):
yield line
# print request header params
for param in filter(lambda p: p['in'] == 'header', parameters):
yield indent + ':reqheader {name}:'.format(**param)
for line in convert(param.get('description', '')).splitlines():
yield '{indent}{indent}{line}'.format(**locals())
if param.get('required', False):
yield '{indent}{indent}(Required)'.format(**locals())
# print response headers
for status, response in responses.items():
for headername, header in response.get('headers', {}).items():
yield indent + ':resheader {name}:'.format(name=headername)
for line in convert(header['description']).splitlines():
yield '{indent}{indent}{line}'.format(**locals())
for cb_name, cb_specs in properties.get('callbacks', {}).items():
yield ''
yield indent + '.. admonition:: Callback: ' + cb_name
yield ''
for cb_endpoint in cb_specs.keys():
for cb_method, cb_properties in cb_specs[cb_endpoint].items():
for line in _httpresource(
cb_endpoint,
cb_method,
cb_properties,
convert=convert,
render_examples=render_examples,
render_request=render_request):
if line:
yield indent+indent+line
else:
yield ''
yield ''
def _header(title):
yield title
yield '=' * len(title)
yield ''
def openapihttpdomain(spec, **options):
generators = []
# OpenAPI spec may contain JSON references, common properties, etc.
# Trying to render the spec "As Is" will require to put multiple
# if-s around the code. In order to simplify flow, let's make the
# spec to have only one (expected) schema, i.e. normalize it.
utils.normalize_spec(spec, **options)
# Paths list to be processed
paths = []
# If 'paths' are passed we've got to ensure they exist within an OpenAPI
# spec; otherwise raise error and ask user to fix that.
if 'paths' in options:
if not set(options['paths']).issubset(spec['paths']):
raise ValueError(
'One or more paths are not defined in the spec: %s.' % (
', '.join(set(options['paths']) - set(spec['paths'])),
)
)
paths = options['paths']
# Check against regular expressions to be included
if 'include' in options:
for i in options['include']:
ir = re.compile(i)
for path in spec['paths']:
if ir.match(path):
paths.append(path)
# If no include nor paths option, then take full path
if 'include' not in options and 'paths' not in options:
paths = spec['paths']
# Remove paths matching regexp
if 'exclude' in options:
_paths = []
for e in options['exclude']:
er = re.compile(e)
for path in paths:
if not er.match(path):
_paths.append(path)
paths = _paths
render_request = False
if 'request' in options:
render_request = True
convert = utils.get_text_converter(options)
# https://github.com/OAI/OpenAPI-Specification/blob/3.0.2/versions/3.0.0.md#paths-object
if 'group' in options:
groups = collections.OrderedDict(
[(x['name'], []) for x in spec.get('tags', {})]
)
for endpoint in paths:
for method, properties in spec['paths'][endpoint].items():
key = properties.get('tags', [''])[0]
groups.setdefault(key, []).append(_httpresource(
endpoint,
method,
properties,
convert,
render_examples='examples' in options,
render_request=render_request))
for key in groups.keys():
if key:
generators.append(_header(key))
else:
generators.append(_header('default'))
generators.extend(groups[key])
else:
for endpoint in paths:
for method, properties in spec['paths'][endpoint].items():
generators.append(_httpresource(
endpoint,
method,
properties,
convert,
render_examples='examples' in options,
render_request=render_request))
return iter(itertools.chain(*generators))
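# Illustrative usage sketch (spec and option values are hypothetical): render a
# subset of the spec, grouped by tag, with generated request/response samples.
#
#     lines = openapihttpdomain(spec, paths=['/pets'], group=True,
#                               examples=True, request=True)
#     print('\n'.join(lines))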
|
py | 1a2f1ae20a49098d260527b2657c7419d419c579 |
from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
def send_email(name, receiver):
# Creating message subject and sender
subject = 'Welcome to Our Neighbourhood'
sender = '[email protected]'
    # passing in the context variables
text_content = render_to_string('email/notification.txt',{"name": name})
html_content = render_to_string('email/notification.html',{"name": name})
msg = EmailMultiAlternatives(subject,text_content,sender,[receiver])
msg.attach_alternative(html_content,'text/html')
msg.send()
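# Usage sketch (hypothetical values): send_email('Jane', 'jane@example.com')
# sends the rendered notification, assuming Django settings are configured
# (EMAIL_BACKEND, a valid sender address) and that the templates
# 'email/notification.txt' and 'email/notification.html' exist.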
|
py | 1a2f1b41bca226cb4c68047e44cadac023211df9 | """Project: Eskapade - A python-based package for data analysis.
Module: spark_analysis.data_conversion
Created: 2017/05/30
Description:
Converters between Spark, Pandas, and Python data formats
Authors:
KPMG Advanced Analytics & Big Data team, Amstelveen, The Netherlands
Redistribution and use in source and binary forms, with or without
modification, are permitted according to the terms listed in the file
LICENSE.
"""
import uuid
import pyspark
from eskapadespark.helpers import apply_transform_funcs
from eskapade.logger import Logger
SPARK_SQL_TYPES = pyspark.sql.types._type_mappings
logger = Logger()
def create_spark_df(spark, data, schema=None, process_methods=None, **kwargs):
"""Create a Spark data frame from data in a different format.
A Spark data frame is created with either a specified schema or a schema
inferred from the input data. The schema can be specified with the
keyword argument "schema".
Functions to transform the data frame after creation can be specified by
the keyword argument "process_methods". The value of this argument is
an iterable of (function, arguments, keyword arguments) tuples to apply.
The data frame is created with the createDataFrame function of the
SparkSession. Remaining keyword arguments are passed to this function.
>>> spark = pyspark.sql.SparkSession.builder.getOrCreate()
>>> df = create_spark_df(spark,
>>> [[1, 1.1, 'one'], [2, 2.2, 'two']],
>>> schema=['int', 'float', 'str'],
>>> process_methods=[('repartition', (), {'numPartitions': 6})])
>>> df.show()
+---+-----+---+
|int|float|str|
+---+-----+---+
| 2| 2.2|two|
| 1| 1.1|one|
+---+-----+---+
:param pyspark.sql.SparkSession spark: SparkSession instance
:param data: input dataset
:param schema: schema of created data frame
:param iterable process_methods: methods to apply on the data frame after creation
:returns: created data frame
:rtype: pyspark.sql.DataFrame
"""
# check if data-frame schema was provided
if isinstance(schema, int):
# infer schema from a single row (prevents Spark >= 1.6.1 from checking schema of all rows)
def get_row(data, ind):
"""Get row."""
try:
return data.iloc[ind].tolist()
except AttributeError:
pass
try:
row = data.first()
if ind > 0:
logger.warning('Inferring data-frame schema from first row, instead of row with index {i:d}', i=ind)
return row
except AttributeError:
pass
try:
return data[ind]
except TypeError:
raise TypeError('Unable to get row from data of type "{!s}" to infer schema.'.format(type(data)))
row = get_row(data, schema)
def to_python_type(var):
"""Get item."""
try:
return var.item()
except AttributeError:
return var
schema = pyspark.sql.types._infer_schema(tuple(to_python_type(it) for it in row))
try:
for t, n in zip(schema.fields, data.columns):
t.name = str(n)
except AttributeError:
pass
elif isinstance(schema, dict):
# create schema from dictionary of (name, data type) pairs
schema = df_schema(schema)
kwargs['schema'] = schema
# check if input is a data frame
if isinstance(data, pyspark.sql.DataFrame):
if not kwargs['schema']:
kwargs['schema'] = data.schema
data = data.rdd
# create and transform data frame
df = spark.createDataFrame(data, **kwargs)
if process_methods:
df = apply_transform_funcs(df, process_methods)
return df
def df_schema(schema_spec):
"""Create Spark data-frame schema.
Create a schema for a Spark data frame from a dictionary of (name, data
type) pairs, describing the columns. Data types are specified by Python
types or by Spark-SQL types from the pyspark.sql.types module.
>>> from collections import OrderedDict as odict
>>> schema_dict = odict()
>>> schema_dict['foo'] = pyspark.sql.types.IntegerType()
>>> schema_dict['bar'] = odict([('descr', str), ('val', float)])
>>> print(schema_dict)
OrderedDict([('foo', IntegerType), ('bar', OrderedDict([('descr', <class 'str'>), ('val', <class 'float'>)]))])
>>> spark = pyspark.sql.SparkSession.builder.getOrCreate()
>>> df = spark.createDataFrame([(1, ('one', 1.1)), (2, ('two', 2.2))], schema=df_schema(schema_dict))
>>> df.show()
+---+---------+
|foo| bar|
+---+---------+
| 1|[one,1.1]|
| 2|[two,2.2]|
+---+---------+
:param dict schema_spec: schema specification
:returns: data-frame schema
:rtype: pyspark.sql.types.StructType
:raises: TypeError if data type is specified incorrectly
"""
def get_field(name, data_type):
"""Return a struct field for specified data type."""
# treat dictionaries as struct types
if isinstance(data_type, dict):
data_type = pyspark.sql.types.StructType([get_field(*spec) for spec in data_type.items()])
# convert Python types to Spark-SQL types
data_type = SPARK_SQL_TYPES.get(data_type, data_type)
# convert Spark-SQL type classes to Spark-SQL types
if isinstance(data_type, type) and issubclass(data_type, pyspark.sql.types.DataType):
data_type = data_type()
# check and return data type
if not isinstance(data_type, pyspark.sql.types.DataType):
raise TypeError('Type specifications for data-frame schemas must be DataTypes or dictionaries')
return pyspark.sql.types.StructField(str(name), data_type)
# return a struct type with a list of struct fields for specified data types
return pyspark.sql.types.StructType([get_field(*spec) for spec in schema_spec.items()])
def hive_table_from_df(spark, df, db, table):
"""Create a Hive table from a Spark data frame.
:param pyspark.sql.SparkSession spark: SparkSession instance
:param pyspark.sql.DataFrame df: input data frame
:param str db: database for table
:param str table: name of table
"""
# register temporary table
temp_name = '{0:s}_{1:s}'.format(table, uuid.uuid4().hex)
df.createOrReplaceTempView(temp_name)
# create table
table_spec = '.'.join(s for s in (db, table) if s)
create_table_query = 'CREATE TABLE {spec} AS SELECT {cols} FROM {name}'\
.format(name=temp_name, spec=table_spec, cols=', '.join(c for c in df.columns))
logger.debug(create_table_query)
spark.sql(create_table_query)
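# Usage sketch for hive_table_from_df (assumes a Hive-enabled SparkSession and
# write access to the target database; the names below are hypothetical):
#
# >>> spark = pyspark.sql.SparkSession.builder.enableHiveSupport().getOrCreate()
# >>> df = create_spark_df(spark, [[1, 'a'], [2, 'b']], schema=['id', 'val'])
# >>> hive_table_from_df(spark, df, db='analytics', table='example_table')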
|
py | 1a2f1ba9fc07fd45ad77d1130f313f8ae5c254b2 | from sympy.combinatorics import Permutation as Perm
from sympy.combinatorics.perm_groups import PermutationGroup
from sympy.core import Basic, Tuple
from sympy.core.compatibility import as_int
from sympy.sets import FiniteSet
from sympy.utilities.iterables import (minlex, unflatten, flatten)
rmul = Perm.rmul
class Polyhedron(Basic):
"""
Represents the polyhedral symmetry group (PSG).
Explanation
===========
The PSG is one of the symmetry groups of the Platonic solids.
There are three polyhedral groups: the tetrahedral group
of order 12, the octahedral group of order 24, and the
icosahedral group of order 60.
All doctests have been given in the docstring of the
constructor of the object.
References
==========
.. [1] http://mathworld.wolfram.com/PolyhedralGroup.html
"""
_edges = None
def __new__(cls, corners, faces=[], pgroup=[]):
"""
The constructor of the Polyhedron group object.
Explanation
===========
It takes up to three parameters: the corners, faces, and
allowed transformations.
The corners/vertices are entered as a list of arbitrary
expressions that are used to identify each vertex.
The faces are entered as a list of tuples of indices; a tuple
of indices identifies the vertices which define the face. They
should be entered in a cw or ccw order; they will be standardized
        by reversal and rotation to give the lowest lexical ordering.
If no faces are given then no edges will be computed.
>>> from sympy.combinatorics.polyhedron import Polyhedron
>>> Polyhedron(list('abc'), [(1, 2, 0)]).faces
FiniteSet((0, 1, 2))
>>> Polyhedron(list('abc'), [(1, 0, 2)]).faces
FiniteSet((0, 1, 2))
The allowed transformations are entered as allowable permutations
        of the vertices for the polyhedron. Instances of Permutation
        (as with faces) should refer to the supplied vertices by index.
        These permutations are stored as a PermutationGroup.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.interactive import init_printing
>>> from sympy.abc import w, x, y, z
>>> init_printing(pretty_print=False, perm_cyclic=False)
Here we construct the Polyhedron object for a tetrahedron.
>>> corners = [w, x, y, z]
>>> faces = [(0, 1, 2), (0, 2, 3), (0, 3, 1), (1, 2, 3)]
Next, allowed transformations of the polyhedron must be given. This
is given as permutations of vertices.
Although the vertices of a tetrahedron can be numbered in 24 (4!)
different ways, there are only 12 different orientations for a
physical tetrahedron. The following permutations, applied once or
twice, will generate all 12 of the orientations. (The identity
permutation, Permutation(range(4)), is not included since it does
not change the orientation of the vertices.)
>>> pgroup = [Permutation([[0, 1, 2], [3]]), \
Permutation([[0, 1, 3], [2]]), \
Permutation([[0, 2, 3], [1]]), \
Permutation([[1, 2, 3], [0]]), \
Permutation([[0, 1], [2, 3]]), \
Permutation([[0, 2], [1, 3]]), \
Permutation([[0, 3], [1, 2]])]
The Polyhedron is now constructed and demonstrated:
>>> tetra = Polyhedron(corners, faces, pgroup)
>>> tetra.size
4
>>> tetra.edges
FiniteSet((0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3))
>>> tetra.corners
(w, x, y, z)
It can be rotated with an arbitrary permutation of vertices, e.g.
the following permutation is not in the pgroup:
>>> tetra.rotate(Permutation([0, 1, 3, 2]))
>>> tetra.corners
(w, x, z, y)
An allowed permutation of the vertices can be constructed by
repeatedly applying permutations from the pgroup to the vertices.
Here is a demonstration that applying p and p**2 for every p in
pgroup generates all the orientations of a tetrahedron and no others:
>>> all = ( (w, x, y, z), \
(x, y, w, z), \
(y, w, x, z), \
(w, z, x, y), \
(z, w, y, x), \
(w, y, z, x), \
(y, z, w, x), \
(x, z, y, w), \
(z, y, x, w), \
(y, x, z, w), \
(x, w, z, y), \
(z, x, w, y) )
>>> got = []
>>> for p in (pgroup + [p**2 for p in pgroup]):
... h = Polyhedron(corners)
... h.rotate(p)
... got.append(h.corners)
...
>>> set(got) == set(all)
True
The make_perm method of a PermutationGroup will randomly pick
permutations, multiply them together, and return the permutation that
can be applied to the polyhedron to give the orientation produced
by those individual permutations.
Here, 3 permutations are used:
>>> tetra.pgroup.make_perm(3) # doctest: +SKIP
Permutation([0, 3, 1, 2])
To select the permutations that should be used, supply a list
of indices to the permutations in pgroup in the order they should
be applied:
>>> use = [0, 0, 2]
>>> p002 = tetra.pgroup.make_perm(3, use)
>>> p002
Permutation([1, 0, 3, 2])
Apply them one at a time:
>>> tetra.reset()
>>> for i in use:
... tetra.rotate(pgroup[i])
...
>>> tetra.vertices
(x, w, z, y)
>>> sequentially = tetra.vertices
Apply the composite permutation:
>>> tetra.reset()
>>> tetra.rotate(p002)
>>> tetra.corners
(x, w, z, y)
>>> tetra.corners in all and tetra.corners == sequentially
True
Notes
=====
Defining permutation groups
---------------------------
        It is not necessary to enter any permutations, nor is it necessary to
enter a complete set of transformations. In fact, for a polyhedron,
all configurations can be constructed from just two permutations.
For example, the orientations of a tetrahedron can be generated from
an axis passing through a vertex and face and another axis passing
through a different vertex or from an axis passing through the
midpoints of two edges opposite of each other.
For simplicity of presentation, consider a square --
not a cube -- with vertices 1, 2, 3, and 4:
1-----2 We could think of axes of rotation being:
| | 1) through the face
| | 2) from midpoint 1-2 to 3-4 or 1-3 to 2-4
3-----4 3) lines 1-4 or 2-3
To determine how to write the permutations, imagine 4 cameras,
one at each corner, labeled A-D:
A B A B
1-----2 1-----3 vertex index:
| | | | 1 0
| | | | 2 1
3-----4 2-----4 3 2
C D C D 4 3
original after rotation
along 1-4
A diagonal and a face axis will be chosen for the "permutation group"
from which any orientation can be constructed.
>>> pgroup = []
Imagine a clockwise rotation when viewing 1-4 from camera A. The new
orientation is (in camera-order): 1, 3, 2, 4 so the permutation is
given using the *indices* of the vertices as:
>>> pgroup.append(Permutation((0, 2, 1, 3)))
Now imagine rotating clockwise when looking down an axis entering the
center of the square as viewed. The new camera-order would be
3, 1, 4, 2 so the permutation is (using indices):
>>> pgroup.append(Permutation((2, 0, 3, 1)))
The square can now be constructed:
** use real-world labels for the vertices, entering them in
camera order
** for the faces we use zero-based indices of the vertices
in *edge-order* as the face is traversed; neither the
direction nor the starting point matter -- the faces are
only used to define edges (if so desired).
>>> square = Polyhedron((1, 2, 3, 4), [(0, 1, 3, 2)], pgroup)
To rotate the square with a single permutation we can do:
>>> square.rotate(square.pgroup[0])
>>> square.corners
(1, 3, 2, 4)
To use more than one permutation (or to use one permutation more
than once) it is more convenient to use the make_perm method:
>>> p011 = square.pgroup.make_perm([0, 1, 1]) # diag flip + 2 rotations
>>> square.reset() # return to initial orientation
>>> square.rotate(p011)
>>> square.corners
(4, 2, 3, 1)
Thinking outside the box
------------------------
Although the Polyhedron object has a direct physical meaning, it
actually has broader application. In the most general sense it is
just a decorated PermutationGroup, allowing one to connect the
permutations to something physical. For example, a Rubik's cube is
not a proper polyhedron, but the Polyhedron class can be used to
represent it in a way that helps to visualize the Rubik's cube.
>>> from sympy.utilities.iterables import flatten, unflatten
>>> from sympy import symbols
>>> from sympy.combinatorics import RubikGroup
>>> facelets = flatten([symbols(s+'1:5') for s in 'UFRBLD'])
>>> def show():
... pairs = unflatten(r2.corners, 2)
... print(pairs[::2])
... print(pairs[1::2])
...
>>> r2 = Polyhedron(facelets, pgroup=RubikGroup(2))
>>> show()
[(U1, U2), (F1, F2), (R1, R2), (B1, B2), (L1, L2), (D1, D2)]
[(U3, U4), (F3, F4), (R3, R4), (B3, B4), (L3, L4), (D3, D4)]
>>> r2.rotate(0) # cw rotation of F
>>> show()
[(U1, U2), (F3, F1), (U3, R2), (B1, B2), (L1, D1), (R3, R1)]
[(L4, L2), (F4, F2), (U4, R4), (B3, B4), (L3, D2), (D3, D4)]
Predefined Polyhedra
====================
For convenience, the vertices and faces are defined for the following
standard solids along with a permutation group for transformations.
When the polyhedron is oriented as indicated below, the vertices in
a given horizontal plane are numbered in ccw direction, starting from
the vertex that will give the lowest indices in a given face. (In the
net of the vertices, indices preceded by "-" indicate replication of
the lhs index in the net.)
tetrahedron, tetrahedron_faces
------------------------------
4 vertices (vertex up) net:
0 0-0
1 2 3-1
4 faces:
(0, 1, 2) (0, 2, 3) (0, 3, 1) (1, 2, 3)
cube, cube_faces
----------------
8 vertices (face up) net:
0 1 2 3-0
4 5 6 7-4
6 faces:
(0, 1, 2, 3)
(0, 1, 5, 4) (1, 2, 6, 5) (2, 3, 7, 6) (0, 3, 7, 4)
(4, 5, 6, 7)
octahedron, octahedron_faces
----------------------------
6 vertices (vertex up) net:
0 0 0-0
1 2 3 4-1
5 5 5-5
8 faces:
(0, 1, 2) (0, 2, 3) (0, 3, 4) (0, 1, 4)
(1, 2, 5) (2, 3, 5) (3, 4, 5) (1, 4, 5)
dodecahedron, dodecahedron_faces
--------------------------------
20 vertices (vertex up) net:
0 1 2 3 4 -0
5 6 7 8 9 -5
14 10 11 12 13-14
15 16 17 18 19-15
12 faces:
(0, 1, 2, 3, 4) (0, 1, 6, 10, 5) (1, 2, 7, 11, 6)
(2, 3, 8, 12, 7) (3, 4, 9, 13, 8) (0, 4, 9, 14, 5)
(5, 10, 16, 15, 14) (6, 10, 16, 17, 11) (7, 11, 17, 18, 12)
(8, 12, 18, 19, 13) (9, 13, 19, 15, 14)(15, 16, 17, 18, 19)
icosahedron, icosahedron_faces
------------------------------
12 vertices (face up) net:
0 0 0 0 -0
1 2 3 4 5 -1
6 7 8 9 10 -6
11 11 11 11 -11
20 faces:
(0, 1, 2) (0, 2, 3) (0, 3, 4)
(0, 4, 5) (0, 1, 5) (1, 2, 6)
(2, 3, 7) (3, 4, 8) (4, 5, 9)
(1, 5, 10) (2, 6, 7) (3, 7, 8)
(4, 8, 9) (5, 9, 10) (1, 6, 10)
(6, 7, 11) (7, 8, 11) (8, 9, 11)
(9, 10, 11) (6, 10, 11)
>>> from sympy.combinatorics.polyhedron import cube
>>> cube.edges
FiniteSet((0, 1), (0, 3), (0, 4), (1, 2), (1, 5), (2, 3), (2, 6), (3, 7), (4, 5), (4, 7), (5, 6), (6, 7))
If you want to use letters or other names for the corners you
can still use the pre-calculated faces:
>>> corners = list('abcdefgh')
>>> Polyhedron(corners, cube.faces).corners
(a, b, c, d, e, f, g, h)
References
==========
.. [1] www.ocf.berkeley.edu/~wwu/articles/platonicsolids.pdf
"""
faces = [minlex(f, directed=False, is_set=True) for f in faces]
corners, faces, pgroup = args = \
[Tuple(*a) for a in (corners, faces, pgroup)]
obj = Basic.__new__(cls, *args)
obj._corners = tuple(corners) # in order given
obj._faces = FiniteSet(*faces)
if pgroup and pgroup[0].size != len(corners):
raise ValueError("Permutation size unequal to number of corners.")
# use the identity permutation if none are given
obj._pgroup = PermutationGroup(
pgroup or [Perm(range(len(corners)))] )
return obj
@property
def corners(self):
"""
Get the corners of the Polyhedron.
The method ``vertices`` is an alias for ``corners``.
Examples
========
>>> from sympy.combinatorics import Polyhedron
>>> from sympy.abc import a, b, c, d
>>> p = Polyhedron(list('abcd'))
>>> p.corners == p.vertices == (a, b, c, d)
True
See Also
========
array_form, cyclic_form
"""
return self._corners
vertices = corners
@property
def array_form(self):
"""Return the indices of the corners.
The indices are given relative to the original position of corners.
Examples
========
>>> from sympy.combinatorics.polyhedron import tetrahedron
>>> tetrahedron = tetrahedron.copy()
>>> tetrahedron.array_form
[0, 1, 2, 3]
>>> tetrahedron.rotate(0)
>>> tetrahedron.array_form
[0, 2, 3, 1]
>>> tetrahedron.pgroup[0].array_form
[0, 2, 3, 1]
See Also
========
corners, cyclic_form
"""
corners = list(self.args[0])
return [corners.index(c) for c in self.corners]
@property
def cyclic_form(self):
"""Return the indices of the corners in cyclic notation.
The indices are given relative to the original position of corners.
See Also
========
corners, array_form
"""
return Perm._af_new(self.array_form).cyclic_form
@property
def size(self):
"""
Get the number of corners of the Polyhedron.
"""
return len(self._corners)
@property
def faces(self):
"""
Get the faces of the Polyhedron.
"""
return self._faces
@property
def pgroup(self):
"""
Get the permutations of the Polyhedron.
"""
return self._pgroup
@property
def edges(self):
"""
Given the faces of the polyhedra we can get the edges.
Examples
========
>>> from sympy.combinatorics import Polyhedron
>>> from sympy.abc import a, b, c
>>> corners = (a, b, c)
>>> faces = [(0, 1, 2)]
>>> Polyhedron(corners, faces).edges
FiniteSet((0, 1), (0, 2), (1, 2))
"""
if self._edges is None:
output = set()
for face in self.faces:
for i in range(len(face)):
edge = tuple(sorted([face[i], face[i - 1]]))
output.add(edge)
self._edges = FiniteSet(*output)
return self._edges
def rotate(self, perm):
"""
Apply a permutation to the polyhedron *in place*. The permutation
may be given as a Permutation instance or an integer indicating
which permutation from pgroup of the Polyhedron should be
applied.
This is an operation that is analogous to rotation about
an axis by a fixed increment.
Notes
=====
When a Permutation is applied, no check is done to see if that
is a valid permutation for the Polyhedron. For example, a cube
could be given a permutation which effectively swaps only 2
vertices. A valid permutation (that rotates the object in a
physical way) will be obtained if one only uses
permutations from the ``pgroup`` of the Polyhedron. On the other
hand, allowing arbitrary rotations (applications of permutations)
gives a way to follow named elements rather than indices since
Polyhedron allows vertices to be named while Permutation works
only with indices.
Examples
========
>>> from sympy.combinatorics import Polyhedron, Permutation
>>> from sympy.combinatorics.polyhedron import cube
>>> cube = cube.copy()
>>> cube.corners
(0, 1, 2, 3, 4, 5, 6, 7)
>>> cube.rotate(0)
>>> cube.corners
(1, 2, 3, 0, 5, 6, 7, 4)
A non-physical "rotation" that is not prohibited by this method:
>>> cube.reset()
>>> cube.rotate(Permutation([[1, 2]], size=8))
>>> cube.corners
(0, 2, 1, 3, 4, 5, 6, 7)
Polyhedron can be used to follow elements of set that are
identified by letters instead of integers:
>>> shadow = h5 = Polyhedron(list('abcde'))
>>> p = Permutation([3, 0, 1, 2, 4])
>>> h5.rotate(p)
>>> h5.corners
(d, a, b, c, e)
>>> _ == shadow.corners
True
>>> copy = h5.copy()
>>> h5.rotate(p)
>>> h5.corners == copy.corners
False
"""
if not isinstance(perm, Perm):
perm = self.pgroup[perm]
# and we know it's valid
else:
if perm.size != self.size:
raise ValueError('Polyhedron and Permutation sizes differ.')
a = perm.array_form
corners = [self.corners[a[i]] for i in range(len(self.corners))]
self._corners = tuple(corners)
def reset(self):
"""Return corners to their original positions.
Examples
========
>>> from sympy.combinatorics.polyhedron import tetrahedron as T
>>> T = T.copy()
>>> T.corners
(0, 1, 2, 3)
>>> T.rotate(0)
>>> T.corners
(0, 2, 3, 1)
>>> T.reset()
>>> T.corners
(0, 1, 2, 3)
"""
self._corners = self.args[0]
def _pgroup_calcs():
"""Return the permutation groups for each of the polyhedra and the face
definitions: tetrahedron, cube, octahedron, dodecahedron, icosahedron,
tetrahedron_faces, cube_faces, octahedron_faces, dodecahedron_faces,
icosahedron_faces
Explanation
===========
(This author didn't find and didn't know of a better way to do it though
there likely is such a way.)
Although only 2 permutations are needed for a polyhedron in order to
generate all the possible orientations, a group of permutations is
provided instead. A set of permutations is called a "group" if::
a*b = c (for any pair of permutations in the group, a and b, their
product, c, is in the group)
a*(b*c) = (a*b)*c (for any 3 permutations in the group associativity holds)
there is an identity permutation, I, such that I*a = a*I for all elements
in the group
a*b = I (the inverse of each permutation is also in the group)
None of the polyhedron groups defined follow these definitions of a group.
Instead, they are selected to contain those permutations whose powers
alone will construct all orientations of the polyhedron, i.e. for
permutations ``a``, ``b``, etc... in the group, ``a, a**2, ..., a**o_a``,
``b, b**2, ..., b**o_b``, etc... (where ``o_i`` is the order of
permutation ``i``) generate all permutations of the polyhedron instead of
mixed products like ``a*b``, ``a*b**2``, etc....
Note that for a polyhedron with n vertices, the valid permutations of the
vertices exclude those that do not maintain its faces. e.g. the
permutation BCDE of a square's four corners, ABCD, is a valid
permutation while CBDE is not (because this would twist the square).
Examples
========
The is_group checks for: closure, the presence of the Identity permutation,
and the presence of the inverse for each of the elements in the group. This
confirms that none of the polyhedra are true groups:
>>> from sympy.combinatorics.polyhedron import (
... tetrahedron, cube, octahedron, dodecahedron, icosahedron)
...
>>> polyhedra = (tetrahedron, cube, octahedron, dodecahedron, icosahedron)
>>> [h.pgroup.is_group for h in polyhedra]
...
[True, True, True, True, True]
Although tests in polyhedron's test suite check that powers of the
permutations in the groups generate all permutations of the vertices
of the polyhedron, here we also demonstrate the powers of the given
permutations create a complete group for the tetrahedron:
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> for h in polyhedra[:1]:
... G = h.pgroup
... perms = set()
... for g in G:
... for e in range(g.order()):
... p = tuple((g**e).array_form)
... perms.add(p)
...
... perms = [Permutation(p) for p in perms]
... assert PermutationGroup(perms).is_group
In addition to doing the above, the tests in the suite confirm that the
faces are all present after the application of each permutation.
References
==========
.. [1] http://dogschool.tripod.com/trianglegroup.html
"""
def _pgroup_of_double(polyh, ordered_faces, pgroup):
n = len(ordered_faces[0])
        # the vertices of the double which sits inside a given polyhedron
# can be found by tracking the faces of the outer polyhedron.
# A map between face and the vertex of the double is made so that
# after rotation the position of the vertices can be located
fmap = dict(zip(ordered_faces,
range(len(ordered_faces))))
flat_faces = flatten(ordered_faces)
new_pgroup = []
for i, p in enumerate(pgroup):
h = polyh.copy()
h.rotate(p)
c = h.corners
# reorder corners in the order they should appear when
# enumerating the faces
reorder = unflatten([c[j] for j in flat_faces], n)
# make them canonical
reorder = [tuple(map(as_int,
minlex(f, directed=False, is_set=True)))
for f in reorder]
# map face to vertex: the resulting list of vertices are the
# permutation that we seek for the double
new_pgroup.append(Perm([fmap[f] for f in reorder]))
return new_pgroup
tetrahedron_faces = [
(0, 1, 2), (0, 2, 3), (0, 3, 1), # upper 3
(1, 2, 3), # bottom
]
# cw from top
#
_t_pgroup = [
Perm([[1, 2, 3], [0]]), # cw from top
Perm([[0, 1, 2], [3]]), # cw from front face
Perm([[0, 3, 2], [1]]), # cw from back right face
Perm([[0, 3, 1], [2]]), # cw from back left face
Perm([[0, 1], [2, 3]]), # through front left edge
Perm([[0, 2], [1, 3]]), # through front right edge
Perm([[0, 3], [1, 2]]), # through back edge
]
tetrahedron = Polyhedron(
range(4),
tetrahedron_faces,
_t_pgroup)
cube_faces = [
(0, 1, 2, 3), # upper
(0, 1, 5, 4), (1, 2, 6, 5), (2, 3, 7, 6), (0, 3, 7, 4), # middle 4
(4, 5, 6, 7), # lower
]
# U, D, F, B, L, R = up, down, front, back, left, right
_c_pgroup = [Perm(p) for p in
[
[1, 2, 3, 0, 5, 6, 7, 4], # cw from top, U
[4, 0, 3, 7, 5, 1, 2, 6], # cw from F face
[4, 5, 1, 0, 7, 6, 2, 3], # cw from R face
[1, 0, 4, 5, 2, 3, 7, 6], # cw through UF edge
[6, 2, 1, 5, 7, 3, 0, 4], # cw through UR edge
[6, 7, 3, 2, 5, 4, 0, 1], # cw through UB edge
[3, 7, 4, 0, 2, 6, 5, 1], # cw through UL edge
[4, 7, 6, 5, 0, 3, 2, 1], # cw through FL edge
[6, 5, 4, 7, 2, 1, 0, 3], # cw through FR edge
[0, 3, 7, 4, 1, 2, 6, 5], # cw through UFL vertex
[5, 1, 0, 4, 6, 2, 3, 7], # cw through UFR vertex
[5, 6, 2, 1, 4, 7, 3, 0], # cw through UBR vertex
[7, 4, 0, 3, 6, 5, 1, 2], # cw through UBL
]]
cube = Polyhedron(
range(8),
cube_faces,
_c_pgroup)
octahedron_faces = [
(0, 1, 2), (0, 2, 3), (0, 3, 4), (0, 1, 4), # top 4
(1, 2, 5), (2, 3, 5), (3, 4, 5), (1, 4, 5), # bottom 4
]
octahedron = Polyhedron(
range(6),
octahedron_faces,
_pgroup_of_double(cube, cube_faces, _c_pgroup))
dodecahedron_faces = [
(0, 1, 2, 3, 4), # top
(0, 1, 6, 10, 5), (1, 2, 7, 11, 6), (2, 3, 8, 12, 7), # upper 5
(3, 4, 9, 13, 8), (0, 4, 9, 14, 5),
(5, 10, 16, 15, 14), (6, 10, 16, 17, 11), (7, 11, 17, 18,
12), # lower 5
(8, 12, 18, 19, 13), (9, 13, 19, 15, 14),
(15, 16, 17, 18, 19) # bottom
]
def _string_to_perm(s):
rv = [Perm(range(20))]
p = None
for si in s:
if si not in '01':
count = int(si) - 1
else:
count = 1
if si == '0':
p = _f0
elif si == '1':
p = _f1
rv.extend([p]*count)
return Perm.rmul(*rv)
# top face cw
_f0 = Perm([
1, 2, 3, 4, 0, 6, 7, 8, 9, 5, 11,
12, 13, 14, 10, 16, 17, 18, 19, 15])
# front face cw
_f1 = Perm([
5, 0, 4, 9, 14, 10, 1, 3, 13, 15,
6, 2, 8, 19, 16, 17, 11, 7, 12, 18])
# the strings below, like 0104 are shorthand for F0*F1*F0**4 and are
# the remaining 4 face rotations, 15 edge permutations, and the
# 10 vertex rotations.
_dodeca_pgroup = [_f0, _f1] + [_string_to_perm(s) for s in '''
0104 140 014 0410
010 1403 03104 04103 102
120 1304 01303 021302 03130
0412041 041204103 04120410 041204104 041204102
10 01 1402 0140 04102 0412 1204 1302 0130 03120'''.strip().split()]
dodecahedron = Polyhedron(
range(20),
dodecahedron_faces,
_dodeca_pgroup)
icosahedron_faces = [
(0, 1, 2), (0, 2, 3), (0, 3, 4), (0, 4, 5), (0, 1, 5),
(1, 6, 7), (1, 2, 7), (2, 7, 8), (2, 3, 8), (3, 8, 9),
(3, 4, 9), (4, 9, 10), (4, 5, 10), (5, 6, 10), (1, 5, 6),
(6, 7, 11), (7, 8, 11), (8, 9, 11), (9, 10, 11), (6, 10, 11)]
icosahedron = Polyhedron(
range(12),
icosahedron_faces,
_pgroup_of_double(
dodecahedron, dodecahedron_faces, _dodeca_pgroup))
return (tetrahedron, cube, octahedron, dodecahedron, icosahedron,
tetrahedron_faces, cube_faces, octahedron_faces,
dodecahedron_faces, icosahedron_faces)
# -----------------------------------------------------------------------
# Standard Polyhedron groups
#
# These are generated using _pgroup_calcs() above. However to save
# import time we encode them explicitly here.
# -----------------------------------------------------------------------
tetrahedron = Polyhedron(
Tuple(0, 1, 2, 3),
Tuple(
Tuple(0, 1, 2),
Tuple(0, 2, 3),
Tuple(0, 1, 3),
Tuple(1, 2, 3)),
Tuple(
Perm(1, 2, 3),
Perm(3)(0, 1, 2),
Perm(0, 3, 2),
Perm(0, 3, 1),
Perm(0, 1)(2, 3),
Perm(0, 2)(1, 3),
Perm(0, 3)(1, 2)
))
cube = Polyhedron(
Tuple(0, 1, 2, 3, 4, 5, 6, 7),
Tuple(
Tuple(0, 1, 2, 3),
Tuple(0, 1, 5, 4),
Tuple(1, 2, 6, 5),
Tuple(2, 3, 7, 6),
Tuple(0, 3, 7, 4),
Tuple(4, 5, 6, 7)),
Tuple(
Perm(0, 1, 2, 3)(4, 5, 6, 7),
Perm(0, 4, 5, 1)(2, 3, 7, 6),
Perm(0, 4, 7, 3)(1, 5, 6, 2),
Perm(0, 1)(2, 4)(3, 5)(6, 7),
Perm(0, 6)(1, 2)(3, 5)(4, 7),
Perm(0, 6)(1, 7)(2, 3)(4, 5),
Perm(0, 3)(1, 7)(2, 4)(5, 6),
Perm(0, 4)(1, 7)(2, 6)(3, 5),
Perm(0, 6)(1, 5)(2, 4)(3, 7),
Perm(1, 3, 4)(2, 7, 5),
Perm(7)(0, 5, 2)(3, 4, 6),
Perm(0, 5, 7)(1, 6, 3),
Perm(0, 7, 2)(1, 4, 6)))
octahedron = Polyhedron(
Tuple(0, 1, 2, 3, 4, 5),
Tuple(
Tuple(0, 1, 2),
Tuple(0, 2, 3),
Tuple(0, 3, 4),
Tuple(0, 1, 4),
Tuple(1, 2, 5),
Tuple(2, 3, 5),
Tuple(3, 4, 5),
Tuple(1, 4, 5)),
Tuple(
Perm(5)(1, 2, 3, 4),
Perm(0, 4, 5, 2),
Perm(0, 1, 5, 3),
Perm(0, 1)(2, 4)(3, 5),
Perm(0, 2)(1, 3)(4, 5),
Perm(0, 3)(1, 5)(2, 4),
Perm(0, 4)(1, 3)(2, 5),
Perm(0, 5)(1, 4)(2, 3),
Perm(0, 5)(1, 2)(3, 4),
Perm(0, 4, 1)(2, 3, 5),
Perm(0, 1, 2)(3, 4, 5),
Perm(0, 2, 3)(1, 5, 4),
Perm(0, 4, 3)(1, 5, 2)))
dodecahedron = Polyhedron(
Tuple(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19),
Tuple(
Tuple(0, 1, 2, 3, 4),
Tuple(0, 1, 6, 10, 5),
Tuple(1, 2, 7, 11, 6),
Tuple(2, 3, 8, 12, 7),
Tuple(3, 4, 9, 13, 8),
Tuple(0, 4, 9, 14, 5),
Tuple(5, 10, 16, 15, 14),
Tuple(6, 10, 16, 17, 11),
Tuple(7, 11, 17, 18, 12),
Tuple(8, 12, 18, 19, 13),
Tuple(9, 13, 19, 15, 14),
Tuple(15, 16, 17, 18, 19)),
Tuple(
Perm(0, 1, 2, 3, 4)(5, 6, 7, 8, 9)(10, 11, 12, 13, 14)(15, 16, 17, 18, 19),
Perm(0, 5, 10, 6, 1)(2, 4, 14, 16, 11)(3, 9, 15, 17, 7)(8, 13, 19, 18, 12),
Perm(0, 10, 17, 12, 3)(1, 6, 11, 7, 2)(4, 5, 16, 18, 8)(9, 14, 15, 19, 13),
Perm(0, 6, 17, 19, 9)(1, 11, 18, 13, 4)(2, 7, 12, 8, 3)(5, 10, 16, 15, 14),
Perm(0, 2, 12, 19, 14)(1, 7, 18, 15, 5)(3, 8, 13, 9, 4)(6, 11, 17, 16, 10),
Perm(0, 4, 9, 14, 5)(1, 3, 13, 15, 10)(2, 8, 19, 16, 6)(7, 12, 18, 17, 11),
Perm(0, 1)(2, 5)(3, 10)(4, 6)(7, 14)(8, 16)(9, 11)(12, 15)(13, 17)(18, 19),
Perm(0, 7)(1, 2)(3, 6)(4, 11)(5, 12)(8, 10)(9, 17)(13, 16)(14, 18)(15, 19),
Perm(0, 12)(1, 8)(2, 3)(4, 7)(5, 18)(6, 13)(9, 11)(10, 19)(14, 17)(15, 16),
Perm(0, 8)(1, 13)(2, 9)(3, 4)(5, 12)(6, 19)(7, 14)(10, 18)(11, 15)(16, 17),
Perm(0, 4)(1, 9)(2, 14)(3, 5)(6, 13)(7, 15)(8, 10)(11, 19)(12, 16)(17, 18),
Perm(0, 5)(1, 14)(2, 15)(3, 16)(4, 10)(6, 9)(7, 19)(8, 17)(11, 13)(12, 18),
Perm(0, 11)(1, 6)(2, 10)(3, 16)(4, 17)(5, 7)(8, 15)(9, 18)(12, 14)(13, 19),
Perm(0, 18)(1, 12)(2, 7)(3, 11)(4, 17)(5, 19)(6, 8)(9, 16)(10, 13)(14, 15),
Perm(0, 18)(1, 19)(2, 13)(3, 8)(4, 12)(5, 17)(6, 15)(7, 9)(10, 16)(11, 14),
Perm(0, 13)(1, 19)(2, 15)(3, 14)(4, 9)(5, 8)(6, 18)(7, 16)(10, 12)(11, 17),
Perm(0, 16)(1, 15)(2, 19)(3, 18)(4, 17)(5, 10)(6, 14)(7, 13)(8, 12)(9, 11),
Perm(0, 18)(1, 17)(2, 16)(3, 15)(4, 19)(5, 12)(6, 11)(7, 10)(8, 14)(9, 13),
Perm(0, 15)(1, 19)(2, 18)(3, 17)(4, 16)(5, 14)(6, 13)(7, 12)(8, 11)(9, 10),
Perm(0, 17)(1, 16)(2, 15)(3, 19)(4, 18)(5, 11)(6, 10)(7, 14)(8, 13)(9, 12),
Perm(0, 19)(1, 18)(2, 17)(3, 16)(4, 15)(5, 13)(6, 12)(7, 11)(8, 10)(9, 14),
Perm(1, 4, 5)(2, 9, 10)(3, 14, 6)(7, 13, 16)(8, 15, 11)(12, 19, 17),
Perm(19)(0, 6, 2)(3, 5, 11)(4, 10, 7)(8, 14, 17)(9, 16, 12)(13, 15, 18),
Perm(0, 11, 8)(1, 7, 3)(4, 6, 12)(5, 17, 13)(9, 10, 18)(14, 16, 19),
Perm(0, 7, 13)(1, 12, 9)(2, 8, 4)(5, 11, 19)(6, 18, 14)(10, 17, 15),
Perm(0, 3, 9)(1, 8, 14)(2, 13, 5)(6, 12, 15)(7, 19, 10)(11, 18, 16),
Perm(0, 14, 10)(1, 9, 16)(2, 13, 17)(3, 19, 11)(4, 15, 6)(7, 8, 18),
Perm(0, 16, 7)(1, 10, 11)(2, 5, 17)(3, 14, 18)(4, 15, 12)(8, 9, 19),
Perm(0, 16, 13)(1, 17, 8)(2, 11, 12)(3, 6, 18)(4, 10, 19)(5, 15, 9),
Perm(0, 11, 15)(1, 17, 14)(2, 18, 9)(3, 12, 13)(4, 7, 19)(5, 6, 16),
Perm(0, 8, 15)(1, 12, 16)(2, 18, 10)(3, 19, 5)(4, 13, 14)(6, 7, 17)))
icosahedron = Polyhedron(
Tuple(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
Tuple(
Tuple(0, 1, 2),
Tuple(0, 2, 3),
Tuple(0, 3, 4),
Tuple(0, 4, 5),
Tuple(0, 1, 5),
Tuple(1, 6, 7),
Tuple(1, 2, 7),
Tuple(2, 7, 8),
Tuple(2, 3, 8),
Tuple(3, 8, 9),
Tuple(3, 4, 9),
Tuple(4, 9, 10),
Tuple(4, 5, 10),
Tuple(5, 6, 10),
Tuple(1, 5, 6),
Tuple(6, 7, 11),
Tuple(7, 8, 11),
Tuple(8, 9, 11),
Tuple(9, 10, 11),
Tuple(6, 10, 11)),
Tuple(
Perm(11)(1, 2, 3, 4, 5)(6, 7, 8, 9, 10),
Perm(0, 5, 6, 7, 2)(3, 4, 10, 11, 8),
Perm(0, 1, 7, 8, 3)(4, 5, 6, 11, 9),
Perm(0, 2, 8, 9, 4)(1, 7, 11, 10, 5),
Perm(0, 3, 9, 10, 5)(1, 2, 8, 11, 6),
Perm(0, 4, 10, 6, 1)(2, 3, 9, 11, 7),
Perm(0, 1)(2, 5)(3, 6)(4, 7)(8, 10)(9, 11),
Perm(0, 2)(1, 3)(4, 7)(5, 8)(6, 9)(10, 11),
Perm(0, 3)(1, 9)(2, 4)(5, 8)(6, 11)(7, 10),
Perm(0, 4)(1, 9)(2, 10)(3, 5)(6, 8)(7, 11),
Perm(0, 5)(1, 4)(2, 10)(3, 6)(7, 9)(8, 11),
Perm(0, 6)(1, 5)(2, 10)(3, 11)(4, 7)(8, 9),
Perm(0, 7)(1, 2)(3, 6)(4, 11)(5, 8)(9, 10),
Perm(0, 8)(1, 9)(2, 3)(4, 7)(5, 11)(6, 10),
Perm(0, 9)(1, 11)(2, 10)(3, 4)(5, 8)(6, 7),
Perm(0, 10)(1, 9)(2, 11)(3, 6)(4, 5)(7, 8),
Perm(0, 11)(1, 6)(2, 10)(3, 9)(4, 8)(5, 7),
Perm(0, 11)(1, 8)(2, 7)(3, 6)(4, 10)(5, 9),
Perm(0, 11)(1, 10)(2, 9)(3, 8)(4, 7)(5, 6),
Perm(0, 11)(1, 7)(2, 6)(3, 10)(4, 9)(5, 8),
Perm(0, 11)(1, 9)(2, 8)(3, 7)(4, 6)(5, 10),
Perm(0, 5, 1)(2, 4, 6)(3, 10, 7)(8, 9, 11),
Perm(0, 1, 2)(3, 5, 7)(4, 6, 8)(9, 10, 11),
Perm(0, 2, 3)(1, 8, 4)(5, 7, 9)(6, 11, 10),
Perm(0, 3, 4)(1, 8, 10)(2, 9, 5)(6, 7, 11),
Perm(0, 4, 5)(1, 3, 10)(2, 9, 6)(7, 8, 11),
Perm(0, 10, 7)(1, 5, 6)(2, 4, 11)(3, 9, 8),
Perm(0, 6, 8)(1, 7, 2)(3, 5, 11)(4, 10, 9),
Perm(0, 7, 9)(1, 11, 4)(2, 8, 3)(5, 6, 10),
Perm(0, 8, 10)(1, 7, 6)(2, 11, 5)(3, 9, 4),
Perm(0, 9, 6)(1, 3, 11)(2, 8, 7)(4, 10, 5)))
tetrahedron_faces = list(tuple(arg) for arg in tetrahedron.faces)
cube_faces = list(tuple(arg) for arg in cube.faces)
octahedron_faces = list(tuple(arg) for arg in octahedron.faces)
dodecahedron_faces = list(tuple(arg) for arg in dodecahedron.faces)
icosahedron_faces = list(tuple(arg) for arg in icosahedron.faces)
|
py | 1a2f1d56fc766affd2507772267dadcf41f8a7af | # datadotworld module has been imported as dw
import pprint as pp  # needed for pp.pprint() below
import datadotworld as dw
# We've written a SPARQL query for you and assigned it to the `sparql_query` variable:
sparql_query = "PREFIX GOT: <https://tutorial.linked.data.world/d/sparqltutorial/> SELECT ?FName ?LName WHERE {?person GOT:col-got-house \"Stark\" . ?person GOT:col-got-fname ?FName . ?person GOT:col-got-lname ?LName .}"
# Use the pre-defined SPARQL query to query dataset http://data.world/tutorial/sparqltutorial and return the results to a queryResults variable
queryResults = dw.query("http://data.world/tutorial/sparqltutorial", sparql_query, query_type='sparql')
# Use the dataframe property of the resulting query to create a dataframe variable named `houseStark`
houseStark = queryResults.dataframe
# Use pp.pprint() to print the dataframe to the screen.
pp.pprint(houseStark)
|
py | 1a2f1ed612e9240a51a9e4eab2cde64ae038814e | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.java_gateway import get_gateway
from pyflink.common import Configuration
from pyflink.table import EnvironmentSettings
from pyflink.testing.test_case_utils import PyFlinkTestCase
class EnvironmentSettingsTests(PyFlinkTestCase):
def test_mode_selection(self):
builder = EnvironmentSettings.new_instance()
# test the default behaviour to make sure it is consistent with the python doc
environment_settings = builder.build()
self.assertTrue(environment_settings.is_streaming_mode())
# test in_streaming_mode
environment_settings = builder.in_streaming_mode().build()
self.assertTrue(environment_settings.is_streaming_mode())
environment_settings = EnvironmentSettings.in_streaming_mode()
self.assertTrue(environment_settings.is_streaming_mode())
# test in_batch_mode
environment_settings = builder.in_batch_mode().build()
self.assertFalse(environment_settings.is_streaming_mode())
environment_settings = EnvironmentSettings.in_batch_mode()
self.assertFalse(environment_settings.is_streaming_mode())
def test_with_built_in_catalog_name(self):
gateway = get_gateway()
DEFAULT_BUILTIN_CATALOG = gateway.jvm.TableConfigOptions.TABLE_CATALOG_NAME.defaultValue()
builder = EnvironmentSettings.new_instance()
# test the default behaviour to make sure it is consistent with the python doc
environment_settings = builder.build()
self.assertEqual(environment_settings.get_built_in_catalog_name(), DEFAULT_BUILTIN_CATALOG)
environment_settings = builder.with_built_in_catalog_name("my_catalog").build()
self.assertEqual(environment_settings.get_built_in_catalog_name(), "my_catalog")
def test_with_built_in_database_name(self):
gateway = get_gateway()
DEFAULT_BUILTIN_DATABASE = gateway.jvm.TableConfigOptions.TABLE_DATABASE_NAME.defaultValue()
builder = EnvironmentSettings.new_instance()
# test the default behaviour to make sure it is consistent with the python doc
environment_settings = builder.build()
self.assertEqual(environment_settings.get_built_in_database_name(),
DEFAULT_BUILTIN_DATABASE)
environment_settings = builder.with_built_in_database_name("my_database").build()
self.assertEqual(environment_settings.get_built_in_database_name(), "my_database")
def test_to_configuration(self):
expected_settings = EnvironmentSettings.new_instance().in_batch_mode().build()
config = expected_settings.to_configuration()
self.assertEqual("BATCH", config.get_string("execution.runtime-mode", "stream"))
def test_from_configuration(self):
config = Configuration()
config.set_string("execution.runtime-mode", "batch")
actual_setting = EnvironmentSettings.from_configuration(config)
self.assertFalse(actual_setting.is_streaming_mode(), "Use batch mode.")
|
py | 1a2f200557e96a55d1fb6a17f5f1b0e0467a0c05 | #
# This file is part of ravstack. Ravstack is free software available under
# the terms of the MIT license. See the file "LICENSE" that was provided
# together with this source file for the licensing terms.
#
# Copyright (c) 2015 the ravstack authors. See the file "AUTHORS" for a
# complete list.
"""Ravello Ironic command-line utility.
Usage:
ravstack [options] setup
ravstack [options] proxy-create
ravstack [options] node-create [-c <cpus>] [-m <memory>]
[-D <disk>] [-n <count>]
ravstack [options] node-dump
ravstack [options] node-list [--all [--cached]]
ravstack [options] node-start <node>
ravstack [options] node-stop <node>
ravstack [options] node-reboot <node>
ravstack [options] node-get-boot-device <node>
ravstack [options] node-set-boot-device <node> <bootdev>
ravstack [options] node-get-macs <node> [--cached]
ravstack [options] fixup
ravstack [options] endpoint-resolve <port> [-t <timeout>]
[--start-port <base>] [--num-ports <count>]
ravstack --help
Command help:
setup Create ravstack directories and config file.
proxy-create Create SSH -> Ravello API proxy.
node-create Create a new node.
node-dump Dump node definitions to specified file.
node-list List powered on nodes. (--all lists all nodes)
node-start Start a node.
node-stop Stop a node.
node-reboot Reboot a node.
node-get-boot-device Return boot device for <node>.
node-set-boot-device Set boot device for <node> to <bootdev>.
The boot device may be "hd" or "network".
node-get-macs Return MAC addresses for <node>.
fixup Fix Ravello and OS config after one or
more nodes were deployed.
endpoint-resolve Resolve an endpoint for a local service using
a public IP address or under portmapping.
Options:
-d, --debug Enable debugging.
-v, --verbose Be verbose.
--log-stderr Show logs on standard error.
-u <username>, --username=<username>
Ravello API username.
-p <password>, --password=<password>
Ravello API password.
-a <application>, --application=<application>
The Ravello application name.
--all List all nodes.
--cached Allow use of cached information.
Options for `node-create`:
-c <cpus>, --cpus=<cpus>
The number of CPUs. [default: 2]
-m <memory>, --memory=<memory>
The amount of memory in MB. [default: 8192]
-D <disk>, --disk=<disk>
The size of the disk in GB. [default: 60]
-n <count>, --count=<count>
The number of nodes to create. [default: 1]
Options for `endpoint-resolve`:
-t <timeout>, --timeout <timeout>
Timeout. [default: 2]
--start-port <port> Starting port for endpoint resolution with
portmapping. [default: 10000]
  --num-ports <count>   Number of ports to scan for endpoint resolution
with portmapping. [default: 50]
"""
from __future__ import absolute_import, print_function
import docopt
from . import factory, setup, node, proxy, fixup, endpoint, runtime
from .runtime import CONF
def main():
"""Ravstack main entry point."""
args = docopt.docopt(__doc__)
CONF.update_from_args(args)
CONF.update_to_env()
runtime.setup_logging() # logging configuration might have changed
env = factory.get_environ(args)
if args['setup']:
setup.do_setup(env)
elif args['proxy-create']:
proxy.do_create(env)
elif args['node-create']:
node.do_create(env)
elif args['node-dump']:
node.do_dump(env)
elif args['node-list'] and not args.get('--all'):
node.do_list_running(env, False)
elif args['node-list']:
node.do_list_all(env)
elif args['node-start']:
node.do_start(env, args['<node>'])
elif args['node-stop']:
node.do_stop(env, args['<node>'])
elif args['node-reboot']:
node.do_reboot(env, args['<node>'])
elif args['node-get-boot-device']:
node.do_get_boot_device(env, args['<node>'])
elif args['node-set-boot-device']:
node.do_set_boot_device(env, args['<node>'], args['<bootdev>'])
elif args['node-get-macs']:
node.do_get_macs(env, args['<node>'], False)
elif args['fixup']:
fixup.do_fixup(env)
elif args['endpoint-resolve']:
endpoint.do_resolve(env, args['<port>'])
def run_main():
"""Setuptools entry point."""
runtime.run_main(main)
if __name__ == '__main__':
run_main()
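# Invocation sketch (hypothetical shell session; assumes Ravello credentials
# and an existing application). The commands map onto the docopt usage
# patterns in the module docstring above:
#
#   ravstack --username me@example.com --application demo node-create -c 4 -m 16384 -n 2
#   ravstack node-list --all
#   ravstack node-set-boot-device node1 network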
|
py | 1a2f2082e47d81d711d46d913e34e347eef20e68 | from sympy import (Abs, exp, Expr, I, pi, Q, Rational, refine, S, sqrt,
atan, atan2, nan, Symbol)
from sympy.abc import x, y, z
from sympy.core.relational import Eq, Ne
from sympy.functions.elementary.piecewise import Piecewise
from sympy.utilities.pytest import slow
def test_Abs():
assert refine(Abs(x), Q.positive(x)) == x
assert refine(1 + Abs(x), Q.positive(x)) == 1 + x
assert refine(Abs(x), Q.negative(x)) == -x
assert refine(1 + Abs(x), Q.negative(x)) == 1 - x
assert refine(Abs(x**2)) != x**2
assert refine(Abs(x**2), Q.real(x)) == x**2
@slow
def test_pow1():
assert refine((-1)**x, Q.even(x)) == 1
assert refine((-1)**x, Q.odd(x)) == -1
assert refine((-2)**x, Q.even(x)) == 2**x
# nested powers
assert refine(sqrt(x**2)) != Abs(x)
assert refine(sqrt(x**2), Q.complex(x)) != Abs(x)
assert refine(sqrt(x**2), Q.real(x)) == Abs(x)
assert refine(sqrt(x**2), Q.positive(x)) == x
assert refine((x**3)**(S(1)/3)) != x
assert refine((x**3)**(S(1)/3), Q.real(x)) != x
assert refine((x**3)**(S(1)/3), Q.positive(x)) == x
assert refine(sqrt(1/x), Q.real(x)) != 1/sqrt(x)
assert refine(sqrt(1/x), Q.positive(x)) == 1/sqrt(x)
@slow
def test_pow2():
# powers of (-1)
assert refine((-1)**(x + y), Q.even(x)) == (-1)**y
assert refine((-1)**(x + y + z), Q.odd(x) & Q.odd(z)) == (-1)**y
assert refine((-1)**(x + y + 1), Q.odd(x)) == (-1)**y
assert refine((-1)**(x + y + 2), Q.odd(x)) == (-1)**(y + 1)
assert refine((-1)**(x + 3)) == (-1)**(x + 1)
@slow
def test_pow3():
# continuation
assert refine((-1)**((-1)**x/2 - S.Half), Q.integer(x)) == (-1)**x
assert refine((-1)**((-1)**x/2 + S.Half), Q.integer(x)) == (-1)**(x + 1)
assert refine((-1)**((-1)**x/2 + 5*S.Half), Q.integer(x)) == (-1)**(x + 1)
@slow
def test_pow4():
assert refine((-1)**((-1)**x/2 - 7*S.Half), Q.integer(x)) == (-1)**(x + 1)
assert refine((-1)**((-1)**x/2 - 9*S.Half), Q.integer(x)) == (-1)**x
# powers of Abs
assert refine(Abs(x)**2, Q.real(x)) == x**2
assert refine(Abs(x)**3, Q.real(x)) == Abs(x)**3
assert refine(Abs(x)**2) == Abs(x)**2
def test_exp():
x = Symbol('x', integer=True)
assert refine(exp(pi*I*2*x)) == 1
assert refine(exp(pi*I*2*(x + Rational(1, 2)))) == -1
assert refine(exp(pi*I*2*(x + Rational(1, 4)))) == I
assert refine(exp(pi*I*2*(x + Rational(3, 4)))) == -I
def test_Relational():
assert not refine(x < 0, ~Q.is_true(x < 0))
assert refine(x < 0, Q.is_true(x < 0))
assert refine(x < 0, Q.is_true(0 > x)) == True
assert refine(x < 0, Q.is_true(y < 0)) == (x < 0)
assert not refine(x <= 0, ~Q.is_true(x <= 0))
assert refine(x <= 0, Q.is_true(x <= 0))
assert refine(x <= 0, Q.is_true(0 >= x)) == True
assert refine(x <= 0, Q.is_true(y <= 0)) == (x <= 0)
assert not refine(x > 0, ~Q.is_true(x > 0))
assert refine(x > 0, Q.is_true(x > 0))
assert refine(x > 0, Q.is_true(0 < x)) == True
assert refine(x > 0, Q.is_true(y > 0)) == (x > 0)
assert not refine(x >= 0, ~Q.is_true(x >= 0))
assert refine(x >= 0, Q.is_true(x >= 0))
assert refine(x >= 0, Q.is_true(0 <= x)) == True
assert refine(x >= 0, Q.is_true(y >= 0)) == (x >= 0)
assert not refine(Eq(x, 0), ~Q.is_true(Eq(x, 0)))
assert refine(Eq(x, 0), Q.is_true(Eq(x, 0)))
assert refine(Eq(x, 0), Q.is_true(Eq(0, x))) == True
assert refine(Eq(x, 0), Q.is_true(Eq(y, 0))) == Eq(x, 0)
assert not refine(Ne(x, 0), ~Q.is_true(Ne(x, 0)))
assert refine(Ne(x, 0), Q.is_true(Ne(0, x))) == True
assert refine(Ne(x, 0), Q.is_true(Ne(x, 0)))
assert refine(Ne(x, 0), Q.is_true(Ne(y, 0))) == (Ne(x, 0))
def test_Piecewise():
assert refine(Piecewise((1, x < 0), (3, True)), Q.is_true(x < 0)) == 1
assert refine(Piecewise((1, x < 0), (3, True)), ~Q.is_true(x < 0)) == 3
assert refine(Piecewise((1, x < 0), (3, True)), Q.is_true(y < 0)) == \
Piecewise((1, x < 0), (3, True))
assert refine(Piecewise((1, x > 0), (3, True)), Q.is_true(x > 0)) == 1
assert refine(Piecewise((1, x > 0), (3, True)), ~Q.is_true(x > 0)) == 3
assert refine(Piecewise((1, x > 0), (3, True)), Q.is_true(y > 0)) == \
Piecewise((1, x > 0), (3, True))
assert refine(Piecewise((1, x <= 0), (3, True)), Q.is_true(x <= 0)) == 1
assert refine(Piecewise((1, x <= 0), (3, True)), ~Q.is_true(x <= 0)) == 3
assert refine(Piecewise((1, x <= 0), (3, True)), Q.is_true(y <= 0)) == \
Piecewise((1, x <= 0), (3, True))
assert refine(Piecewise((1, x >= 0), (3, True)), Q.is_true(x >= 0)) == 1
assert refine(Piecewise((1, x >= 0), (3, True)), ~Q.is_true(x >= 0)) == 3
assert refine(Piecewise((1, x >= 0), (3, True)), Q.is_true(y >= 0)) == \
Piecewise((1, x >= 0), (3, True))
assert refine(Piecewise((1, Eq(x, 0)), (3, True)), Q.is_true(Eq(x, 0)))\
== 1
assert refine(Piecewise((1, Eq(x, 0)), (3, True)), Q.is_true(Eq(0, x)))\
== 1
assert refine(Piecewise((1, Eq(x, 0)), (3, True)), ~Q.is_true(Eq(x, 0)))\
== 3
assert refine(Piecewise((1, Eq(x, 0)), (3, True)), ~Q.is_true(Eq(0, x)))\
== 3
assert refine(Piecewise((1, Eq(x, 0)), (3, True)), Q.is_true(Eq(y, 0)))\
== Piecewise((1, Eq(x, 0)), (3, True))
assert refine(Piecewise((1, Ne(x, 0)), (3, True)), Q.is_true(Ne(x, 0)))\
== 1
assert refine(Piecewise((1, Ne(x, 0)), (3, True)), ~Q.is_true(Ne(x, 0)))\
== 3
assert refine(Piecewise((1, Ne(x, 0)), (3, True)), Q.is_true(Ne(y, 0)))\
== Piecewise((1, Ne(x, 0)), (3, True))
def test_atan2():
assert refine(atan2(y, x), Q.real(y) & Q.positive(x)) == atan(y/x)
assert refine(atan2(y, x), Q.negative(y) & Q.positive(x)) == atan(y/x)
assert refine(atan2(y, x), Q.negative(y) & Q.negative(x)) == atan(y/x) - pi
assert refine(atan2(y, x), Q.positive(y) & Q.negative(x)) == atan(y/x) + pi
assert refine(atan2(y, x), Q.zero(y) & Q.negative(x)) == pi
assert refine(atan2(y, x), Q.positive(y) & Q.zero(x)) == pi/2
assert refine(atan2(y, x), Q.negative(y) & Q.zero(x)) == -pi/2
assert refine(atan2(y, x), Q.zero(y) & Q.zero(x)) == nan
def test_func_args():
class MyClass(Expr):
# A class with nontrivial .func
def __init__(self, *args):
self.my_member = ""
@property
def func(self):
def my_func(*args):
obj = MyClass(*args)
obj.my_member = self.my_member
return obj
return my_func
x = MyClass()
x.my_member = "A very important value"
assert x.my_member == refine(x).my_member
def test_eval_refine():
from sympy.core.expr import Expr
class MockExpr(Expr):
def _eval_refine(self, assumptions):
return True
mock_obj = MockExpr()
assert refine(mock_obj)
def test_refine_issue_12724():
expr1 = refine(Abs(x * y), Q.positive(x))
expr2 = refine(Abs(x * y * z), Q.positive(x))
assert expr1 == x * Abs(y)
assert expr2 == x * Abs(y * z)
y1 = Symbol('y1', real = True)
expr3 = refine(Abs(x * y1**2 * z), Q.positive(x))
assert expr3 == x * y1**2 * Abs(z)
|
py | 1a2f20b4f57c9954cfe57f7236c48aedc8151d94 | from __future__ import print_function
import numpy as np
def q(y_true, y_pred):
'''q value as described in Tropsha, Gramatica, Gombar:
The Importance of Being Earnest'''
y_true = np.array(y_true)
y_pred = np.array(y_pred)
y_mean = np.mean(y_true)
return 1 - np.sum((y_true - y_pred) ** 2) / np.sum((y_true - y_mean) ** 2)
def linreg(x, y):
'''Computes a linear regression through the origin using OLS'''
x = np.array(x)
y = np.array(y)
x = x[:, np.newaxis]
a, _, _, _ = np.linalg.lstsq(x, y, rcond=-1.)
r2 = q(y, (x * a)[:, 0])
return (r2, a) |
py | 1a2f20b95d23ec33820bade8a811fd92a94e6e44 | import os
from dateutil.parser import parse as date_parser
from flask import request, current_app
from flask_restful.fields import Integer, List, Nested, Raw, String
from werkzeug.utils import secure_filename
from analysisweb.api import db
from analysisweb_user.models import Measurement, MeasurementFile
from . import (
ResourceBase,
MetaResource,
ResourceInvalidInputException,
ResourceForbiddenActionException,
ResourceNotFoundException,
IDField,
)
class MeasurementResource(ResourceBase):
db_table = Measurement
measurement_file = {
"label": String,
"path": String(
attribute=lambda x: "files/measurement/{}/{}".format(
x.measurement_id, x.path
)
),
}
fields = {
"id": Integer,
"label": String,
"start_date": String,
"end_date": String,
"meta_data": Raw,
"files": List(Nested(measurement_file)),
"jobs": List(IDField),
}
def get(self, id_):
"""
Receive a measurement
---
summary: Find a measurement by ID
tags:
- measurements
parameters:
- name: id
in: path
description: ID of measurement to return
required: true
schema:
type: integer
responses:
200:
description: successful operation
content:
application/json:
schema:
$ref: "#/components/schemas/Measurement"
400:
description: Invalid ID supplied
404:
description: Measurement not found
"""
try:
resource = self.get_resource(id_)
except (ResourceInvalidInputException, ResourceNotFoundException) as e:
return {"status": str(e)}, e.response_code
return self.dump_resource(resource), 200
def delete(self, id_):
"""
Delete a measurement
---
summary: Deletes a measurement
tags:
- measurements
parameters:
- name: id
in: path
description: ID of measurement to return
required: true
schema:
type: integer
responses:
200:
description: Measurement deleted and returned
content:
application/json:
schema:
$ref: "#/components/schemas/Measurement"
400:
description: Invalid ID supplied
404:
description: Measurement not found
405:
description: Cannot delete measurement associated with a job
"""
try:
resource = self.get_resource(id_)
except (ResourceInvalidInputException, ResourceNotFoundException) as e:
return {"status": str(e)}, e.response_code
try:
return self.delete_resource(
current_app.config["MEASUREMENT_FILES_FOLDER"], resource
)
except ResourceForbiddenActionException as e:
return {"status": str(e)}, e.response_code
def put(self, id_):
"""
Update the basic information about a measurement
---
summary: Updates a measurement with new data
tags:
- measurements
parameters:
- name: id
in: path
description: ID of measurement to return
required: true
schema:
type: integer
requestBody:
content:
multipart/form-data:
schema:
properties:
start_date:
type: string
format: date-time
end_date:
type: string
format: date-time
label:
type: string
meta_data:
type: string
responses:
200:
description: Measurement updated and returned
content:
application/json:
schema:
$ref: "#/components/schemas/Measurement"
400:
description: Invalid ID supplied or invalid input
404:
description: Measurement not found
"""
try:
resource = self.get_resource(id_)
except (ResourceInvalidInputException, ResourceNotFoundException) as e:
return {"status": str(e)}, e.response_code
try:
self._update_measurement(resource)
except ResourceInvalidInputException as e:
return {"status": str(e)}, e.response_code
return self.dump_resource(resource), 200
def _update_measurement(self, resource):
start_date, end_date = self.parse_dates(
str(resource.start_date), str(resource.end_date)
)
resource.start_date = start_date
resource.end_date = end_date
resource.label = request.form.get("label", resource.label)
self.load_metadata(request.form.get("meta_data", "{}"), resource)
db.session.commit()
@staticmethod
def parse_dates(start=None, end=None):
try:
start_date = date_parser(request.form.get("start_date", start))
end_date = date_parser(request.form.get("end_date", end))
except ValueError as e:
raise ResourceInvalidInputException(str(e))
if end_date < start_date:
raise ResourceInvalidInputException("end date < start date")
return start_date, end_date
class MeasurementListResource(ResourceBase):
db_table = Measurement
fields = MeasurementResource.fields
def get(self):
"""
Obtain a list of measurements
---
summary: Retrieve a list of measurements
tags:
- measurements
responses:
200:
description: OK
content:
application/json:
schema:
type: array
items:
$ref: "#/components/schemas/Measurement"
"""
return self.get_all(), 200
def post(self):
"""
Add a new measurement
---
summary: Add a new measurement
tags:
- measurements
requestBody:
content:
multipart/form-data:
schema:
properties:
start_date:
type: string
format: date-time
end_date:
type: string
format: date-time
label:
type: string
meta_data:
type: string
files:
type: array
items:
$ref: "#/components/schemas/File"
responses:
201:
description: Measurement created
400:
description: Invalid input
"""
try:
measurement_id = self._add_measurement()
except ResourceInvalidInputException as e:
return {"status": str(e)}, e.response_code
return {"status": "success", "id": measurement_id}, 201
def _add_measurement(self):
self._validate_form_data()
start_date, end_date = MeasurementResource.parse_dates()
m = Measurement(
start_date=start_date, end_date=end_date, label=request.form["label"]
)
db.session.add(m)
db.session.flush()
measurement_id = m.id
self.load_metadata(request.form.get("meta_data", "{}"), m)
file_folder = os.path.join(
current_app.config["MEASUREMENT_FILES_FOLDER"], str(measurement_id)
)
os.makedirs(file_folder)
print(request.files)
self._add_measurement_files(m, request.files.items(), file_folder)
db.session.commit()
return measurement_id
@staticmethod
def _add_measurement_files(measurement, file_list, path):
"""
Add files to a measurement
Parameters
----------
measurement: Measurement
the measurement to which add the files
file_list: list of werkzeug.Files
the given list of files
path: str
            the folder to upload the files to
"""
for label, file in file_list:
if file:
filename = secure_filename(file.filename)
file.save(os.path.join(path, filename))
f = MeasurementFile(label=label, path=filename, measurement=measurement)
db.session.add(f)
@staticmethod
def _validate_form_data():
if (
"start_date" not in request.form
or "end_date" not in request.form
or "label" not in request.form
or not request.files
):
raise ResourceInvalidInputException("Missing input")
class MeasurementMetaResource(MetaResource):
def get(self):
return self.load_meta("user_meta.json")
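# --- Editor's note: hedged usage sketch, not part of the original module. ---
# The resources above document a multipart/form-data contract; the route prefix
# is not shown in this file, so "/measurements" below is an assumption.
#
#   import requests
#
#   resp = requests.post(
#       "http://localhost:5000/measurements",            # assumed route
#       data={
#           "start_date": "2021-01-01T00:00:00",
#           "end_date": "2021-01-02T00:00:00",
#           "label": "test run",
#           "meta_data": "{}",
#       },
#       files={"sensor_log": open("log.csv", "rb")},     # at least one file is required
#   )
#   assert resp.status_code == 201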
|
py | 1a2f20d5bffc38dc6ca1d9ea88d5489f700e4fac | # exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('C6A', ['C8pro'])
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'SmacM'])
Monomer('Ligand', ['Receptor'])
Monomer('C6pro', ['C3A'])
Monomer('ParpU', ['C3A'])
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('BidM', ['BaxM'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('Xiap', ['SmacC', 'C3A'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C3ub')
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('C3pro', ['C8A'])
Monomer('SmacM', ['BaxA'])
Monomer('SmacC', ['Xiap'])
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('ParpC')
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('C6A_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('Ligand_0', 1000.0)
Parameter('C6pro_0', 100.0)
Parameter('ParpU_0', 1000000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('BaxM_0', 40000.0)
Parameter('C8A_0', 0.0)
Parameter('Xiap_0', 50750.0)
Parameter('Receptor_0', 100.0)
Parameter('C3ub_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('C3pro_0', 21000.0)
Parameter('SmacM_0', 100000.0)
Parameter('SmacC_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('ParpC_0', 0.0)
Observable('C6A_obs', C6A())
Observable('BaxA_obs', BaxA())
Observable('Ligand_obs', Ligand())
Observable('C6pro_obs', C6pro())
Observable('ParpU_obs', ParpU())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('BidM_obs', BidM())
Observable('BaxM_obs', BaxM())
Observable('C8A_obs', C8A())
Observable('Xiap_obs', Xiap())
Observable('Receptor_obs', Receptor())
Observable('C3ub_obs', C3ub())
Observable('Fadd_obs', Fadd())
Observable('C3pro_obs', C3pro())
Observable('SmacM_obs', SmacM())
Observable('SmacC_obs', SmacC())
Observable('C8pro_obs', C8pro())
Observable('ParpC_obs', ParpC())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5) % SmacM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(C6A(C8pro=None), C6A_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None), BaxA_0)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(C6pro(C3A=None), C6pro_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(Xiap(SmacC=None, C3A=None), Xiap_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C3ub(), C3ub_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(C3pro(C8A=None), C3pro_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(ParpC(), ParpC_0)
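# --- Editor's note: hedged usage sketch, not part of the exported model. ---
# The file above only declares the model; a PySB simulator is needed to run it.
# ScipyOdeSimulator is the standard choice; the time span and the observable
# inspected below are arbitrary illustrations.
if __name__ == '__main__':
    import numpy as np
    from pysb.simulator import ScipyOdeSimulator

    tspan = np.linspace(0, 20000, 201)  # arbitrary time window
    sim = ScipyOdeSimulator(model, tspan=tspan)
    res = sim.run()
    # e.g. final amount of cleaved PARP
    print(res.observables['ParpC_obs'][-1])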
|
py | 1a2f21a734886d5cebf0d7bfc1fdad145163cde6 | from typing import TYPE_CHECKING
from ..email_common import get_email_subject, get_email_template_or_default
from . import constants
from .tasks import (
send_email_with_link_to_download_file_task,
send_export_failed_email_task,
send_set_staff_password_email_task,
send_staff_order_confirmation_email_task,
send_staff_password_reset_email_task,
)
if TYPE_CHECKING:
from .plugin import AdminEmailPlugin
def send_set_staff_password_email(
payload: dict, config: dict, plugin: "AdminEmailPlugin"
):
recipient_email = payload["recipient_email"]
template = get_email_template_or_default(
plugin,
constants.SET_STAFF_PASSWORD_TEMPLATE_FIELD,
constants.SET_STAFF_PASSWORD_DEFAULT_TEMPLATE,
constants.DEFAULT_EMAIL_TEMPLATES_PATH,
)
subject = get_email_subject(
plugin.configuration,
constants.SET_STAFF_PASSWORD_SUBJECT_FIELD,
constants.SET_STAFF_PASSWORD_DEFAULT_SUBJECT,
)
send_set_staff_password_email_task.delay(
recipient_email, payload, config, subject, template
)
def send_csv_export_success(payload: dict, config: dict, plugin: "AdminEmailPlugin"):
recipient_email = payload.get("recipient_email")
if recipient_email:
template = get_email_template_or_default(
plugin,
constants.CSV_EXPORT_SUCCESS_TEMPLATE_FIELD,
constants.CSV_EXPORT_SUCCESS_DEFAULT_TEMPLATE,
constants.DEFAULT_EMAIL_TEMPLATES_PATH,
)
subject = get_email_subject(
plugin.configuration,
constants.CSV_EXPORT_SUCCESS_SUBJECT_FIELD,
constants.CSV_EXPORT_SUCCESS_DEFAULT_SUBJECT,
)
send_email_with_link_to_download_file_task.delay(
recipient_email, payload, config, subject, template
)
def send_staff_order_confirmation(
payload: dict, config: dict, plugin: "AdminEmailPlugin"
):
recipient_list = payload.get("recipient_list")
template = get_email_template_or_default(
plugin,
constants.STAFF_ORDER_CONFIRMATION_TEMPLATE_FIELD,
constants.STAFF_ORDER_CONFIRMATION_DEFAULT_TEMPLATE,
constants.DEFAULT_EMAIL_TEMPLATES_PATH,
)
subject = get_email_subject(
plugin.configuration,
constants.STAFF_ORDER_CONFIRMATION_SUBJECT_FIELD,
constants.STAFF_ORDER_CONFIRMATION_DEFAULT_SUBJECT,
)
send_staff_order_confirmation_email_task.delay(
recipient_list, payload, config, subject, template
)
def send_csv_export_failed(payload: dict, config: dict, plugin: "AdminEmailPlugin"):
recipient_email = payload.get("recipient_email")
if recipient_email:
template = get_email_template_or_default(
plugin,
constants.CSV_EXPORT_FAILED_TEMPLATE_FIELD,
constants.CSV_EXPORT_FAILED_TEMPLATE_DEFAULT_TEMPLATE,
constants.DEFAULT_EMAIL_TEMPLATES_PATH,
)
subject = get_email_subject(
plugin.configuration,
constants.CSV_EXPORT_FAILED_SUBJECT_FIELD,
constants.CSV_EXPORT_FAILED_DEFAULT_SUBJECT,
)
send_export_failed_email_task.delay(
recipient_email, payload, config, subject, template
)
def send_staff_reset_password(payload: dict, config: dict, plugin: "AdminEmailPlugin"):
recipient_email = payload.get("recipient_email")
if recipient_email:
template = get_email_template_or_default(
plugin,
constants.STAFF_PASSWORD_RESET_TEMPLATE_FIELD,
constants.STAFF_PASSWORD_RESET_DEFAULT_TEMPLATE,
constants.DEFAULT_EMAIL_TEMPLATES_PATH,
)
subject = get_email_subject(
plugin.configuration,
constants.STAFF_PASSWORD_RESET_SUBJECT_FIELD,
constants.STAFF_PASSWORD_RESET_DEFAULT_SUBJECT,
)
send_staff_password_reset_email_task.delay(
recipient_email, payload, config, subject, template
)
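# --- Editor's note: hedged sketch of how these senders are typically wired up;
# the event names and the notify() signature below are illustrative assumptions,
# not the actual plugin code.
#
#   EVENT_MAP = {
#       "account_set_staff_password": send_set_staff_password_email,
#       "csv_export_success": send_csv_export_success,
#       "csv_export_failed": send_csv_export_failed,
#       "staff_order_confirmation": send_staff_order_confirmation,
#       "staff_reset_password": send_staff_reset_password,
#   }
#
#   def notify(event, payload, config, plugin):
#       handler = EVENT_MAP.get(event)
#       if handler is not None:
#           handler(payload, config, plugin)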
|
py | 1a2f221771ff1b30867835352a207e9f01e1e3bd | # -*- coding: utf-8 -*-
"""
pygments.lexers.haskell
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for Haskell and related languages.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, bygroups, do_insertions, \
default, include
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic
from pygments import unistring as uni
__all__ = ['HaskellLexer', 'IdrisLexer', 'AgdaLexer', 'CryptolLexer',
'LiterateHaskellLexer', 'LiterateIdrisLexer', 'LiterateAgdaLexer',
'LiterateCryptolLexer', 'KokaLexer']
line_re = re.compile('.*?\n')
class HaskellLexer(RegexLexer):
"""
A Haskell lexer based on the lexemes defined in the Haskell 98 Report.
.. versionadded:: 0.8
"""
name = 'Haskell'
aliases = ['haskell', 'hs']
filenames = ['*.hs']
mimetypes = ['text/x-haskell']
flags = re.MULTILINE | re.UNICODE
reserved = ('case', 'class', 'data', 'default', 'deriving', 'do', 'else',
'family', 'if', 'in', 'infix[lr]?', 'instance',
'let', 'newtype', 'of', 'then', 'type', 'where', '_')
ascii = ('NUL', 'SOH', '[SE]TX', 'EOT', 'ENQ', 'ACK',
'BEL', 'BS', 'HT', 'LF', 'VT', 'FF', 'CR', 'S[OI]', 'DLE',
'DC[1-4]', 'NAK', 'SYN', 'ETB', 'CAN',
'EM', 'SUB', 'ESC', '[FGRU]S', 'SP', 'DEL')
tokens = {
'root': [
# Whitespace:
(r'\s+', Text),
# (r'--\s*|.*$', Comment.Doc),
(r'--(?![!#$%&*+./<=>?@^|_~:\\]).*?$', Comment.Single),
(r'\{-', Comment.Multiline, 'comment'),
# Lexemes:
# Identifiers
(r'\bimport\b', Keyword.Reserved, 'import'),
(r'\bmodule\b', Keyword.Reserved, 'module'),
(r'\berror\b', Name.Exception),
(r'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved),
(r"'[^\\]'", String.Char), # this has to come before the TH quote
(r'^[_' + uni.Ll + r'][\w\']*', Name.Function),
(r"'?[_" + uni.Ll + r"][\w']*", Name),
(r"('')?[" + uni.Lu + r"][\w\']*", Keyword.Type),
(r"(')[" + uni.Lu + r"][\w\']*", Keyword.Type),
(r"(')\[[^\]]*\]", Keyword.Type), # tuples and lists get special treatment in GHC
(r"(')\([^)]*\)", Keyword.Type), # ..
# Operators
(r'\\(?![:!#$%&*+.\\/<=>?@^|~-]+)', Name.Function), # lambda operator
(r'(<-|::|->|=>|=)(?![:!#$%&*+.\\/<=>?@^|~-]+)', Operator.Word), # specials
(r':[:!#$%&*+.\\/<=>?@^|~-]*', Keyword.Type), # Constructor operators
(r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator), # Other operators
# Numbers
(r'\d+[eE][+-]?\d+', Number.Float),
(r'\d+\.\d+([eE][+-]?\d+)?', Number.Float),
(r'0[oO][0-7]+', Number.Oct),
(r'0[xX][\da-fA-F]+', Number.Hex),
(r'\d+', Number.Integer),
# Character/String Literals
(r"'", String.Char, 'character'),
(r'"', String, 'string'),
# Special
(r'\[\]', Keyword.Type),
(r'\(\)', Name.Builtin),
(r'[][(),;`{}]', Punctuation),
],
'import': [
# Import statements
(r'\s+', Text),
(r'"', String, 'string'),
# after "funclist" state
(r'\)', Punctuation, '#pop'),
(r'qualified\b', Keyword),
# import X as Y
(r'([' + uni.Lu + r'][\w.]*)(\s+)(as)(\s+)([' + uni.Lu + r'][\w.]*)',
bygroups(Name.Namespace, Text, Keyword, Text, Name), '#pop'),
# import X hiding (functions)
(r'([' + uni.Lu + r'][\w.]*)(\s+)(hiding)(\s+)(\()',
bygroups(Name.Namespace, Text, Keyword, Text, Punctuation), 'funclist'),
# import X (functions)
(r'([' + uni.Lu + r'][\w.]*)(\s+)(\()',
bygroups(Name.Namespace, Text, Punctuation), 'funclist'),
# import X
(r'[\w.]+', Name.Namespace, '#pop'),
],
'module': [
(r'\s+', Text),
(r'([' + uni.Lu + r'][\w.]*)(\s+)(\()',
bygroups(Name.Namespace, Text, Punctuation), 'funclist'),
(r'[' + uni.Lu + r'][\w.]*', Name.Namespace, '#pop'),
],
'funclist': [
(r'\s+', Text),
(r'[' + uni.Lu + r']\w*', Keyword.Type),
(r'(_[\w\']+|[' + uni.Ll + r'][\w\']*)', Name.Function),
(r'--(?![!#$%&*+./<=>?@^|_~:\\]).*?$', Comment.Single),
(r'\{-', Comment.Multiline, 'comment'),
(r',', Punctuation),
(r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator),
# (HACK, but it makes sense to push two instances, believe me)
(r'\(', Punctuation, ('funclist', 'funclist')),
(r'\)', Punctuation, '#pop:2'),
],
# NOTE: the next four states are shared in the AgdaLexer; make sure
# any change is compatible with Agda as well or copy over and change
'comment': [
# Multiline Comments
(r'[^-{}]+', Comment.Multiline),
(r'\{-', Comment.Multiline, '#push'),
(r'-\}', Comment.Multiline, '#pop'),
(r'[-{}]', Comment.Multiline),
],
'character': [
# Allows multi-chars, incorrectly.
(r"[^\\']'", String.Char, '#pop'),
(r"\\", String.Escape, 'escape'),
("'", String.Char, '#pop'),
],
'string': [
(r'[^\\"]+', String),
(r"\\", String.Escape, 'escape'),
('"', String, '#pop'),
],
'escape': [
(r'[abfnrtv"\'&\\]', String.Escape, '#pop'),
(r'\^[][' + uni.Lu + r'@^_]', String.Escape, '#pop'),
('|'.join(ascii), String.Escape, '#pop'),
(r'o[0-7]+', String.Escape, '#pop'),
(r'x[\da-fA-F]+', String.Escape, '#pop'),
(r'\d+', String.Escape, '#pop'),
(r'\s+\\', String.Escape, '#pop'),
],
}
class IdrisLexer(RegexLexer):
"""
A lexer for the dependently typed programming language Idris.
Based on the Haskell and Agda Lexer.
.. versionadded:: 2.0
"""
name = 'Idris'
aliases = ['idris', 'idr']
filenames = ['*.idr']
mimetypes = ['text/x-idris']
reserved = ('case', 'class', 'data', 'default', 'using', 'do', 'else',
'if', 'in', 'infix[lr]?', 'instance', 'rewrite', 'auto',
'namespace', 'codata', 'mutual', 'private', 'public', 'abstract',
'total', 'partial',
'let', 'proof', 'of', 'then', 'static', 'where', '_', 'with',
'pattern', 'term', 'syntax', 'prefix',
'postulate', 'parameters', 'record', 'dsl', 'impossible', 'implicit',
'tactics', 'intros', 'intro', 'compute', 'refine', 'exact', 'trivial')
ascii = ('NUL', 'SOH', '[SE]TX', 'EOT', 'ENQ', 'ACK',
'BEL', 'BS', 'HT', 'LF', 'VT', 'FF', 'CR', 'S[OI]', 'DLE',
'DC[1-4]', 'NAK', 'SYN', 'ETB', 'CAN',
'EM', 'SUB', 'ESC', '[FGRU]S', 'SP', 'DEL')
directives = ('lib', 'link', 'flag', 'include', 'hide', 'freeze', 'access',
'default', 'logging', 'dynamic', 'name', 'error_handlers', 'language')
tokens = {
'root': [
# Comments
(r'^(\s*)(%%%s)' % '|'.join(directives),
bygroups(Text, Keyword.Reserved)),
(r'(\s*)(--(?![!#$%&*+./<=>?@^|_~:\\]).*?)$', bygroups(Text, Comment.Single)),
(r'(\s*)(\|{3}.*?)$', bygroups(Text, Comment.Single)),
(r'(\s*)(\{-)', bygroups(Text, Comment.Multiline), 'comment'),
# Declaration
(r'^(\s*)([^\s(){}]+)(\s*)(:)(\s*)',
bygroups(Text, Name.Function, Text, Operator.Word, Text)),
# Identifiers
(r'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved),
(r'(import|module)(\s+)', bygroups(Keyword.Reserved, Text), 'module'),
(r"('')?[A-Z][\w\']*", Keyword.Type),
(r'[a-z][\w\']*', Text),
# Special Symbols
(r'(<-|::|->|=>|=)', Operator.Word), # specials
(r'([(){}\[\]:!#$%&*+.\\/<=>?@^|~-]+)', Operator.Word), # specials
# Numbers
(r'\d+[eE][+-]?\d+', Number.Float),
(r'\d+\.\d+([eE][+-]?\d+)?', Number.Float),
(r'0[xX][\da-fA-F]+', Number.Hex),
(r'\d+', Number.Integer),
# Strings
(r"'", String.Char, 'character'),
(r'"', String, 'string'),
(r'[^\s(){}]+', Text),
(r'\s+?', Text), # Whitespace
],
'module': [
(r'\s+', Text),
(r'([A-Z][\w.]*)(\s+)(\()',
bygroups(Name.Namespace, Text, Punctuation), 'funclist'),
(r'[A-Z][\w.]*', Name.Namespace, '#pop'),
],
'funclist': [
(r'\s+', Text),
(r'[A-Z]\w*', Keyword.Type),
(r'(_[\w\']+|[a-z][\w\']*)', Name.Function),
(r'--.*$', Comment.Single),
(r'\{-', Comment.Multiline, 'comment'),
(r',', Punctuation),
(r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator),
# (HACK, but it makes sense to push two instances, believe me)
(r'\(', Punctuation, ('funclist', 'funclist')),
(r'\)', Punctuation, '#pop:2'),
],
# NOTE: the next four states are shared in the AgdaLexer; make sure
# any change is compatible with Agda as well or copy over and change
'comment': [
# Multiline Comments
(r'[^-{}]+', Comment.Multiline),
(r'\{-', Comment.Multiline, '#push'),
(r'-\}', Comment.Multiline, '#pop'),
(r'[-{}]', Comment.Multiline),
],
'character': [
# Allows multi-chars, incorrectly.
(r"[^\\']", String.Char),
(r"\\", String.Escape, 'escape'),
("'", String.Char, '#pop'),
],
'string': [
(r'[^\\"]+', String),
(r"\\", String.Escape, 'escape'),
('"', String, '#pop'),
],
'escape': [
(r'[abfnrtv"\'&\\]', String.Escape, '#pop'),
(r'\^[][A-Z@^_]', String.Escape, '#pop'),
('|'.join(ascii), String.Escape, '#pop'),
(r'o[0-7]+', String.Escape, '#pop'),
(r'x[\da-fA-F]+', String.Escape, '#pop'),
(r'\d+', String.Escape, '#pop'),
(r'\s+\\', String.Escape, '#pop')
],
}
class AgdaLexer(RegexLexer):
"""
For the `Agda <http://wiki.portal.chalmers.se/agda/pmwiki.php>`_
dependently typed functional programming language and proof assistant.
.. versionadded:: 2.0
"""
name = 'Agda'
aliases = ['agda']
filenames = ['*.agda']
mimetypes = ['text/x-agda']
reserved = ['abstract', 'codata', 'coinductive', 'constructor', 'data',
'field', 'forall', 'hiding', 'in', 'inductive', 'infix',
'infixl', 'infixr', 'instance', 'let', 'mutual', 'open',
'pattern', 'postulate', 'primitive', 'private',
'quote', 'quoteGoal', 'quoteTerm',
'record', 'renaming', 'rewrite', 'syntax', 'tactic',
'unquote', 'unquoteDecl', 'using', 'where', 'with']
tokens = {
'root': [
# Declaration
(r'^(\s*)([^\s(){}]+)(\s*)(:)(\s*)',
bygroups(Text, Name.Function, Text, Operator.Word, Text)),
# Comments
(r'--(?![!#$%&*+./<=>?@^|_~:\\]).*?$', Comment.Single),
(r'\{-', Comment.Multiline, 'comment'),
# Holes
(r'\{!', Comment.Directive, 'hole'),
# Lexemes:
# Identifiers
(r'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved),
(r'(import|module)(\s+)', bygroups(Keyword.Reserved, Text), 'module'),
(r'\b(Set|Prop)\b', Keyword.Type),
# Special Symbols
(r'(\(|\)|\{|\})', Operator),
(u'(\\.{1,3}|\\||\u039B|\u2200|\u2192|:|=|->)', Operator.Word),
# Numbers
(r'\d+[eE][+-]?\d+', Number.Float),
(r'\d+\.\d+([eE][+-]?\d+)?', Number.Float),
(r'0[xX][\da-fA-F]+', Number.Hex),
(r'\d+', Number.Integer),
# Strings
(r"'", String.Char, 'character'),
(r'"', String, 'string'),
(r'[^\s(){}]+', Text),
(r'\s+?', Text), # Whitespace
],
'hole': [
# Holes
(r'[^!{}]+', Comment.Directive),
(r'\{!', Comment.Directive, '#push'),
(r'!\}', Comment.Directive, '#pop'),
(r'[!{}]', Comment.Directive),
],
'module': [
(r'\{-', Comment.Multiline, 'comment'),
(r'[a-zA-Z][\w.]*', Name, '#pop'),
(r'[\W0-9_]+', Text)
],
'comment': HaskellLexer.tokens['comment'],
'character': HaskellLexer.tokens['character'],
'string': HaskellLexer.tokens['string'],
'escape': HaskellLexer.tokens['escape']
}
class CryptolLexer(RegexLexer):
"""
FIXME: A Cryptol2 lexer based on the lexemes defined in the Haskell 98 Report.
.. versionadded:: 2.0
"""
name = 'Cryptol'
aliases = ['cryptol', 'cry']
filenames = ['*.cry']
mimetypes = ['text/x-cryptol']
reserved = ('Arith', 'Bit', 'Cmp', 'False', 'Inf', 'True', 'else',
'export', 'extern', 'fin', 'if', 'import', 'inf', 'lg2',
'max', 'min', 'module', 'newtype', 'pragma', 'property',
'then', 'type', 'where', 'width')
ascii = ('NUL', 'SOH', '[SE]TX', 'EOT', 'ENQ', 'ACK',
'BEL', 'BS', 'HT', 'LF', 'VT', 'FF', 'CR', 'S[OI]', 'DLE',
'DC[1-4]', 'NAK', 'SYN', 'ETB', 'CAN',
'EM', 'SUB', 'ESC', '[FGRU]S', 'SP', 'DEL')
tokens = {
'root': [
# Whitespace:
(r'\s+', Text),
# (r'--\s*|.*$', Comment.Doc),
(r'//.*$', Comment.Single),
(r'/\*', Comment.Multiline, 'comment'),
# Lexemes:
# Identifiers
(r'\bimport\b', Keyword.Reserved, 'import'),
(r'\bmodule\b', Keyword.Reserved, 'module'),
(r'\berror\b', Name.Exception),
(r'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved),
(r'^[_a-z][\w\']*', Name.Function),
(r"'?[_a-z][\w']*", Name),
(r"('')?[A-Z][\w\']*", Keyword.Type),
# Operators
(r'\\(?![:!#$%&*+.\\/<=>?@^|~-]+)', Name.Function), # lambda operator
(r'(<-|::|->|=>|=)(?![:!#$%&*+.\\/<=>?@^|~-]+)', Operator.Word), # specials
(r':[:!#$%&*+.\\/<=>?@^|~-]*', Keyword.Type), # Constructor operators
(r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator), # Other operators
# Numbers
(r'\d+[eE][+-]?\d+', Number.Float),
(r'\d+\.\d+([eE][+-]?\d+)?', Number.Float),
(r'0[oO][0-7]+', Number.Oct),
(r'0[xX][\da-fA-F]+', Number.Hex),
(r'\d+', Number.Integer),
# Character/String Literals
(r"'", String.Char, 'character'),
(r'"', String, 'string'),
# Special
(r'\[\]', Keyword.Type),
(r'\(\)', Name.Builtin),
(r'[][(),;`{}]', Punctuation),
],
'import': [
# Import statements
(r'\s+', Text),
(r'"', String, 'string'),
# after "funclist" state
(r'\)', Punctuation, '#pop'),
(r'qualified\b', Keyword),
# import X as Y
(r'([A-Z][\w.]*)(\s+)(as)(\s+)([A-Z][\w.]*)',
bygroups(Name.Namespace, Text, Keyword, Text, Name), '#pop'),
# import X hiding (functions)
(r'([A-Z][\w.]*)(\s+)(hiding)(\s+)(\()',
bygroups(Name.Namespace, Text, Keyword, Text, Punctuation), 'funclist'),
# import X (functions)
(r'([A-Z][\w.]*)(\s+)(\()',
bygroups(Name.Namespace, Text, Punctuation), 'funclist'),
# import X
(r'[\w.]+', Name.Namespace, '#pop'),
],
'module': [
(r'\s+', Text),
(r'([A-Z][\w.]*)(\s+)(\()',
bygroups(Name.Namespace, Text, Punctuation), 'funclist'),
(r'[A-Z][\w.]*', Name.Namespace, '#pop'),
],
'funclist': [
(r'\s+', Text),
(r'[A-Z]\w*', Keyword.Type),
(r'(_[\w\']+|[a-z][\w\']*)', Name.Function),
# TODO: these don't match the comments in docs, remove.
#(r'--(?![!#$%&*+./<=>?@^|_~:\\]).*?$', Comment.Single),
#(r'{-', Comment.Multiline, 'comment'),
(r',', Punctuation),
(r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator),
# (HACK, but it makes sense to push two instances, believe me)
(r'\(', Punctuation, ('funclist', 'funclist')),
(r'\)', Punctuation, '#pop:2'),
],
'comment': [
# Multiline Comments
(r'[^/*]+', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline),
],
'character': [
# Allows multi-chars, incorrectly.
(r"[^\\']'", String.Char, '#pop'),
(r"\\", String.Escape, 'escape'),
("'", String.Char, '#pop'),
],
'string': [
(r'[^\\"]+', String),
(r"\\", String.Escape, 'escape'),
('"', String, '#pop'),
],
'escape': [
(r'[abfnrtv"\'&\\]', String.Escape, '#pop'),
(r'\^[][A-Z@^_]', String.Escape, '#pop'),
('|'.join(ascii), String.Escape, '#pop'),
(r'o[0-7]+', String.Escape, '#pop'),
(r'x[\da-fA-F]+', String.Escape, '#pop'),
(r'\d+', String.Escape, '#pop'),
(r'\s+\\', String.Escape, '#pop'),
],
}
EXTRA_KEYWORDS = set(('join', 'split', 'reverse', 'transpose', 'width',
'length', 'tail', '<<', '>>', '<<<', '>>>', 'const',
'reg', 'par', 'seq', 'ASSERT', 'undefined', 'error',
'trace'))
def get_tokens_unprocessed(self, text):
stack = ['root']
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text, stack):
if token is Name and value in self.EXTRA_KEYWORDS:
yield index, Name.Builtin, value
else:
yield index, token, value
class LiterateLexer(Lexer):
"""
Base class for lexers of literate file formats based on LaTeX or Bird-style
(prefixing each code line with ">").
Additional options accepted:
`litstyle`
If given, must be ``"bird"`` or ``"latex"``. If not given, the style
is autodetected: if the first non-whitespace character in the source
is a backslash or percent character, LaTeX is assumed, else Bird.
"""
bird_re = re.compile(r'(>[ \t]*)(.*\n)')
def __init__(self, baselexer, **options):
self.baselexer = baselexer
Lexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
style = self.options.get('litstyle')
if style is None:
style = (text.lstrip()[0:1] in '%\\') and 'latex' or 'bird'
code = ''
insertions = []
if style == 'bird':
# bird-style
for match in line_re.finditer(text):
line = match.group()
m = self.bird_re.match(line)
if m:
insertions.append((len(code),
[(0, Comment.Special, m.group(1))]))
code += m.group(2)
else:
insertions.append((len(code), [(0, Text, line)]))
else:
# latex-style
from pygments.lexers.markup import TexLexer
lxlexer = TexLexer(**self.options)
codelines = 0
latex = ''
for match in line_re.finditer(text):
line = match.group()
if codelines:
if line.lstrip().startswith('\\end{code}'):
codelines = 0
latex += line
else:
code += line
elif line.lstrip().startswith('\\begin{code}'):
codelines = 1
latex += line
insertions.append((len(code),
list(lxlexer.get_tokens_unprocessed(latex))))
latex = ''
else:
latex += line
insertions.append((len(code),
list(lxlexer.get_tokens_unprocessed(latex))))
for item in do_insertions(insertions, self.baselexer.get_tokens_unprocessed(code)):
yield item
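# Editor's note: for reference, a Bird-style literate source interleaves prose
# with code lines prefixed by ">", e.g.
#
#   This paragraph is plain text.
#
#   > main :: IO ()
#   > main = putStrLn "hello"
#
# while the LaTeX style wraps code in \begin{code} ... \end{code} blocks.  The
# subclasses below only choose the base lexer (and, for Agda, force LaTeX style).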
class LiterateHaskellLexer(LiterateLexer):
"""
For Literate Haskell (Bird-style or LaTeX) source.
Additional options accepted:
`litstyle`
If given, must be ``"bird"`` or ``"latex"``. If not given, the style
is autodetected: if the first non-whitespace character in the source
is a backslash or percent character, LaTeX is assumed, else Bird.
.. versionadded:: 0.9
"""
name = 'Literate Haskell'
aliases = ['lhs', 'literate-haskell', 'lhaskell']
filenames = ['*.lhs']
mimetypes = ['text/x-literate-haskell']
def __init__(self, **options):
hslexer = HaskellLexer(**options)
LiterateLexer.__init__(self, hslexer, **options)
class LiterateIdrisLexer(LiterateLexer):
"""
For Literate Idris (Bird-style or LaTeX) source.
Additional options accepted:
`litstyle`
If given, must be ``"bird"`` or ``"latex"``. If not given, the style
is autodetected: if the first non-whitespace character in the source
is a backslash or percent character, LaTeX is assumed, else Bird.
.. versionadded:: 2.0
"""
name = 'Literate Idris'
aliases = ['lidr', 'literate-idris', 'lidris']
filenames = ['*.lidr']
mimetypes = ['text/x-literate-idris']
def __init__(self, **options):
hslexer = IdrisLexer(**options)
LiterateLexer.__init__(self, hslexer, **options)
class LiterateAgdaLexer(LiterateLexer):
"""
For Literate Agda source.
Additional options accepted:
`litstyle`
If given, must be ``"bird"`` or ``"latex"``. If not given, the style
is autodetected: if the first non-whitespace character in the source
is a backslash or percent character, LaTeX is assumed, else Bird.
.. versionadded:: 2.0
"""
name = 'Literate Agda'
aliases = ['lagda', 'literate-agda']
filenames = ['*.lagda']
mimetypes = ['text/x-literate-agda']
def __init__(self, **options):
agdalexer = AgdaLexer(**options)
LiterateLexer.__init__(self, agdalexer, litstyle='latex', **options)
class LiterateCryptolLexer(LiterateLexer):
"""
For Literate Cryptol (Bird-style or LaTeX) source.
Additional options accepted:
`litstyle`
If given, must be ``"bird"`` or ``"latex"``. If not given, the style
is autodetected: if the first non-whitespace character in the source
is a backslash or percent character, LaTeX is assumed, else Bird.
.. versionadded:: 2.0
"""
name = 'Literate Cryptol'
aliases = ['lcry', 'literate-cryptol', 'lcryptol']
filenames = ['*.lcry']
mimetypes = ['text/x-literate-cryptol']
def __init__(self, **options):
crylexer = CryptolLexer(**options)
LiterateLexer.__init__(self, crylexer, **options)
class KokaLexer(RegexLexer):
"""
Lexer for the `Koka <http://koka.codeplex.com>`_
language.
.. versionadded:: 1.6
"""
name = 'Koka'
aliases = ['koka']
filenames = ['*.kk', '*.kki']
mimetypes = ['text/x-koka']
keywords = [
'infix', 'infixr', 'infixl',
'type', 'cotype', 'rectype', 'alias',
'struct', 'con',
'fun', 'function', 'val', 'var',
'external',
'if', 'then', 'else', 'elif', 'return', 'match',
'private', 'public', 'private',
'module', 'import', 'as',
'include', 'inline',
'rec',
'try', 'yield', 'enum',
'interface', 'instance',
]
# keywords that are followed by a type
typeStartKeywords = [
'type', 'cotype', 'rectype', 'alias', 'struct', 'enum',
]
# keywords valid in a type
typekeywords = [
'forall', 'exists', 'some', 'with',
]
# builtin names and special names
builtin = [
'for', 'while', 'repeat',
'foreach', 'foreach-indexed',
'error', 'catch', 'finally',
'cs', 'js', 'file', 'ref', 'assigned',
]
# symbols that can be in an operator
symbols = r'[$%&*+@!/\\^~=.:\-?|<>]+'
# symbol boundary: an operator keyword should not be followed by any of these
sboundary = '(?!'+symbols+')'
# name boundary: a keyword should not be followed by any of these
boundary = '(?![\w/])'
# koka token abstractions
tokenType = Name.Attribute
tokenTypeDef = Name.Class
tokenConstructor = Generic.Emph
# main lexer
tokens = {
'root': [
include('whitespace'),
# go into type mode
(r'::?' + sboundary, tokenType, 'type'),
(r'(alias)(\s+)([a-z]\w*)?', bygroups(Keyword, Text, tokenTypeDef),
'alias-type'),
(r'(struct)(\s+)([a-z]\w*)?', bygroups(Keyword, Text, tokenTypeDef),
'struct-type'),
((r'(%s)' % '|'.join(typeStartKeywords)) +
r'(\s+)([a-z]\w*)?', bygroups(Keyword, Text, tokenTypeDef),
'type'),
# special sequences of tokens (we use ?: for non-capturing group as
# required by 'bygroups')
(r'(module)(\s+)(interface\s+)?((?:[a-z]\w*/)*[a-z]\w*)',
bygroups(Keyword, Text, Keyword, Name.Namespace)),
(r'(import)(\s+)((?:[a-z]\w*/)*[a-z]\w*)'
r'(?:(\s*)(=)(\s*)((?:qualified\s*)?)'
r'((?:[a-z]\w*/)*[a-z]\w*))?',
bygroups(Keyword, Text, Name.Namespace, Text, Keyword, Text,
Keyword, Name.Namespace)),
(r'(^(?:(?:public|private)\s*)?(?:function|fun|val))'
r'(\s+)([a-z]\w*|\((?:' + symbols + r'|/)\))',
bygroups(Keyword, Text, Name.Function)),
(r'(^(?:(?:public|private)\s*)?external)(\s+)(inline\s+)?'
r'([a-z]\w*|\((?:' + symbols + r'|/)\))',
bygroups(Keyword, Text, Keyword, Name.Function)),
# keywords
(r'(%s)' % '|'.join(typekeywords) + boundary, Keyword.Type),
(r'(%s)' % '|'.join(keywords) + boundary, Keyword),
(r'(%s)' % '|'.join(builtin) + boundary, Keyword.Pseudo),
(r'::?|:=|\->|[=.]' + sboundary, Keyword),
# names
(r'((?:[a-z]\w*/)*)([A-Z]\w*)',
bygroups(Name.Namespace, tokenConstructor)),
(r'((?:[a-z]\w*/)*)([a-z]\w*)', bygroups(Name.Namespace, Name)),
(r'((?:[a-z]\w*/)*)(\((?:' + symbols + r'|/)\))',
bygroups(Name.Namespace, Name)),
(r'_\w*', Name.Variable),
# literal string
(r'@"', String.Double, 'litstring'),
# operators
(symbols + "|/(?![*/])", Operator),
(r'`', Operator),
(r'[{}()\[\];,]', Punctuation),
# literals. No check for literal characters with len > 1
(r'[0-9]+\.[0-9]+([eE][\-+]?[0-9]+)?', Number.Float),
(r'0[xX][0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r"'", String.Char, 'char'),
(r'"', String.Double, 'string'),
],
# type started by alias
'alias-type': [
(r'=', Keyword),
include('type')
],
# type started by struct
'struct-type': [
(r'(?=\((?!,*\)))', Punctuation, '#pop'),
include('type')
],
# type started by colon
'type': [
(r'[(\[<]', tokenType, 'type-nested'),
include('type-content')
],
# type nested in brackets: can contain parameters, comma etc.
'type-nested': [
(r'[)\]>]', tokenType, '#pop'),
(r'[(\[<]', tokenType, 'type-nested'),
(r',', tokenType),
(r'([a-z]\w*)(\s*)(:)(?!:)',
bygroups(Name, Text, tokenType)), # parameter name
include('type-content')
],
# shared contents of a type
'type-content': [
include('whitespace'),
# keywords
(r'(%s)' % '|'.join(typekeywords) + boundary, Keyword),
(r'(?=((%s)' % '|'.join(keywords) + boundary + '))',
Keyword, '#pop'), # need to match because names overlap...
# kinds
(r'[EPHVX]' + boundary, tokenType),
# type names
(r'[a-z][0-9]*(?![\w/])', tokenType),
(r'_\w*', tokenType.Variable), # Generic.Emph
(r'((?:[a-z]\w*/)*)([A-Z]\w*)',
bygroups(Name.Namespace, tokenType)),
(r'((?:[a-z]\w*/)*)([a-z]\w+)',
bygroups(Name.Namespace, tokenType)),
# type keyword operators
(r'::|->|[.:|]', tokenType),
# catchall
default('#pop')
],
# comments and literals
'whitespace': [
(r'\n\s*#.*$', Comment.Preproc),
(r'\s+', Text),
(r'/\*', Comment.Multiline, 'comment'),
(r'//.*$', Comment.Single)
],
'comment': [
(r'[^/*]+', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline),
],
'litstring': [
(r'[^"]+', String.Double),
(r'""', String.Escape),
(r'"', String.Double, '#pop'),
],
'string': [
(r'[^\\"\n]+', String.Double),
include('escape-sequence'),
(r'["\n]', String.Double, '#pop'),
],
'char': [
(r'[^\\\'\n]+', String.Char),
include('escape-sequence'),
(r'[\'\n]', String.Char, '#pop'),
],
'escape-sequence': [
(r'\\[nrt\\"\']', String.Escape),
(r'\\x[0-9a-fA-F]{2}', String.Escape),
(r'\\u[0-9a-fA-F]{4}', String.Escape),
# Yes, \U literals are 6 hex digits.
(r'\\U[0-9a-fA-F]{6}', String.Escape)
]
}
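# --- Editor's note: hedged usage sketch showing how these lexers are driven
# through the generic pygments.highlight() entry point; the Haskell snippet is
# arbitrary.
if __name__ == '__main__':  # pragma: no cover
    from pygments import highlight
    from pygments.formatters import TerminalFormatter

    sample = 'main :: IO ()\nmain = putStrLn "hello"\n'
    print(highlight(sample, HaskellLexer(), TerminalFormatter()))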
|
py | 1a2f22e3695171a9affdff58faba065f0b5853c3 | import typing
import gym
on_state_change_type = typing.Callable[[
gym.Space, # State
gym.Space, # Action
float, # Reward
gym.Space, # New state
bool, # Is done
typing.Optional[object], # Info
], type(None)]
def _noop_function(*args, **kwargs):
pass
class BasePolicy(object):
"""A class that represents an exploration policy for Go-Explore"""
def __init__(self, environment: gym.Env):
"""Create exploration policy.
:param environment: OpenAI Gym environment that should be explored
"""
self.environment = environment
self._on_action = _noop_function
@property
def on_action(self):
return self._on_action
@on_action.setter
def on_action(self, new_on_action: on_state_change_type):
self._on_action = new_on_action
def _environment_act(self, current_state: gym.Space, action: gym.Space):
result = self.environment.step(action)
new_state, reward, done, info = result
self._on_action(current_state, action, reward, new_state, done, info)
return result
def explore(self, current_state: gym.Space):
"""Explore from current state.
This method should explore from current_state using the exploration
policy. It can be e.g. random actions, exploration through curiosity,
etc.
The environment should be in current_state in order for this method to
work properly.
:param current_state: Current state of the environment
:returns Latest tuple from env.step call (or None if not explored)
"""
raise NotImplementedError
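# --- Editor's note: hedged example of a concrete policy, not part of the
# original module.  Go-Explore style exploration is often bootstrapped with
# random actions; the step budget below is an arbitrary choice.
class RandomExplorationPolicy(BasePolicy):
    """Explore by taking uniformly random actions from the current state."""

    def __init__(self, environment: gym.Env, steps_per_exploration: int = 10):
        super().__init__(environment)
        self.steps_per_exploration = steps_per_exploration

    def explore(self, current_state: gym.Space):
        result = None
        state = current_state
        for _ in range(self.steps_per_exploration):
            action = self.environment.action_space.sample()
            result = self._environment_act(state, action)
            state, _, done, _ = result
            if done:
                break
        return result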
|
py | 1a2f23047ab44cd2c40947ce2e5c48fea538fe5c | from disco import util
from discodb import DiscoDB, Q
from disco.worker.task_io import task_output_stream
def Open(url, task=None):
if task:
disco_data = task.disco_data
ddfs_data = task.ddfs_data
else:
from disco.settings import DiscoSettings
settings = DiscoSettings()
disco_data = settings['DISCO_DATA']
ddfs_data = settings['DDFS_DATA']
scheme, netloc, rest = util.urlsplit(url)
path, rest = rest.split('!', 1) if '!' in rest else (rest, '')
discodb = DiscoDB.load(open(util.localize(path, disco_data=disco_data,
ddfs_data=ddfs_data)))
if rest:
method_name, arg = rest.split('/', 1) if '/' in rest else (rest, None)
method = getattr(discodb, method_name)
if method_name in ('metaquery', 'query'):
return method(Q.urlscan(arg))
return method(*filter(None, arg))
return discodb
def input_stream(fd, size, url, params):
return Open(url, task=globals().get('Task')), size, url
class DiscoDBOutput(object):
def __init__(self, stream, params):
from discodb import DiscoDBConstructor
self.discodb_constructor = DiscoDBConstructor()
self.stream = stream
self.params = params
self.path = stream.path
def add(self, key, val):
self.discodb_constructor.add(key, val)
def close(self):
def flags():
return dict((flag, getattr(self.params, flag))
for flag in ('unique_items', 'disable_compression')
if hasattr(self.params, flag))
self.discodb_constructor.finalize(**flags()).dump(self.stream)
def discodb_output(stream, partition, url, params):
return DiscoDBOutput(stream, params), 'discodb:{0}'.format(url.split(':', 1)[1])
discodb_stream = (task_output_stream, discodb_output)
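# Editor's note (hedged summary of Open() above): an input URL may carry an
# optional "!method[/argument]" suffix after the local discodb path, e.g.
#   <scheme>://<host>/<path-to-discodb>!query/<q-expression>
# "query"/"metaquery" parse the argument with Q.urlscan(); any other suffix calls
# the named DiscoDB method, and a URL without "!" returns the loaded DiscoDB
# itself.  Output URLs produced by discodb_output are tagged with "discodb:".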
|
py | 1a2f23b7b527dd66060e8951d37926856c5dc26e | """
Python interface module for OSQP solver v0.6.2.post5
"""
from __future__ import print_function
from builtins import object
import osqp._osqp as _osqp # Internal low level module
import numpy as np
import scipy.sparse as spa
from warnings import warn
from platform import system
import osqp.codegen as cg
import osqp.utils as utils
import sys
import qdldl
class OSQP(object):
def __init__(self):
self._model = _osqp.OSQP()
def version(self):
return self._model.version()
def setup(self, P=None, q=None, A=None, l=None, u=None, **settings):
"""
Setup OSQP solver problem of the form
minimize 1/2 x' * P * x + q' * x
subject to l <= A * x <= u
solver settings can be specified as additional keyword arguments
"""
# TODO(bart): this will be unnecessary when the derivative will be in C
self._derivative_cache = {'P': P, 'q': q, 'A': A, 'l': l, 'u': u}
unpacked_data, settings = utils.prepare_data(P, q, A, l, u, **settings)
self._model.setup(*unpacked_data, **settings)
def update(self, q=None, l=None, u=None,
Px=None, Px_idx=np.array([]), Ax=None, Ax_idx=np.array([])):
"""
Update OSQP problem arguments
"""
# get problem dimensions
(n, m) = self._model.dimensions()
# check consistency of the input arguments
if q is not None and len(q) != n:
raise ValueError("q must have length n")
if l is not None:
if not isinstance(l, np.ndarray):
raise TypeError("l must be numpy.ndarray, not %s" %
type(l).__name__)
elif len(l) != m:
raise ValueError("l must have length m")
# Convert values to -OSQP_INFTY
l = np.maximum(l, -_osqp.constant('OSQP_INFTY'))
if u is not None:
if not isinstance(u, np.ndarray):
raise TypeError("u must be numpy.ndarray, not %s" %
type(u).__name__)
elif len(u) != m:
raise ValueError("u must have length m")
# Convert values to OSQP_INFTY
u = np.minimum(u, _osqp.constant('OSQP_INFTY'))
if Ax is None:
if len(Ax_idx) > 0:
raise ValueError("Vector Ax has not been specified")
else:
if len(Ax_idx) > 0 and len(Ax) != len(Ax_idx):
raise ValueError("Ax and Ax_idx must have the same lengths")
if Px is None:
if len(Px_idx) > 0:
raise ValueError("Vector Px has not been specified")
else:
if len(Px_idx) > 0 and len(Px) != len(Px_idx):
raise ValueError("Px and Px_idx must have the same lengths")
if q is None and l is None and u is None and Px is None and Ax is None:
raise ValueError("No updatable data has been specified")
# update linear cost
if q is not None:
self._model.update_lin_cost(q)
# update lower bound
if l is not None and u is None:
self._model.update_lower_bound(l)
# update upper bound
if u is not None and l is None:
self._model.update_upper_bound(u)
# update bounds
if l is not None and u is not None:
self._model.update_bounds(l, u)
# update matrix P
if Px is not None and Ax is None:
self._model.update_P(Px, Px_idx, len(Px))
# update matrix A
if Ax is not None and Px is None:
self._model.update_A(Ax, Ax_idx, len(Ax))
# update matrices P and A
if Px is not None and Ax is not None:
self._model.update_P_A(Px, Px_idx, len(Px), Ax, Ax_idx, len(Ax))
# TODO(bart): this will be unnecessary when the derivative will be in C
# update problem data in self._derivative_cache
if q is not None:
self._derivative_cache["q"] = q
if l is not None:
self._derivative_cache["l"] = l
if u is not None:
self._derivative_cache["u"] = u
if Px is not None:
if Px_idx.size == 0:
self._derivative_cache["P"].data = Px
else:
self._derivative_cache["P"].data[Px_idx] = Px
if Ax is not None:
if Ax_idx.size == 0:
self._derivative_cache["A"].data = Ax
else:
self._derivative_cache["A"].data[Ax_idx] = Ax
# delete results from self._derivative_cache to prohibit
# taking the derivative of unsolved problems
if "results" in self._derivative_cache.keys():
del self._derivative_cache["results"]
def update_settings(self, **kwargs):
"""
Update OSQP solver settings
It is possible to change: 'max_iter', 'eps_abs', 'eps_rel',
'eps_prim_inf', 'eps_dual_inf', 'rho'
'alpha', 'delta', 'polish',
'polish_refine_iter',
'verbose', 'scaled_termination',
'check_termination', 'time_limit',
"""
# get arguments
max_iter = kwargs.pop('max_iter', None)
eps_abs = kwargs.pop('eps_abs', None)
eps_rel = kwargs.pop('eps_rel', None)
eps_prim_inf = kwargs.pop('eps_prim_inf', None)
eps_dual_inf = kwargs.pop('eps_dual_inf', None)
rho = kwargs.pop('rho', None)
alpha = kwargs.pop('alpha', None)
delta = kwargs.pop('delta', None)
polish = kwargs.pop('polish', None)
polish_refine_iter = kwargs.pop('polish_refine_iter', None)
verbose = kwargs.pop('verbose', None)
scaled_termination = kwargs.pop('scaled_termination', None)
check_termination = kwargs.pop('check_termination', None)
warm_start = kwargs.pop('warm_start', None)
time_limit = kwargs.pop('time_limit', None)
# update them
if max_iter is not None:
self._model.update_max_iter(max_iter)
if eps_abs is not None:
self._model.update_eps_abs(eps_abs)
if eps_rel is not None:
self._model.update_eps_rel(eps_rel)
if eps_prim_inf is not None:
self._model.update_eps_prim_inf(eps_prim_inf)
if eps_dual_inf is not None:
self._model.update_eps_dual_inf(eps_dual_inf)
if rho is not None:
self._model.update_rho(rho)
if alpha is not None:
self._model.update_alpha(alpha)
if delta is not None:
self._model.update_delta(delta)
if polish is not None:
self._model.update_polish(polish)
if polish_refine_iter is not None:
self._model.update_polish_refine_iter(polish_refine_iter)
if verbose is not None:
self._model.update_verbose(verbose)
if scaled_termination is not None:
self._model.update_scaled_termination(scaled_termination)
if check_termination is not None:
self._model.update_check_termination(check_termination)
if warm_start is not None:
self._model.update_warm_start(warm_start)
if time_limit is not None:
self._model.update_time_limit(time_limit)
if max_iter is None and \
eps_abs is None and \
eps_rel is None and \
eps_prim_inf is None and \
eps_dual_inf is None and \
rho is None and \
alpha is None and \
delta is None and \
polish is None and \
polish_refine_iter is None and \
verbose is None and \
scaled_termination is None and \
check_termination is None and \
                warm_start is None and \
                time_limit is None:
            raise ValueError("No updatable settings have been specified!")
def solve(self):
"""
Solve QP Problem
"""
# Solve QP
results = self._model.solve()
# TODO(bart): this will be unnecessary when the derivative will be in C
self._derivative_cache['results'] = results
return results
def warm_start(self, x=None, y=None):
"""
Warm start primal or dual variables
"""
# get problem dimensions
(n, m) = self._model.dimensions()
if x is not None:
if len(x) != n:
raise ValueError("Wrong dimension for variable x")
if y is None:
self._model.warm_start_x(x)
if y is not None:
if len(y) != m:
raise ValueError("Wrong dimension for variable y")
if x is None:
self._model.warm_start_y(y)
if x is not None and y is not None:
self._model.warm_start(x, y)
if x is None and y is None:
raise ValueError("Unrecognized fields")
def codegen(self, folder, project_type='', parameters='vectors',
python_ext_name='emosqp', force_rewrite=False, compile_python_ext=True,
FLOAT=False, LONG=True):
"""
Generate embeddable C code for the problem
"""
# Check parameters arguments
if parameters == 'vectors':
embedded = 1
elif parameters == 'matrices':
embedded = 2
else:
raise ValueError("Unknown value of 'parameters' argument.")
# Set float and long flags
if FLOAT:
float_flag = 'ON'
else:
float_flag = 'OFF'
if LONG:
long_flag = 'ON'
else:
long_flag = 'OFF'
# Check project_type argument
expectedProject = ('', 'Makefile', 'MinGW Makefiles',
'Unix Makefiles', 'CodeBlocks', 'Xcode')
if project_type not in expectedProject:
raise ValueError("Unknown value of 'project_type' argument.")
if project_type == 'Makefile':
if system() == 'Windows':
project_type = 'MinGW Makefiles'
elif system() == 'Linux' or system() == 'Darwin':
project_type = 'Unix Makefiles'
# Convert workspace to Python
sys.stdout.write("Getting workspace from OSQP object... \t\t\t\t")
sys.stdout.flush()
work = self._model._get_workspace()
print("[done]")
# Generate code with codegen module
cg.codegen(work, folder, python_ext_name, project_type, compile_python_ext,
embedded, force_rewrite, float_flag, long_flag)
def derivative_iterative_refinement(self, rhs, max_iter=20, tol=1e-12):
M = self._derivative_cache['M']
# Prefactor
solver = self._derivative_cache['solver']
sol = solver.solve(rhs)
for k in range(max_iter):
delta_sol = solver.solve(rhs - M @ sol)
sol = sol + delta_sol
if np.linalg.norm(M @ sol - rhs) < tol:
break
if k == max_iter - 1:
warn("max_iter iterative refinement reached.")
return sol
def adjoint_derivative(self, dx=None, dy_u=None, dy_l=None,
P_idx=None, A_idx=None, eps_iter_ref=1e-04):
"""
Compute adjoint derivative after solve.
"""
P, q = self._derivative_cache['P'], self._derivative_cache['q']
A = self._derivative_cache['A']
l, u = self._derivative_cache['l'], self._derivative_cache['u']
try:
results = self._derivative_cache['results']
except KeyError:
raise ValueError("Problem has not been solved. "
"You cannot take derivatives. "
"Please call the solve function.")
if results.info.status != "solved":
raise ValueError("Problem has not been solved to optimality. "
"You cannot take derivatives")
m, n = A.shape
x = results.x
y = results.y
y_u = np.maximum(y, 0)
y_l = -np.minimum(y, 0)
if A_idx is None:
A_idx = A.nonzero()
if P_idx is None:
P_idx = P.nonzero()
if dy_u is None:
dy_u = np.zeros(m)
if dy_l is None:
dy_l = np.zeros(m)
# Make sure M matrix exists
if 'M' not in self._derivative_cache:
# Multiply second-third row by diag(y_u)^-1 and diag(y_l)^-1
# to make the matrix symmetric
inv_dia_y_u = spa.diags(np.reciprocal(y_u + 1e-20))
inv_dia_y_l = spa.diags(np.reciprocal(y_l + 1e-20))
M = spa.bmat([
[P, A.T, -A.T],
[A, spa.diags(A @ x - u) @ inv_dia_y_u, None],
[-A, None, spa.diags(l - A @ x) @ inv_dia_y_l]
], format='csc')
delta = spa.bmat([[eps_iter_ref * spa.eye(n), None],
[None, -eps_iter_ref * spa.eye(2 * m)]],
format='csc')
self._derivative_cache['M'] = M
self._derivative_cache['solver'] = qdldl.Solver(M + delta)
rhs = - np.concatenate([dx, dy_u, dy_l])
r_sol = self.derivative_iterative_refinement(rhs)
r_x, r_yu, r_yl = np.split(r_sol, [n, n+m])
# Extract derivatives for the constraints
rows, cols = A_idx
dA_vals = (y_u[rows] - y_l[rows]) * r_x[cols] + \
(r_yu[rows] - r_yl[rows]) * x[cols]
dA = spa.csc_matrix((dA_vals, (rows, cols)), shape=A.shape)
du = - r_yu
dl = r_yl
# Extract derivatives for the cost (P, q)
rows, cols = P_idx
dP_vals = .5 * (r_x[rows] * x[cols] + r_x[cols] * x[rows])
dP = spa.csc_matrix((dP_vals, P_idx), shape=P.shape)
dq = r_x
return (dP, dq, dA, dl, du)
|
py | 1a2f24660a7e35fffbe34868aabb5aba18a8303c | """
Base interface for a reader class
"""
import numpy as np
import logging
from pathlib import Path
import matplotlib.pyplot as plt
import pandas as pd
logger = logging.getLogger(__name__)
import PyFLOTRAN.utils.SubFishModule as subfish
import seaborn as sns
class BaseReader:
data: np.ndarray # Hint of self.data array
info: dict
def __init__(self, filename=None, header=False, **kwargs):
self.filename = Path(filename)
self.info = {"reader": {}}
self.data = None
self.header = header
self.__dict__.update(kwargs)
self.open_file(filename)
def read_file(self, opened_file):
"""
Reads the data and stores it inside the class
:return:
"""
pass
def open_file(self, filename):
with open(filename) as opened_file:
if self.header:
opened_file.readline() # For now, skips the header if it has
self.read_file(opened_file)
self.build_info()
def read_header(self, opened_file):
"""
Reads the header of the file
:return:
"""
pass
def get_data(self) -> np.ndarray:
"""
Outputs the read data
:return:
"""
return np.array(0)
def build_info(self):
"""
Generates a dictionary containing the basic info of the read data
:return:
"""
self.info = {}
def global_coords_to_local(self, x_local_to_global, y_local_to_global):
"""Converts global data coordinates into local"""
assert len(self.data.shape) >= 2 and self.data.shape[1] >= 2, "Error in data shape"
self.data[:, 0] -= x_local_to_global
self.data[:, 1] -= y_local_to_global
def local_coords_to_global(self, x_local_to_global, y_local_to_global):
"""Converts local data coordinates into global"""
assert len(self.data.shape) >= 2 and self.data.shape[1] >= 2, "Error in data shape"
self.data[:, 0] += x_local_to_global
self.data[:, 1] += y_local_to_global
def dump_to_csv(self, output_file, delimiter=","):
"""
Writes the data into a csv file
:param output_file:
:return:
"""
print(f"Starting dump into {output_file}")
np.savetxt(output_file, self.get_data(), delimiter=delimiter)
print(f"The data has been properly exported to the {output_file} file")
def create_postprocess_dict(self):
self.postprocessing_dict = Path().cwd() / "postprocess"
self.postprocessing_dict.mkdir(exist_ok=True)
def generate_subfish_data(self, subfish_dict: dict, unit_factor=1/(365 * 24), unit_name='d') -> pd.DataFrame:
"""
This method reads a given subfish dict and returns the calculated results
Args:
subfish_dict: dictionary containing subfish module parameters
            unit_factor: factor multiplying the results (originally computed in seconds)
            unit_name: label for the time unit used in the output column name
        Returns:
            DataFrame containing the times and computed values
"""
if not subfish_dict:
raise AttributeError('Please, provide a suitable subfish dict object')
tang_sol = subfish.calculate_tang(subfish_dict)
tang_sol[0] *= unit_factor
tang_sol_pd = pd.DataFrame({f'Time [{unit_name}]': tang_sol[0],
'Result [M]': tang_sol[1]
}
)
return tang_sol_pd
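# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# A minimal concrete reader built on BaseReader: it fills self.data in read_file()
# and returns it from get_data(). The class name, the whitespace-separated numeric
# file format, and the file names in the commented usage are assumptions made only
# for this example.
class TwoColumnReader(BaseReader):
    def read_file(self, opened_file):
        # Parse whitespace-separated columns into an (n, m) float array
        self.data = np.loadtxt(opened_file)

    def get_data(self) -> np.ndarray:
        return self.data

# Example usage (assumed file names):
# reader = TwoColumnReader(filename="points.dat", header=True)
# reader.global_coords_to_local(x_local_to_global=100.0, y_local_to_global=50.0)
# reader.dump_to_csv("points_local.csv")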
|
py | 1a2f2588b7582d94c297c6556a389c0cf55ecef5 | #!/usr/bin/env python
from __future__ import with_statement, division
from Tkinter import Tk, Frame, Button, Canvas, Label, NW
from array import array
from PIL import Image as PILImage, ImageTk, ImageDraw
def tileFromPlanar(s, opaqueBase=0):
nPlanes = len(s) // 8
im = PILImage.new('P', (8, 8))
pixels = im.load()
if pixels is None:
print "Ouch!", repr(im)
for y in range(8):
planedata = [ord(c) for c in s[y::8]]
for x in range(8):
c = 0
for i in range(nPlanes):
if planedata[i] & (0x80 >> x):
c |= 1 << i
pixels[x, y] = c + opaqueBase if c > 0 else c
return im
def renderAttrPicker(tileImg, palette, value):
im = PILImage.new('P', (128, 32))
im.putpalette(palette)
for i in range(4):
x = i * 32
t = 0 if i == value else 3
im.paste(i * 4 + 1, (x + 16, t, x + 32 - t, 16))
im.paste(i * 4 + 2, (x + t, 16, x + 16, 32 - t))
im.paste(i * 4 + 3, (x + 16, 16, x + 32 - t, 32 - t))
im.paste(tileImg.resize((16, 16)), (value * 32, 0))
#im.show()
return im
def renderChrFile(tiledata, palette, opaqueBase=0):
tiles = [tiledata[i:i + 16] for i in range(0, len(tiledata), 16)]
rows = [tiles[i:i + 16] for i in range(0, len(tiles), 16)]
h = len(rows) * 8
w = 128
im = PILImage.new('P', (w, h))
y = 0
#palette = [0, 0, 0, 153, 102, 0, 102, 204, 0, 255, 204, 0]
im.putpalette(palette)
for row in rows:
x = 0
for tiledata in row:
tile = tileFromPlanar(tiledata, opaqueBase)
im.paste(tile, (x, y))
x += 8
y += 8
return im
class NamFile:
defaultNESPalette = \
"\x0f\x00\x10\x30\x0f\x12\x1a\x30\x0f\x1a\x2c\x30\x0f\x12\x14\x30"
nesclut = [
(0x80,0x80,0x80), (0x00,0x00,0xBB), (0x37,0x00,0xBF), (0x84,0x00,0xA6),
(0xBB,0x00,0x6A), (0xB7,0x00,0x1E), (0xB3,0x00,0x00), (0x91,0x26,0x00),
(0x7B,0x2B,0x00), (0x00,0x3E,0x00), (0x00,0x48,0x0D), (0x00,0x3C,0x22),
(0x00,0x2F,0x66), (0x00,0x00,0x00), (0x05,0x05,0x05), (0x05,0x05,0x05),
(0xC8,0xC8,0xC8), (0x00,0x59,0xFF), (0x44,0x3C,0xFF), (0xB7,0x33,0xCC),
(0xFF,0x33,0xAA), (0xFF,0x37,0x5E), (0xFF,0x37,0x1A), (0xD5,0x4B,0x00),
(0xC4,0x62,0x00), (0x3C,0x7B,0x00), (0x1E,0x84,0x15), (0x00,0x95,0x66),
(0x00,0x84,0xC4), (0x11,0x11,0x11), (0x09,0x09,0x09), (0x09,0x09,0x09),
(0xFF,0xFF,0xFF), (0x00,0x95,0xFF), (0x6F,0x84,0xFF), (0xD5,0x6F,0xFF),
(0xFF,0x77,0xCC), (0xFF,0x6F,0x99), (0xFF,0x7B,0x59), (0xFF,0x91,0x5F),
(0xFF,0xA2,0x33), (0xA6,0xBF,0x00), (0x51,0xD9,0x6A), (0x4D,0xD5,0xAE),
(0x00,0xD9,0xFF), (0x66,0x66,0x66), (0x0D,0x0D,0x0D), (0x0D,0x0D,0x0D),
(0xFF,0xFF,0xFF), (0x84,0xBF,0xFF), (0xBB,0xBB,0xFF), (0xD0,0xBB,0xFF),
(0xFF,0xBF,0xEA), (0xFF,0xBF,0xCC), (0xFF,0xC4,0xB7), (0xFF,0xCC,0xAE),
(0xFF,0xD9,0xA2), (0xCC,0xE1,0x99), (0xAE,0xEE,0xB7), (0xAA,0xF7,0xEE),
(0xB3,0xEE,0xFF), (0xDD,0xDD,0xDD), (0x11,0x11,0x11), (0x11,0x11,0x11)
]
def __init__(self, chrfilename, palfilename='', namfilename=None):
self.loadchr(chrfilename)
self.loadpal(palfilename)
self.loadnam(namfilename)
def loadchr(self, chrfilename):
with open(chrfilename, 'rb') as infp:
chrdata = infp.read(4096)
self.chrdata = chrdata
if len(self.chrdata) != 4096:
raise ValueError("not enough data for pattern table")
def loadpal(self, palfilename):
self.palfilename = palfilename
try:
with open(palfilename, 'rb') as infp:
pal = infp.read(16)
            if len(pal) != 16:
                raise ValueError("not enough data for palette")
except IOError, e:
import errno
if e.errno in (errno.ENOENT, errno.EINVAL):
pal = self.defaultNESPalette
else:
raise
self.clut = []
for c in pal:
self.clut.extend(self.nesclut[ord(c) & 0x3F])
def loadnam(self, namfilename):
print "namfilename is", namfilename
if namfilename is not None:
try:
with open(namfilename, 'rb') as infp:
namdata = infp.read(1152)
if ((len(namdata) != 1024
and namdata.startswith('\x04\x00'))
or namfilename.lower().endswith('.pkb')):
print "unpacking"
namdata = UnPackBits(namdata[2:]).flush().tostring()
if len(namdata) != 1024:
raise ValueError("not enough data for nametable")
self.namdata = array('B', namdata)
except IOError, e:
import errno
if e.errno == errno.ENOENT:
namfilename = None
else:
raise
if namfilename is None:
self.namdata = array('B', [0 for i in range(1024)])
self.namfilename = namfilename
self.setUnsaved(False)
def setUnsaved(self, isSaved):
import datetime
self.unsaved = (datetime.datetime.now()
if isSaved
else False)
def savenam(self, namfilename=None):
if namfilename is None:
namfilename = self.namfilename
s = self.namdata.tostring()
if namfilename.lower().endswith('.pkb'):
s = "\x04\x00" + PackBits(s).flush().tostring()
with open(namfilename, 'wb') as outfp:
outfp.write(s)
self.namfilename = namfilename
self.setUnsaved(False)
def getTile(self, x, y):
if x < 0 or x >= 32 or y < 0 or y >= 30:
return None
nameIdx = y * 32 + x
tileNo = self.namdata[nameIdx]
attrIdx = (y >> 2) * 8 + (x >> 2) + 960
attrShift = ((y & 0x02) << 1) | (x & 0x02)
attrNo = (self.namdata[attrIdx] >> attrShift) & 0x03
return (tileNo, attrNo)
def setTile(self, x, y, tileNo, attrNo=None):
if x < 0 or x >= 32 or y < 0 or y >= 30:
return
nameIdx = y * 32 + x
self.namdata[nameIdx] = tileNo
if attrNo is not None:
attrIdx = (y >> 2) * 8 + (x >> 2) + 960
attrShift = ((y & 0x02) << 1) | (x & 0x02)
attrByte = (attrNo & 0x03) << attrShift
attrByte |= self.namdata[attrIdx] & ~(0x03 << attrShift)
self.namdata[attrIdx] = attrByte
def getTileData(self, tileNo):
return self.chrdata[tileNo * 16:tileNo * 16 + 16]
def renderTile(self, tileNo, attrNo):
return tileFromPlanar(self.getTileData(tileNo), attrNo * 4)
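# Note on the attribute-table math used by getTile/setTile above (added comment):
# the NES stores one 2-bit palette attribute per 2x2-tile area, eight areas per
# attribute row, starting at byte 960 of the nametable. Worked example for the
# tile at (x=5, y=3):
#   attrIdx   = (3 >> 2) * 8 + (5 >> 2) + 960 = 0 + 1 + 960 = 961
#   attrShift = ((3 & 0x02) << 1) | (5 & 0x02) = 4 | 0 = 4
# so that tile's attribute occupies bits 4-5 of namdata[961].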
def build_menubar(parent, mbardata):
from Tkinter import Menu
menubar = Menu(parent)
parent.config(menu=menubar)
menus = []
for (label, items) in mbardata:
menu = Menu(menubar)
menus.append(menu)
menubar.add_cascade(label=label, menu=menu)
for item in items:
if item == '-':
menu.add_separator()
else:
label = item[0]
underline = label.find('&')
if underline >= 0:
label = label[:underline] + label[underline+1:]
else:
underline = None
accelerator = item[2] if len(item) > 2 else None
menu.add_command(label=label, command=item[1],
accelerator=accelerator,
underline=underline)
return (menubar, menus)
class TilePicker(Frame):
def __init__(self, parent, doc, **kw):
apply(Frame.__init__, (self, parent), kw)
self.doc = doc
self.tilePicker = None
self.attrPicker = None
self.status = None
self.curTile = 0
self.setAttribute(0)
self.tilePicker = Label(self, image=self.tilePickerPI,
width=128, borderwidth=0)
self.tilePicker.grid(row=0,column=0)
self.tilePicker.bind("<Button-1>", self.tilePickerCallback)
self.attrPicker = Label(self, image=self.attrPickerPI,
borderwidth=0)
self.attrPicker.grid(row=1,column=0)
self.attrPicker.bind("<Button-1>", self.attrPickerCallback)
self.status = Label(self)
self.status.grid(row=2,column=0)
self.setStatus()
def setAttribute(self, value):
self.curAttribute = value & 0x03
self.updateWidgets()
def updateWidgets(self):
self.tilePickerImage = renderChrFile(self.doc.chrdata,
self.doc.clut,
self.curAttribute * 4)
self.tilePickerPI = ImageTk.PhotoImage(self.tilePickerImage)
if self.tilePicker is not None:
self.tilePicker.configure(image=self.tilePickerPI)
previewTile = self.doc.renderTile(self.curTile, self.curAttribute)
self.attrPickerImage = renderAttrPicker(previewTile,
self.doc.clut,
self.curAttribute)
self.attrPickerPI = ImageTk.PhotoImage(self.attrPickerImage)
if self.attrPicker is not None:
self.attrPicker.configure(image=self.attrPickerPI)
self.setStatus()
def setTile(self, tile):
self.curTile = tile
self.setAttribute(self.curAttribute)
def setStatus(self):
if self.status is None:
return
label = "tile $%02x attr %d" % (self.curTile, self.curAttribute)
self.status.configure(text=label)
def tilePickerCallback(self, event):
if event.x >= 0 and event.x < 128 and event.y >= 0 and event.y < 128:
tileX = event.x // 8
tileY = event.y // 8
newTileNo = tileY * 16 + tileX
#print "mouse was clicked on tile", newTileNo
self.setTile(newTileNo)
return
print "mouse was clicked at (%d, %d)" % (event.x, event.y)
def attrPickerCallback(self, event):
if event.x >= 0 and event.x < 128:
attr = event.x // 32
#print "mouse was clicked on attribute", attr
self.setAttribute(attr)
return
print "mouse was clicked at (%d, %d)" % (event.x, event.y)
class NamDisplay(Canvas):
def __init__(self, parent, doc, **kw):
kw['width'] = 512
kw['height'] = 480
kw['relief']='raised'
kw['highlightthickness'] = 0
apply(Canvas.__init__, (self, parent), kw)
self.doc = doc
self.tile = []
im = PILImage.new('RGB', (32, 32))
for y in range(15):
row = []
for x in range(16):
tile = ImageTk.PhotoImage(im)
if True or ((x ^ y) & 1) == 0:
self.create_image(x * 32, y * 32, image=tile, anchor=NW)
row.append(tile)
self.tile.append(row)
self.updating = False
self.updScreen()
def updScreen(self):
self.updating = True
for y in range(0, 30, 2):
for x in range(0, 32, 2):
self.updTile(x, y)
self.updating = False
self.update_idletasks()
def updTile(self, x, y):
if x < 0 or x >= 32 or y < 0 or y >= 30:
return
y = y & ~1
x = x & ~1
im = PILImage.new('RGB', (32, 32))
dst = self.tile[y >> 1][x >> 1]
for y1 in range(2):
for x1 in range(2):
(tileNo, attrNo) = self.doc.getTile(x + x1, y + y1)
tile = self.doc.renderTile(tileNo, attrNo).resize((16, 16))
tile.putpalette(self.doc.clut)
im.paste(tile, (x1 * 16, y1 * 16))
dst.paste(im)
if not self.updating:
self.update_idletasks()
class PackBits():
def __init__(self, toWrite=''):
self.bytes = array('b')
self.closed = False
self.mode = 'wb'
self.name = '<PackBits>'
self.newlines = None
if toWrite:
self.write(toWrite)
def close(self):
self.bytes = None
self.closed = True
def write(self, s):
"""Add a string to the buffer."""
if not self.closed:
self.bytes.fromstring(s)
def tell(self):
return len(self.bytes)
def truncate(self, length):
if not self.closed:
del self[length:]
def writelines(self, seq):
"""Add a sequence of strings to the buffer."""
self.write(''.join(seq))
def flush(self):
"""Compress the data to a file."""
i = 0
base = 0
out = array('b')
while base < len(self.bytes):
# measure the run starting at t
i = 1
imax = min(128, len(self.bytes) - base)
basebyte = self.bytes[base]
while (i < imax
and basebyte == self.bytes[base + i]):
i += 1
# if the run is either length 3 or to the end of the file,
# write it
if i > 2 or base + i == len(self.bytes):
out.append(1 - i)
out.append(self.bytes[base])
base += i
continue
# measure the nonrun starting at t
i = 1
imax = min(128, len(self.bytes) - base)
while (i < imax
and (base + i + 2 >= len(self.bytes)
or self.bytes[base + i] != self.bytes[base + i + 1]
or self.bytes[base + i] != self.bytes[base + i + 2])):
i += 1
out.append(i - 1)
out.extend(self.bytes[base:base + i])
base += i
return out
@staticmethod
def test():
pb = PackBits('stopping stoppping stopppppi')
data = pb.flush()
print repr(data)
class UnPackBits(PackBits):
def flush(self):
out = array('b')
base = 0
while base < len(self.bytes):
c = self.bytes[base]
if c > 0 and c <= 127:
b = self.bytes[base + 1]
out.extend(self.bytes[base + 1:base + c + 2])
base += 2 + c
elif c >= -127:
b = self.bytes[base + 1]
out.fromlist([b] * (1 - c))
base += 2
return out
@staticmethod
def test():
start = 'stopping stoppping stopppppi'
packed = PackBits(start).flush().tostring()
print repr(packed)
unpacked = UnPackBits(packed).flush().tostring()
print repr(unpacked)
print "pass" if start == unpacked else "fail"
class App:
filetypes = [
('NES nametable', '*.nam'),
('NES compressed nametable', '*.pkb'),
('PNG image', '*.png'),
('GIF image', '*.gif'),
('Windows bitmap', '*.bmp')
]
def __init__(self, w, doc):
import sys
self.window = w
self.doc = doc
mbardata = [
("File", [
("&New Nametable", lambda: self.file_new_nam(), "Ctrl+N"),
("&Open Nametable...", lambda: self.file_open_nam(), "Ctrl+O"),
("Open &Pattern Table...", lambda: self.file_open_chr(), "Ctrl+L"),
'-',
("&Save", lambda: self.file_save_nam(), "Ctrl+S"),
("Save &As...", lambda: self.file_save_nam_as(), "Ctrl+A"),
'-',
("E&xit", lambda: self.file_quit(), "Ctrl+Q")
]),
("Help", [
("&About...", lambda: self.about())
])
]
(menubar, menus) = build_menubar(w, mbardata)
w.bind("<Control-n>", lambda e: self.file_new_nam())
w.bind("<Control-N>", lambda e: self.file_new_nam())
w.bind("<Control-o>", lambda e: self.file_open_nam())
w.bind("<Control-O>", lambda e: self.file_open_nam())
w.bind("<Control-l>", lambda e: self.file_open_chr())
w.bind("<Control-L>", lambda e: self.file_open_chr())
w.bind("<Control-s>", lambda e: self.file_save_nam())
w.bind("<Control-S>", lambda e: self.file_save_nam())
w.bind("<Control-q>", lambda e: self.file_quit())
w.bind("<Control-Q>", lambda e: self.file_quit())
self.tilePicker = TilePicker(w, doc)
self.tilePicker.grid(row=0,column=0, sticky=NW)
self.namDisplay = NamDisplay(w, doc, borderwidth=0)
self.namDisplay.grid(row=0,column=1)
self.namDisplay.bind("<Control-Button-1>", self.namPickupCallback)
self.namDisplay.bind("<Control-B1-Motion>", self.namPickupCallback)
self.namDisplay.bind("<Button-1>", self.namWriteCallback)
self.namDisplay.bind("<B1-Motion>", self.namWriteCallback)
w.wm_resizable(0,0)
self.updWindowTitle()
def namPickupCallback(self, event):
if event.x >= 0 and event.x < 512 and event.y >= 0 and event.y < 512:
x = event.x // 16
y = event.y // 16
(tile, attribute) = self.doc.getTile(x, y)
self.tilePicker.curTile = tile
self.tilePicker.setAttribute(attribute)
return
def namWriteCallback(self, event):
if event.x >= 0 and event.x < 512 and event.y >= 0 and event.y < 512:
x = event.x // 16
y = event.y // 16
t = self.tilePicker.curTile
a = self.tilePicker.curAttribute
self.doc.setTile(x, y, t, a)
if not self.doc.unsaved:
self.doc.setUnsaved(True)
self.updWindowTitle()
self.namDisplay.updTile(x, y)
return
def updWindowTitle(self):
nfn = self.doc.namfilename
if nfn is None:
nfn = 'untitled'
if self.doc.unsaved:
nfn = '*' + nfn
appname = '8name II'
title = ' - '.join((nfn, appname))
self.window.title(title)
def file_new_nam(self):
print "File > New Nametable"
def file_open_nam(self):
from tkFileDialog import askopenfilename
filename = askopenfilename(parent=root,
filetypes=self.filetypes,
initialfile=self.doc.namfilename,
title="Open Nametable")
print "file open nam: filename is", filename
if not isinstance(filename, basestring):
return
self.doc.loadnam(filename)
self.namDisplay.updScreen()
self.updWindowTitle()
def file_open_chr(self):
from tkFileDialog import askopenfilename
filename = askopenfilename(parent=root,
filetypes=[('Pattern Table', '*.chr')],
initialfile=self.doc.namfilename,
title="Open Pattern Table")
if not isinstance(filename, str):
return
self.doc.loadchr(filename)
self.tilePicker.updateWidgets()
self.namDisplay.updScreen()
def file_save_nam(self):
if self.doc.namfilename is None:
return self.file_save_nam_as()
self.doc.savenam()
self.updWindowTitle()
def file_save_nam_as(self):
from tkFileDialog import asksaveasfilename
filename = asksaveasfilename(parent=root,
filetypes=self.filetypes,
title="Save Nametable As")
ext = filename[-4:].lower()
if ext in ('.png', '.gif', '.bmp'):
print "Would save image to", filename
else:
self.doc.savenam(filename)
self.updWindowTitle()
def file_quit(self):
self.window.destroy()
root = Tk()
app = App(root, NamFile('../spritecans.chr'))
root.mainloop()
print "remain:"
print "1. implement image saving"
print "2. implement and test loading"
print "3. implement and test compressed pkb support"
print "4. Implement stub methods for File menu items"
print "5. Warn on closing document where doc.unsaved is not False"
print "6. Write palette editor"
|
py | 1a2f2709e216f80da7171499ad9a4b12bdbb6cb6 | # Write a Python program to find out the number of CPUs using.
import multiprocessing
print(multiprocessing.cpu_count())
|
py | 1a2f275364493555282f3be31fdd27bafcd03635 | from denoising_diffusion_pytorch import Unet, GaussianDiffusion, Trainer
import os
def main(train):
if train==True:
model = Unet(
dim = 128,
dim_mults = (1, 2, 2, 2)
).cuda()
diffusion = GaussianDiffusion(
model,
image_size = 32,
timesteps = 1000, # number of steps
loss_type = 'l2' # L1 or L2
).cuda()
trainer = Trainer(
diffusion,
'/home/congen/code/geoml_gan/data/cifar10',
train=True,
dataset_name='cifar10',
image_size=32,
train_batch_size = 64,
train_lr = 2e-4,
train_num_steps = 500001, # total training steps
gradient_accumulate_every = 2, # gradient accumulation steps
ema_decay = 0.9999, # exponential moving average decay
fp16 = True # turn on mixed precision training with apex
)
#trainer.load(20)
trainer.train()
else:
model = Unet(
dim=128,
dim_mults=(1, 2, 2, 2)
).cuda()
diffusion = GaussianDiffusion(
model,
image_size=32,
timesteps=1000, # number of steps
loss_type='l2' # L1 or L2
).cuda()
trainer = Trainer(
diffusion,
'/home/congen/code/geoml_gan/data/cifar10',
train=False,
dataset_name='cifar10',
image_size=32,
train_batch_size=64,
train_lr=2e-4,
train_num_steps=200001, # total training steps
gradient_accumulate_every=2, # gradient accumulation steps
ema_decay=0.9999, # exponential moving average decay
fp16=True # turn on mixed precision training with apex
)
trainer.test()
"""
Usage:
export CUDA_VISIBLE_DEVICES=2
export PORT=6006
export CUDA_HOME=/opt/cuda/cuda-10.2
export TIME_STR=1
python train.py
:return:
"""
if __name__ == '__main__':
if 'CUDA_VISIBLE_DEVICES' not in os.environ:
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
main(False) |
py | 1a2f27d2d5ea5b56f238e84a811160b2415bf74c | # get human_ebv_tpms.py
import pandas as pd
import argparse
import os
import math
import datetime
import subprocess
# get basename from a file and path string
def get_basename(filepath):
import os
return os.path.basename(os.path.splitext(filepath)[0])
# get and format output directory
def format_odir(odir):
import os
cwd = os.getcwd()
if odir != '':
# if first character is not /, use cwd to make this an absolute path
if odir[0] != '/' and odir[0] != '~':
odir = cwd+odir
if odir[-1] != '/':
odir += '/'
return odir
# make a dated output directory for the files used for the tracks
def make_dated_folder(odir, bname):
date = datetime.datetime.now()
date = date.strftime('%y%m%d')
odir = odir+date+'_'+bname+'_figures/'
if not os.path.isdir(odir):
print('Making output directory '+odir)
os.makedirs(odir)
return odir
# get value associated with keyword in the 9th column of gtf
def get_field_value(key, fields):
if key not in fields:
return None
else:
return fields.split(key+' "')[1].split()[0].replace('";','')
# calculate tpm for a column in the abundance table
def get_tpm(df, col):
    new_col = 'TPM_'+col
    total_reads = df[col].sum()
    df[new_col] = df.apply(lambda x: float(x[col]*1000000)/total_reads, axis=1)
    return new_col, df
# calculate log2(TPM+1) for a column in the abundance table
def get_log_tpm(df, col, gene):
    tpm_col = 'TPM_'+col
    if not gene:
        new_col = 'log_'+tpm_col
    else:
        new_col = 'gene_log_'+tpm_col
    df[new_col] = df.apply(lambda x: math.log2(x[tpm_col]+1), axis=1)
    return new_col, df
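# Hedged example (added for illustration; never called by the pipeline below).
# Shows what get_tpm/get_log_tpm add to a table: a TPM_<col> column scaled so the
# dataset sums to one million, plus its log2(TPM+1) counterpart. The column name
# 'toy_dataset' and the read counts are made up for this example only.
def _tpm_helpers_example():
    toy = pd.DataFrame({'transcript_ID': [1, 2, 3], 'toy_dataset': [10, 30, 60]})
    tpm_col, toy = get_tpm(toy, 'toy_dataset')         # TPM_toy_dataset = [1e5, 3e5, 6e5]
    log_col, toy = get_log_tpm(toy, 'toy_dataset', 0)  # log2(TPM + 1)
    return toy[['transcript_ID', tpm_col, log_col]]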
# get gtf file name
parser = argparse.ArgumentParser(description='removes EBV transcripts from GTF file')
parser.add_argument('--human_gtf', help='GTF with human and EBV data')
parser.add_argument('--human_filt_ab', help='Filtered abundance file with human and EBV data')
parser.add_argument('--human_ab', help='Unfiltered abundance file with human and EBV data')
parser.add_argument('--ebv_filt_ab', help='EBV only filtered abundance file')
parser.add_argument('--ebv_ab', help='EBV only unfiltered abundance file')
parser.add_argument('--datasets', help='Comma-separated list of dataset names to use for human+ebv data')
parser.add_argument('--o', help='Prefix for output file')
args = parser.parse_args()
full_gtf = args.human_gtf
full_ab = args.human_filt_ab
full_unf_ab = args.human_ab
ebv_ab = args.ebv_filt_ab
ebv_unf_ab = args.ebv_ab
my_datasets = args.datasets.split(',')
oprefix = args.o
# get all human transcript ids
infile = open(full_gtf, 'r')
human_tids = []
ebv_tids = []
for i, line in enumerate(infile):
line = line.replace('\n', '')
temp = line.split('\t')
fields = temp[-1]
if temp[0] != 'chrEBV' and temp[2] == 'transcript':
human_tids.append(get_field_value('talon_transcript', fields))
elif temp[0] == 'chrEBV' and temp[2] == 'transcript':
ebv_tids.append(get_field_value('talon_transcript', fields))
full_df = pd.read_csv(full_ab, sep='\t')
ebv_df = pd.read_csv(ebv_ab, sep='\t')
# reformat human table
# dc_datasets = ['D4', 'D5', 'D10', 'D11']
datasets = my_datasets
# full_df.drop(dc_datasets, inplace=True, axis=1) # drop datasets we don't want
full_df = full_df.loc[full_df[datasets].sum(axis=1) != 0] # drop transcripts with no reads in datasets we do want
full_df = full_df.loc[full_df['transcript_ID'].isin(human_tids)] # drop ebv transcripts
full_df['ebv'] = 'Human' # give human/ebv designation
full_df = full_df.loc[full_df.transcript_novelty != 'Genomic']
# drop genomic transcripts from the ebv dataset (b/c it's not pre-filtered)
ebv_df = ebv_df.loc[ebv_df.transcript_novelty != 'Genomic']
ebv_df['ebv'] = 'EBV' # human/ebv designation
# merge transcript df so TPMs can be calculated correctly
t_df = pd.concat([full_df, ebv_df])
# combine datasets
combine_datasets = True
if combine_datasets:
t_df['combined'] = t_df[my_datasets].sum(axis=1)
datasets = ['combined']
# # make sure the concatenation worked
# print(t_df.loc[t_df['transcript_ID'] == 121].head())
# print(ebv_df.loc[ebv_df['transcript_ID'] == 121].head())
# print(full_df.loc[full_df['transcript_ID'] == 121].head())
# get tpms and number of human transcripts for
# each dataset and for full/ebv
for d in datasets:
# raw TPM
tpm, t_df = get_tpm(t_df, d)
# log2TPM
log, t_df = get_log_tpm(t_df, d, 0)
# sanity check - sum of all TPMs for each sample
print('TPM total for {}: {}'.format(d, str(t_df[tpm].sum())))
human_df = t_df.loc[(t_df[d] != 0) & (t_df['ebv'] == 'Human')]
ebv_df = t_df.loc[(t_df[d] != 0) & (t_df['ebv'] == 'EBV')]
n_human_transcripts = len(human_df.index)
n_ebv_transcripts = len(ebv_df.index)
print('Number of human transcripts in {}: {}'.format(d, str(n_human_transcripts)))
print('Number of EBV transcripts in {}: {}'.format(d, str(n_ebv_transcripts)))
# add columns for number of dataset human/ebv transcripts
n_transcripts_col = 'n_'+d
t_df[n_transcripts_col] = t_df.apply(lambda x:\
n_human_transcripts if x['ebv'] == 'Human' else n_ebv_transcripts, axis=1)
# add heights geom_text locations for dataset/human/ebv transcripts
human_height = t_df.loc[t_df.ebv == 'Human'][log].max()+1
ebv_height = t_df.loc[t_df.ebv == 'EBV'][log].max()+1
height_col = d+'_height'
t_df[height_col] = t_df.apply(lambda x:\
human_height if x.ebv == 'Human' else ebv_height, axis=1)
# print(human_height)
# print(ebv_height)
# print(t_df.head())
# print(t_df.tail())
# write gene and transcript tables to a csv
t_df['dot_size'] = t_df.apply(lambda x: 1 if x['ebv'] == 'EBV' else 0.6, axis=1)
t_df['alpha'] = t_df.apply(lambda x: 0.5 if x['ebv'] == 'EBV' else 0.2, axis=1)
# bname = get_basename(ebv_ab)
# odir = format_odir(os.path.dirname(ebv_ab))
# odir = make_dated_folder(odir,bname)
to = oprefix+'_ebv_human_transcript_abundance.csv'
t_df.to_csv(to, sep=',', index=False)
## get transcript tpms without filtering for bioreps to use for gene tpms
# read in the unfiltered datasets
full_df = pd.read_csv(full_unf_ab, sep='\t')
ebv_df = pd.read_csv(ebv_unf_ab, sep='\t')
# reformat human table
# dc_datasets = ['D4', 'D5', 'D10', 'D11']
datasets = my_datasets
# full_df.drop(dc_datasets, inplace=True, axis=1) # drop datasets we don't want
full_df = full_df.loc[full_df['transcript_ID'].isin(human_tids)] # drop ebv transcripts
full_df['ebv'] = 'Human' # give human/ebv designation
full_df = full_df.loc[full_df.transcript_novelty != 'Genomic']
# drop genomic transcripts from the ebv dataset (b/c it's not pre-filtered)
ebv_df = ebv_df.loc[ebv_df.transcript_novelty != 'Genomic']
ebv_df['ebv'] = 'EBV' # human/ebv designation
# merge transcript df so TPMs can be calculated correctly
t_df = pd.concat([full_df, ebv_df])
# combine datasets
combine_datasets = True
if combine_datasets:
t_df['combined'] = t_df[datasets].sum(axis=1)
datasets = ['combined']
# # make sure the concatenation worked
# print(t_df.loc[t_df['transcript_ID'] == 121].head())
# print(ebv_df.loc[ebv_df['transcript_ID'] == 121].head())
# print(full_df.loc[full_df['transcript_ID'] == 121].head())
# get tpms and number of human transcripts for
# each dataset and for full/ebv
for d in datasets:
# raw TPM
tpm, t_df = get_tpm(t_df, d)
# log2TPM
log, t_df = get_log_tpm(t_df, d, 0)
# sanity check - sum of all TPMs for each sample
print('TPM total for {}: {}'.format(d, str(t_df[tpm].sum())))
human_df = t_df.loc[(t_df[d] != 0) & (t_df['ebv'] == 'Human')]
ebv_df = t_df.loc[(t_df[d] != 0) & (t_df['ebv'] == 'EBV')]
n_human_transcripts = len(human_df.index)
n_ebv_transcripts = len(ebv_df.index)
print('Number of human transcripts in {}: {}'.format(d, str(n_human_transcripts)))
print('Number of EBV transcripts in {}: {}'.format(d, str(n_ebv_transcripts)))
# add columns for number of dataset human/ebv transcripts
n_transcripts_col = 'n_'+d
t_df[n_transcripts_col] = t_df.apply(lambda x:\
n_human_transcripts if x['ebv'] == 'Human' else n_ebv_transcripts, axis=1)
# add heights geom_text locations for dataset/human/ebv transcripts
human_height = t_df.loc[t_df.ebv == 'Human'][log].max()+1
ebv_height = t_df.loc[t_df.ebv == 'EBV'][log].max()+1
height_col = d+'_height'
t_df[height_col] = t_df.apply(lambda x:\
human_height if x.ebv == 'Human' else ebv_height, axis=1)
# get gene tpms
cols = []
for d in datasets:
cols.append(d)
cols.append('TPM_'+d)
g_df = t_df.groupby(['gene_ID', 'gene_novelty', 'ebv'])[cols].sum()
g_df.reset_index(inplace=True)
# # make sure the groupby worked
# print(g_df.loc[g_df['gene_ID'] == 16].head())
# print(t_df.loc[t_df['gene_ID'] == 16].head())
# print(t_df.loc[t_df['gene_ID'] == 16].head())
# get tpms, heights, and numbers for gene
for d in datasets:
# log2TPM
log, g_df = get_log_tpm(g_df, d, 0)
human_df = g_df.loc[(g_df[d] != 0) & (g_df['ebv'] == 'Human')]
ebv_df = g_df.loc[(g_df[d] != 0) & (g_df['ebv'] == 'EBV')]
n_human_genes = len(human_df.index)
n_ebv_genes = len(ebv_df.index)
print('Number of human genes in {}: {}'.format(d, str(n_human_genes)))
print('Number of EBV genes in {}: {}'.format(d, str(n_ebv_genes)))
# add columns for number of dataset human/ebv genes
n_genes_col = 'n_'+d
g_df[n_genes_col] = g_df.apply(lambda x:\
n_human_genes if x['ebv'] == 'Human' else n_ebv_genes, axis=1)
# add heights geom_text locations for dataset/human/ebv transcripts
human_height = g_df.loc[g_df.ebv == 'Human'][log].max()+0.4
ebv_height = g_df.loc[g_df.ebv == 'EBV'][log].max()+0.4
height_col = d+'_height'
g_df[height_col] = g_df.apply(lambda x:\
human_height if x.ebv == 'Human' else ebv_height, axis=1)
print(human_height)
print(ebv_height)
print(g_df.head())
print(g_df.tail())
# add different dot sizes for human/ebv
# t_df['dot_size'] = t_df.apply(lambda x: 1 if x['ebv'] == 'EBV' else 0.6, axis=1)
g_df['dot_size'] = g_df.apply(lambda x: 1 if x['ebv'] == 'EBV' else 0.6, axis=1)
# t_df['alpha'] = t_df.apply(lambda x: 0.5 if x['ebv'] == 'EBV' else 0.2, axis=1)
g_df['alpha'] = g_df.apply(lambda x: 0.5 if x['ebv'] == 'EBV' else 0.2, axis=1)
# # rename gene/transcript novelty columns
# t_df.rename(index=str, columns={\
# 'transcript_novelty':'Isoform Type'}, inplace=True)
# g_df.rename(index=str, columns={\
# 'gene_novelty':'Gene Type'}, inplace=True)
# write gene table to a csv
go = oprefix+'_ebv_human_gene_abundance.csv'
g_df.to_csv(go, sep=',', index=False)
# make graphs
cmd = 'Rscript plot_ebv_v_human_abundances.R --gene_csv {} --transcript_csv {}'\
' --datasets {}'.format(go, to, ','.join(datasets))
# print(cmd)
|
py | 1a2f27ec83540a6ba9ef180cbaaf5615dbe3c9a6 | #!/usr/bin/env python
"""
Normalize site observed ancestry by genome-wide average.
2*ID_average - ExpectedCopiesOfPop1Ancestry.
@Author: [email protected]
Usage:
HapmixEPop1NormalizedByIDAverage.py -a IDaverage
HapmixEPop1NormalizedByIDAverage.py -h | --help | -v | --version | -f | --format
Notes:
1. Read ExpectedCopiesOfPop1Ancestry from stdin, and output to stdout.
Options:
-a IDaverage Individual average for pop1, one line one person.
-h --help Show this screen.
-v --version Show version.
-f --format Show input/output file format example.
"""
import sys
from docopt import docopt
from signal import signal, SIGPIPE, SIG_DFL
def ShowFormat():
'''File format example'''
print('''
#ExpectedCopiesOfPop1Ancestry, one column each person.
------------------------
1 1 2 1 2 2
1 2 2 1 2 2
#id average:
------------------------
0.8
0.5
0.1
#output:
------------------------
0.40 0.00 -0.20
-0.60 0.00 -0.20
''');
if __name__ == '__main__':
args = docopt(__doc__, version='1.0')
# print(args)
# sys.exit(0)
if(args['--format']):
ShowFormat()
sys.exit(-1)
idav = [] # 2*ID_average
with open(args['-a'],'r') as ifile:
for line in ifile:
line = line.strip()
if line:
idav.append(2 * float(line))
checkLen = True
for line in sys.stdin:
line = line.strip()
if line:
ss = line.split()
ss = [float(x) for x in ss] #number of pop1 copy for each person.
if checkLen:
if len(ss) != len(idav):
                    sys.stderr.write('Error: number of individuals in ID_average file is not the same as that in sys.stdin.\n')
sys.exit(-1)
else:
checkLen = False
out = [ '%.2f'%(y-x) for x,y in zip(idav, ss)]
sys.stdout.write('%s\n'%('\t'.join(out)))
sys.stdout.flush()
sys.stdout.close()
sys.stderr.flush()
sys.stderr.close()
|
py | 1a2f2822f81fe36b28f6dd2487506ab31227c11b | # -*- coding: utf-8 -*-
# Copyright 2021 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for execution_util.py."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import subprocess
from unittest import mock
from gslib import exception
from gslib.tests import testcase
from gslib.utils import execution_util
class TestExecutionUtil(testcase.GsUtilUnitTestCase):
"""Test execution utils."""
@mock.patch.object(subprocess, 'Popen')
def testExternalCommandReturnsNoOutput(self, mock_Popen):
mock_command_process = mock.Mock()
mock_command_process.returncode = 0
mock_command_process.communicate.return_value = (None, None)
mock_Popen.return_value = mock_command_process
stdout, stderr = execution_util.ExecuteExternalCommand(['fake-command'])
self.assertIsNone(stdout)
self.assertIsNone(stderr)
mock_Popen.assert_called_once_with(['fake-command'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
@mock.patch.object(subprocess, 'Popen')
def testExternalCommandReturnsStringOutput(self, mock_Popen):
mock_command_process = mock.Mock()
mock_command_process.returncode = 0
mock_command_process.communicate.return_value = ('a', 'b')
mock_Popen.return_value = mock_command_process
stdout, stderr = execution_util.ExecuteExternalCommand(['fake-command'])
self.assertEqual(stdout, 'a')
self.assertEqual(stderr, 'b')
mock_Popen.assert_called_once_with(['fake-command'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
@mock.patch.object(subprocess, 'Popen')
def testExternalCommandReturnsBytesOutput(self, mock_Popen):
mock_command_process = mock.Mock()
mock_command_process.returncode = 0
mock_command_process.communicate.return_value = (b'a', b'b')
mock_Popen.return_value = mock_command_process
stdout, stderr = execution_util.ExecuteExternalCommand(['fake-command'])
self.assertEqual(stdout, 'a')
self.assertEqual(stderr, 'b')
mock_Popen.assert_called_once_with(['fake-command'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
@mock.patch.object(subprocess, 'Popen')
  def testExternalCommandRaisesOnNonzeroReturnCode(self, mock_Popen):
mock_command_process = mock.Mock()
mock_command_process.returncode = 1
mock_command_process.communicate.return_value = (None, b'error')
mock_Popen.return_value = mock_command_process
with self.assertRaises(exception.ExternalBinaryError):
execution_util.ExecuteExternalCommand(['fake-command'])
mock_Popen.assert_called_once_with(['fake-command'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
@mock.patch.object(subprocess, 'Popen')
def testExternalCommandRaisesFormattedStderr(self, mock_Popen):
mock_command_process = mock.Mock()
mock_command_process.returncode = 1
mock_command_process.communicate.return_value = (None, b'error.\n')
mock_Popen.return_value = mock_command_process
with self.assertRaisesRegexp(exception.ExternalBinaryError, 'error'):
execution_util.ExecuteExternalCommand(['fake-command'])
|
py | 1a2f29c011035f3771d9e24758c92c2c15286a7c | """Collection of services."""
from typing import Any, Dict, List, Optional
from fastapi import HTTPException, status
from tortoise import QuerySet
from teached.users.models import Teacher
from .models import ( # noqa I202
Announcement,
Assignment,
BookMark,
Category,
Course,
CourseDetailPydantic,
Enrollment,
Language,
Lecture,
Requirement,
Review,
Section,
)
from .schema import CourseDetail
from .utils import unique_slug
async def create_course(*, data: Dict, teacher: Teacher) -> str:
"""Create new course.
Args:
data: Dict of new user info.
teacher: Teacher model instance.
Returns:
Slug of the course
"""
languages = data.pop("languages")
categories = data.pop("categories")
requirements = data.pop("requirements")
course = Course(**data, teacher=teacher)
# TODO: change this to signal
course.slug = unique_slug(title=data.get("title"))
await course.save()
for language in languages:
value, created = await Language.get_or_create(name=language.capitalize())
await course.languages.add(value)
for category in categories:
value, created = await Category.get_or_create(name=category.capitalize())
await course.categories.add(value)
for requirement in requirements:
await Requirement.create(name=requirement.capitalize(), course=course)
return course.slug
async def get_published_courses(
*,
search: Optional[str] = None,
category: Optional[str] = None,
language: Optional[str] = None,
level: Optional[str] = None,
price: Optional[str] = None,
discount: Optional[str] = None,
) -> QuerySet[Course]:
"""Return all published courses.
Args:
search: Search courses by title.
category: Filter by category.
language: Filter by language.
level: Filter by level.
price: Filter by price.
discount: Filter by discount.
Returns:
Query set of course.
"""
courses = Course.filter(is_drift=False, is_active=True)
if search:
courses = courses.filter(title=search)
if category:
courses = courses.filter(categories__name=category)
if language:
courses = courses.filter(languages__name=language)
if level:
courses = courses.filter(level=level)
if price:
courses = courses.filter(price=price)
if discount:
courses = courses.filter(discount=discount)
return courses
async def get_published_course(*, slug: str, user: Any) -> CourseDetail:
"""Return a published courses.
Args:
slug: The slug of course.
user: Current authenticated user.
Returns:
Query set of course.
"""
course = await Course.get(is_drift=False, is_active=True, slug=slug)
pydatic_data = await CourseDetailPydantic.from_tortoise_orm(course)
data = pydatic_data.dict()
data.update(
{
"is_authenticated": user is not None,
"has_enroll": False,
"is_owner": False,
"enrollments": await course.enrollments.all().count(),
"reviews": await course.reviews.all().count(),
}
)
if user:
user_student = await user.students.first()
user_teacher = await user.teachers.first()
if user_student:
data.update(
{
"has_enroll": await course.enrollments.filter(
student=user_student
).first()
is not None
}
)
author = await course.teacher
if user_teacher == author:
data.update({"is_owner": True})
# TODO: change it to computed method.
reviews = course.reviews.all()
try:
rate = sum(review.rate for review in await reviews) / await reviews.count()
except ZeroDivisionError:
rate = 0
data.update({"rate": rate})
return CourseDetail(**data)
async def enroll_to_published_course(*, slug: str, student: Any) -> Dict[str, str]:
"""Enroll new student to a published course.
Args:
slug: The slug of course.
student: Student instances.
Returns:
Dict.
Raises:
HTTPException: If use has already enrolled.
"""
course = await Course.get(is_drift=False, is_active=True, slug=slug)
if await course.enrollments.filter(student=student):
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=f"You already enrolled to {course}",
)
if course.price > 0:
print("Payment")
# TODO: add the stripe payment
# stripe()
# TODO: add payment process to the payment model
# Payment()
await Enrollment.create(course=course, student=student)
return {
"detail": f"Yea! you have enrolled to {course}, go and enjoy the course now :)"
}
async def bookmark_a_published_course(*, slug: str, student: Any) -> Dict[str, str]:
"""Bookmark a published course.
Args:
slug: The slug of course.
student: Student instances.
Returns:
Dict.
Raises:
HTTPException: If use has already bookmarked.
"""
course = await Course.get(is_drift=False, is_active=True, slug=slug)
if await course.book_marks.filter(course=course, student=student):
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=f"You already bookmark {course}",
)
await BookMark.create(course=course, student=student)
return {"detail": f"{course} has been bookmarked :)"}
async def get_bookmarks(*, student: Any) -> List[Dict]:
"""Get list of bookmark.
Args:
student: Student instances.
Returns:
List of bookmarked course.
"""
course_list = []
for bookmark in await BookMark.filter(student=student):
course = await bookmark.course
course_list.append(
{"title": f"{course.title}", "cover": {course.cover}, "slug": course.slug}
)
return course_list
async def create_review_for_published_course(
*, slug: str, data: Dict, student: Any
) -> Dict[str, str]:
"""Create review for a published course.
Args:
slug: The slug of course.
data: Dict of data for review creation.
student: Student instances.
Returns:
Dict.
Raises:
HTTPException: If use has has not enroll for the course or
student review the course already.
"""
course = await Course.get(is_drift=False, is_active=True, slug=slug)
if not await course.enrollments.filter(student=student):
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="You need to enroll to the course first",
)
if await course.reviews.filter(student=student):
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="You already review this course",
)
await Review.create(**data, course=course, student=student)
return {"detail": "review has been created."}
async def reviews_course_list(*, slug: str) -> List[Dict]:
"""Get all reviews.
Args:
slug: The slug of course.
Returns:
List of reviews.
"""
course = await Course.get(is_drift=False, is_active=True, slug=slug)
review_list = []
for review in await course.reviews.all():
student = await review.student
user = await student.user
review_list.append(
{
"review": f"{review.review}",
"rate": {review.rate},
"user": {"username": user.username},
}
)
return review_list
async def create_course_section(*, data: Dict, course: Course,) -> Dict:
"""Create course section.
Args:
data: Dict of data for section creation.
course: Course instance.
Returns:
The created section info.
Raises:
HTTPException: if the same section was created before.
"""
section, created = await Section.get_or_create(**data, course=course)
if not created:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="This section was been created before",
)
section.slug = unique_slug(title=section.title)
await section.save()
return {
"title": section.title,
"objective": section.objective,
"order": section.order,
"slug": section.slug,
}
async def create_course_announcement(
*, data: Dict, course: Course, teacher: Teacher
) -> Dict:
"""Create course announcement.
Args:
data: Dict of data for section creation.
course: Course instance.
teacher: Teacher instance.
Returns:
The created announcement info.
Raises:
HTTPException: if the same section was created before.
"""
announcement, created = await Announcement.get_or_create(
**data, course=course, teacher=teacher
)
if not created:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="This announcement was been created before",
)
announcement.slug = unique_slug(title=announcement.title)
await announcement.save()
return {
"title": announcement.title,
"description": announcement.description,
"slug": announcement.slug,
}
async def create_section_lecture(*, data: Dict, section_slug: str) -> Dict:
"""Create section lecture.
Args:
data: Dict of data for section creation.
section_slug: The slug of the section.
Returns:
The created lecture info.
Raises:
HTTPException: if the same lecture was created before.
"""
section = await Section.get(slug=section_slug)
lecture, created = await Lecture.get_or_create(**data, section=section)
if not created:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="This lecture was been created before",
)
lecture.slug = unique_slug(title=lecture.title)
await lecture.save()
return {
"title": lecture.title,
"text": lecture.text,
"video": lecture.video,
"order": section.order,
"slug": section.slug,
}
async def create_section_assignment(*, data: Dict, section_slug: str) -> Dict:
"""Create section assignment.
Args:
data: Dict of data for section creation.
section_slug: The slug of the section.
Returns:
The created assignment info.
Raises:
HTTPException: if the same assignment was created before.
"""
section = await Section.get(slug=section_slug)
assignment, created = await Assignment.get_or_create(**data, section=section)
if not created:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="This assignment was been created before",
)
assignment.slug = unique_slug(title=assignment.title)
await assignment.save()
return {
"title": assignment.title,
"text": assignment.description,
"file": assignment.file,
"slug": section.slug,
}
async def update_course_settings(*, data: Dict, teacher: Teacher, slug: str) -> Dict:
"""Update course settings.
Args:
data: Dict of data for section creation.
teacher: Teacher instance.
slug: Course slug.
Returns:
The updated settings info.
"""
courses = Course.filter(slug=slug, teacher=teacher)
await courses.update(**data)
course = await courses.first()
return {
"is_drift": course.is_drift,
"price": course.price,
"discount": course.discount,
"is_active": course.is_active,
}
|
py | 1a2f29cd7414e2ec385d7c2618051ce26c1d5799 | from __future__ import absolute_import, division, print_function
# LIBTBX_SET_DISPATCHER_NAME phenix.helix_sheet_recs_as_pdb_files
import sys
import iotbx.pdb
from libtbx.utils import Sorry
legend = """phenix.helix_sheet_recs_as_pdb_files:
Given PDB file with HELIX/SHEET records output PDB files corresponding to
each individual HELIX/SHEET record.
How to run:
phenix.helix_sheet_recs_as_pdb_files model.pdb
Feedback:
[email protected]
[email protected]"""
def run(args):
if(len(args)!=1): raise Sorry("PDB file must be provided.")
pdb_inp = iotbx.pdb.input(file_name = args[0])
h = pdb_inp.construct_hierarchy()
asc = h.atom_selection_cache()
sso = pdb_inp.extract_secondary_structure()
for rec in sso.sheets+sso.helices:
file_name = "_".join(rec.as_pdb_str().split())
file_name = file_name[:min(36, len(file_name))]
file_name += ".pdb"
sel_list = rec.as_atom_selections()
assert type(sel_list) == list
if(len(sel_list) == 1):
sel = asc.selection(string=sel_list[0])
else:
sel_str=" or ".join( ["(%s)"%s for s in rec.as_atom_selections()] )
sel = asc.selection(string=sel_str)
h_selected = h.select(sel)
h_selected.write_pdb_file(file_name=file_name)
if (__name__ == "__main__"):
run(args=sys.argv[1:])
|
py | 1a2f2b9c17438fe42dde44958a29be9489995353 | # Author: Jacek Komorowski
# Warsaw University of Technology
import os
import configparser
import time
import numpy as np
class ModelParams:
def __init__(self, model_params_path):
config = configparser.ConfigParser()
config.read(model_params_path)
params = config['MODEL']
self.model_params_path = model_params_path
self.model = params.get('model')
self.output_dim = params.getint('output_dim', 256) # Size of the final descriptor
# Add gating as the last step
if 'vlad' in self.model.lower():
self.cluster_size = params.getint('cluster_size', 64) # Size of NetVLAD cluster
self.gating = params.getboolean('gating', True) # Use gating after the NetVlad
#######################################################################
# Model dependent
#######################################################################
if 'MinkFPN' in self.model:
# Models using MinkowskiEngine
self.mink_quantization_size = params.getfloat('mink_quantization_size')
# Size of the local features from backbone network (only for MinkNet based models)
# For PointNet-based models we always use 1024 intermediary features
self.feature_size = params.getint('feature_size', 256)
if 'planes' in params:
self.planes = [int(e) for e in params['planes'].split(',')]
else:
self.planes = [32, 64, 64]
if 'layers' in params:
self.layers = [int(e) for e in params['layers'].split(',')]
else:
self.layers = [1, 1, 1]
self.num_top_down = params.getint('num_top_down', 1)
self.conv0_kernel_size = params.getint('conv0_kernel_size', 5)
def print(self):
print('Model parameters:')
param_dict = vars(self)
for e in param_dict:
print('{}: {}'.format(e, param_dict[e]))
print('')
def get_datetime():
return time.strftime("%Y%m%d_%H%M")
def xyz_from_depth(depth_image, depth_intrinsic, depth_scale=1000.):
# Return X, Y, Z coordinates from a depth map.
# This mimics OpenCV cv2.rgbd.depthTo3d() function
fx = depth_intrinsic[0, 0]
fy = depth_intrinsic[1, 1]
cx = depth_intrinsic[0, 2]
cy = depth_intrinsic[1, 2]
# Construct (y, x) array with pixel coordinates
y, x = np.meshgrid(range(depth_image.shape[0]), range(depth_image.shape[1]), sparse=False, indexing='ij')
X = (x - cx) * depth_image / (fx * depth_scale)
Y = (y - cy) * depth_image / (fy * depth_scale)
xyz = np.stack([X, Y, depth_image / depth_scale], axis=2)
xyz[depth_image == 0] = np.nan
return xyz
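# Hedged usage sketch for xyz_from_depth (added for illustration; not called anywhere).
# A (H, W) depth image in millimetres plus a 3x3 pinhole intrinsic matrix yields an
# (H, W, 3) array of metric X/Y/Z coordinates, with zero-depth pixels mapped to NaN.
# The intrinsic values and the constant 1.5 m depth below are made up for this example.
def _xyz_from_depth_example():
    depth = np.full((480, 640), 1500, dtype=np.float32)   # 1.5 m everywhere
    K = np.array([[525.0, 0.0, 319.5],
                  [0.0, 525.0, 239.5],
                  [0.0, 0.0, 1.0]])
    xyz = xyz_from_depth(depth, K)                         # shape (480, 640, 3)
    return xyz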
class MinkLocParams:
"""
Params for training MinkLoc models on Oxford dataset
"""
def __init__(self, params_path, model_params_path):
"""
Configuration files
:param path: General configuration file
:param model_params: Model-specific configuration
"""
assert os.path.exists(params_path), 'Cannot find configuration file: {}'.format(params_path)
assert os.path.exists(model_params_path), 'Cannot find model-specific configuration file: {}'.format(model_params_path)
self.params_path = params_path
self.model_params_path = model_params_path
# self.model_params_path = model_params_path
config = configparser.ConfigParser()
config.read(self.params_path)
params = config['DEFAULT']
self.num_points = params.getint('num_points', 4096)
self.dataset_folder = params.get('dataset_folder')
self.queries_folder = params.get('queries_folder')
params = config['TRAIN']
self.num_workers = params.getint('num_workers', 0)
self.batch_size = params.getint('batch_size', 128)
# Set batch_expansion_th to turn on dynamic batch sizing
# When number of non-zero triplets falls below batch_expansion_th, expand batch size
self.batch_expansion_th = params.getfloat('batch_expansion_th', None)
if self.batch_expansion_th is not None:
assert 0. < self.batch_expansion_th < 1., 'batch_expansion_th must be between 0 and 1'
self.batch_size_limit = params.getint('batch_size_limit', 256)
# Batch size expansion rate
self.batch_expansion_rate = params.getfloat('batch_expansion_rate', 1.5)
assert self.batch_expansion_rate > 1., 'batch_expansion_rate must be greater than 1'
else:
self.batch_size_limit = self.batch_size
self.batch_expansion_rate = None
self.lr = params.getfloat('lr', 1e-3)
self.scheduler = params.get('scheduler', 'MultiStepLR')
if self.scheduler is not None:
if self.scheduler == 'CosineAnnealingLR':
self.min_lr = params.getfloat('min_lr')
elif self.scheduler == 'MultiStepLR':
scheduler_milestones = params.get('scheduler_milestones')
self.scheduler_milestones = [int(e) for e in scheduler_milestones.split(',')]
else:
raise NotImplementedError('Unsupported LR scheduler: {}'.format(self.scheduler))
self.epochs = params.getint('epochs', 20)
self.weight_decay = params.getfloat('weight_decay', None)
self.normalize_embeddings = params.getboolean('normalize_embeddings', True) # Normalize embeddings during training and evaluation
self.loss = params.get('loss')
if 'Contrastive' in self.loss:
self.pos_margin = params.getfloat('pos_margin', 0.2)
self.neg_margin = params.getfloat('neg_margin', 0.65)
elif 'Triplet' in self.loss:
self.margin = params.getfloat('margin', 0.4) # Margin used in loss function
else:
            raise NotImplementedError('Unsupported loss function: {}'.format(self.loss))
self.aug_mode = params.getint('aug_mode', 1) # Augmentation mode (1 is default)
self.train_file = params.get('train_file')
self.val_file = params.get('val_file', None)
self.eval_database_files = ['kitti_evaluation_database.pickle']
self.eval_query_files = ['kitti_evaluation_query.pickle']
# self.eval_database_files = ['oxford_evaluation_database.pickle', 'business_evaluation_database.pickle',
# 'residential_evaluation_database.pickle', 'university_evaluation_database.pickle']
#
# self.eval_query_files = ['oxford_evaluation_query.pickle', 'business_evaluation_query.pickle',
# 'residential_evaluation_query.pickle', 'university_evaluation_query.pickle']
assert len(self.eval_database_files) == len(self.eval_query_files)
# Read model parameters
self.model_params = ModelParams(self.model_params_path)
self._check_params()
def _check_params(self):
assert os.path.exists(self.dataset_folder), 'Cannot access dataset: {}'.format(self.dataset_folder)
assert os.path.exists(self.queries_folder), 'Cannot access dataset: {}'.format(self.queries_folder)
def print(self):
print('Parameters:')
param_dict = vars(self)
for e in param_dict:
if e != 'model_params':
print('{}: {}'.format(e, param_dict[e]))
self.model_params.print()
print('')
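# --- Hedged configuration sketch (added for illustration; not shipped with this code) ---
# MinkLocParams expects two INI files parsed with configparser; the keys below are only
# the ones read above, and every path and value is a placeholder, not a tested setup.
#
# config.txt:
#   [DEFAULT]
#   num_points = 4096
#   dataset_folder = /data/benchmark_datasets
#   queries_folder = /data/benchmark_queries
#   [TRAIN]
#   batch_size = 128
#   lr = 1e-3
#   scheduler_milestones = 30,50
#   loss = TripletMarginLoss
#   train_file = training_queries.pickle
#
# model_config.txt:
#   [MODEL]
#   model = MinkFPN_GeM
#   mink_quantization_size = 0.01
#   feature_size = 256
#
# Usage (assuming both files and the dataset folders exist):
# params = MinkLocParams('config.txt', 'model_config.txt')
# params.print()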
|