repo_name (string, 5–92 chars) | path (string, 4–232 chars) | copies (19 classes) | size (string, 4–7 chars) | content (string, 721–1.04M chars) | license (15 classes) | hash (int64) | line_mean (float64, 6.51–99.9) | line_max (int64, 15–997) | alpha_frac (float64, 0.25–0.97) | autogenerated (bool) |
---|---|---|---|---|---|---|---|---|---|---|
Foxfanmedium/python_training | OnlineCoursera/mail_ru/Python_1/Week_5/2_creating_processes.py | 1 | 1801 | """
Creating/spawning a process on a Unix-like system is done with the
fork system call.
List the attributes available in the os module:
print(dir(os))
fork is not available on Windows.
List all running processes:
$ ps aux
Find a process by name:
$ ps aux | grep ex.p
"""
# import time
# import os
#
# pid = os.fork()
#
#
# if pid == 0:
#     # child process
# while True:
#         print('child', os.getpid())
# time.sleep(5)
#
# else:
# # parent process
# print('parent', os.getpid())
# os.wait()
#====================================================================================================================
"""
Files in the parent and child processes: after fork() the child inherits
the parent's file descriptors, so both processes share the same open file.
"""
# import os
#
# f = open('data.txt')
# foo = f.readline()
#
# if os.fork() == 0:
#     # child process
# foo = f.readline()
# print('child', foo)
# else:
# # parent process
# foo = f.readline()
# print('parent', foo)
#====================================================================================================================
"""
Creating a process with the multiprocessing module
"""
from multiprocessing import Process
# def f(name):
# print('hello', name)
#
#
# p = Process(target =f, args = ("Bob", ))
# p.start()
# p.join()
# An alternative way to create a process (subclassing Process)
class PrintProcess(Process):
def __init__(self, name):
super().__init__()
self.name = name
def run(self):
print('hello', self.name)
p = PrintProcess('Mike')
p.start()
p.join()
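
# Added note (a sketch, not part of the original lesson): on Windows, where
# fork is unavailable, multiprocessing falls back to the "spawn" start method,
# which re-imports this module in every child process. The unguarded p.start()
# above would then raise a RuntimeError, so process creation should sit behind
# a guard. A minimal guarded form, assuming the PrintProcess class defined above:
#
# if __name__ == "__main__":
#     worker = PrintProcess('Ann')
#     worker.start()
#     worker.join()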
| apache-2.0 | 2,430,489,199,370,699,300 | 13.75 | 117 | 0.505867 | false |
GISAElkartea/tresna-kutxa | tk/material/migrations/0001_initial.py | 1 | 9308 | # Generated by Django 2.0 on 2018-06-23 18:27
from django.db import migrations, models
import django.db.models.deletion
import localized_fields.fields.char_field
import localized_fields.fields.text_field
import localized_fields.fields.uniqueslug_field
import localized_fields.mixins
import tk.material.fields
import tk.material.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Approval',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('timestamp', models.DateTimeField(auto_now_add=True, verbose_name='creation timestamp')),
('approved', models.BooleanField(default=False, verbose_name='is approved')),
('comment', models.TextField(blank=True, verbose_name='comment')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='contact email')),
],
options={
'verbose_name': 'Approval',
'verbose_name_plural': 'Approvals',
'ordering': ['-timestamp'],
},
),
migrations.CreateModel(
name='GroupFeature',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', localized_fields.fields.char_field.LocalizedCharField(max_length=512, required=['eu'], uniqueness=[], verbose_name='name')),
],
options={
'verbose_name': 'Group feature',
'verbose_name_plural': 'Group features',
'ordering': ['name'],
},
),
migrations.CreateModel(
name='Location',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', localized_fields.fields.char_field.LocalizedCharField(max_length=512, required=['eu'], uniqueness=[], verbose_name='name')),
],
options={
'verbose_name': 'Location',
'verbose_name_plural': 'Locations',
'ordering': ['name'],
},
),
migrations.CreateModel(
name='Material',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', localized_fields.fields.char_field.LocalizedCharField(max_length=512, required=[], uniqueness=[], verbose_name='title')),
('slug', localized_fields.fields.uniqueslug_field.LocalizedUniqueSlugField(include_time=False, populate_from='title', required=['eu'], uniqueness=['eu', 'es'])),
('timestamp', models.DateTimeField(auto_now_add=True, verbose_name='creation timestamp')),
('goal', localized_fields.fields.text_field.LocalizedTextField(blank=True, null=True, required=[], uniqueness=[], verbose_name='goal')),
('brief', localized_fields.fields.text_field.LocalizedTextField(required=[], uniqueness=[], verbose_name='brief')),
('author', models.CharField(blank=True, max_length=512, verbose_name='author')),
],
options={
'verbose_name': 'Material',
'verbose_name_plural': 'Materials',
'ordering': ['-timestamp'],
},
bases=(localized_fields.mixins.AtomicSlugRetryMixin, models.Model),
),
migrations.CreateModel(
name='Subject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', localized_fields.fields.char_field.LocalizedCharField(max_length=512, required=['eu'], uniqueness=[], verbose_name='name')),
],
options={
'verbose_name': 'Subject',
'verbose_name_plural': 'Subjects',
'ordering': ['name'],
},
),
migrations.CreateModel(
name='Activity',
fields=[
('material_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='material.Material')),
('duration', models.DurationField(blank=True, null=True, verbose_name='duration')),
('min_people', models.PositiveSmallIntegerField(default=2, verbose_name='minimum number of people')),
('max_people', models.PositiveSmallIntegerField(default=30, verbose_name='maximum number of people')),
('notes', localized_fields.fields.text_field.LocalizedTextField(blank=True, null=True, required=[], uniqueness=[], verbose_name='notes')),
('attachment', models.FileField(blank=True, upload_to='material/activities/', verbose_name='attachment')),
('url', models.URLField(blank=True, help_text='Link the material if its copyright does not allow sharing it.', verbose_name='URL')),
('group_feature', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='material.GroupFeature', verbose_name='group feature')),
('location', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='material.Location', verbose_name='location')),
],
options={
'verbose_name': 'Activity',
'verbose_name_plural': 'Activities',
'ordering': ['-timestamp'],
},
bases=('material.material',),
),
migrations.CreateModel(
name='Link',
fields=[
('material_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='material.Material')),
('url', models.URLField(verbose_name='URL')),
],
options={
'verbose_name': 'Link',
'verbose_name_plural': 'Links',
'ordering': ['-timestamp'],
},
bases=('material.material',),
),
migrations.CreateModel(
name='Reading',
fields=[
('material_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='material.Material')),
('pages', models.PositiveIntegerField(blank=True, null=True, verbose_name='pages')),
('year', models.PositiveIntegerField(blank=True, null=True, validators=[tk.material.models.validate_year], verbose_name='year')),
('languages', tk.material.fields.LanguageField(base_field=models.CharField(max_length=7), size=None, verbose_name='languages')),
('attachment', models.FileField(blank=True, upload_to='material/readings/', verbose_name='attachment')),
('url', models.URLField(blank=True, help_text='Link the material if its copyright does not allow sharing it.', verbose_name='URL')),
],
options={
'verbose_name': 'Reading',
'verbose_name_plural': 'Readings',
'ordering': ['-timestamp'],
},
bases=('material.material',),
),
migrations.CreateModel(
name='Video',
fields=[
('material_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='material.Material')),
('duration', models.DurationField(verbose_name='duration')),
('year', models.PositiveIntegerField(blank=True, null=True, validators=[tk.material.models.validate_year], verbose_name='year')),
('audios', tk.material.fields.LanguageField(base_field=models.CharField(max_length=7), blank=True, size=None, verbose_name='audio languages')),
('subtitles', tk.material.fields.LanguageField(base_field=models.CharField(max_length=7), blank=True, size=None, verbose_name='subtitle languages')),
('attachment', models.FileField(blank=True, upload_to='material/videos', verbose_name='attachment')),
('url', models.URLField(blank=True, help_text='Link the material if its copyright does not allow sharing it.', verbose_name='URL')),
],
options={
'verbose_name': 'Video',
'verbose_name_plural': 'Videos',
'ordering': ['-timestamp'],
},
bases=('material.material',),
),
migrations.AddField(
model_name='material',
name='subjects',
field=models.ManyToManyField(to='material.Subject', verbose_name='subject'),
),
migrations.AddField(
model_name='approval',
name='material',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='material.Material'),
),
]
| agpl-3.0 | 8,054,153,607,170,225,000 | 54.404762 | 196 | 0.584981 | false |
pombredanne/bandit | tests/unit/formatters/test_json.py | 1 | 3957 | # Copyright (c) 2015 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import json
import tempfile
import mock
import testtools
import bandit
from bandit.core import config
from bandit.core import constants
from bandit.core import issue
from bandit.core import manager
from bandit.core import metrics
from bandit.formatters import json as b_json
class JsonFormatterTests(testtools.TestCase):
def setUp(self):
super(JsonFormatterTests, self).setUp()
conf = config.BanditConfig()
self.manager = manager.BanditManager(conf, 'file')
(tmp_fd, self.tmp_fname) = tempfile.mkstemp()
self.context = {'filename': self.tmp_fname,
'lineno': 4,
'linerange': [4]}
self.check_name = 'hardcoded_bind_all_interfaces'
self.issue = issue.Issue(bandit.MEDIUM, bandit.MEDIUM,
'Possible binding to all interfaces.')
self.candidates = [issue.Issue(bandit.LOW, bandit.LOW, 'Candidate A',
lineno=1),
                           issue.Issue(bandit.HIGH, bandit.HIGH, 'Candidate B',
lineno=2)]
self.manager.out_file = self.tmp_fname
self.issue.fname = self.context['filename']
self.issue.lineno = self.context['lineno']
self.issue.linerange = self.context['linerange']
self.issue.test = self.check_name
self.manager.results.append(self.issue)
self.manager.metrics = metrics.Metrics()
# mock up the metrics
for key in ['_totals', 'binding.py']:
self.manager.metrics.data[key] = {'loc': 4, 'nosec': 2}
for (criteria, default) in constants.CRITERIA:
for rank in constants.RANKING:
self.manager.metrics.data[key]['{0}.{1}'.format(
criteria, rank
)] = 0
@mock.patch('bandit.core.manager.BanditManager.get_issue_list')
def test_report(self, get_issue_list):
self.manager.files_list = ['binding.py']
self.manager.scores = [{'SEVERITY': [0] * len(constants.RANKING),
'CONFIDENCE': [0] * len(constants.RANKING)}]
get_issue_list.return_value = collections.OrderedDict(
[(self.issue, self.candidates)])
tmp_file = open(self.tmp_fname, 'w')
b_json.report(self.manager, tmp_file, self.issue.severity,
self.issue.confidence)
with open(self.tmp_fname) as f:
data = json.loads(f.read())
self.assertIsNotNone(data['generated_at'])
self.assertEqual(self.tmp_fname, data['results'][0]['filename'])
self.assertEqual(self.issue.severity,
data['results'][0]['issue_severity'])
self.assertEqual(self.issue.confidence,
data['results'][0]['issue_confidence'])
self.assertEqual(self.issue.text, data['results'][0]['issue_text'])
self.assertEqual(self.context['lineno'],
data['results'][0]['line_number'])
self.assertEqual(self.context['linerange'],
data['results'][0]['line_range'])
self.assertEqual(self.check_name, data['results'][0]['test_name'])
self.assertIn('candidates', data['results'][0])
| apache-2.0 | 2,109,796,595,425,699,300 | 40.21875 | 79 | 0.595906 | false |
Castronova/EMIT | gui/views/EMITView.py | 1 | 5874 | import wx
import wx.html2
import wx.lib.agw.aui as aui
from wx.lib.newevent import NewEvent
from LowerPanelView import ViewLowerPanel
from gui.controller.CanvasCtrl import CanvasCtrl
from gui.controller.ToolboxCtrl import ToolboxCtrl
from gui.controller.ModelCtrl import ModelDetailsCtrl
# create custom events
wxCreateBox, EVT_CREATE_BOX = NewEvent()
wxStdOut, EVT_STDDOUT= NewEvent()
wxDbChanged, EVT_DBCHANGED= NewEvent()
class EMITView(wx.Frame):
def __init__(self, parent):
wx.Frame.__init__(self, parent, id=wx.ID_ANY, title="Environmental Model Integration Project", pos=wx.DefaultPosition,
size=wx.Size(1200, 750), style=wx.DEFAULT_FRAME_STYLE | wx.TAB_TRAVERSAL)
self.pnlDocking = wx.Panel(id=wx.ID_ANY, name='pnlDocking', parent=self, size=wx.Size(1200, 750),
style=wx.TAB_TRAVERSAL)
self.bnb = wx.Notebook(self.pnlDocking)
ViewLowerPanel(self.bnb)
##################################
# MENU BAR
##################################
self._menu_bar = wx.MenuBar()
self.view_menu = wx.Menu()
self.data_menu = wx.Menu()
# File Menu Option
self._file_menu = wx.Menu()
self._load = self._file_menu.Append(wx.NewId(), '&Load\tCtrl+O', 'Load Configuration')
self._save_menu = self._file_menu.Append(wx.NewId(), '&Save Configuration\tCtrl+S', 'Save Configuration')
self._add_user_menu = self._file_menu.Append(wx.NewId(), 'Add User', 'Add New User')
self.save_as_menu = self._file_menu.Append(wx.NewId(), '&Save Configuration As', 'Save Configuration')
self._settings_menu = self._file_menu.Append(wx.NewId(), "&Settings...\tCtrl+,")
self._exit = self._file_menu.Append(wx.NewId(), '&Quit\tCtrl+Q', 'Quit application')
# View Menu Option
self._default_view_menu = self.view_menu.Append(wx.NewId(), '&Restore Default View', 'Returns the view to the default (initial) state', wx.ITEM_NORMAL)
self.toggle_menu = wx.Menu()
self._toggle_toolbar_menu = self.toggle_menu.Append(wx.ID_ANY, "Toggle Toolbar", help="Toggle Toolbar", kind=wx.ITEM_CHECK)
self._toggle_console_menu = self.toggle_menu.Append(wx.ID_ANY, "Toggle Console", help="Toggle Console", kind=wx.ITEM_CHECK)
self.view_menu.AppendMenu(wx.ID_ANY, "Toggle", self.toggle_menu)
# Data Menu Option
self._add_csv_file_menu = self.data_menu.Append(wx.NewId(), "&Add CSV File")
self._add_netcdf = self.data_menu.Append(wx.NewId(), '&Add NetCDF')
self._open_dap_viewer_menu = self.data_menu.Append(wx.NewId(), "&OpenDap Explorer")
# Add menu items
self._menu_bar.Append(self._file_menu, "&File")
self._menu_bar.Append(self.view_menu, "&View")
self._menu_bar.Append(self.data_menu, "Data")
self.SetMenuBar(self._menu_bar)
# creating components
self.Toolbox = ToolboxCtrl(self.pnlDocking)
self.Canvas = CanvasCtrl(self.pnlDocking)
self.model_details = ModelDetailsCtrl(self.pnlDocking)
self.Toolbox.Hide()
self.initAUIManager()
self._init_sizers()
self.filename = None
self.Center()
self.Show()
def _init_sizers(self):
self.s = wx.BoxSizer(wx.VERTICAL)
self.s.AddWindow(self.pnlDocking, 85, flag=wx.ALL | wx.EXPAND)
self.SetSizer(self.s)
def initAUIManager(self):
self.m_mgr = aui.AuiManager()
self.m_mgr.SetManagedWindow(self.pnlDocking)
self.m_mgr.AddPane(self.Canvas,
aui.AuiPaneInfo().
Center().
Name("Canvas").
CloseButton(False).
MaximizeButton(False).
MinimizeButton(False).
Floatable(False).
BestSize(wx.Size(1000, 400)).CaptionVisible(False)
)
self.m_mgr.AddPane(self.Toolbox,
aui.AuiPaneInfo().
Left().
Dock().
Name("Toolbox").
CloseButton(False).
MaximizeButton(False).
MinimizeButton(False).
PinButton(False).
BestSize(wx.Size(275, 400)).
Floatable(False).
Movable(False).
Show(show=True).CaptionVisible(False)
)
self.m_mgr.AddPane(self.bnb,
aui.AuiPaneInfo().
Bottom().
Name("Console").
CloseButton(False).
MaximizeButton(False).
MinimizeButton(False).
PinButton(False).
Movable(False).
Floatable(False).
BestSize(wx.Size(1200, 225)).CaptionVisible(False)
)
self.m_mgr.AddPane(self.model_details,
aui.AuiPaneInfo()
.Right()
.Dock()
.Name("Details")
.CloseButton(False)
.MaximizeButton(False)
.MinimizeButton(False)
.PinButton(True).
PaneBorder(True).
CaptionVisible(False)
.Show(show=False).
BestSize((200, -1)))
self.m_mgr.Update()
self._default_perspective = self.m_mgr.SavePerspective()
| gpl-2.0 | -8,642,003,472,008,537,000 | 40.659574 | 159 | 0.519408 | false |
GenericMappingTools/gmt-python | examples/gallery/symbols/multi_parameter_symbols.py | 1 | 3546 | """
Multi-parameter symbols
-------------------------
The :meth:`pygmt.Figure.plot` method can plot individual multi-parameter
symbols by passing the corresponding shortcuts (**e**, **j**, **r**, **R**,
**w**) to the ``style`` parameter:
- **e**: ellipse
- **j**: rotated rectangle
- **r**: rectangle
- **R**: rounded rectangle
- **w**: pie wedge
"""
# sphinx_gallery_thumbnail_number = 2
import pygmt
########################################################################################
# We can plot multi-parameter symbols using the same symbol style. We need to
# define locations (lon, lat) via the ``x`` and ``y`` parameters (scalar for
# a single symbol or 1d list for several ones) and two or three symbol
# parameters after those shortcuts via the ``style`` parameter.
#
# The multi-parameter symbols in the ``style`` parameter are defined as:
#
# - **e**: ellipse, ``direction/major_axis/minor_axis``
# - **j**: rotated rectangle, ``direction/width/height``
# - **r**: rectangle, ``width/height``
# - **R**: rounded rectangle, ``width/height/radius``
# - **w**: pie wedge, ``radius/startdir/stopdir``, the last two arguments are
# directions given in degrees counter-clockwise from horizontal
#
# Upper-case versions **E**, **J**, and **W** are similar to **e**, **j** and
# **w** but expect geographic azimuths and distances.
fig = pygmt.Figure()
fig.basemap(region=[0, 6, 0, 2], projection="x3c", frame=True)
# Ellipse
fig.plot(x=0.5, y=1, style="e45/3/1", color="orange", pen="2p,black")
# Rotated rectangle
fig.plot(x=1.5, y=1, style="j120/5/0.5", color="red3", pen="2p,black")
# Rectangle
fig.plot(x=3, y=1, style="r4/1.5", color="dodgerblue", pen="2p,black")
# Rounded rectangle
fig.plot(x=4.5, y=1, style="R1.25/4/0.5", color="seagreen", pen="2p,black")
# Pie wedge
fig.plot(x=5.5, y=1, style="w2.5/45/330", color="lightgray", pen="2p,black")
fig.show()
########################################################################################
# We can also plot symbols with varying parameters via defining those values in
# a 2d list or numpy array (``[[parameters]]`` for a single symbol or
# ``[[parameters_1],[parameters_2],[parameters_i]]`` for several ones) or using
# an appropriately formatted input file and passing it to ``data``.
#
# The symbol parameters in the 2d list or numpy array are defined as:
#
# - **e**: ellipse, ``[[lon, lat, direction, major_axis, minor_axis]]``
# - **j**: rotated rectangle, ``[[lon, lat, direction, width, height]]``
# - **r**: rectangle, ``[[lon, lat, width, height]]``
# - **R**: rounded rectangle, ``[[lon, lat, width, height, radius]]``
# - **w**: pie wedge, ``[[lon, lat, radius, startdir, stopdir]]``, the last two
# arguments are directions given in degrees counter-clockwise from horizontal
fig = pygmt.Figure()
fig.basemap(region=[0, 6, 0, 4], projection="x3c", frame=["xa1f0.2", "ya0.5f0.1"])
# Ellipse
data = [[0.5, 1, 45, 3, 1], [0.5, 3, 135, 2, 1]]
fig.plot(data=data, style="e", color="orange", pen="2p,black")
# Rotated rectangle
data = [[1.5, 1, 120, 5, 0.5], [1.5, 3, 50, 3, 0.5]]
fig.plot(data=data, style="j", color="red3", pen="2p,black")
# Rectangle
data = [[3, 1, 4, 1.5], [3, 3, 3, 1.5]]
fig.plot(data=data, style="r", color="dodgerblue", pen="2p,black")
# Rounded rectangle
data = [[4.5, 1, 1.25, 4, 0.5], [4.5, 3, 1.25, 2.0, 0.2]]
fig.plot(data=data, style="R", color="seagreen", pen="2p,black")
# Pie wedge
data = [[5.5, 1, 2.5, 45, 330], [5.5, 3, 1.5, 60, 300]]
fig.plot(data=data, style="w", color="lightgray", pen="2p,black")
fig.show()
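
########################################################################################
# The upper-case variants (**E**, **J**, **W**) mentioned earlier take geographic
# azimuths and distances rather than Cartesian angles and plot lengths, and no
# example of them appears above. The following is only a rough, added sketch (not
# part of the original gallery example): it assumes a Mercator projection and that
# a geographic ellipse is given as [[lon, lat, azimuth, major, minor]] with the
# axes in kilometers; verify these parameter meanings against the GMT
# documentation before relying on them.

fig = pygmt.Figure()
fig.basemap(region=[-6, 6, -6, 6], projection="M10c", frame=True)
# Geographic ellipse (assumed column order: lon, lat, azimuth, major_km, minor_km)
fig.plot(data=[[0, 0, 45, 500, 250]], style="E", color="orange", pen="2p,black")
fig.show()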
| bsd-3-clause | 8,020,776,406,592,209,000 | 38.842697 | 88 | 0.607727 | false |
iproduct/course-social-robotics | image-recognition-python-new/cv2_haar_cascade_demo.py | 1 | 1814 | # import numpy as np
import cv2 as cv
faceCascade = cv.CascadeClassifier(cv.data.haarcascades + 'haarcascade_frontalface_default.xml')
eyeCascade = cv.CascadeClassifier(cv.data.haarcascades + 'haarcascade_eye.xml')
# img = cv.imread('sachin.jpg')
video_capture = cv.VideoCapture(0)
while True:
# Capture frame-by-frame
ret, frame = video_capture.read()
gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30),
flags=cv.CASCADE_SCALE_IMAGE
)
# Draw a rectangle around the faces
for (x, y, w, h) in faces:
cv.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
roi_gray = gray[y:y + h, x:x + w]
roi_color = frame[y:y + h, x:x + w]
eyes = eyeCascade.detectMultiScale(
roi_gray,
scaleFactor=1.1,
minNeighbors=5,
maxSize=(50, 40),
flags=cv.CASCADE_SCALE_IMAGE)
for (ex, ey, ew, eh) in eyes:
cv.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (255, 0, 0), 2)
# Display the resulting frame
cv.imshow('Video', frame)
if cv.waitKey(1) & 0xFF == ord('q'):
break
# When everything is done, release the capture
video_capture.release()
cv.destroyAllWindows()
# faces = face_cascade.detectMultiScale(gray, 1.3, 5)
# for (x,y,w,h) in faces:
# cv.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
# roi_gray = gray[y:y+h, x:x+w]
# roi_color = img[y:y+h, x:x+w]
# eyes = eye_cascade.detectMultiScale(roi_gray)
# for (ex,ey,ew,eh) in eyes:
# cv.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
# cv.imshow('img',img)
# cv.waitKey(0)
# cv.destroyAllWindows()
| gpl-2.0 | -1,039,303,057,272,358,000 | 29.275862 | 96 | 0.57828 | false |
Samneetsingh/OutlierDetection | odt/nearestneighbour/gknn.py | 1 | 1899 | ###############################
## Global KNN Implementation ##
###############################
### Import Python Libraries ###
import pandas as pd
from pandas import DataFrame
from numpy import array, matrix
### Import R Libraries ###
import rpy2.robjects as R
from rpy2.robjects.packages import importr
from rpy2.robjects import pandas2ri
pandas2ri.activate()
base = importr("base")
utils = importr("utils")
odtpackage = importr("dbscan")
######################
## Global KNN Class ##
######################
class GlobalKNN(object):
### Global KNN Class Constructor ###
def __init__(self, xdf, minPts):
self.xdf = xdf
self.minPts = minPts
self.score = []
self.label = []
    ### [TODO:] Implement Normalization functionality ###
def normalizeData(self):
pass
### Global KNN Distance estimation Function ###
def kNNDistance(self, xdf):
rdf = pandas2ri.py2ri(xdf)
return odtpackage.kNNdist(base.as_matrix(rdf), self.minPts)
### Global KNN Execution Function ###
def getOutlier(self, threshold=0.5):
distance = array(self.kNNDistance(self.xdf))
for i in range(0, len(distance)):
self.score.append(reduce(lambda x, y: x+y, list(distance[i]))/self.minPts)
if self.score[i] > threshold:
self.label.append('outlier')
else:
self.label.append('normal')
return DataFrame(data={'Score': self.score, 'Label': self.label}, )
if __name__ == "__main__":
url = '/Users/warchief/Documents/Projects/DataRepository/AnomalyDetection/test.csv'
df = DataFrame.from_csv(path=url, header=0, sep=',', index_col=False)
X = df['SL_RRC_CONN_AVG_PER_CELL'].values
Y = df['I_DL_DRB_CELL_TPUT_MBPS'].values
d = {'x': X, 'y': Y}
pdf = DataFrame(data=d)
nn = GlobalKNN(pdf, 200)
print nn.getOutlier(0.5)
| gpl-3.0 | 7,724,768,562,600,736,000 | 26.521739 | 87 | 0.589784 | false |
plumgrid/plumgrid-nova | nova/network/floating_ips.py | 1 | 32330 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from nova import context
from nova.db import base
from nova import exception
from nova.network import rpcapi as network_rpcapi
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier
from nova.openstack.common import processutils
from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import uuidutils
from nova import quota
from nova import servicegroup
from nova import utils
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
floating_opts = [
cfg.StrOpt('default_floating_pool',
default='nova',
help='Default pool for floating ips'),
cfg.BoolOpt('auto_assign_floating_ip',
default=False,
help='Autoassigning floating ip to VM'),
cfg.StrOpt('floating_ip_dns_manager',
default='nova.network.noop_dns_driver.NoopDNSDriver',
help='full class name for the DNS Manager for floating IPs'),
cfg.StrOpt('instance_dns_manager',
default='nova.network.noop_dns_driver.NoopDNSDriver',
help='full class name for the DNS Manager for instance IPs'),
cfg.StrOpt('instance_dns_domain',
default='',
help='full class name for the DNS Zone for instance IPs'),
]
CONF = cfg.CONF
CONF.register_opts(floating_opts)
CONF.import_opt('public_interface', 'nova.network.linux_net')
CONF.import_opt('network_topic', 'nova.network.rpcapi')
class FloatingIP(object):
"""Mixin class for adding floating IP functionality to a manager."""
servicegroup_api = None
def init_host_floating_ips(self):
"""Configures floating ips owned by host."""
admin_context = context.get_admin_context()
try:
floating_ips = self.db.floating_ip_get_all_by_host(admin_context,
self.host)
except exception.NotFound:
return
for floating_ip in floating_ips:
fixed_ip_id = floating_ip.get('fixed_ip_id')
if fixed_ip_id:
try:
fixed_ip = self.db.fixed_ip_get(admin_context,
fixed_ip_id,
get_network=True)
except exception.FixedIpNotFound:
msg = _('Fixed ip %s not found') % fixed_ip_id
LOG.debug(msg)
continue
interface = CONF.public_interface or floating_ip['interface']
try:
self.l3driver.add_floating_ip(floating_ip['address'],
fixed_ip['address'],
interface,
fixed_ip['network'])
except processutils.ProcessExecutionError:
LOG.debug(_('Interface %s not found'), interface)
raise exception.NoFloatingIpInterface(interface=interface)
def allocate_for_instance(self, context, **kwargs):
"""Handles allocating the floating IP resources for an instance.
calls super class allocate_for_instance() as well
rpc.called by network_api
"""
instance_uuid = kwargs.get('instance_id')
if not uuidutils.is_uuid_like(instance_uuid):
instance_uuid = kwargs.get('instance_uuid')
project_id = kwargs.get('project_id')
requested_networks = kwargs.get('requested_networks')
# call the next inherited class's allocate_for_instance()
# which is currently the NetworkManager version
# do this first so fixed ip is already allocated
nw_info = super(FloatingIP, self).allocate_for_instance(context,
**kwargs)
if CONF.auto_assign_floating_ip:
# allocate a floating ip
floating_address = self.allocate_floating_ip(context, project_id,
True)
LOG.debug(_("floating IP allocation for instance "
"|%s|"), floating_address,
instance_uuid=instance_uuid, context=context)
# set auto_assigned column to true for the floating ip
self.db.floating_ip_set_auto_assigned(context, floating_address)
# get the first fixed address belonging to the instance
fixed_ips = nw_info.fixed_ips()
fixed_address = fixed_ips[0]['address']
# associate the floating ip to fixed_ip
self.associate_floating_ip(context,
floating_address,
fixed_address,
affect_auto_assigned=True)
# create a fresh set of network info that contains the floating ip
nw_info = self.get_instance_nw_info(context, **kwargs)
return nw_info
def deallocate_for_instance(self, context, **kwargs):
"""Handles deallocating floating IP resources for an instance.
calls super class deallocate_for_instance() as well.
rpc.called by network_api
"""
instance_uuid = kwargs.get('instance_id')
if not uuidutils.is_uuid_like(instance_uuid):
# NOTE(francois.charlier): in some cases the instance might be
# deleted before the IPs are released, so we need to get deleted
# instances too
instance = self.db.instance_get(
context.elevated(read_deleted='yes'), instance_uuid)
instance_uuid = instance['uuid']
try:
fixed_ips = self.db.fixed_ip_get_by_instance(context,
instance_uuid)
except exception.FixedIpNotFoundForInstance:
fixed_ips = []
# add to kwargs so we can pass to super to save a db lookup there
kwargs['fixed_ips'] = fixed_ips
for fixed_ip in fixed_ips:
fixed_id = fixed_ip['id']
floating_ips = self.db.floating_ip_get_by_fixed_ip_id(context,
fixed_id)
# disassociate floating ips related to fixed_ip
for floating_ip in floating_ips:
address = floating_ip['address']
try:
self.disassociate_floating_ip(context,
address,
affect_auto_assigned=True)
except exception.FloatingIpNotAssociated:
LOG.exception(_("Floating IP is not associated. Ignore."))
# deallocate if auto_assigned
if floating_ip['auto_assigned']:
self.deallocate_floating_ip(context, address,
affect_auto_assigned=True)
# call the next inherited class's deallocate_for_instance()
# which is currently the NetworkManager version
# call this after so floating IPs are handled first
super(FloatingIP, self).deallocate_for_instance(context, **kwargs)
def _floating_ip_owned_by_project(self, context, floating_ip):
"""Raises if floating ip does not belong to project."""
if context.is_admin:
return
if floating_ip['project_id'] != context.project_id:
if floating_ip['project_id'] is None:
LOG.warn(_('Address |%(address)s| is not allocated'),
{'address': floating_ip['address']})
raise exception.NotAuthorized()
else:
LOG.warn(_('Address |%(address)s| is not allocated to your '
'project |%(project)s|'),
{'address': floating_ip['address'],
'project': context.project_id})
raise exception.NotAuthorized()
def allocate_floating_ip(self, context, project_id, auto_assigned=False,
pool=None):
"""Gets a floating ip from the pool."""
# NOTE(tr3buchet): all network hosts in zone now use the same pool
pool = pool or CONF.default_floating_pool
use_quota = not auto_assigned
# Check the quota; can't put this in the API because we get
# called into from other places
try:
if use_quota:
reservations = QUOTAS.reserve(context, floating_ips=1)
except exception.OverQuota:
LOG.warn(_("Quota exceeded for %s, tried to allocate "
"floating IP"), context.project_id)
raise exception.FloatingIpLimitExceeded()
try:
floating_ip = self.db.floating_ip_allocate_address(context,
project_id,
pool)
payload = dict(project_id=project_id, floating_ip=floating_ip)
notifier.notify(context,
notifier.publisher_id("network"),
'network.floating_ip.allocate',
notifier.INFO, payload)
# Commit the reservations
if use_quota:
QUOTAS.commit(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
if use_quota:
QUOTAS.rollback(context, reservations)
return floating_ip
@rpc_common.client_exceptions(exception.FloatingIpNotFoundForAddress)
def deallocate_floating_ip(self, context, address,
affect_auto_assigned=False):
"""Returns a floating ip to the pool."""
floating_ip = self.db.floating_ip_get_by_address(context, address)
# handle auto_assigned
if not affect_auto_assigned and floating_ip.get('auto_assigned'):
return
use_quota = not floating_ip.get('auto_assigned')
# make sure project owns this floating ip (allocated)
self._floating_ip_owned_by_project(context, floating_ip)
# make sure floating ip is not associated
if floating_ip['fixed_ip_id']:
floating_address = floating_ip['address']
raise exception.FloatingIpAssociated(address=floating_address)
# clean up any associated DNS entries
self._delete_all_entries_for_ip(context,
floating_ip['address'])
payload = dict(project_id=floating_ip['project_id'],
floating_ip=floating_ip['address'])
notifier.notify(context,
notifier.publisher_id("network"),
'network.floating_ip.deallocate',
notifier.INFO, payload=payload)
# Get reservations...
try:
if use_quota:
reservations = QUOTAS.reserve(context, floating_ips=-1)
else:
reservations = None
except Exception:
reservations = None
LOG.exception(_("Failed to update usages deallocating "
"floating IP"))
self.db.floating_ip_deallocate(context, address)
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations)
@rpc_common.client_exceptions(exception.FloatingIpNotFoundForAddress)
def associate_floating_ip(self, context, floating_address, fixed_address,
affect_auto_assigned=False):
"""Associates a floating ip with a fixed ip.
Makes sure everything makes sense then calls _associate_floating_ip,
rpc'ing to correct host if i'm not it.
Access to the floating_address is verified but access to the
        fixed_address is not verified. This assumes that the calling
side has already verified that the fixed_address is legal by
checking access to the instance.
"""
floating_ip = self.db.floating_ip_get_by_address(context,
floating_address)
# handle auto_assigned
if not affect_auto_assigned and floating_ip.get('auto_assigned'):
return
# make sure project owns this floating ip (allocated)
self._floating_ip_owned_by_project(context, floating_ip)
# disassociate any already associated
orig_instance_uuid = None
if floating_ip['fixed_ip_id']:
# find previously associated instance
fixed_ip = self.db.fixed_ip_get(context,
floating_ip['fixed_ip_id'])
if fixed_ip['address'] == fixed_address:
# NOTE(vish): already associated to this address
return
orig_instance_uuid = fixed_ip['instance_uuid']
self.disassociate_floating_ip(context, floating_address)
fixed_ip = self.db.fixed_ip_get_by_address(context, fixed_address)
# send to correct host, unless i'm the correct host
network = self.db.network_get(context.elevated(),
fixed_ip['network_id'])
if network['multi_host']:
instance = self.db.instance_get_by_uuid(context,
fixed_ip['instance_uuid'])
host = instance['host']
else:
host = network['host']
interface = floating_ip.get('interface')
if host == self.host:
# i'm the correct host
self._associate_floating_ip(context, floating_address,
fixed_address, interface,
fixed_ip['instance_uuid'])
else:
# send to correct host
self.network_rpcapi._associate_floating_ip(context,
floating_address, fixed_address, interface, host,
fixed_ip['instance_uuid'])
return orig_instance_uuid
def _associate_floating_ip(self, context, floating_address, fixed_address,
interface, instance_uuid):
"""Performs db and driver calls to associate floating ip & fixed ip."""
interface = CONF.public_interface or interface
@utils.synchronized(unicode(floating_address))
def do_associate():
# associate floating ip
fixed = self.db.floating_ip_fixed_ip_associate(context,
floating_address,
fixed_address,
self.host)
if not fixed:
# NOTE(vish): ip was already associated
return
try:
# gogo driver time
self.l3driver.add_floating_ip(floating_address, fixed_address,
interface, fixed['network'])
except processutils.ProcessExecutionError as e:
self.db.floating_ip_disassociate(context, floating_address)
if "Cannot find device" in str(e):
LOG.error(_('Interface %s not found'), interface)
raise exception.NoFloatingIpInterface(interface=interface)
raise
payload = dict(project_id=context.project_id,
instance_id=instance_uuid,
floating_ip=floating_address)
notifier.notify(context,
notifier.publisher_id("network"),
'network.floating_ip.associate',
notifier.INFO, payload=payload)
do_associate()
@rpc_common.client_exceptions(exception.FloatingIpNotFoundForAddress)
def disassociate_floating_ip(self, context, address,
affect_auto_assigned=False):
"""Disassociates a floating ip from its fixed ip.
Makes sure everything makes sense then calls _disassociate_floating_ip,
rpc'ing to correct host if i'm not it.
"""
floating_ip = self.db.floating_ip_get_by_address(context, address)
# handle auto assigned
if not affect_auto_assigned and floating_ip.get('auto_assigned'):
raise exception.CannotDisassociateAutoAssignedFloatingIP()
# make sure project owns this floating ip (allocated)
self._floating_ip_owned_by_project(context, floating_ip)
# make sure floating ip is associated
if not floating_ip.get('fixed_ip_id'):
floating_address = floating_ip['address']
raise exception.FloatingIpNotAssociated(address=floating_address)
fixed_ip = self.db.fixed_ip_get(context, floating_ip['fixed_ip_id'])
# send to correct host, unless i'm the correct host
network = self.db.network_get(context.elevated(),
fixed_ip['network_id'])
interface = floating_ip.get('interface')
if network['multi_host']:
instance = self.db.instance_get_by_uuid(context,
fixed_ip['instance_uuid'])
service = self.db.service_get_by_host_and_topic(
context.elevated(), instance['host'], CONF.network_topic)
if service and self.servicegroup_api.service_is_up(service):
host = instance['host']
else:
# NOTE(vish): if the service is down just deallocate the data
# locally. Set the host to local so the call will
# not go over rpc and set interface to None so the
# teardown in the driver does not happen.
host = self.host
interface = None
else:
host = network['host']
if host == self.host:
# i'm the correct host
self._disassociate_floating_ip(context, address, interface,
fixed_ip['instance_uuid'])
else:
# send to correct host
self.network_rpcapi._disassociate_floating_ip(context, address,
interface, host, fixed_ip['instance_uuid'])
def _disassociate_floating_ip(self, context, address, interface,
instance_uuid):
"""Performs db and driver calls to disassociate floating ip."""
interface = CONF.public_interface or interface
@utils.synchronized(unicode(address))
def do_disassociate():
# NOTE(vish): Note that we are disassociating in the db before we
# actually remove the ip address on the host. We are
# safe from races on this host due to the decorator,
# but another host might grab the ip right away. We
# don't worry about this case because the minuscule
# window where the ip is on both hosts shouldn't cause
# any problems.
fixed = self.db.floating_ip_disassociate(context, address)
if not fixed:
# NOTE(vish): ip was already disassociated
return
if interface:
# go go driver time
self.l3driver.remove_floating_ip(address, fixed['address'],
interface, fixed['network'])
payload = dict(project_id=context.project_id,
instance_id=instance_uuid,
floating_ip=address)
notifier.notify(context,
notifier.publisher_id("network"),
'network.floating_ip.disassociate',
notifier.INFO, payload=payload)
do_disassociate()
@rpc_common.client_exceptions(exception.FloatingIpNotFound)
def get_floating_ip(self, context, id):
"""Returns a floating IP as a dict."""
# NOTE(vish): This is no longer used but can't be removed until
# we major version the network_rpcapi.
return dict(self.db.floating_ip_get(context, id).iteritems())
def get_floating_pools(self, context):
"""Returns list of floating pools."""
# NOTE(maurosr) This method should be removed in future, replaced by
# get_floating_ip_pools. See bug #1091668
return self.get_floating_ip_pools(context)
def get_floating_ip_pools(self, context):
"""Returns list of floating ip pools."""
# NOTE(vish): This is no longer used but can't be removed until
# we major version the network_rpcapi.
pools = self.db.floating_ip_get_pools(context)
return [dict(pool.iteritems()) for pool in pools]
def get_floating_ip_by_address(self, context, address):
"""Returns a floating IP as a dict."""
# NOTE(vish): This is no longer used but can't be removed until
# we major version the network_rpcapi.
return dict(self.db.floating_ip_get_by_address(context,
address).iteritems())
def get_floating_ips_by_project(self, context):
"""Returns the floating IPs allocated to a project."""
# NOTE(vish): This is no longer used but can't be removed until
# we major version the network_rpcapi.
ips = self.db.floating_ip_get_all_by_project(context,
context.project_id)
return [dict(ip.iteritems()) for ip in ips]
def get_floating_ips_by_fixed_address(self, context, fixed_address):
"""Returns the floating IPs associated with a fixed_address."""
# NOTE(vish): This is no longer used but can't be removed until
# we major version the network_rpcapi.
floating_ips = self.db.floating_ip_get_by_fixed_address(context,
fixed_address)
return [floating_ip['address'] for floating_ip in floating_ips]
def _is_stale_floating_ip_address(self, context, floating_ip):
try:
self._floating_ip_owned_by_project(context, floating_ip)
except exception.NotAuthorized:
return True
return False if floating_ip.get('fixed_ip_id') else True
def migrate_instance_start(self, context, instance_uuid,
floating_addresses,
rxtx_factor=None, project_id=None,
source=None, dest=None):
# We only care if floating_addresses are provided and we're
# switching hosts
if not floating_addresses or (source and source == dest):
return
LOG.info(_("Starting migration network for instance %s"),
instance_uuid)
for address in floating_addresses:
floating_ip = self.db.floating_ip_get_by_address(context,
address)
if self._is_stale_floating_ip_address(context, floating_ip):
LOG.warn(_("Floating ip address |%(address)s| no longer "
"belongs to instance %(instance_uuid)s. Will not "
"migrate it "),
{'address': address, 'instance_uuid': instance_uuid})
continue
interface = CONF.public_interface or floating_ip['interface']
fixed_ip = self.db.fixed_ip_get(context,
floating_ip['fixed_ip_id'],
get_network=True)
self.l3driver.remove_floating_ip(floating_ip['address'],
fixed_ip['address'],
interface,
fixed_ip['network'])
# NOTE(ivoks): Destroy conntrack entries on source compute
# host.
self.l3driver.clean_conntrack(fixed_ip['address'])
# NOTE(wenjianhn): Make this address will not be bound to public
# interface when restarts nova-network on dest compute node
self.db.floating_ip_update(context,
floating_ip['address'],
{'host': None})
def migrate_instance_finish(self, context, instance_uuid,
floating_addresses, host=None,
rxtx_factor=None, project_id=None,
source=None, dest=None):
# We only care if floating_addresses are provided and we're
# switching hosts
if host and not dest:
dest = host
if not floating_addresses or (source and source == dest):
return
LOG.info(_("Finishing migration network for instance %s"),
instance_uuid)
for address in floating_addresses:
floating_ip = self.db.floating_ip_get_by_address(context,
address)
if self._is_stale_floating_ip_address(context, floating_ip):
LOG.warn(_("Floating ip address |%(address)s| no longer "
"belongs to instance %(instance_uuid)s. Will not"
"setup it."),
{'address': address, 'instance_uuid': instance_uuid})
continue
self.db.floating_ip_update(context,
floating_ip['address'],
{'host': dest})
interface = CONF.public_interface or floating_ip['interface']
fixed_ip = self.db.fixed_ip_get(context,
floating_ip['fixed_ip_id'],
get_network=True)
self.l3driver.add_floating_ip(floating_ip['address'],
fixed_ip['address'],
interface,
fixed_ip['network'])
def _prepare_domain_entry(self, context, domain):
domainref = self.db.dnsdomain_get(context, domain)
scope = domainref['scope']
if scope == 'private':
av_zone = domainref['availability_zone']
this_domain = {'domain': domain,
'scope': scope,
'availability_zone': av_zone}
else:
project = domainref['project_id']
this_domain = {'domain': domain,
'scope': scope,
'project': project}
return this_domain
def get_dns_domains(self, context):
domains = []
db_domain_list = self.db.dnsdomain_list(context)
floating_driver_domain_list = self.floating_dns_manager.get_domains()
instance_driver_domain_list = self.instance_dns_manager.get_domains()
for db_domain in db_domain_list:
if (db_domain in floating_driver_domain_list or
db_domain in instance_driver_domain_list):
domain_entry = self._prepare_domain_entry(context,
db_domain)
if domain_entry:
domains.append(domain_entry)
else:
LOG.warn(_('Database inconsistency: DNS domain |%s| is '
'registered in the Nova db but not visible to '
'either the floating or instance DNS driver. It '
'will be ignored.'), db_domain)
return domains
def add_dns_entry(self, context, address, name, dns_type, domain):
self.floating_dns_manager.create_entry(name, address,
dns_type, domain)
def modify_dns_entry(self, context, address, name, domain):
self.floating_dns_manager.modify_address(name, address,
domain)
def delete_dns_entry(self, context, name, domain):
self.floating_dns_manager.delete_entry(name, domain)
def _delete_all_entries_for_ip(self, context, address):
domain_list = self.get_dns_domains(context)
for domain in domain_list:
names = self.get_dns_entries_by_address(context,
address,
domain['domain'])
for name in names:
self.delete_dns_entry(context, name, domain['domain'])
def get_dns_entries_by_address(self, context, address, domain):
return self.floating_dns_manager.get_entries_by_address(address,
domain)
def get_dns_entries_by_name(self, context, name, domain):
return self.floating_dns_manager.get_entries_by_name(name,
domain)
def create_private_dns_domain(self, context, domain, av_zone):
self.db.dnsdomain_register_for_zone(context, domain, av_zone)
try:
self.instance_dns_manager.create_domain(domain)
except exception.FloatingIpDNSExists:
LOG.warn(_('Domain |%(domain)s| already exists, '
'changing zone to |%(av_zone)s|.'),
{'domain': domain, 'av_zone': av_zone})
def create_public_dns_domain(self, context, domain, project):
self.db.dnsdomain_register_for_project(context, domain, project)
try:
self.floating_dns_manager.create_domain(domain)
except exception.FloatingIpDNSExists:
LOG.warn(_('Domain |%(domain)s| already exists, '
'changing project to |%(project)s|.'),
{'domain': domain, 'project': project})
def delete_dns_domain(self, context, domain):
self.db.dnsdomain_unregister(context, domain)
self.floating_dns_manager.delete_domain(domain)
def _get_project_for_domain(self, context, domain):
return self.db.dnsdomain_project(context, domain)
class LocalManager(base.Base, FloatingIP):
def __init__(self):
super(LocalManager, self).__init__()
# NOTE(vish): setting the host to none ensures that the actual
# l3driver commands for l3 are done via rpc.
self.host = None
self.servicegroup_api = servicegroup.API()
self.network_rpcapi = network_rpcapi.NetworkAPI()
self.floating_dns_manager = importutils.import_object(
CONF.floating_ip_dns_manager)
self.instance_dns_manager = importutils.import_object(
CONF.instance_dns_manager)
| apache-2.0 | -7,822,012,265,129,886,000 | 44.923295 | 79 | 0.548283 | false |
jaraco/aspen | aspen/http/mapping.py | 1 | 3764 | """
aspen.http.mapping
~~~~~~~~~~~~~~~~~~
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
NO_DEFAULT = object()
class Mapping(dict):
"""Base class for HTTP mappings: Path, Querystring, Headers, Cookie, Body.
Mappings in HTTP differ from Python dictionaries in that they may have one
or more values. This dictionary subclass maintains a list of values for
    each key. However, access semantics are asymmetric: subscript assignment
    clobbers the list, while subscript access returns the last item. Think
about it.
"""
def __getitem__(self, name):
"""Given a name, return the last value or raise Response(400).
"""
try:
return dict.__getitem__(self, name)[-1]
except KeyError:
from .. import Response
raise Response(400, "Missing key: %s" % repr(name))
def __setitem__(self, name, value):
"""Given a name and value, clobber any existing values.
"""
dict.__setitem__(self, name, [value])
def pop(self, name, default=NO_DEFAULT):
"""Given a name, return a value.
This removes the last value from the list for name and returns it. If
there was only one value in the list then the key is removed from the
mapping. If name is not present and default is given, that is returned
instead.
"""
if name not in self:
if default is not NO_DEFAULT:
return default
else:
dict.pop(self, name) # KeyError
values = dict.__getitem__(self, name)
value = values.pop()
if not values:
del self[name]
return value
popall = dict.pop
def all(self, name):
"""Given a name, return a list of values.
"""
try:
return dict.__getitem__(self, name)
except KeyError:
from .. import Response
raise Response(400)
def get(self, name, default=None):
"""Override to only return the last value.
"""
return dict.get(self, name, [default])[-1]
def add(self, name, value):
"""Given a name and value, clobber any existing values with the new one.
"""
if name in self:
self.all(name).append(value)
else:
dict.__setitem__(self, name, [value])
def ones(self, *names):
"""Given one or more names of keys, return a list of their values.
"""
lowered = []
for name in names:
n = name.lower()
if n not in lowered:
lowered.append(n)
return [self[name] for name in lowered]
class CaseInsensitiveMapping(Mapping):
def __init__(self, *a, **kw):
if a:
d = a[0]
items = d.iteritems if hasattr(d, 'iteritems') else d
for k, v in items():
self[k] = v
for k, v in kw.iteritems():
self[k] = v
def __contains__(self, name):
return Mapping.__contains__(self, name.title())
def __getitem__(self, name):
return Mapping.__getitem__(self, name.title())
def __setitem__(self, name, value):
return Mapping.__setitem__(self, name.title(), value)
def add(self, name, value):
return Mapping.add(self, name.title(), value)
def get(self, name, default=None):
return Mapping.get(self, name.title(), default)
def all(self, name):
return Mapping.all(self, name.title())
def pop(self, name):
return Mapping.pop(self, name.title())
def popall(self, name):
return Mapping.popall(self, name.title())
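

# A short usage sketch, added for illustration (not part of the original
# module). It demonstrates the asymmetric semantics described in Mapping's
# docstring; CaseInsensitiveMapping behaves the same way but folds keys with
# str.title(), so header-style lookups are case-insensitive.
if __name__ == '__main__':
    m = Mapping()
    m.add('x', 1)
    m.add('x', 2)                   # 'x' now holds [1, 2]
    assert m['x'] == 2              # subscript access returns the last value
    assert m.all('x') == [1, 2]
    m['x'] = 5                      # subscript assignment clobbers the list
    assert m.all('x') == [5]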
| mit | 3,913,725,705,590,676,500 | 28.40625 | 80 | 0.570404 | false |
USGSDenverPychron/pychron | pychron/extraction_line/tasks/extraction_line_actions.py | 1 | 4998 | # ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from pyface.action.api import Action
from pyface.tasks.action.task_action import TaskAction
# ============= standard library imports ========================
# ============= local library imports ==========================
# from pychron.envisage.core.action_helper import open_manager
from pychron.envisage.resources import icon
from pychron.envisage.view_util import open_view
class AutoReloadAction(TaskAction):
name = 'Auto Reload'
method = 'enable_auto_reload'
class SampleLoadingAction(TaskAction):
name = 'Load Samples'
method = 'do_sample_loading'
image = icon('arrow_out')
class ExtractionLineAction(Action):
def _get_manager(self, event, app=None):
EL_PROTOCOL = 'pychron.extraction_line.extraction_line_manager.ExtractionLineManager'
if app is None:
app = event.task.window.application
return app.get_service(EL_PROTOCOL)
class OpenExtractionLineManager(ExtractionLineAction):
description = 'Open extraction line manager'
name = 'Open Extraction Line Manager'
accelerator = 'Ctrl+E'
def perform(self, event):
man = self._get_manager(event)
open_view(man)
class OpenExtractionLineExplanation(ExtractionLineAction):
description = 'Open extraction line explanation'
def perform(self, event):
man = self._get_manager(event)
open_view(man.explanation)
class LoadCanvasAction(ExtractionLineAction):
"""
"""
description = 'load an updated canvas file'
name = 'Load Canvas'
enabled = False
def perform(self, event):
"""
"""
manager = self._get_manager(event)
# manager.window = self.window
manager.load_canvas()
class RefreshCanvasAction(ExtractionLineAction):
description = 'reload the scene graph to reflect changes made to setupfiles'
name = 'Refresh Canvas'
# enabled = False
def perform(self, event):
manager = self._get_manager(event)
# manager.window = self.window
manager.reload_canvas()
# class OpenViewControllerAction(ExtractionLineAction):
# description = 'Open User views'
# name = 'Open User Views'
# enabled = True
#
# # def __init__(self, *args, **kw):
# # super(OpenViewControllerAction, self).__init__(*args, **kw)
# #
# # man = get_manager(self.window)
# # man.on_trait_change(self.update, 'ui')
# #
# # def update(self, obj, name, old, new):
# # if new:
# # self.enabled = True
# # else:
# # self.enabled = False
#
# def perform(self, event):
# '''
# '''
# manager = self._get_manager(event)
# app = self.window.application
# app.open_view(manager.view_controller)
# # open_manager(app, manager.view_controller, kind='livemodal',
# # parent=manager.ui.control
# # )
# class OpenDeviceStreamerAction(Action):
# description = 'Open the device streamer manager'
# name = 'Open Device Streamer'
#
# enabled = False
#
# def __init__(self, *args, **kw):
# super(OpenDeviceStreamerAction, self).__init__(*args, **kw)
# manager = get_manager(self.window)
# if manager.device_stream_manager is not None:
# self.enabled = True
#
# def perform(self, event):
# manager = get_manager(self.window)
# manager.window = self.window
# manager.show_device_streamer()
class OpenPyScriptEditorAction(ExtractionLineAction):
def perform(self, event):
manager = self._get_manager(event)
open_view(manager.pyscript_editor)
class OpenMultiplexerAction(ExtractionLineAction):
accelerator = 'Ctrl+Shift+M'
def __init__(self, *args, **kw):
super(OpenMultiplexerAction, self).__init__(*args, **kw)
manager = self._get_manager(None, app=self.window.application)
if manager.multiplexer_manager is None:
self.enabled = False
def perform(self, event):
manager = self._get_manager(event)
if manager.multiplexer_manager:
open_view(manager.multiplexer_manager)
# ============= EOF ====================================
| apache-2.0 | 4,341,163,700,956,145,700 | 30.2375 | 93 | 0.611645 | false |
allenai/allennlp | tests/modules/feedforward_test.py | 1 | 4285 | from numpy.testing import assert_almost_equal
import inspect
import pytest
import torch
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.modules import FeedForward
from allennlp.nn import InitializerApplicator, Initializer, Activation
from allennlp.common.testing import AllenNlpTestCase
class TestFeedForward(AllenNlpTestCase):
def test_can_construct_from_params(self):
params = Params({"input_dim": 2, "hidden_dims": 3, "activations": "relu", "num_layers": 2})
feedforward = FeedForward.from_params(params)
assert len(feedforward._activations) == 2
assert [isinstance(a, torch.nn.ReLU) for a in feedforward._activations]
assert len(feedforward._linear_layers) == 2
assert [layer.weight.size(-1) == 3 for layer in feedforward._linear_layers]
params = Params(
{
"input_dim": 2,
"hidden_dims": [3, 4, 5],
"activations": ["relu", "relu", "linear"],
"dropout": 0.2,
"num_layers": 3,
}
)
feedforward = FeedForward.from_params(params)
assert len(feedforward._activations) == 3
assert isinstance(feedforward._activations[0], torch.nn.ReLU)
assert isinstance(feedforward._activations[1], torch.nn.ReLU)
# It's hard to check that the last activation is the lambda function we use for `linear`,
# so this is good enough.
assert not isinstance(feedforward._activations[2], torch.nn.ReLU)
assert len(feedforward._linear_layers) == 3
assert feedforward._linear_layers[0].weight.size(0) == 3
assert feedforward._linear_layers[1].weight.size(0) == 4
assert feedforward._linear_layers[2].weight.size(0) == 5
assert len(feedforward._dropout) == 3
assert [d.p == 0.2 for d in feedforward._dropout]
def test_init_checks_hidden_dim_consistency(self):
with pytest.raises(ConfigurationError):
FeedForward(2, 4, [5, 5], Activation.by_name("relu")())
def test_init_checks_activation_consistency(self):
with pytest.raises(ConfigurationError):
FeedForward(2, 4, 5, [Activation.by_name("relu")(), Activation.by_name("relu")()])
def test_forward_gives_correct_output(self):
params = Params({"input_dim": 2, "hidden_dims": 3, "activations": "relu", "num_layers": 2})
feedforward = FeedForward.from_params(params)
constant_init = Initializer.from_params(Params({"type": "constant", "val": 1.0}))
initializer = InitializerApplicator([(".*", constant_init)])
initializer(feedforward)
input_tensor = torch.FloatTensor([[-3, 1]])
output = feedforward(input_tensor).data.numpy()
assert output.shape == (1, 3)
# This output was checked by hand - ReLU makes output after first hidden layer [0, 0, 0],
# which then gets a bias added in the second layer to be [1, 1, 1].
assert_almost_equal(output, [[1, 1, 1]])
def test_textual_representation_contains_activations(self):
params = Params(
{
"input_dim": 2,
"hidden_dims": 3,
"activations": ["linear", "relu", "swish"],
"num_layers": 3,
}
)
feedforward = FeedForward.from_params(params)
expected_text_representation = inspect.cleandoc(
"""
FeedForward(
(_activations): ModuleList(
(0): Linear()
(1): ReLU()
(2): Swish()
)
(_linear_layers): ModuleList(
(0): Linear(in_features=2, out_features=3, bias=True)
(1): Linear(in_features=3, out_features=3, bias=True)
(2): Linear(in_features=3, out_features=3, bias=True)
)
(_dropout): ModuleList(
(0): Dropout(p=0.0, inplace=False)
(1): Dropout(p=0.0, inplace=False)
(2): Dropout(p=0.0, inplace=False)
)
)
"""
)
actual_text_representation = str(feedforward)
assert actual_text_representation == expected_text_representation
| apache-2.0 | -2,292,689,591,873,804,000 | 40.601942 | 99 | 0.589498 | false |
pytorch/fairseq | fairseq/tasks/__init__.py | 1 | 4326 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
import argparse
import importlib
import os
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import merge_with_parent, populate_dataclass
from hydra.core.config_store import ConfigStore
from .fairseq_task import FairseqTask, LegacyFairseqTask # noqa
# register dataclass
TASK_DATACLASS_REGISTRY = {}
TASK_REGISTRY = {}
TASK_CLASS_NAMES = set()
def setup_task(cfg: FairseqDataclass, **kwargs):
task = None
task_name = getattr(cfg, "task", None)
if isinstance(task_name, str):
# legacy tasks
task = TASK_REGISTRY[task_name]
if task_name in TASK_DATACLASS_REGISTRY:
dc = TASK_DATACLASS_REGISTRY[task_name]
cfg = populate_dataclass(dc(), cfg)
else:
task_name = getattr(cfg, "_name", None)
if task_name and task_name in TASK_DATACLASS_REGISTRY:
dc = TASK_DATACLASS_REGISTRY[task_name]
cfg = merge_with_parent(dc(), cfg)
task = TASK_REGISTRY[task_name]
assert (
task is not None
), f"Could not infer task type from {cfg}. Available tasks: {TASK_REGISTRY.keys()}"
return task.setup_task(cfg, **kwargs)
def register_task(name, dataclass=None):
"""
New tasks can be added to fairseq with the
:func:`~fairseq.tasks.register_task` function decorator.
For example::
@register_task('classification')
class ClassificationTask(FairseqTask):
(...)
.. note::
All Tasks must implement the :class:`~fairseq.tasks.FairseqTask`
interface.
Args:
name (str): the name of the task
"""
def register_task_cls(cls):
if name in TASK_REGISTRY:
raise ValueError("Cannot register duplicate task ({})".format(name))
if not issubclass(cls, FairseqTask):
raise ValueError(
"Task ({}: {}) must extend FairseqTask".format(name, cls.__name__)
)
if cls.__name__ in TASK_CLASS_NAMES:
raise ValueError(
"Cannot register task with duplicate class name ({})".format(
cls.__name__
)
)
TASK_REGISTRY[name] = cls
TASK_CLASS_NAMES.add(cls.__name__)
if dataclass is not None and not issubclass(dataclass, FairseqDataclass):
raise ValueError(
"Dataclass {} must extend FairseqDataclass".format(dataclass)
)
cls.__dataclass = dataclass
if dataclass is not None:
TASK_DATACLASS_REGISTRY[name] = dataclass
cs = ConfigStore.instance()
node = dataclass()
node._name = name
cs.store(name=name, group="task", node=node, provider="fairseq")
return cls
return register_task_cls
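# A minimal usage sketch (the task and config names below are hypothetical, not
# part of fairseq): the decorator can also attach a dataclass describing the
# task's configuration, which is then stored in Hydra's ConfigStore under the
# "task" group.
#
#   @register_task("my_translation", dataclass=MyTranslationConfig)
#   class MyTranslationTask(FairseqTask):
#       ...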
def get_task(name):
return TASK_REGISTRY[name]
def import_tasks(tasks_dir, namespace):
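    # Walk the given directory, import every task module it contains, and (for
    # registered tasks) expose a per-task argparse parser used by the sphinx docs.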
for file in os.listdir(tasks_dir):
path = os.path.join(tasks_dir, file)
if (
not file.startswith("_")
and not file.startswith(".")
and (file.endswith(".py") or os.path.isdir(path))
):
task_name = file[: file.find(".py")] if file.endswith(".py") else file
importlib.import_module(namespace + "." + task_name)
# expose `task_parser` for sphinx
if task_name in TASK_REGISTRY:
parser = argparse.ArgumentParser(add_help=False)
group_task = parser.add_argument_group("Task name")
# fmt: off
group_task.add_argument('--task', metavar=task_name,
help='Enable this task with: ``--task=' + task_name + '``')
# fmt: on
group_args = parser.add_argument_group(
"Additional command-line arguments"
)
TASK_REGISTRY[task_name].add_args(group_args)
globals()[task_name + "_parser"] = parser
# automatically import any Python files in the tasks/ directory
tasks_dir = os.path.dirname(__file__)
import_tasks(tasks_dir, "fairseq.tasks")
| mit | 2,169,655,994,506,281,200 | 30.808824 | 99 | 0.591308 | false |
CubicERP/odoo | addons/mrp/__openerp__.py | 1 | 3590 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'MRP',
'version': '1.1',
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/manufacturing',
'category': 'Manufacturing',
'sequence': 18,
'summary': 'Manufacturing Orders, Bill of Materials, Routing',
'depends': ['product', 'procurement', 'stock_account', 'resource', 'report'],
'description': """
Manage the Manufacturing process in OpenERP
===========================================
The manufacturing module allows you to cover planning, ordering, stocks and the manufacturing or assembly of products from raw materials and components. It handles the consumption and production of products according to a bill of materials and the necessary operations on machinery, tools or human resources according to routings.
It supports complete integration and planning of stockable goods, consumables or services. Services are completely integrated with the rest of the software. For instance, you can set up a sub-contracting service in a bill of materials to automatically purchase on order the assembly of your production.
Key Features
------------
* Make to Stock/Make to Order
* Multi-level bill of materials, no limit
* Multi-level routing, no limit
* Routing and work center integrated with analytic accounting
* Periodical scheduler computation
* Allows to browse bills of materials in a complete structure that includes child and phantom bills of materials
Dashboard / Reports for MRP will include:
-----------------------------------------
* Procurements in Exception (Graph)
* Stock Value Variation (Graph)
* Work Order Analysis
""",
'data': [
'security/mrp_security.xml',
'security/ir.model.access.csv',
'mrp_workflow.xml',
'mrp_data.xml',
'wizard/mrp_product_produce_view.xml',
'wizard/change_production_qty_view.xml',
'wizard/mrp_price_view.xml',
'wizard/mrp_workcenter_load_view.xml',
'wizard/stock_move_view.xml',
'mrp_view.xml',
'mrp_report.xml',
'company_view.xml',
'report/mrp_report_view.xml',
'res_config_view.xml',
'views/report_mrporder.xml',
'views/report_mrpbomstructure.xml',
'stock_view.xml',
],
'demo': ['mrp_demo.xml'],
'test': [
'test/bom_with_service_type_product.yml',
'test/mrp_users.yml',
'test/order_demo.yml',
'test/order_process.yml',
'test/cancel_order.yml',
],
'installable': True,
'application': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 5,567,801,789,478,058,000 | 40.744186 | 330 | 0.639833 | false |
lmr/avocado-virt | avocado/virt/test.py | 1 | 2787 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright (C) 2014 Red Hat Inc
#
# Author: Lucas Meneghel Rodrigues <[email protected]>
import os
from avocado import Test
from avocado.utils import process
from avocado.virt.qemu import machine
class VirtTest(Test):
def __init__(self, methodName='runTest', name=None, params=None,
base_logdir=None, tag=None, job=None, runner_queue=None):
super(VirtTest, self).__init__(methodName=methodName, name=name,
params=params, base_logdir=base_logdir,
tag=tag, job=job,
runner_queue=runner_queue)
self.vm = None
def _restore_guest_images(self):
"""
Restore any guest images defined in the command line.
"""
drive_file = self.params.get('image_path', '/plugins/virt/guest/*')
# Check if there's a compressed drive file
compressed_drive_file = drive_file + '.7z'
if os.path.isfile(compressed_drive_file):
self.log.debug('Found compressed image %s and restore guest '
'image set. Restoring image...',
compressed_drive_file)
cwd = os.getcwd()
os.chdir(os.path.dirname(compressed_drive_file))
process.run('7za -y e %s' %
os.path.basename(compressed_drive_file))
os.chdir(cwd)
else:
self.log.debug('Restore guest image set, but could not find '
'compressed image %s. Skipping restore...',
compressed_drive_file)
def setUp(self):
"""
Restore guest image, according to params directives.
By default, always restore.
If only the test level restore is disabled, execute one restore (job).
If both are disabled, then never restore.
"""
if not self.params.get('disable_restore_image_test',
'/plugins/virt/guest/*'):
self._restore_guest_images()
self.vm = machine.VM(params=self.params, logdir=self.logdir)
self.vm.devices.add_nodefaults()
self.vm.devices.add_vga('std')
self.vm.devices.add_vnc()
self.vm.devices.add_drive()
self.vm.devices.add_net()
| gpl-2.0 | 8,708,044,856,643,234,000 | 38.814286 | 78 | 0.594905 | false |
oholiab/grimmly | weechat/grimmly.py | 1 | 2113 | import weechat
w = weechat
import re
import htmllib
import os
import urllib
import urllib2
SCRIPT_NAME = "grimmly"
SCRIPT_AUTHOR = "oholiab <[email protected]>"
SCRIPT_VERSION = "1.1"
SCRIPT_LICENSE = "MIT"
SCRIPT_DESC = "Create short urls in private grimmly URL shortener"
settings = {
"ignore_prefix" : "^\s+-",
"private_server_url" : "http://localhost:8080",
"public_server_url" : "https://localhost:8080",
"buffer_blacklist" : ""
}
if w.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE,
SCRIPT_DESC, "", ""):
for option, default_value in settings.iteritems():
if not w.config_is_set_plugin(option):
w.config_set_plugin(option, default_value)
octet = r'(?:2(?:[0-4]\d|5[0-5])|1\d\d|\d{1,2})'
ipAddr = r'%s(?:\.%s){3}' % (octet, octet)
# Base domain regex off RFC 1034 and 1738
label = r'[0-9a-z][-0-9a-z]*[0-9a-z]?'
domain = r'%s(?:\.%s)*\.[a-z][-0-9a-z]*[a-z]?' % (label, label)
urlRe = re.compile(r'(\w+://(?:%s|%s)(?::\d+)?(?:/[^\])>\s]*)?)' % (domain, ipAddr), re.I)
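# e.g. urlRe matches strings such as 'http://example.com:8080/some/path';
# scheme, host, optional port and optional path are captured as a single group.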
home = os.environ['HOME']
testout = open('%s/testoutput' % home, 'a')
ignoreRe = re.compile(r'(%s)' % w.config_get_plugin('ignore_prefix'))
blacklist = w.config_get_plugin('buffer_blacklist').split(",")
def wee_print(message, buffer):
weechat.prnt(buffer, '-- %s' % message)
def test_write_url(data, buffer, time, tags, displayed, highlight, prefix, message):
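    # Print-hook callback: every URL found in a displayed message is POSTed to the
    # private shortener and the resulting public short URL is echoed into the buffer
    # (messages in blacklisted buffers are skipped).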
#if not ignoreRe.match(message):
# wee_print("doing nothing", buffer)
# return w.WEECHAT_RC_OK
if weechat.buffer_get_string(buffer, "name") in blacklist:
return w.WEECHAT_RC_OK
for url in urlRe.findall(message):
post_url = w.config_get_plugin('private_server_url')
get_url = w.config_get_plugin('public_server_url')
req = urllib2.Request(post_url, url)
response = urllib2.urlopen(req)
shorturl = get_url + '/' + response.read()
wee_print('%s' % shorturl, buffer)
return w.WEECHAT_RC_OK
if __name__ == "__main__":
w.hook_print("", "", "://", 1, "test_write_url", "")
| bsd-3-clause | -7,340,715,907,588,340,000 | 34.813559 | 90 | 0.601988 | false |
levelcert/freesugars | setup.py | 1 | 2343 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import, print_function
import io
import os
import re
from glob import glob
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import relpath
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
def read(*names, **kwargs):
return io.open(
join(dirname(__file__), *names),
encoding=kwargs.get('encoding', 'utf8')
).read()
setup(
name='freesugars',
version='0.1.0',
license='BSD',
description='A utility to calculate the free sugars in a food or drink product.',
long_description='%s\n%s' % (read('README.rst'), re.sub(':[a-z]+:`~?(.*?)`', r'``\1``', read('CHANGELOG.rst'))),
author='Level International',
author_email='[email protected]',
url='https://github.com/levelcert/freesugars',
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
include_package_data=True,
zip_safe=False,
classifiers=[
# complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: Unix',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Utilities',
],
keywords=[
# eg: 'keyword1', 'keyword2', 'keyword3',
],
install_requires=[
# eg: 'aspectlib==1.1.1', 'six>=1.7',
],
extras_require={
# eg:
# 'rst': ['docutils>=0.11'],
# ':python_version=="2.6"': ['argparse'],
},
entry_points={
'console_scripts': [
'freesugars = freesugars.cli:main',
]
},
)
| mit | 9,200,520,328,663,304,000 | 30.24 | 116 | 0.597098 | false |
City-of-Bloomington/green-rental | building/migrations/0019_auto__chg_field_building_air_conditioning__add_field_unit_deposit__add.py | 2 | 27229 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Building.air_conditioning'
db.alter_column(u'building_building', 'air_conditioning', self.gf('django.db.models.fields.CharField')(max_length=50))
# Adding field 'Unit.deposit'
db.add_column(u'building_unit', 'deposit',
self.gf('django.db.models.fields.FloatField')(default=0),
keep_default=False)
# Adding field 'Unit.electricity_min'
db.add_column(u'building_unit', 'electricity_min',
self.gf('django.db.models.fields.FloatField')(default=0),
keep_default=False)
# Adding field 'Unit.electricity_max'
db.add_column(u'building_unit', 'electricity_max',
self.gf('django.db.models.fields.FloatField')(default=0),
keep_default=False)
# Adding field 'Unit.gas_min'
db.add_column(u'building_unit', 'gas_min',
self.gf('django.db.models.fields.FloatField')(default=0),
keep_default=False)
# Adding field 'Unit.gas_max'
db.add_column(u'building_unit', 'gas_max',
self.gf('django.db.models.fields.FloatField')(default=0),
keep_default=False)
def backwards(self, orm):
# Changing field 'Building.air_conditioning'
db.alter_column(u'building_building', 'air_conditioning', self.gf('django.db.models.fields.BooleanField')())
# Deleting field 'Unit.deposit'
db.delete_column(u'building_unit', 'deposit')
# Deleting field 'Unit.electricity_min'
db.delete_column(u'building_unit', 'electricity_min')
# Deleting field 'Unit.electricity_max'
db.delete_column(u'building_unit', 'electricity_max')
# Deleting field 'Unit.gas_min'
db.delete_column(u'building_unit', 'gas_min')
# Deleting field 'Unit.gas_max'
db.delete_column(u'building_unit', 'gas_max')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'building.building': {
'Meta': {'object_name': 'Building'},
'active_listings': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'air_conditioning': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
'amenities': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'average_electricity': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'average_gas': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'average_sqft': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'average_trash': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'average_water': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'bike_friendly': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'bike_friendly_details': ('rentrocket.helpers.MultiSelectField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'bike_friendly_other': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'bike_score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'built_year': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'city': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['city.City']"}),
'composting': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'energy_average': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'energy_saving_details': ('rentrocket.helpers.MultiSelectField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'energy_saving_features': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'energy_saving_other': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'energy_score': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'estimated_total_max': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'estimated_total_min': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'game_room': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'garden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'garden_details': ('rentrocket.helpers.MultiSelectField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'garden_other': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'geocoder': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'gym': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'heat_source_details': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '20', 'blank': 'True'}),
'heat_source_other': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {}),
'laundry': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {}),
'max_rent': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'max_rent_listing': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'min_rent': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'min_rent_listing': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '80', 'blank': 'True'}),
'number_of_units': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'parcel': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['building.Parcel']"}),
'parking_options': ('rentrocket.helpers.MultiSelectField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'pets': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'pets_options': ('rentrocket.helpers.MultiSelectField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'pets_other': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'pool': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'recycling': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'renewable_energy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'renewable_energy_details': ('rentrocket.helpers.MultiSelectField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'renewable_energy_other': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'smart_living': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source.Source']", 'null': 'True', 'blank': 'True'}),
'sqft': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'tag': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'total_average': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'transit_friendly': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'transit_friendly_details': ('rentrocket.helpers.MultiSelectField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'transit_friendly_other': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'transit_score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '30', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'utility_data_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'walk_friendly': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'walk_friendly_details': ('rentrocket.helpers.MultiSelectField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'walk_friendly_other': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'walk_score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'who_pays_cable': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '20'}),
'who_pays_electricity': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '20'}),
'who_pays_gas': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '20'}),
'who_pays_internet': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '20'}),
'who_pays_trash': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '20'}),
'who_pays_water': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '20'})
},
u'building.buildingcomment': {
'Meta': {'object_name': 'BuildingComment'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'building': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': u"orm['building.Building']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'building.buildingdocument': {
'Meta': {'object_name': 'BuildingDocument'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'blob_key': ('django.db.models.fields.TextField', [], {}),
'building': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'documents'", 'to': u"orm['building.Building']"}),
'description': ('django.db.models.fields.TextField', [], {}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'documents'", 'null': 'True', 'to': u"orm['building.Unit']"})
},
u'building.buildingperson': {
'Meta': {'object_name': 'BuildingPerson'},
'building': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'people'", 'to': u"orm['building.Building']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['person.Person']"}),
'relation': ('django.db.models.fields.CharField', [], {'default': "'Unknown'", 'max_length': '50'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['building.Unit']", 'null': 'True', 'blank': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'building.buildingphoto': {
'Meta': {'object_name': 'BuildingPhoto'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'blob_key': ('django.db.models.fields.TextField', [], {}),
'building': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'photos'", 'to': u"orm['building.Building']"}),
'description': ('django.db.models.fields.TextField', [], {}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'photos'", 'null': 'True', 'to': u"orm['building.Unit']"})
},
u'building.changedetails': {
'Meta': {'object_name': 'ChangeDetails'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'building': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'changes'", 'to': u"orm['building.Building']"}),
'diffs': ('jsonfield.fields.JSONField', [], {'default': "''", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}),
'note': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'changes'", 'null': 'True', 'to': u"orm['building.Unit']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
u'building.listing': {
'Meta': {'object_name': 'Listing'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'available_end': ('django.db.models.fields.DateTimeField', [], {}),
'available_start': ('django.db.models.fields.DateTimeField', [], {}),
'building': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'listings'", 'null': 'True', 'to': u"orm['building.Building']"}),
'cost': ('django.db.models.fields.FloatField', [], {}),
'cost_cycle': ('django.db.models.fields.CharField', [], {'default': "'month'", 'max_length': '10'}),
'deposit': ('django.db.models.fields.FloatField', [], {}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lease_term': ('django.db.models.fields.CharField', [], {'default': "'12 Months'", 'max_length': '200'}),
'lease_type': ('django.db.models.fields.CharField', [], {'default': "'Standard'", 'max_length': '200'}),
'pets': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'listings'", 'to': u"orm['building.Unit']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
u'building.parcel': {
'Meta': {'object_name': 'Parcel'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'custom_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'from_st': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shape': ('django.db.models.fields.TextField', [], {}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'street_type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'to_st': ('django.db.models.fields.CharField', [], {'max_length': '12', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'building.permit': {
'Meta': {'object_name': 'Permit'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'building.renthistory': {
'Meta': {'object_name': 'RentHistory'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rent': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rent_history'", 'to': u"orm['building.Unit']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'building.unit': {
'Meta': {'ordering': "['number']", 'object_name': 'Unit'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'address': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'average_electricity': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'average_gas': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'average_trash': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'average_water': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'bathrooms': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bedrooms': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'building': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'units'", 'to': u"orm['building.Building']"}),
'deposit': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'electricity_max': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'electricity_min': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'floor': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'gas_max': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'gas_min': ('django.db.models.fields.FloatField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_occupants': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'rent': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'sqft': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '20', 'blank': 'True'}),
'tag': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'building.unittype': {
'Meta': {'object_name': 'UnitType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'city.city': {
'Meta': {'object_name': 'City'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {}),
'longitude': ('django.db.models.fields.FloatField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'tag': ('django.db.models.fields.CharField', [], {'default': "'<django.db.models.fields.charfield>'", 'unique': 'True', 'max_length': '200'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'city'", 'max_length': '50'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'person.person': {
'Meta': {'object_name': 'Person'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'city': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['city.City']", 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'})
},
u'source.feedinfo': {
'Meta': {'object_name': 'FeedInfo'},
'added': ('django.db.models.fields.DateTimeField', [], {}),
'building_id_definition': ('django.db.models.fields.TextField', [], {}),
'city': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['city.City']"}),
'contact_email': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parcel_id_definition': ('django.db.models.fields.TextField', [], {}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '12'})
},
u'source.source': {
'Meta': {'object_name': 'Source'},
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source.FeedInfo']", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['person.Person']", 'blank': 'True'})
}
}
complete_apps = ['building'] | agpl-3.0 | 339,383,079,143,093,100 | 78.387755 | 195 | 0.54688 | false |
agile-geoscience/bruges | setup.py | 1 | 1888 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Python installation file.
"""
from setuptools import setup
import re
verstr = 'unknown'
VERSIONFILE = "bruges/_version.py"
with open(VERSIONFILE, "r") as f:
verstrline = f.read().strip()
pattern = re.compile(r"__version__ = ['\"](.*)['\"]")
mo = pattern.search(verstrline)
if mo:
verstr = mo.group(1)
print("Version "+verstr)
else:
raise RuntimeError("Unable to find version string in %s." % (VERSIONFILE,))
REQUIREMENTS = ['numpy', 'scipy']
DEV_REQUIREMENTS = ['sphinx', 'twine']
CLASSIFIERS = ['Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'Natural Language :: English',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
]
setup(name='bruges',
version=verstr,
author='Agile Scientific',
author_email='[email protected]',
packages=['bruges',
'bruges.attribute',
'bruges.filters',
'bruges.models',
'bruges.noise',
'bruges.reflection',
'bruges.petrophysics',
'bruges.rockphysics',
'bruges.transform',
'bruges.unit',
'bruges.util',
],
description='Useful geophysics functions',
long_description=open('README.rst').read(),
url='http://pypi.python.org/pypi/bruges/',
install_requires=REQUIREMENTS,
classifiers=CLASSIFIERS,
license='Apache 2',
)
| apache-2.0 | -1,214,113,653,251,901,000 | 31 | 79 | 0.548729 | false |
warrickball/figures | animate_sph_harm_pyplot.py | 1 | 3709 | #!/usr/bin/env python
import numpy as np
from matplotlib import pyplot as pl
from matplotlib import animation
from mpl_toolkits.mplot3d import Axes3D
from scipy.special import sph_harm
from numpy import sin, cos, pi
from argparse import ArgumentParser
parser = ArgumentParser(description="""Uses matplotlib to animate a spherical harmonic with a chosen
angular degree and azimuthal order. """)
parser.add_argument('-l', '--ell', type=int, default=6,
help="angular degree")
parser.add_argument('-m', '--emm', type=int, default=3,
help="azimuthal order")
parser.add_argument('-o', '--output', type=str, default=None,
help="save figure to given filename without displaying "
"it (forces software rendering)")
parser.add_argument('--Ntheta', type=int, default=101,
help="number of points in latitude (default=101)")
parser.add_argument('--Nphi', type=int, default=101,
help="number of points in longitude (default=101)")
parser.add_argument('--cmap', type=str, default='seismic',
help="colour map for surface of sphere (default='seismic')")
parser.add_argument('--vmax', type=float, default=1.0,
help="maximum range of colour map; < 1.0 will saturate "
"(default=1.0)")
parser.add_argument('--vmin', type=float, default=None,
help="minimum range of colour map (default=-vmax)")
parser.add_argument('--pattern', type=str, default='',
help="pattern for surface colours; options are \n"
"'displacement' or 'dr' to use the local displacement "
"or anything else for a static spherical harmonic map.")
parser.add_argument('-a', '--amplitude', type=float, default=1.0/3.0,
help="amplitude of oscillation (default=1/3)")
parser.add_argument('-P', '--period', type=float, default=1.0,
help="period of oscillation, in seconds (default=1.0)")
parser.add_argument('--Nframes', type=int, default=40,
help="number of frames per oscillation (default=40)")
parser.add_argument('--view', type=float, nargs=2, default=[35.0, 45.0],
help="viewing angle")
args = parser.parse_args()
Nframes = args.Nframes
interval = args.period/Nframes*1e3 # in milliseconds
vmin = args.vmin if args.vmin else -args.vmax
def update(i, ax):
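    # Animation callback: clear the axes, compute this frame's radial displacement
    # from the oscillation phase, and redraw the deformed sphere coloured by the
    # spherical-harmonic pattern.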
ax.cla()
phase = 2.*pi*i/Nframes
dr = s*sin(phase)*args.amplitude
x = (1.+dr)*sin(Th)*cos(Ph)
y = (1.+dr)*sin(Th)*sin(Ph)
z = (1.+dr)*cos(Th)
v = s/(args.vmax-vmin)
if args.pattern in ['displacement', 'dr']:
v = v*np.sin(phase)
v += 0.5
surf = ax.plot_surface(x, y, z,
facecolors=pl.cm.get_cmap(args.cmap)(v),
**plot_kwargs)
ax.set_xlim(-0.9,0.9)
ax.set_ylim(-0.9,0.9)
ax.set_zlim(-0.9,0.9)
pl.axis('off')
ax.view_init(*args.view)
return surf,
plot_kwargs = {'rstride': 1,
'cstride': 1,
'linewidth': 0,
'antialiased': False}
ell = args.ell
emm = args.emm
fig = pl.figure(figsize=(6,6))
# ax = Axes3D.Axes3D(fig) # this is what tutorial uses
ax = pl.gca(projection='3d')
th = np.linspace(0., pi, args.Ntheta)
ph = np.linspace(-pi, pi, args.Nphi)
Th, Ph = np.meshgrid(th, ph)
s = sph_harm(emm,ell,Ph,Th).real
s = s/np.max(s)
update(0, ax)
ani = animation.FuncAnimation(fig, update, Nframes,
fargs=(ax,), interval=interval, repeat=True)
# Much smoother if we save it
if args.output:
ani.save(args.output, writer='imagemagick')
else:
pl.show()
| gpl-3.0 | -4,441,925,569,285,819,400 | 35.722772 | 101 | 0.602049 | false |
albertz/music-player | mac/pyobjc-framework-Quartz/PyObjCTest/test_cgdirectpalette.py | 1 | 2654 |
from PyObjCTools.TestSupport import *
from Quartz.CoreGraphics import *
try:
long
except NameError:
long = int
class TestCGDirectPalette (TestCase):
def testStructs(self):
v = CGDeviceByteColor()
self.assertTrue(hasattr(v, 'red'))
self.assertTrue(hasattr(v, 'green'))
self.assertTrue(hasattr(v, 'blue'))
v = CGDeviceColor()
self.assertTrue(hasattr(v, 'red'))
self.assertTrue(hasattr(v, 'green'))
self.assertTrue(hasattr(v, 'blue'))
def testTypes(self):
self.assertIsOpaquePointer(CGDirectPaletteRef)
def testFunctions(self):
self.assertResultIsCFRetained(CGPaletteCreateDefaultColorPalette)
v = CGPaletteCreateDefaultColorPalette()
self.assertIsInstance(v, CGDirectPaletteRef)
self.assertResultIsCFRetained(CGPaletteCreateWithDisplay)
v = CGPaletteCreateWithDisplay(CGMainDisplayID())
if v is not None:
self.assertIsInstance(v, CGDirectPaletteRef)
self.assertResultIsCFRetained(CGPaletteCreateWithCapacity)
v = CGPaletteCreateWithCapacity(128)
self.assertIsInstance(v, CGDirectPaletteRef)
self.assertResultIsCFRetained(CGPaletteCreateWithSamples)
v = CGPaletteCreateWithSamples([(0, 0, 0), (0.5, 0.5, 0.5), (1, 1, 1)], 3)
self.assertIsInstance(v, CGDirectPaletteRef)
self.assertResultIsCFRetained(CGPaletteCreateWithByteSamples)
v = CGPaletteCreateWithByteSamples([(0, 0, 0), (100, 100, 100), (255, 255, 255)], 3)
self.assertIsInstance(v, CGDirectPaletteRef)
CFRetain(v)
CGPaletteRelease(v)
palette = CGPaletteCreateDefaultColorPalette()
v = CGPaletteGetColorAtIndex(palette, 0)
self.assertIsInstance(palette, CGDirectPaletteRef)
v = CGPaletteGetIndexForColor(palette, v)
self.assertIsInstance(v, (int, long))
v = CGPaletteGetNumberOfSamples(palette)
self.assertIsInstance(v, (int, long))
CGPaletteSetColorAtIndex(palette, (0.5, 0.5, 0.5), 0)
self.assertResultIsCFRetained(CGPaletteCreateCopy)
v = CGPaletteCreateCopy(palette)
self.assertIsInstance(v, CGDirectPaletteRef)
self.assertResultHasType(CGPaletteIsEqualToPalette, objc._C_BOOL)
v = CGPaletteIsEqualToPalette(palette, v)
self.assertTrue(v is True)
self.assertResultIsCFRetained(CGPaletteCreateFromPaletteBlendedWithColor)
v = CGPaletteCreateFromPaletteBlendedWithColor(palette,
0.5, (0.3, 0.7, 0.1))
self.assertIsInstance(v, CGDirectPaletteRef)
if __name__ == "__main__":
main()
| bsd-2-clause | 8,031,069,687,409,468,000 | 33.467532 | 92 | 0.682366 | false |
dresiu/compressor_requirejs | compressor_requirejs/compiler.py | 1 | 7470 | import codecs
import subprocess
import os
import sys
from django.core.exceptions import ImproperlyConfigured
from django.contrib.staticfiles import finders
import execjs
from .config import settings
try:
from django.core.cache import caches
def get_cache(name):
return caches[name]
except ImportError:
from django.core.cache import get_cache
APP_NAME = 'compressor_requirejs'
def current_exc_type():
return sys.exc_info()[0]
class RequireJSCompilerException(Exception):
pass
class RequireJSCompiler(object):
def __init__(self):
self.r = getattr(settings, 'COMPRESSOR_REQUIREJS_R_JS', None)
if not self.r:
raise ImproperlyConfigured('COMPRESSOR_REQUIREJS_R_JS not set')
self.tmp = getattr(settings, 'COMPRESSOR_REQUIREJS_TMP', None)
if not self.tmp:
raise ImproperlyConfigured('COMPRESSOR_REQUIREJS_TMP not set')
self.libs = self.required_libs()
self.global_config = getattr(settings, 'COMPRESSOR_REQUIREJS_GLOBAL_CONFIG', None)
self.global_preconfig = getattr(settings, 'COMPRESSOR_REQUIREJS_GLOBAL_PRECONFIG', None)
self.printf = settings.COMPRESSOR_REQUIREJS_LOGGING_OUTPUT_FUNCTION
def get_fullpath(self, path, resolve_path=True):
if os.path.isabs(path):
return path
if not resolve_path:
return path
files = finders.find(path, all=True)
if isinstance(files, list):
if len(files) > 0:
return files[0]
else:
return path
elif files is not None:
return files
else:
return path
def required_libs(self):
paths = []
if hasattr(settings, 'COMPRESSOR_REQUIREJS_REQUIRED_LIBS'):
for arg in settings.COMPRESSOR_REQUIREJS_REQUIRED_LIBS.keys():
path = self.get_fullpath(settings.COMPRESSOR_REQUIREJS_REQUIRED_LIBS[arg])
if path.endswith('.js'):
path = path[:-3]
paths.append('paths.%s=%s' % (arg, path))
return paths
def _tmp_file_gen(self, filename, postfix):
return os.path.join(self.tmp, filename.replace('\\', '_').replace('/', '_').replace('.', '_') + postfix)
def requirejs(self, filename, resolve_path=True, include_tags=True):
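        # Build a single optimised bundle with r.js. The result is cached on disk and
        # only rebuilt when any file that went into the previous build has changed
        # (tracked by modification time in CacheFilesAccess).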
outfile = self._tmp_file_gen(filename, '_build.js')
build_filename = self.get_fullpath(filename, resolve_path)
#check cache
c = CacheFilesAccess(build_filename, outfile)
if not c.validate():
self.printf('[%s] cache invalid, compiling: %s' % (APP_NAME, filename))
process_args = [settings.COMPRESSOR_REQUIREJS_NODE_EXECUTABLE,
self.r,
'-o', build_filename,
'out=' + outfile]
process_args += self.libs
if self.global_config:
process_args.append('mainConfigFile=' + self.get_fullpath(self.global_config))
else:
process_args.append('mainConfigFile=' + self.global_preconfig)
try:
output = subprocess.check_output(process_args)
c.do_caching(output, self.get_fullpath(self.global_config) if self.global_config else None)
except current_exc_type() as e:
c.invalidate()
if hasattr(e, 'output'):
raise RequireJSCompilerException(e.output.decode('utf-8'))
raise e
if u'Error' in output.decode('utf-8'):
c.invalidate()
raise RequireJSCompilerException(output.decode('utf-8'))
else:
self.printf('[%s] skipping compilation: %s' % (APP_NAME, filename))
f = codecs.open(outfile, 'r', 'utf-8')
ret = '<script>%s</script>' % f.read() if include_tags else f.read()
f.close()
return ret
def requirejs_dir(self, filename, dirpath, resolve_path=True):
build_filename = self.get_fullpath(filename, resolve_path)
process_args = [settings.COMPRESSOR_REQUIREJS_NODE_EXECUTABLE,
self.r,
'-o', build_filename,
'dir=' + dirpath]
process_args += self.libs
if self.global_config:
process_args.append('mainConfigFile=' + self.get_fullpath(self.global_config))
else:
process_args.append('mainConfigFile=' + self.global_preconfig)
try:
output = subprocess.check_output(process_args)
except current_exc_type() as e:
if hasattr(e, 'output'):
raise RequireJSCompilerException(e.output.decode('utf-8'))
raise e
        if u'Error' in output.decode('utf-8'):
raise RequireJSCompilerException(output.decode('utf-8'))
class CacheFileModel(object):
modified_time = ''
filename = ''
def __unicode__(self):
return '%s %s' % (self.modified_time, self.filename)
class CacheFilesAccess(object):
PATH_SPLIT = '/'
MODULE_PATH_SPLIT = '!'
def __init__(self, build_file, output_file):
self.cache = get_cache(settings.COMPRESSOR_REQUIREJS_CACHE_BACKEND)
self.build_file = build_file
self.output_file = output_file
self.cache_timeout = settings.COMPRESSOR_REQUIREJS_CACHE_TIMEOUT
self.base_path = self._get_build_base_url()
def _cache_hash_gen(self, module_file):
return self.output_file + '::' + module_file
def do_caching(self, output, global_config):
module_files = self._get_files(output)
files_dict = dict()
if global_config is not None:
module_files += [global_config]
for module in module_files:
if os.path.exists(module):
cm = CacheFileModel()
cm.filename = module
cm.modified_time = os.path.getmtime(module)
files_dict[module] = cm
self.cache.set(self._cache_hash_gen(''), files_dict, self.cache_timeout)
def validate(self):
files = self.cache.get(self._cache_hash_gen(''))
if not files:
return False
files = files.values()
for file_model in files:
if os.path.exists(file_model.filename):
if file_model.modified_time != os.path.getmtime(file_model.filename):
return False
else:
return False
return True
def invalidate(self):
self.cache.delete(self._cache_hash_gen(''))
def _get_files(self, output):
lines = output.decode('utf-8').split(u'\n')
module_files = [self.build_file] + lines
return [self._normalize(m) for m in module_files if os.path.isfile(m) or self.MODULE_PATH_SPLIT in m]
def _normalize(self, file_path):
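        # Loader-plugin entries such as 'text!templates/foo.html' are resolved by
        # joining the part after '!' onto the build file's baseUrl; plain file paths
        # are simply normalised.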
if self.MODULE_PATH_SPLIT in file_path:
relative_path = file_path.split(self.MODULE_PATH_SPLIT)[1]
return os.path.normpath(os.path.join(self.base_path, *relative_path.split(self.PATH_SPLIT)))
else:
return os.path.normpath(file_path)
def _get_build_base_url(self):
runtime = execjs.get('Node')
runtime._command = settings.COMPRESSOR_REQUIREJS_NODE_EXECUTABLE
ctx = runtime.eval(open(self.build_file, 'r').read())
return os.path.join(os.path.dirname(self.build_file), *ctx.get('baseUrl', '').split(self.PATH_SPLIT))
| mit | -5,865,335,164,976,793,000 | 36.164179 | 112 | 0.592503 | false |
jctanner/odp-scripts | jartools/jardumper.py | 1 | 1524 | #!/usr/bin/env python
import json
import os
import sys
import subprocess
import tempfile
def run_command(cmd):
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(so, se) = p.communicate()
return (p.returncode, so, se)
def which(cmd):
''' Get the path for a command '''
cmd = "which %s" % cmd
(rc, so, se) = run_command(cmd)
return so.strip()
def listjarcontents(jarfile):
# jar tf ~/jars/commons-io-2.4.jar
jarfiles = []
jarcmd = which('jar')
thiscmd = "%s tf %s" % (jarcmd, jarfile)
(rc, so, se) = run_command(thiscmd)
jarfiles = so.split('\n')
jarfiles = [x.strip() for x in jarfiles if x.strip()]
return jarfiles
def processjar(jarfile):
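    # Run javap on every .class entry in the jar and map the entry name to the
    # resulting declaration dump.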
classes = {}
javap = which('javap')
# list files
jarfiles = listjarcontents(jarfile)
for jf in jarfiles:
if not jf.endswith('.class'):
continue
print jf
thiscmd = javap + ' -classpath ' + jarfile
thiscmd += ' ' + jf.replace('.class', '')
(rc, so, se) = run_command(thiscmd)
classes[jf] = so
#import pdb; pdb.set_trace()
#import pdb; pdb.set_trace()
return classes
def main():
print "hello world"
print sys.argv
jarA = sys.argv[1]
classes = processjar(jarA)
outfile = os.path.basename(jarA)
outfile = outfile.replace('.jar', '.data')
with open(outfile, 'wb') as f:
f.write(json.dumps(classes,indent=2))
if __name__ == "__main__":
main()
| apache-2.0 | -5,818,770,157,537,226,000 | 22.446154 | 89 | 0.591864 | false |
JoeGermuska/agate | tests/test_table.py | 1 | 86346 | #!/usr/bin/env python
# -*- coding: utf8 -*-
try:
from cdecimal import Decimal
except ImportError: # pragma: no cover
from decimal import Decimal
import warnings
import json
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import os
import sys
import six
from six.moves import html_parser
from six.moves import range
from agate import Table, TableSet
from agate.aggregations import Count, Sum
from agate.computations import Percent
from agate.data_types import *
from agate.computations import Formula
from agate.exceptions import DataTypeError
from agate.testcase import AgateTestCase
from agate.warns import DuplicateColumnWarning
class TestBasic(AgateTestCase):
def setUp(self):
self.rows = (
(1, 4, 'a'),
(2, 3, 'b'),
(None, 2, u'👍')
)
self.number_type = Number()
self.text_type = Text()
self.column_names = ['one', 'two', 'three']
self.column_types = [self.number_type, self.number_type, self.text_type]
def test_create_table(self):
table = Table(self.rows)
self.assertColumnNames(table, ['a', 'b', 'c'])
self.assertColumnTypes(table, [Number, Number, Text])
self.assertRows(table, self.rows)
def test_create_filename(self):
with self.assertRaises(ValueError):
table = Table('foo.csv') # noqa
def test_create_empty_table(self):
table = Table([])
table2 = Table([], self.column_names, self.column_types)
self.assertColumnNames(table, [])
self.assertColumnTypes(table, [])
self.assertRows(table, [])
self.assertColumnNames(table2, self.column_names)
self.assertColumnTypes(table2, [Number, Number, Text])
self.assertRows(table2, [])
def test_create_table_column_types(self):
column_types = [self.number_type, self.text_type, self.text_type]
table = Table(self.rows, column_types=column_types)
self.assertColumnNames(table, ['a', 'b', 'c'])
self.assertColumnTypes(table, [Number, Text, Text])
self.assertRows(table, [
(1, '4', 'a'),
(2, '3', 'b'),
(None, '2', u'👍')
])
def test_create_table_column_names(self):
table = Table(self.rows, self.column_names)
self.assertColumnNames(table, self.column_names)
self.assertColumnTypes(table, [Number, Number, Text])
self.assertRows(table, self.rows)
def test_create_table_column_types_and_names(self):
table = Table(self.rows, self.column_names, self.column_types)
self.assertColumnNames(table, self.column_names)
self.assertColumnTypes(table, [Number, Number, Text])
self.assertRows(table, self.rows)
def test_create_table_non_string_columns(self):
column_names = ['one', 'two', 3]
with self.assertRaises(ValueError):
Table(self.rows, column_names, self.column_types)
def test_create_table_null_column_names(self):
column_names = ['one', None, 'three']
with warnings.catch_warnings():
warnings.simplefilter('error')
with self.assertRaises(RuntimeWarning):
table1 = Table(self.rows, column_types=self.column_types) # noqa
with self.assertRaises(RuntimeWarning):
table2 = Table(self.rows, column_names, self.column_types) # noqa
table3 = Table(self.rows, column_names, self.column_types)
self.assertColumnNames(table3, ['one', 'b', 'three'])
def test_create_table_non_datatype_columns(self):
column_types = [self.number_type, self.number_type, 'foo']
with self.assertRaises(ValueError):
Table(self.rows, self.column_names, column_types)
def test_create_duplicate_column_names(self):
column_names = ['one', 'two', 'two']
warnings.simplefilter('error')
with self.assertRaises(DuplicateColumnWarning):
table = Table(self.rows, column_names, self.column_types)
warnings.simplefilter('ignore')
table = Table(self.rows, column_names, self.column_types)
self.assertColumnNames(table, ['one', 'two', 'two_2'])
self.assertColumnTypes(table, [Number, Number, Text])
self.assertRows(table, self.rows)
def test_column_names_types_different_lengths(self):
column_names = ['one', 'two', 'three', 'four']
with self.assertRaises(ValueError):
Table(self.rows, column_names, self.column_types)
def test_create_variable_length_rows(self):
rows = (
(1, 4, 'a'),
(2,),
(None, 2)
)
table = Table(rows, self.column_names, self.column_types)
table2 = Table(rows)
self.assertColumnNames(table, self.column_names)
self.assertColumnTypes(table, [Number, Number, Text])
self.assertRows(table, [
(1, 4, 'a'),
(2, None, None),
(None, 2, None)
])
self.assertColumnTypes(table2, [Number, Number, Text])
self.assertRows(table2, [
(1, 4, 'a'),
(2, None, None),
(None, 2, None)
])
def test_create_table_no_column_names(self):
table = Table(self.rows, None, self.column_types)
self.assertEqual(len(table.rows), 3)
self.assertEqual(len(table.columns), 3)
self.assertSequenceEqual(table.columns[0], (1, 2, None))
self.assertSequenceEqual(table.columns['a'], (1, 2, None))
with self.assertRaises(KeyError):
table.columns[None]
with self.assertRaises(KeyError):
table.columns['one']
self.assertSequenceEqual(table.columns[2], ('a', 'b', u'👍'))
self.assertSequenceEqual(table.columns['c'], ('a', 'b', u'👍'))
with self.assertRaises(KeyError):
table.columns['']
def test_row_too_long(self):
rows = (
(1, 4, 'a', 'foo'),
(2,),
(None, 2)
)
with self.assertRaises(ValueError):
table = Table(rows, self.column_names, self.column_types) # noqa
def test_row_names(self):
table = Table(self.rows, self.column_names, self.column_types, row_names='three')
self.assertRowNames(table, ['a', 'b', u'👍'])
def test_row_names_non_string(self):
table = Table(self.rows, self.column_names, self.column_types, row_names='one')
self.assertSequenceEqual(table.row_names, [
Decimal('1'),
Decimal('2'),
None
])
self.assertSequenceEqual(table.rows[Decimal('1')], (1, 4, 'a'))
self.assertSequenceEqual(table.rows[Decimal('2')], (2, 3, 'b'))
self.assertSequenceEqual(table.rows[None], (None, 2, u'👍'))
def test_row_names_func(self):
table = Table(self.rows, self.column_names, self.column_types, row_names=lambda r: (r['one'], r['three']))
self.assertSequenceEqual(table.row_names, [
(Decimal('1'), 'a'),
(Decimal('2'), 'b'),
(None, u'👍')
])
self.assertSequenceEqual(table.rows[(Decimal('1'), 'a')], (1, 4, 'a'))
self.assertSequenceEqual(table.rows[(Decimal('2'), 'b')], (2, 3, 'b'))
self.assertSequenceEqual(table.rows[(None, u'👍')], (None, 2, u'👍'))
def test_row_names_invalid(self):
with self.assertRaises(ValueError):
table = Table( # noqa
self.rows,
self.column_names,
self.column_types,
row_names={'a': 1, 'b': 2, 'c': 3}
)
def test_stringify(self):
column_names = ['foo', 'bar', u'👍']
table = Table(self.rows, column_names)
if six.PY2:
u = unicode(table)
self.assertIn('foo', u)
self.assertIn('bar', u)
self.assertIn(u'👍', u)
s = str(table)
self.assertIn('foo', s)
self.assertIn('bar', s)
self.assertIn(u'👍'.encode('utf-8'), s)
else:
u = str(table)
self.assertIn('foo', u)
self.assertIn('bar', u)
self.assertIn(u'👍', u)
def test_str(self):
table = Table(self.rows)
self.assertColumnNames(table, ['a', 'b', 'c'])
self.assertColumnTypes(table, [Number, Number, Text])
self.assertRows(table, self.rows)
def test_get_column_types(self):
table = Table(self.rows, self.column_names, self.column_types)
self.assertSequenceEqual(table.column_types, self.column_types)
def test_get_column_names(self):
table = Table(self.rows, self.column_names, self.column_types)
self.assertSequenceEqual(table.column_names, self.column_names)
def test_select(self):
table = Table(self.rows, self.column_names, self.column_types)
new_table = table.select(('two', 'three'))
self.assertIsNot(new_table, table)
self.assertColumnNames(new_table, ['two', 'three'])
self.assertColumnTypes(new_table, [Number, Text])
self.assertRows(new_table, [
[4, 'a'],
[3, 'b'],
[2, u'👍']
])
def test_select_single(self):
table = Table(self.rows, self.column_names, self.column_types)
new_table = table.select('three')
self.assertColumnNames(new_table, ['three'])
self.assertColumnTypes(new_table, [Text])
self.assertRows(new_table, [
['a'],
['b'],
[u'👍']
])
def test_select_with_row_names(self):
table = Table(self.rows, self.column_names, self.column_types, row_names='three')
new_table = table.select(('three',))
self.assertRowNames(new_table, ['a', 'b', u'👍'])
def test_select_does_not_exist(self):
table = Table(self.rows, self.column_names, self.column_types)
with self.assertRaises(KeyError):
table.select(('four',))
def test_exclude(self):
table = Table(self.rows, self.column_names, self.column_types)
new_table = table.exclude(('one', 'two'))
self.assertIsNot(new_table, table)
self.assertColumnNames(new_table, ['three'])
self.assertColumnTypes(new_table, [Text])
self.assertRows(new_table, [
['a'],
['b'],
[u'👍']
])
def test_exclude_single(self):
table = Table(self.rows, self.column_names, self.column_types)
new_table = table.exclude('one')
self.assertIsNot(new_table, table)
self.assertColumnNames(new_table, ['two', 'three'])
self.assertColumnTypes(new_table, [Number, Text])
self.assertRows(new_table, [
[4, 'a'],
[3, 'b'],
[2, u'👍']
])
def test_exclude_with_row_names(self):
table = Table(self.rows, self.column_names, self.column_types, row_names='three')
new_table = table.exclude(('one', 'two'))
self.assertRowNames(new_table, ['a', 'b', u'👍'])
def test_where(self):
table = Table(self.rows, self.column_names, self.column_types)
new_table = table.where(lambda r: r['one'] in (2, None))
self.assertIsNot(new_table, table)
self.assertColumnNames(new_table, self.column_names)
self.assertColumnTypes(new_table, [Number, Number, Text])
self.assertRows(new_table, [
self.rows[1],
self.rows[2]
])
def test_where_with_row_names(self):
table = Table(self.rows, self.column_names, self.column_types, row_names='three')
new_table = table.where(lambda r: r['one'] in (2, None))
self.assertRowNames(new_table, ['b', u'👍'])
def test_find(self):
table = Table(self.rows, self.column_names, self.column_types)
row = table.find(lambda r: r['two'] - r['one'] == 1)
self.assertIs(row, table.rows[1])
def test_find_none(self):
table = Table(self.rows, self.column_names, self.column_types)
row = table.find(lambda r: r['one'] == 'FOO')
self.assertIs(row, None)
def test_order_by(self):
table = Table(self.rows, self.column_names, self.column_types)
new_table = table.order_by('two')
self.assertIsNot(new_table, table)
self.assertColumnNames(new_table, self.column_names)
self.assertColumnTypes(new_table, [Number, Number, Text])
self.assertRows(new_table, [
self.rows[2],
self.rows[1],
self.rows[0]
])
# Verify old table not changed
self.assertRows(table, self.rows)
def test_order_by_multiple_columns(self):
rows = (
(1, 2, 'a'),
(2, 1, 'b'),
(1, 1, 'c')
)
table = Table(rows, self.column_names, self.column_types)
new_table = table.order_by(['one', 'two'])
self.assertIsNot(new_table, table)
self.assertColumnNames(new_table, self.column_names)
self.assertColumnTypes(new_table, [Number, Number, Text])
self.assertRows(new_table, [
rows[2],
rows[0],
rows[1]
])
def test_order_by_func(self):
rows = (
(1, 2, 'a'),
(2, 1, 'b'),
(1, 1, 'c')
)
table = Table(rows, self.column_names, self.column_types)
new_table = table.order_by(lambda r: (r['one'], r['two']))
self.assertIsNot(new_table, table)
self.assertColumnNames(new_table, self.column_names)
self.assertColumnTypes(new_table, [Number, Number, Text])
self.assertRows(new_table, [
rows[2],
rows[0],
rows[1]
])
def test_order_by_reverse(self):
table = Table(self.rows, self.column_names, self.column_types)
new_table = table.order_by(lambda r: r['two'], reverse=True)
self.assertIsNot(new_table, table)
self.assertColumnNames(new_table, self.column_names)
self.assertColumnTypes(new_table, [Number, Number, Text])
self.assertRows(new_table, [
self.rows[0],
self.rows[1],
self.rows[2]
])
def test_order_by_nulls(self):
rows = (
(1, 2, None),
(2, None, None),
(1, 1, 'c'),
(1, None, 'a')
)
table = Table(rows, self.column_names, self.column_types)
new_table = table.order_by('two')
self.assertIsNot(new_table, table)
self.assertColumnNames(new_table, self.column_names)
self.assertColumnTypes(new_table, [Number, Number, Text])
self.assertRows(new_table, [
rows[2],
rows[0],
rows[1],
rows[3]
])
new_table = table.order_by('three')
self.assertIsNot(new_table, table)
self.assertColumnNames(new_table, self.column_names)
self.assertColumnTypes(new_table, [Number, Number, Text])
self.assertRows(new_table, [
rows[3],
rows[2],
rows[0],
rows[1]
])
def test_order_by_with_row_names(self):
table = Table(self.rows, self.column_names, self.column_types, row_names='three')
new_table = table.order_by('two')
self.assertRowNames(new_table, [u'👍', 'b', 'a'])
def test_order_by_empty_table(self):
table = Table([], self.column_names)
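        # Ordering an empty table should simply not raise; there is nothing to assert.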
new_table = table.order_by('three') # noqa
def test_limit(self):
table = Table(self.rows, self.column_names, self.column_types)
new_table = table.limit(2)
self.assertIsNot(new_table, table)
self.assertColumnNames(new_table, self.column_names)
self.assertColumnTypes(new_table, [Number, Number, Text])
self.assertRows(new_table, self.rows[:2])
def test_limit_slice(self):
table = Table(self.rows, self.column_names, self.column_types)
new_table = table.limit(0, 3, 2)
self.assertIsNot(new_table, table)
self.assertColumnNames(new_table, self.column_names)
self.assertColumnTypes(new_table, [Number, Number, Text])
self.assertRows(new_table, self.rows[0:3:2])
def test_limit_slice_negative(self):
table = Table(self.rows, self.column_names, self.column_types)
new_table = table.limit(-2, step=-1)
self.assertIsNot(new_table, table)
self.assertColumnNames(new_table, self.column_names)
self.assertColumnTypes(new_table, [Number, Number, Text])
self.assertRows(new_table, self.rows[-2:-1])
def test_limit_step_only(self):
table = Table(self.rows, self.column_names, self.column_types)
new_table = table.limit(step=2)
self.assertIsNot(new_table, table)
self.assertColumnNames(new_table, self.column_names)
self.assertColumnTypes(new_table, [Number, Number, Text])
self.assertRows(new_table, self.rows[::2])
def test_limit_with_row_names(self):
table = Table(self.rows, self.column_names, self.column_types, row_names='three')
new_table = table.limit(2)
self.assertRowNames(new_table, ['a', 'b'])
def test_distinct_column(self):
rows = (
(1, 2, 'a'),
(2, None, None),
(1, 1, 'c'),
(1, None, None)
)
table = Table(rows, self.column_names, self.column_types)
new_table = table.distinct('one')
self.assertIsNot(new_table, table)
self.assertColumnNames(new_table, self.column_names)
self.assertColumnTypes(new_table, [Number, Number, Text])
self.assertRows(new_table, [
rows[0],
rows[1]
])
def test_distinct_multiple_columns(self):
rows = (
(1, 2, 'a'),
(2, None, None),
(1, 1, 'c'),
(1, None, None)
)
table = Table(rows, self.column_names, self.column_types)
new_table = table.distinct(['two', 'three'])
self.assertIsNot(new_table, table)
self.assertColumnNames(new_table, self.column_names)
self.assertColumnTypes(new_table, [Number, Number, Text])
self.assertRows(new_table, [
rows[0],
rows[1],
rows[2]
])
def test_distinct_func(self):
rows = (
(1, 2, 'a'),
(2, None, None),
(1, 1, 'c'),
(1, None, None)
)
table = Table(rows, self.column_names, self.column_types)
new_table = table.distinct(lambda row: (row['two'], row['three']))
self.assertIsNot(new_table, table)
self.assertColumnNames(new_table, self.column_names)
self.assertColumnTypes(new_table, [Number, Number, Text])
self.assertRows(new_table, [
rows[0],
rows[1],
rows[2]
])
def test_distinct_none(self):
rows = (
(1, 2, 'a'),
(1, None, None),
(1, 1, 'c'),
(1, None, None)
)
table = Table(rows, self.column_names, self.column_types)
new_table = table.distinct()
self.assertIsNot(new_table, table)
self.assertColumnNames(new_table, self.column_names)
self.assertColumnTypes(new_table, [Number, Number, Text])
self.assertRows(new_table, [
rows[0],
rows[1],
rows[2]
])
def test_distinct_with_row_names(self):
rows = (
(1, 2, 'a'),
(2, None, None),
(1, 1, 'c'),
(1, None, 'd')
)
table = Table(rows, self.column_names, self.column_types, row_names='three')
new_table = table.distinct('one')
self.assertRowNames(new_table, ['a', None])
def test_chain_select_where(self):
table = Table(self.rows, self.column_names, self.column_types)
new_table = table.select(('one', 'two')).where(lambda r: r['two'] == 3)
self.assertIsNot(new_table, table)
self.assertColumnNames(new_table, self.column_names[:2])
self.assertColumnTypes(new_table, [Number, Number])
self.assertRows(new_table, [
self.rows[1][:2],
])
class TestCSV(AgateTestCase):
def setUp(self):
self.rows = (
(1, 'a', True, '11/4/2015', '11/4/2015 12:22 PM', '4:15'),
(2, u'👍', False, '11/5/2015', '11/4/2015 12:45 PM', '6:18'),
(None, 'b', None, None, None, None)
)
self.column_names = [
'number', 'text', 'boolean', 'date', 'datetime', 'timedelta'
]
self.column_types = [
Number(), Text(), Boolean(), Date(), DateTime(), TimeDelta()
]
def test_from_csv(self):
table1 = Table(self.rows, self.column_names, self.column_types)
table2 = Table.from_csv('examples/test.csv')
self.assertColumnNames(table2, table1.column_names)
self.assertColumnTypes(table2, [Number, Text, Boolean, Date, DateTime, TimeDelta])
self.assertRows(table2, table1.rows)
def test_from_csv_file_like_object(self):
table1 = Table(self.rows, self.column_names, self.column_types)
with open('examples/test.csv') as f:
table2 = Table.from_csv(f)
self.assertColumnNames(table2, table1.column_names)
self.assertColumnTypes(table2, [Number, Text, Boolean, Date, DateTime, TimeDelta])
self.assertRows(table2, table1.rows)
def test_from_csv_type_tester(self):
tester = TypeTester(force={
'number': Text()
})
table = Table.from_csv('examples/test.csv', column_types=tester)
self.assertColumnTypes(table, [Text, Text, Boolean, Date, DateTime, TimeDelta])
def test_from_csv_no_type_tester(self):
tester = TypeTester(limit=0)
table = Table.from_csv('examples/test.csv', column_types=tester)
self.assertColumnTypes(table, [Text, Text, Text, Text, Text, Text])
def test_from_csv_no_header(self):
table = Table.from_csv('examples/test_no_header.csv', header=False)
self.assertColumnNames(table, ['a', 'b', 'c', 'd', 'e', 'f'])
self.assertColumnTypes(table, [Number, Text, Boolean, Date, DateTime, TimeDelta])
def test_from_csv_no_header_columns(self):
table = Table.from_csv('examples/test_no_header.csv', self.column_names, header=False)
self.assertColumnNames(table, self.column_names)
self.assertColumnTypes(table, [Number, Text, Boolean, Date, DateTime, TimeDelta])
def test_from_csv_sniff_limit(self):
table1 = Table(self.rows, self.column_names, self.column_types)
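        # sniff_limit=None asks from_csv to sniff the CSV dialect from the entire file.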
table2 = Table.from_csv('examples/test_csv_sniff.csv', sniff_limit=None)
self.assertColumnNames(table2, table1.column_names)
self.assertColumnTypes(table2, [Number, Text, Boolean, Date, DateTime, TimeDelta])
self.assertRows(table2, table1.rows)
def test_to_csv(self):
table = Table(self.rows, self.column_names, self.column_types)
table.to_csv('.test.csv')
with open('.test.csv') as f:
contents1 = f.read()
with open('examples/test.csv') as f:
contents2 = f.read()
self.assertEqual(contents1, contents2)
os.remove('.test.csv')
def test_to_csv_file_like_object(self):
table = Table(self.rows, self.column_names, self.column_types)
with open('.test.csv', 'w') as f:
table.to_csv(f)
# Should leave the file open
self.assertFalse(f.closed)
with open('.test.csv') as f:
contents1 = f.read()
with open('examples/test.csv') as f:
contents2 = f.read()
self.assertEqual(contents1, contents2)
os.remove('.test.csv')
def test_to_csv_to_stdout(self):
table = Table(self.rows, self.column_names, self.column_types)
output = StringIO()
table.to_csv(output)
contents1 = output.getvalue()
with open('examples/test.csv') as f:
contents2 = f.read()
self.assertEqual(contents1, contents2)
def test_to_csv_make_dir(self):
table = Table(self.rows, self.column_names, self.column_types)
table.to_csv('newdir/test.csv')
with open('newdir/test.csv') as f:
contents1 = f.read()
with open('examples/test.csv') as f:
contents2 = f.read()
self.assertEqual(contents1, contents2)
os.remove('newdir/test.csv')
os.rmdir('newdir/')
def test_print_csv(self):
table = Table(self.rows, self.column_names, self.column_types)
old = sys.stdout
sys.stdout = StringIO()
try:
table.print_csv()
contents1 = sys.stdout.getvalue()
with open('examples/test.csv') as f:
contents2 = f.read()
self.assertEqual(contents1, contents2)
finally:
sys.stdout = old
class TestJSON(AgateTestCase):
def setUp(self):
self.rows = (
(1, 'a', True, '11/4/2015', '11/4/2015 12:22 PM', '4:15'),
(2, u'👍', False, '11/5/2015', '11/4/2015 12:45 PM', '6:18'),
(None, 'b', None, None, None, None)
)
self.column_names = [
'number', 'text', 'boolean', 'date', 'datetime', 'timedelta'
]
self.column_types = [
Number(), Text(), Boolean(), Date(), DateTime(), TimeDelta()
]
def test_from_json(self):
table1 = Table(self.rows, self.column_names, self.column_types)
table2 = Table.from_json('examples/test.json')
self.assertColumnNames(table2, self.column_names)
self.assertColumnTypes(table2, [Number, Text, Boolean, Date, DateTime, TimeDelta])
self.assertRows(table2, table1.rows)
def test_from_json_file_like_object(self):
table1 = Table(self.rows, self.column_names, self.column_types)
with open('examples/test.json') as f:
table2 = Table.from_json(f)
self.assertColumnNames(table2, self.column_names)
self.assertColumnTypes(table2, [Number, Text, Boolean, Date, DateTime, TimeDelta])
self.assertRows(table2, table1.rows)
def test_from_json_with_key(self):
table1 = Table(self.rows, self.column_names, self.column_types)
table2 = Table.from_json('examples/test_key.json', key='data')
self.assertColumnNames(table2, self.column_names)
self.assertColumnTypes(table2, [Number, Text, Boolean, Date, DateTime, TimeDelta])
self.assertRows(table2, table1.rows)
def test_from_json_mixed_keys(self):
table = Table.from_json('examples/test_mixed.json')
self.assertColumnNames(table, ['one', 'two', 'three', 'four', 'five'])
self.assertColumnTypes(table, [Number, Number, Text, Text, Number])
self.assertRows(table, [
[1, 4, 'a', None, None],
[2, 3, 'b', 'd', None],
[None, 2, u'👍', None, 5]
])
def test_from_json_nested(self):
table = Table.from_json('examples/test_nested.json')
self.assertColumnNames(table, ['one', 'two/two_a', 'two/two_b', 'three/0', 'three/1', 'three/2'])
self.assertColumnTypes(table, [Number, Text, Text, Text, Number, Text])
self.assertRows(table, [
[1, 'a', 'b', 'a', 2, 'c'],
[2, 'c', 'd', 'd', 2, 'f']
])
def test_from_json_newline_delimited(self):
table1 = Table(self.rows, self.column_names, self.column_types)
table2 = Table.from_json('examples/test_newline.json', newline=True)
self.assertColumnNames(table2, self.column_names)
self.assertColumnTypes(table2, [Number, Text, Boolean, Date, DateTime, TimeDelta])
self.assertRows(table2, table1.rows)
def test_from_json_no_type_tester(self):
tester = TypeTester(limit=0)
table = Table.from_json('examples/test.json', column_types=tester)
self.assertColumnTypes(table, [Text, Text, Text, Text, Text, Text])
def test_from_json_error_newline_key(self):
with self.assertRaises(ValueError):
table = Table.from_json('examples/test.json', newline=True, key='test') # noqa
def test_to_json(self):
table = Table(self.rows, self.column_names, self.column_types)
output = StringIO()
table.to_json(output, indent=4)
js1 = json.loads(output.getvalue())
with open('examples/test.json') as f:
js2 = json.load(f)
self.assertEqual(js1, js2)
def test_to_json_key(self):
table = Table(self.rows, self.column_names, self.column_types)
output = StringIO()
table.to_json(output, key='text', indent=4)
js1 = json.loads(output.getvalue())
with open('examples/test_keyed.json') as f:
js2 = json.load(f)
self.assertEqual(js1, js2)
def test_to_json_key_func(self):
table = Table(self.rows, self.column_names, self.column_types)
output = StringIO()
table.to_json(output, key=lambda r: r['text'], indent=4)
js1 = json.loads(output.getvalue())
with open('examples/test_keyed.json') as f:
js2 = json.load(f)
self.assertEqual(js1, js2)
def test_to_json_newline_delimited(self):
table = Table(self.rows, self.column_names, self.column_types)
output = StringIO()
table.to_json(output, newline=True)
js1 = json.loads(output.getvalue().split('\n')[0])
with open('examples/test_newline.json') as f:
js2 = json.loads(list(f)[0])
self.assertEqual(js1, js2)
def test_to_json_error_newline_indent(self):
table = Table(self.rows, self.column_names, self.column_types)
output = StringIO()
with self.assertRaises(ValueError):
table.to_json(output, newline=True, indent=4)
def test_to_json_error_newline_key(self):
table = Table(self.rows, self.column_names, self.column_types)
output = StringIO()
with self.assertRaises(ValueError):
table.to_json(output, key='three', newline=True)
def test_to_json_file_output(self):
table = Table(self.rows, self.column_names, self.column_types)
table.to_json('.test.json')
with open('.test.json') as f1:
js1 = json.load(f1)
with open('examples/test.json') as f2:
js2 = json.load(f2)
self.assertEqual(js1, js2)
os.remove('.test.json')
def test_to_json_make_dir(self):
table = Table(self.rows, self.column_names, self.column_types)
table.to_json('newdir/test.json')
with open('newdir/test.json') as f1:
js1 = json.load(f1)
with open('examples/test.json') as f2:
js2 = json.load(f2)
self.assertEqual(js1, js2)
os.remove('newdir/test.json')
os.rmdir('newdir/')
def test_print_json(self):
table = Table(self.rows, self.column_names, self.column_types)
old = sys.stdout
sys.stdout = StringIO()
try:
table.print_json()
js1 = json.loads(sys.stdout.getvalue())
with open('examples/test.json') as f:
js2 = json.load(f)
self.assertEqual(js1, js2)
finally:
sys.stdout = old
class TestBins(AgateTestCase):
def setUp(self):
self.number_type = Number()
self.column_names = ['number']
self.column_types = [self.number_type]
def test_bins(self):
rows = []
for i in range(0, 100):
            rows.append([i])
new_table = Table(rows, self.column_names, self.column_types).bins('number')
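        # Bins are half-open intervals except the last, which includes its upper bound.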
self.assertColumnNames(new_table, ['number', 'Count'])
self.assertColumnTypes(new_table, [Text, Number])
self.assertSequenceEqual(new_table.rows[0], ['[0 - 10)', 10])
self.assertSequenceEqual(new_table.rows[3], ['[30 - 40)', 10])
self.assertSequenceEqual(new_table.rows[9], ['[90 - 100]', 10])
self.assertRowNames(new_table, [
'[0 - 10)',
'[10 - 20)',
'[20 - 30)',
'[30 - 40)',
'[40 - 50)',
'[50 - 60)',
'[60 - 70)',
'[70 - 80)',
'[80 - 90)',
'[90 - 100]',
])
def test_bins_negative(self):
rows = []
for i in range(0, -100, -1):
rows.append([i])
new_table = Table(rows, self.column_names, self.column_types).bins('number', 10, -100, 0)
self.assertColumnNames(new_table, ['number', 'Count'])
self.assertColumnTypes(new_table, [Text, Number])
self.assertSequenceEqual(new_table.rows[0], ['[-100 - -90)', 9])
self.assertSequenceEqual(new_table.rows[3], ['[-70 - -60)', 10])
self.assertSequenceEqual(new_table.rows[9], ['[-10 - 0]', 11])
def test_bins_mixed_signs(self):
rows = []
for i in range(0, -100, -1):
rows.append([i + 50])
new_table = Table(rows, self.column_names, self.column_types).bins('number')
self.assertColumnNames(new_table, ['number', 'Count'])
self.assertColumnTypes(new_table, [Text, Number])
self.assertSequenceEqual(new_table.rows[0], ['[-50 - -40)', 9])
self.assertSequenceEqual(new_table.rows[3], ['[-20 - -10)', 10])
self.assertSequenceEqual(new_table.rows[9], ['[40 - 50]', 11])
def test_bins_small_numbers(self):
rows = []
for i in range(0, 100):
rows.append([Decimal(i) / Decimal('10')])
new_table = Table(rows, self.column_names, self.column_types).bins('number')
self.assertSequenceEqual(new_table.rows[0], ['[0 - 1)', 10])
self.assertSequenceEqual(new_table.rows[3], ['[3 - 4)', 10])
self.assertSequenceEqual(new_table.rows[9], ['[9 - 10]', 10])
def test_bins_decimals(self):
rows = []
for i in range(0, 100):
rows.append([Decimal(i) / Decimal('100')])
new_table = Table(rows, self.column_names, self.column_types).bins('number')
self.assertColumnNames(new_table, ['number', 'Count'])
self.assertColumnTypes(new_table, [Text, Number])
self.assertSequenceEqual(new_table.rows[0], ['[0.0 - 0.1)', 10])
self.assertSequenceEqual(new_table.rows[3], ['[0.3 - 0.4)', 10])
self.assertSequenceEqual(new_table.rows[9], ['[0.9 - 1.0]', 10])
def test_bins_nulls(self):
rows = []
for i in range(0, 100):
rows.append([Decimal(i) / Decimal('100')])
rows.append([None])
new_table = Table(rows, self.column_names, self.column_types).bins('number')
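        # Null values are tallied in their own bin, appended after the numeric bins.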
self.assertColumnNames(new_table, ['number', 'Count'])
self.assertColumnTypes(new_table, [Text, Number])
self.assertSequenceEqual(new_table.rows[0], ['[0.0 - 0.1)', 10])
self.assertSequenceEqual(new_table.rows[3], ['[0.3 - 0.4)', 10])
self.assertSequenceEqual(new_table.rows[9], ['[0.9 - 1.0]', 10])
self.assertSequenceEqual(new_table.rows[10], [None, 1])
class TestPrettyPrint(AgateTestCase):
def setUp(self):
self.rows = (
('1.7', 2000, 'a'),
('11.18', None, None),
('0', 1, 'c')
)
self.number_type = Number()
self.international_number_type = Number(locale='de_DE')
self.text_type = Text()
self.column_names = ['one', 'two', 'three']
self.column_types = [
self.number_type,
self.international_number_type,
self.text_type
]
def test_print_table(self):
table = Table(self.rows, self.column_names, self.column_types)
output = six.StringIO()
table.print_table(output=output)
lines = output.getvalue().split('\n')
self.assertEqual(len(lines), 8)
self.assertEqual(len(lines[0]), 27)
def test_print_table_max_rows(self):
table = Table(self.rows, self.column_names, self.column_types)
output = six.StringIO()
table.print_table(max_rows=2, output=output)
lines = output.getvalue().split('\n')
self.assertEqual(len(lines), 8)
self.assertEqual(len(lines[0]), 27)
def test_print_table_max_columns(self):
table = Table(self.rows, self.column_names, self.column_types)
output = six.StringIO()
table.print_table(max_columns=2, output=output)
lines = output.getvalue().split('\n')
self.assertEqual(len(lines), 8)
self.assertEqual(len(lines[0]), 25)
def test_print_table_max_column_width(self):
rows = (
('1.7', 2, 'this is long'),
('11.18', None, None),
('0', 1, 'nope')
)
table = Table(rows, self.column_names, self.column_types)
output = six.StringIO()
table.print_table(output=output, max_column_width=7)
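        # Values wider than max_column_width are truncated and marked with an ellipsis.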
lines = output.getvalue().split('\n')
self.assertIn(' this... ', lines[3])
self.assertIn(' nope ', lines[5])
def test_print_table_locale(self):
"""
Verify that the locale of the international number is correctly
controlling the format of how it is printed.
"""
table = Table(self.rows, self.column_names, self.column_types)
output = six.StringIO()
table.print_table(max_columns=2, output=output, locale='de_DE')
        # If it's working, the English '2,000' should appear as '2.000'
self.assertTrue("2.000" in output.getvalue())
def test_print_html(self):
table = Table(self.rows, self.column_names, self.column_types)
output = six.StringIO()
table.print_html(output=output)
html = output.getvalue()
self.assertEqual(html.count('<tr>'), 4)
self.assertEqual(html.count('<th>'), 3)
self.assertEqual(html.count('<td>'), 9)
def test_print_html_max_rows(self):
table = Table(self.rows, self.column_names, self.column_types)
output = six.StringIO()
table.print_html(max_rows=2, output=output)
html = output.getvalue()
self.assertEqual(html.count('<tr>'), 3)
self.assertEqual(html.count('<th>'), 3)
self.assertEqual(html.count('<td>'), 6)
def test_print_html_max_columns(self):
table = Table(self.rows, self.column_names, self.column_types)
output = six.StringIO()
table.print_html(max_columns=2, output=output)
html = output.getvalue()
self.assertEqual(html.count('<tr>'), 4)
self.assertEqual(html.count('<th>'), 2)
self.assertEqual(html.count('<td>'), 6)
def test_print_bars(self):
table = Table(self.rows, self.column_names, self.column_types)
output = six.StringIO()
table.print_bars('three', 'one', output=output)
lines = output.getvalue().split('\n') # noqa
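        # Smoke test only: verify the bar chart renders without raising; the output is not inspected.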
def test_print_bars_width(self):
table = Table(self.rows, self.column_names, self.column_types)
output = six.StringIO()
table.print_bars('three', 'one', width=40, output=output)
lines = output.getvalue().split('\n')
self.assertEqual(max([len(l) for l in lines]), 40)
def test_print_bars_width_overlap(self):
table = Table(self.rows, self.column_names, self.column_types)
output = six.StringIO()
table.print_bars('three', 'one', width=20, output=output)
lines = output.getvalue().split('\n')
self.assertEqual(max([len(l) for l in lines]), 20)
def test_print_bars_domain(self):
table = Table(self.rows, self.column_names, self.column_types)
table.print_bars('three', 'one', domain=(0, 300))
def test_print_bars_domain_invalid(self):
table = Table(self.rows, self.column_names, self.column_types)
with self.assertRaises(ValueError):
table.print_bars('three', 'one', domain=(5, 0))
def test_print_bars_negative(self):
rows = (
('-1.7', 2, 'a'),
('-11.18', None, None),
('0', 1, 'c')
)
table = Table(rows, self.column_names, self.column_types)
table.print_bars('three', 'one')
def test_print_bars_mixed_signs(self):
rows = (
('-1.7', 2, 'a'),
('11.18', None, None),
('0', 1, 'c')
)
table = Table(rows, self.column_names, self.column_types)
table.print_bars('three', 'one')
def test_print_bars_invalid_values(self):
table = Table(self.rows, self.column_names, self.column_types)
with self.assertRaises(DataTypeError):
table.print_bars('one', 'three')
def test_print_structure(self):
table = Table(self.rows, self.column_names, self.column_types)
output = six.StringIO()
table.print_structure(output=output)
lines = output.getvalue().strip().split('\n')
self.assertEqual(len(lines), 7)
class TestGrouping(AgateTestCase):
def setUp(self):
self.rows = (
('a', 2, 3, 4),
(None, 3, 5, None),
('a', 2, 4, None),
('b', 3, 4, None)
)
self.number_type = Number()
self.text_type = Text()
self.column_names = [
'one', 'two', 'three', 'four'
]
self.column_types = [
self.text_type, self.number_type, self.number_type, self.number_type
]
def test_group_by(self):
table = Table(self.rows, self.column_names, self.column_types)
tableset = table.group_by('one')
self.assertIsInstance(tableset, TableSet)
self.assertEqual(len(tableset), 3)
self.assertEqual(tableset.key_name, 'one')
self.assertIsInstance(tableset.key_type, Text)
self.assertIn('a', tableset.keys())
self.assertIn('b', tableset.keys())
self.assertIn(None, tableset.keys())
self.assertSequenceEqual(tableset['a'].columns['one'], ('a', 'a'))
self.assertSequenceEqual(tableset['b'].columns['one'], ('b',))
def test_group_by_number(self):
table = Table(self.rows, self.column_names, self.column_types)
tableset = table.group_by('two')
self.assertIsInstance(tableset, TableSet)
self.assertEqual(len(tableset), 2)
self.assertEqual(tableset.key_name, 'two')
self.assertIsInstance(tableset.key_type, Number)
self.assertIn(Decimal('2'), tableset.keys())
self.assertIn(Decimal('3'), tableset.keys())
self.assertSequenceEqual(tableset[Decimal('2')].columns['one'], ('a', 'a'))
self.assertSequenceEqual(tableset[Decimal('3')].columns['one'], (None, 'b'))
def test_group_by_key_name(self):
table = Table(self.rows, self.column_names, self.column_types)
tableset = table.group_by('one', key_name='test')
self.assertIsInstance(tableset, TableSet)
self.assertEqual(tableset.key_name, 'test')
self.assertIsInstance(tableset.key_type, Text)
self.assertIn('a', tableset.keys())
self.assertIn('b', tableset.keys())
self.assertIn(None, tableset.keys())
self.assertSequenceEqual(tableset['a'].columns['one'], ('a', 'a'))
self.assertSequenceEqual(tableset['b'].columns['one'], ('b',))
def test_group_by_key_type(self):
table = Table(self.rows, self.column_names, self.column_types)
tableset = table.group_by('two', key_type=Text())
self.assertIsInstance(tableset, TableSet)
self.assertEqual(tableset.key_name, 'two')
self.assertIsInstance(tableset.key_type, Text)
self.assertIn('2', tableset.keys())
self.assertIn('3', tableset.keys())
self.assertSequenceEqual(tableset['2'].columns['one'], ('a', 'a'))
self.assertSequenceEqual(tableset['3'].columns['one'], (None, 'b'))
def test_group_by_function(self):
table = Table(self.rows, self.column_names, self.column_types)
tableset = table.group_by(lambda r: r['three'] < 5, key_type=Boolean())
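        # Grouping by a function falls back to the default key name 'group'.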
self.assertIsInstance(tableset, TableSet)
self.assertEqual(len(tableset), 2)
self.assertEqual(tableset.key_name, 'group')
self.assertIn(True, tableset.keys())
self.assertIn(False, tableset.keys())
self.assertSequenceEqual(tableset[True].columns['one'], ('a', 'a', 'b'))
self.assertSequenceEqual(tableset[False].columns['one'], (None,))
def test_group_by_bad_column(self):
table = Table(self.rows, self.column_names, self.column_types)
with self.assertRaises(KeyError):
table.group_by('bad')
class TestAggregate(AgateTestCase):
def setUp(self):
self.rows = (
(1, 4, 'a'),
(2, 3, 'b'),
(None, 2, u'👍')
)
self.number_type = Number()
self.text_type = Text()
self.column_names = ['one', 'two', 'three']
self.column_types = [self.number_type, self.number_type, self.text_type]
self.table = Table(self.rows, self.column_names, self.column_types)
def test_count(self):
self.assertEqual(self.table.aggregate(Count()), 3)
def test_sum(self):
self.assertEqual(self.table.aggregate(Sum('two')), 9)
def test_multiple(self):
self.assertSequenceEqual(
self.table.aggregate([
Count(),
Sum('two')
]),
[3, 9]
)
class TestCompute(AgateTestCase):
def setUp(self):
self.rows = (
('a', 2, 3, 4),
(None, 3, 5, None),
('a', 2, 4, None),
('b', 3, 6, None)
)
self.number_type = Number()
self.text_type = Text()
self.column_names = [
'one', 'two', 'three', 'four'
]
self.column_types = [
self.text_type, self.number_type, self.number_type, self.number_type
]
self.table = Table(self.rows, self.column_names, self.column_types)
def test_compute(self):
new_table = self.table.compute([
('test', Formula(self.number_type, lambda r: r['two'] + r['three']))
])
self.assertIsNot(new_table, self.table)
self.assertColumnNames(new_table, ['one', 'two', 'three', 'four', 'test'])
self.assertColumnTypes(new_table, [Text, Number, Number, Number, Number])
self.assertSequenceEqual(new_table.rows[0], ('a', 2, 3, 4, 5))
self.assertSequenceEqual(new_table.columns['test'], (5, 8, 6, 9))
def test_compute_multiple(self):
new_table = self.table.compute([
('number', Formula(self.number_type, lambda r: r['two'] + r['three'])),
('text', Formula(self.text_type, lambda r: (r['one'] or '-') + six.text_type(r['three'])))
])
self.assertIsNot(new_table, self.table)
self.assertColumnNames(new_table, ['one', 'two', 'three', 'four', 'number', 'text'])
self.assertColumnTypes(new_table, [Text, Number, Number, Number, Number, Text])
self.assertSequenceEqual(new_table.rows[0], ('a', 2, 3, 4, 5, 'a3'))
self.assertSequenceEqual(new_table.columns['number'], (5, 8, 6, 9))
self.assertSequenceEqual(new_table.columns['text'], ('a3', '-5', 'a4', 'b6'))
def test_compute_with_row_names(self):
table = Table(self.rows, self.column_names, self.column_types, row_names='three')
new_table = table.compute([
('number', Formula(self.number_type, lambda r: r['two'] + r['three'])),
('text', Formula(self.text_type, lambda r: (r['one'] or '-') + six.text_type(r['three'])))
])
self.assertRowNames(new_table, [3, 5, 4, 6])
class TestJoin(AgateTestCase):
def setUp(self):
self.left_rows = (
(1, 4, 'a'),
(2, 3, 'b'),
(None, 2, 'c')
)
self.right_rows = (
(1, 4, 'a'),
(2, 3, 'b'),
(None, 2, 'c')
)
self.number_type = Number()
self.text_type = Text()
self.left_column_names = ['one', 'two', 'three']
self.right_column_names = ['four', 'five', 'six']
self.column_types = [self.number_type, self.number_type, self.text_type]
self.left = Table(self.left_rows, self.left_column_names, self.column_types)
self.right = Table(self.right_rows, self.right_column_names, self.column_types)
def test_join(self):
new_table = self.left.join(self.right, 'one', 'four')
self.assertIsNot(new_table, self.left)
self.assertIsNot(new_table, self.right)
self.assertColumnNames(new_table, ['one', 'two', 'three', 'five', 'six'])
self.assertColumnTypes(new_table, [Number, Number, Text, Number, Text])
self.assertRows(new_table, [
(1, 4, 'a', 4, 'a'),
(2, 3, 'b', 3, 'b'),
(None, 2, 'c', 2, 'c')
])
def test_join_match_multiple(self):
left_rows = (
(1, 4, 'a'),
(2, 3, 'b')
)
right_rows = (
(1, 1, 'a'),
(1, 2, 'a'),
(2, 2, 'b')
)
left = Table(left_rows, self.left_column_names, self.column_types)
right = Table(right_rows, self.right_column_names, self.column_types)
new_table = left.join(right, 'one', 'five')
self.assertIsNot(new_table, left)
self.assertIsNot(new_table, right)
self.assertColumnNames(new_table, ['one', 'two', 'three', 'four', 'six'])
self.assertColumnTypes(new_table, [Number, Number, Text, Number, Text])
self.assertRows(new_table, [
(1, 4, 'a', 1, 'a'),
(2, 3, 'b', 1, 'a'),
(2, 3, 'b', 2, 'b')
])
def test_join2(self):
new_table = self.left.join(self.right, 'one', 'five')
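        # Only one=2 has a match in 'five'; unmatched rows get None for the right-hand columns.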
self.assertIsNot(new_table, self.left)
self.assertIsNot(new_table, self.right)
self.assertColumnNames(new_table, ['one', 'two', 'three', 'four', 'six'])
self.assertColumnTypes(new_table, [Number, Number, Text, Number, Text])
self.assertRows(new_table, [
(1, 4, 'a', None, None),
(2, 3, 'b', None, 'c'),
(None, 2, 'c', None, None)
])
def test_join_same_column_name(self):
right_column_names = ['four', 'one', 'six']
right = Table(self.right_rows, right_column_names, self.column_types)
new_table = self.left.join(right, 'one')
self.assertIsNot(new_table, self.left)
self.assertIsNot(new_table, right)
self.assertColumnNames(new_table, ['one', 'two', 'three', 'four', 'six'])
self.assertColumnTypes(new_table, [Number, Number, Text, Number, Text])
self.assertRows(new_table, [
(1, 4, 'a', None, None),
(2, 3, 'b', None, 'c'),
(None, 2, 'c', None, None)
])
def test_join_multiple_columns(self):
new_table = self.left.join(
self.right,
['two', 'three'],
['five', 'six']
)
self.assertIsNot(new_table, self.left)
self.assertIsNot(new_table, self.right)
self.assertColumnNames(new_table, ['one', 'two', 'three', 'four'])
self.assertColumnTypes(new_table, [Number, Number, Text, Number])
self.assertRows(new_table, [
(1, 4, 'a', 1),
(2, 3, 'b', 2),
(None, 2, 'c', None)
])
def test_join_func(self):
new_table = self.left.join(
self.right,
lambda left: '%i%s' % (left['two'], left['three']),
lambda right: '%i%s' % (right['five'], right['six'])
)
self.assertIsNot(new_table, self.left)
self.assertIsNot(new_table, self.right)
self.assertColumnNames(new_table, ['one', 'two', 'three', 'four', 'five', 'six'])
self.assertColumnTypes(new_table, [Number, Number, Text, Number, Number, Text])
self.assertRows(new_table, [
(1, 4, 'a', 1, 4, 'a'),
(2, 3, 'b', 2, 3, 'b'),
(None, 2, 'c', None, 2, 'c')
])
def test_join_column_does_not_exist(self):
with self.assertRaises(KeyError):
self.left.join(self.right, 'one', 'seven')
def test_inner_join(self):
new_table = self.left.join(self.right, 'one', 'four', inner=True)
self.assertIsNot(new_table, self.left)
self.assertIsNot(new_table, self.right)
self.assertColumnNames(new_table, ['one', 'two', 'three', 'five', 'six'])
self.assertColumnTypes(new_table, [Number, Number, Text, Number, Text])
self.assertRows(new_table, [
(1, 4, 'a', 4, 'a'),
(2, 3, 'b', 3, 'b'),
(None, 2, 'c', 2, 'c')
])
def test_inner_join2(self):
new_table = self.left.join(self.right, 'one', 'five', inner=True)
self.assertIsNot(new_table, self.left)
self.assertIsNot(new_table, self.right)
self.assertColumnNames(new_table, ['one', 'two', 'three', 'four', 'six'])
self.assertColumnTypes(new_table, [Number, Number, Text, Number, Text])
self.assertRows(new_table, [
(2, 3, 'b', None, 'c')
])
def test_inner_join_same_column_name(self):
right_column_names = ['four', 'one', 'six']
right = Table(self.right_rows, right_column_names, self.column_types)
new_table = self.left.join(right, 'one', inner=True)
self.assertIsNot(new_table, self.left)
self.assertIsNot(new_table, right)
self.assertColumnNames(new_table, ['one', 'two', 'three', 'four', 'six'])
self.assertColumnTypes(new_table, [Number, Number, Text, Number, Text])
self.assertRows(new_table, [
(2, 3, 'b', None, 'c')
])
def test_inner_join_func(self):
new_table = self.left.join(
self.right,
lambda left: '%i%s' % (left['two'], left['three']),
lambda right: '%i%s' % (right['five'], right['six']),
inner=True
)
self.assertIsNot(new_table, self.left)
self.assertIsNot(new_table, self.right)
self.assertColumnNames(new_table, ['one', 'two', 'three', 'four', 'five', 'six'])
self.assertColumnTypes(new_table, [Number, Number, Text, Number, Number, Text])
self.assertRows(new_table, [
(1, 4, 'a', 1, 4, 'a')
])
def test_join_with_row_names(self):
left = Table(self.left_rows, self.left_column_names, self.column_types, row_names='three')
new_table = left.join(self.right, 'one', 'four')
self.assertRowNames(new_table, ('a', 'b', 'c'))
def test_join_require_match(self):
with self.assertRaises(ValueError):
new_table = self.left.join(self.right, 'one', 'five', require_match=True) # noqa
with self.assertRaises(ValueError):
new_table = self.left.join(self.right, 'one', 'five', require_match=True) # noqa
new_table = self.left.join(self.right, 'one', 'four', require_match=True) # noqa
def test_join_columns_kwarg(self):
new_table = self.left.join(self.right, 'one', 'four', columns=['six'])
self.assertIsNot(new_table, self.left)
self.assertIsNot(new_table, self.right)
self.assertColumnNames(new_table, ['one', 'two', 'three', 'six'])
self.assertColumnTypes(new_table, [Number, Number, Text, Text])
self.assertRows(new_table, [
(1, 4, 'a', 'a'),
(2, 3, 'b', 'b'),
(None, 2, 'c', 'c')
])
def test_join_columns_kwarg_right_key(self):
new_table = self.left.join(self.right, 'one', 'four', columns=['four', 'six'])
self.assertIsNot(new_table, self.left)
self.assertIsNot(new_table, self.right)
self.assertColumnNames(new_table, ['one', 'two', 'three', 'four', 'six'])
self.assertColumnTypes(new_table, [Number, Number, Text, Number, Text])
self.assertRows(new_table, [
(1, 4, 'a', 1, 'a'),
(2, 3, 'b', 2, 'b'),
(None, 2, 'c', None, 'c')
])
class TestHomogenize(AgateTestCase):
def setUp(self):
self.rows = (
(0, 4, 'a'),
(1, 3, 'b'),
(None, 2, 'c')
)
self.number_type = Number()
self.text_type = Text()
self.column_names = ['one', 'two', 'three']
self.column_types = [self.number_type, self.number_type, self.text_type]
def test_homogenize_column_name(self):
table = Table(self.rows, self.column_names, self.column_types)
compare_values = range(3)
homogenized = table.homogenize('one', compare_values, [3, 'd'])
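        # The value 2 is missing from 'one', so a new row is appended using the default [3, 'd'].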
rows = (
(0, 4, 'a'),
(1, 3, 'b'),
(None, 2, 'c'),
(2, 3, 'd')
)
self.assertColumnNames(homogenized, self.column_names)
self.assertColumnTypes(homogenized, [Number, Number, Text])
self.assertRows(homogenized, rows)
def test_homogenize_default_row(self):
table = Table(self.rows, self.column_names, self.column_types)
compare_values = [0, 1, 2]
homogenized = table.homogenize(['one'], compare_values)
rows = (
(0, 4, 'a'),
(1, 3, 'b'),
(None, 2, 'c'),
(2, None, None)
)
self.assertColumnNames(homogenized, self.column_names)
self.assertColumnTypes(homogenized, [Number, Number, Text])
self.assertRows(homogenized, rows)
def test_homogenize_multiple_columns(self):
table = Table(self.rows, self.column_names, self.column_types)
def column_two(count):
return [chr(ord('a') + c) for c in range(count)]
homogenized = table.homogenize(['one', 'three'], zip(range(3), column_two(3)), [5])
rows = (
(0, 4, 'a'),
(1, 3, 'b'),
(None, 2, 'c'),
(2, 5, 'c')
)
self.assertColumnNames(homogenized, self.column_names)
self.assertColumnTypes(homogenized, [Number, Number, Text])
self.assertRows(homogenized, rows)
def test_homogenize_lambda_default(self):
table = Table(self.rows, self.column_names, self.column_types)
def default_row(d):
return [d[0], d[0] * 2, d[1]]
def column_two(count):
return [chr(ord('a') + c) for c in range(count)]
homogenized = table.homogenize(['one', 'three'], zip(range(3), column_two(3)), default_row)
rows = (
(0, 4, 'a'),
(1, 3, 'b'),
(None, 2, 'c'),
(2, 4, 'c')
)
self.assertColumnNames(homogenized, self.column_names)
self.assertColumnTypes(homogenized, [Number, Number, Text])
self.assertRows(homogenized, rows)
class TestMerge(AgateTestCase):
def setUp(self):
self.rows = (
(1, 4, 'a'),
(2, 3, 'b'),
(None, 2, 'c')
)
self.number_type = Number()
self.text_type = Text()
self.column_names = ['one', 'two', 'three']
self.column_types = [self.number_type, self.number_type, self.text_type]
def test_merge(self):
table_a = Table(self.rows, self.column_names, self.column_types)
table_b = Table(self.rows, self.column_names)
table_c = Table.merge([table_a, table_b])
self.assertIsNot(table_c, table_a)
self.assertIsNot(table_c, table_b)
self.assertColumnNames(table_c, self.column_names)
self.assertColumnTypes(table_c, [Number, Number, Text])
self.assertRows(table_c, self.rows + self.rows)
def test_merge_different_names(self):
table_a = Table(self.rows, self.column_names, self.column_types)
column_names = ['a', 'b', 'c']
table_b = Table(self.rows, column_names, self.column_types)
table_c = Table.merge([table_a, table_b])
self.assertIsNot(table_c, table_a)
self.assertIsNot(table_c, table_b)
self.assertColumnNames(table_c, self.column_names + column_names)
self.assertColumnTypes(table_c, [Number, Number, Text, Number, Number, Text])
self.assertSequenceEqual(table_c.rows[0], [1, 4, 'a', None, None, None])
self.assertSequenceEqual(table_c.rows[3], [None, None, None, 1, 4, 'a'])
for row in table_c.rows:
self.assertSequenceEqual(row.keys(), self.column_names + column_names)
def test_merge_mixed_names(self):
table_a = Table(self.rows, self.column_names, self.column_types)
column_names = ['two', 'one', 'four']
table_b = Table(self.rows, column_names, self.column_types)
table_c = Table.merge([table_a, table_b])
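        # Columns with matching names are aligned; the new 'four' column is appended and gaps become None.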
self.assertIsNot(table_c, table_a)
self.assertIsNot(table_c, table_b)
self.assertColumnNames(table_c, ['one', 'two', 'three', 'four'])
self.assertColumnTypes(table_c, [Number, Number, Text, Text])
self.assertSequenceEqual(table_c.rows[0], [1, 4, 'a', None])
self.assertSequenceEqual(table_c.rows[3], [4, 1, None, 'a'])
for row in table_c.rows:
self.assertSequenceEqual(row.keys(), ['one', 'two', 'three', 'four'])
def test_merge_different_types(self):
table_a = Table(self.rows, self.column_names, self.column_types)
column_types = [self.number_type, self.text_type, self.text_type]
table_b = Table(self.rows, self.column_names, column_types)
with self.assertRaises(DataTypeError):
table_c = Table.merge([table_a, table_b]) # noqa
def test_merge_with_row_names(self):
table_a = Table(self.rows, self.column_names, self.column_types, row_names='three')
b_rows = (
(1, 4, 'd'),
(2, 3, 'e'),
(None, 2, 'f')
)
table_b = Table(b_rows, self.column_names, self.column_types, row_names='three')
table_c = Table.merge([table_a, table_b], row_names='three')
self.assertRowNames(table_c, ['a', 'b', 'c', 'd', 'e', 'f'])
def test_merge_with_column_names(self):
table_a = Table(self.rows, self.column_names, self.column_types, row_names='three')
b_rows = (
(1, 4, 'd'),
(2, 3, 'e'),
(None, 2, 'f')
)
c_rows = (
(1, 4, 'a'),
(2, 3, 'b'),
(None, 2, 'c'),
(None, 4, 'd'),
(None, 3, 'e'),
(None, 2, 'f')
)
table_b = Table(b_rows, ['a', 'two', 'three'], self.column_types, row_names='three')
table_c = Table.merge([table_a, table_b], column_names=table_a.column_names)
self.assertRows(table_c, c_rows)
class TestPivot(AgateTestCase):
def setUp(self):
self.rows = (
('joe', 'white', 'male', 20, 'blue'),
('jane', 'white', 'female', 20, 'blue'),
('josh', 'black', 'male', 20, 'blue'),
('jim', 'latino', 'male', 25, 'blue'),
('julia', 'white', 'female', 25, 'green'),
('joan', 'asian', 'female', 25, 'green')
)
self.number_type = Number()
self.text_type = Text()
self.column_names = ['name', 'race', 'gender', 'age', 'color']
self.column_types = [self.text_type, self.text_type, self.text_type, self.number_type, self.text_type]
def test_pivot(self):
table = Table(self.rows, self.column_names, self.column_types)
pivot_table = table.pivot('race', 'gender')
pivot_rows = (
('white', 1, 2),
('black', 1, 0),
('latino', 1, 0),
('asian', 0, 1)
)
self.assertColumnNames(pivot_table, ['race', 'male', 'female'])
self.assertRowNames(pivot_table, ['white', 'black', 'latino', 'asian'])
self.assertColumnTypes(pivot_table, [Text, Number, Number])
self.assertRows(pivot_table, pivot_rows)
def test_pivot_by_lambda(self):
table = Table(self.rows, self.column_names, self.column_types)
pivot_table = table.pivot(lambda r: r['gender'])
pivot_rows = (
('male', 3),
('female', 3)
)
self.assertColumnNames(pivot_table, ['group', 'Count'])
self.assertRowNames(pivot_table, ['male', 'female'])
self.assertColumnTypes(pivot_table, [Text, Number])
self.assertRows(pivot_table, pivot_rows)
def test_pivot_by_lambda_group_name(self):
table = Table(self.rows, self.column_names, self.column_types)
pivot_table = table.pivot(lambda r: r['gender'], key_name='gender')
pivot_rows = (
('male', 3),
('female', 3)
)
self.assertColumnNames(pivot_table, ['gender', 'Count'])
self.assertRowNames(pivot_table, ['male', 'female'])
self.assertColumnTypes(pivot_table, [Text, Number])
self.assertRows(pivot_table, pivot_rows)
def test_pivot_by_lambda_group_name_sequence_invalid(self):
table = Table(self.rows, self.column_names, self.column_types)
with self.assertRaises(ValueError):
pivot_table = table.pivot(['race', 'gender'], key_name='foo') # noqa
def test_pivot_no_key(self):
table = Table(self.rows, self.column_names, self.column_types)
pivot_table = table.pivot(pivot='gender')
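        # With no key column the result collapses to a single row of counts per gender.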
pivot_rows = (
(3, 3),
)
self.assertColumnNames(pivot_table, ['male', 'female'])
self.assertColumnTypes(pivot_table, [Number, Number])
self.assertRows(pivot_table, pivot_rows)
def test_pivot_no_pivot(self):
table = Table(self.rows, self.column_names, self.column_types)
pivot_table = table.pivot('race')
pivot_rows = (
('white', 3),
('black', 1),
('latino', 1),
('asian', 1)
)
self.assertColumnNames(pivot_table, ['race', 'Count'])
self.assertColumnTypes(pivot_table, [Text, Number])
self.assertRows(pivot_table, pivot_rows)
def test_pivot_sum(self):
table = Table(self.rows, self.column_names, self.column_types)
pivot_table = table.pivot('race', 'gender', Sum('age'))
pivot_rows = (
('white', 20, 45),
('black', 20, 0),
('latino', 25, 0),
('asian', 0, 25)
)
self.assertColumnNames(pivot_table, ['race', 'male', 'female'])
self.assertColumnTypes(pivot_table, [Text, Number, Number])
self.assertRows(pivot_table, pivot_rows)
def test_pivot_multiple_keys(self):
table = Table(self.rows, self.column_names, self.column_types)
pivot_table = table.pivot(['race', 'gender'], 'age')
pivot_rows = (
('white', 'male', 1, 0),
('white', 'female', 1, 1),
('black', 'male', 1, 0),
('latino', 'male', 0, 1),
('asian', 'female', 0, 1),
)
self.assertRows(pivot_table, pivot_rows)
self.assertColumnNames(pivot_table, ['race', 'gender', '20', '25'])
self.assertRowNames(pivot_table, [
('white', 'male'),
('white', 'female'),
('black', 'male'),
('latino', 'male'),
('asian', 'female'),
])
self.assertColumnTypes(pivot_table, [Text, Text, Number, Number])
def test_pivot_multiple_keys_no_pivot(self):
table = Table(self.rows, self.column_names, self.column_types)
pivot_table = table.pivot(['race', 'gender'])
pivot_rows = (
('white', 'male', 1),
('white', 'female', 2),
('black', 'male', 1),
('latino', 'male', 1),
('asian', 'female', 1),
)
self.assertRows(pivot_table, pivot_rows)
self.assertColumnNames(pivot_table, ['race', 'gender', 'Count'])
self.assertColumnTypes(pivot_table, [Text, Text, Number])
def test_pivot_default_value(self):
table = Table(self.rows, self.column_names, self.column_types)
pivot_table = table.pivot('race', 'gender', default_value=None)
pivot_rows = (
('white', 1, 2),
('black', 1, None),
('latino', 1, None),
('asian', None, 1)
)
self.assertColumnNames(pivot_table, ['race', 'male', 'female'])
self.assertColumnTypes(pivot_table, [Text, Number, Number])
self.assertRows(pivot_table, pivot_rows)
def test_pivot_compute(self):
table = Table(self.rows, self.column_names, self.column_types)
pivot_table = table.pivot('gender', computation=Percent('Count'))
pivot_table.print_table(output=sys.stdout)
pivot_rows = (
('male', Decimal(50)),
('female', Decimal(50)),
)
self.assertColumnNames(pivot_table, ['gender', 'Percent'])
self.assertColumnTypes(pivot_table, [Text, Number])
self.assertRows(pivot_table, pivot_rows)
def test_pivot_compute_pivots(self):
table = Table(self.rows, self.column_names, self.column_types)
pivot_table = table.pivot('gender', 'color', computation=Percent('Count'))
pivot_table.print_table(output=sys.stdout)
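        # Of the six rows, 3 are male/blue, 1 is female/blue and 2 are female/green,
        # hence 50%, 1/6 and 1/3 expressed as percentages.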
pivot_rows = (
('male', Decimal(50), 0),
('female', Decimal(1) / Decimal(6) * Decimal(100), Decimal(1) / Decimal(3) * Decimal(100)),
)
self.assertColumnNames(pivot_table, ['gender', 'blue', 'green'])
self.assertColumnTypes(pivot_table, [Text, Number, Number])
self.assertRows(pivot_table, pivot_rows)
def test_pivot_compute_kwargs(self):
table = Table(self.rows, self.column_names, self.column_types)
pivot_table = table.pivot('gender', 'color', computation=Percent('Count', total=8))
pivot_table.print_table(output=sys.stdout)
pivot_rows = (
('male', Decimal(3) / Decimal(8) * Decimal(100), 0),
('female', Decimal(1) / Decimal(8) * Decimal(100), Decimal(2) / Decimal(8) * Decimal(100)),
)
self.assertColumnNames(pivot_table, ['gender', 'blue', 'green'])
self.assertColumnTypes(pivot_table, [Text, Number, Number])
self.assertRows(pivot_table, pivot_rows)
class TestNormalize(AgateTestCase):
def setUp(self):
self.rows = (
(1, 'c', 4, 'a'),
(2, 'e', 3, 'b'),
(None, 'g', 2, 'c')
)
self.number_type = Number()
self.text_type = Text()
self.column_names = ['one', 'two', 'three', 'four']
self.column_types = [self.number_type, self.text_type, self.number_type, self.text_type]
def test_normalize(self):
table = Table(self.rows, self.column_names, self.column_types)
normalized_table = table.normalize('one', 'three')
normal_rows = (
(1, 'three', 4),
(2, 'three', 3),
(None, 'three', 2)
)
self.assertRows(normalized_table, normal_rows)
self.assertColumnNames(normalized_table, ['one', 'property', 'value'])
self.assertColumnTypes(normalized_table, [Number, Text, Number])
def test_normalize_column_types(self):
table = Table(self.rows, self.column_names, self.column_types)
normalized_table = table.normalize('one', 'three', column_types=[Text(), Text()])
normal_rows = (
(1, 'three', '4'),
(2, 'three', '3'),
(None, 'three', '2')
)
self.assertRows(normalized_table, normal_rows)
self.assertColumnNames(normalized_table, ['one', 'property', 'value'])
self.assertColumnTypes(normalized_table, [Number, Text, Text])
def test_normalize_column_type_tester(self):
table = Table(self.rows, self.column_names, self.column_types)
normalized_table = table.normalize('one', 'three', column_types=TypeTester(force={'value': Text()}))
normal_rows = (
(1, 'three', '4'),
(2, 'three', '3'),
(None, 'three', '2')
)
self.assertRows(normalized_table, normal_rows)
self.assertColumnNames(normalized_table, ['one', 'property', 'value'])
self.assertColumnTypes(normalized_table, [Number, Text, Text])
def test_normalize_multiple_fields(self):
table = Table(self.rows, self.column_names, self.column_types)
normalized_table = table.normalize('one', ['three', 'four'])
normal_rows = (
(1, 'three', '4'),
(1, 'four', 'a'),
(2, 'three', '3'),
(2, 'four', 'b'),
(None, 'three', '2'),
(None, 'four', 'c')
)
self.assertRows(normalized_table, normal_rows)
self.assertColumnNames(normalized_table, ['one', 'property', 'value'])
self.assertColumnTypes(normalized_table, [Number, Text, Text])
def test_normalize_multiple_keys(self):
table = Table(self.rows, self.column_names, self.column_types)
normalized_table = table.normalize(['one', 'two'], ['three', 'four'])
normal_rows = (
(1, 'c', 'three', '4'),
(1, 'c', 'four', 'a'),
(2, 'e', 'three', '3'),
(2, 'e', 'four', 'b'),
(None, 'g', 'three', '2'),
(None, 'g', 'four', 'c')
)
self.assertRows(normalized_table, normal_rows)
self.assertColumnNames(normalized_table, ['one', 'two', 'property', 'value'])
self.assertColumnTypes(normalized_table, [Number, Text, Text, Text])
def test_normalize_change_order(self):
table = Table(self.rows, self.column_names, self.column_types)
normalized_table = table.normalize('three', ['one', 'four'])
normal_rows = (
(4, 'one', '1'),
(4, 'four', 'a'),
(3, 'one', '2'),
(3, 'four', 'b'),
(2, 'one', None),
(2, 'four', 'c')
)
self.assertRows(normalized_table, normal_rows)
self.assertColumnNames(normalized_table, ['three', 'property', 'value'])
self.assertColumnTypes(normalized_table, [Number, Text, Text])
class TestDenormalize(AgateTestCase):
def setUp(self):
self.rows = (
('Jane', 'Code', 'gender', 'female'),
('Jane', 'Code', 'age', '27'),
('Jim', 'Program', 'gender', 'male'),
('Jim', 'Bytes', 'age', '24')
)
self.text_type = Text()
self.column_names = ['first_name', 'last_name', 'property', 'value']
self.column_types = [self.text_type, self.text_type, self.text_type, self.text_type]
def test_denormalize(self):
table = Table(self.rows, self.column_names, self.column_types)
normalized_table = table.denormalize('first_name', 'property', 'value')
normal_rows = (
('Jane', 'female', 27),
('Jim', 'male', 24),
)
self.assertRows(normalized_table, normal_rows)
self.assertColumnNames(normalized_table, ['first_name', 'gender', 'age'])
self.assertColumnTypes(normalized_table, [Text, Text, Number])
self.assertRowNames(normalized_table, ['Jane', 'Jim'])
def test_denormalize_no_key(self):
table = Table(self.rows, self.column_names, self.column_types)
normalized_table = table.denormalize(None, 'property', 'value')
# NB: value has been overwritten
normal_rows = (
('male', 24),
)
self.assertRows(normalized_table, normal_rows)
self.assertColumnNames(normalized_table, ['gender', 'age'])
self.assertColumnTypes(normalized_table, [Text, Number])
def test_denormalize_multiple_keys(self):
table = Table(self.rows, self.column_names, self.column_types)
normalized_table = table.denormalize(['first_name', 'last_name'], 'property', 'value')
normal_rows = (
('Jane', 'Code', 'female', 27),
('Jim', 'Program', 'male', None),
('Jim', 'Bytes', None, 24),
)
self.assertRows(normalized_table, normal_rows)
self.assertColumnNames(normalized_table, ['first_name', 'last_name', 'gender', 'age'])
self.assertColumnTypes(normalized_table, [Text, Text, Text, Number])
self.assertRowNames(normalized_table, [('Jane', 'Code'), ('Jim', 'Program'), ('Jim', 'Bytes')])
def test_denormalize_default_value(self):
table = Table(self.rows, self.column_names, self.column_types)
normalized_table = table.denormalize(['first_name', 'last_name'], 'property', 'value', default_value='hello')
normal_rows = (
('Jane', 'Code', 'female', '27'),
('Jim', 'Program', 'male', 'hello'),
('Jim', 'Bytes', 'hello', '24'),
)
self.assertRows(normalized_table, normal_rows)
self.assertColumnNames(normalized_table, ['first_name', 'last_name', 'gender', 'age'])
self.assertColumnTypes(normalized_table, [Text, Text, Text, Text])
def test_denormalize_column_types(self):
table = Table(self.rows, self.column_names, self.column_types)
normalized_table = table.denormalize(None, 'property', 'value', column_types=[Text(), Number()])
# NB: value has been overwritten
normal_rows = (
('male', 24),
)
self.assertRows(normalized_table, normal_rows)
self.assertColumnNames(normalized_table, ['gender', 'age'])
self.assertColumnTypes(normalized_table, [Text, Number])
def test_denormalize_column_type_tester(self):
table = Table(self.rows, self.column_names, self.column_types)
normalized_table = table.denormalize(None, 'property', 'value', column_types=TypeTester(force={'gender': Text()}))
# NB: value has been overwritten
normal_rows = (
('male', 24),
)
self.assertRows(normalized_table, normal_rows)
self.assertColumnNames(normalized_table, ['gender', 'age'])
self.assertColumnTypes(normalized_table, [Text, Number])
class TestData(AgateTestCase):
def setUp(self):
self.rows = (
(1, 4, 'a'),
(2, 3, 'b'),
(None, 2, 'c')
)
self.number_type = Number()
self.text_type = Text()
self.column_names = ['one', 'two', 'three']
self.column_types = [self.number_type, self.number_type, self.text_type]
def test_data_immutable(self):
rows = [
[1, 4, 'a'],
[2, 3, 'b'],
[None, 2, 'c']
]
table = Table(rows, self.column_names, self.column_types)
rows[0] = [2, 2, 2]
self.assertSequenceEqual(table.rows[0], [1, 4, 'a'])
def test_fork_preserves_data(self):
table = Table(self.rows, self.column_names, self.column_types)
table2 = table._fork(table.rows)
self.assertIs(table.rows[0], table2.rows[0])
self.assertIs(table.rows[1], table2.rows[1])
self.assertIs(table.rows[2], table2.rows[2])
def test_where_preserves_rows(self):
table = Table(self.rows, self.column_names, self.column_types)
table2 = table.where(lambda r: r['one'] == 1)
table3 = table2.where(lambda r: r['one'] == 1)
self.assertIs(table.rows[0], table2.rows[0])
self.assertIs(table2.rows[0], table3.rows[0])
def test_order_by_preserves_rows(self):
table = Table(self.rows, self.column_names, self.column_types)
table2 = table.order_by(lambda r: r['one'])
table3 = table2.order_by(lambda r: r['one'])
self.assertIs(table.rows[0], table2.rows[0])
self.assertIs(table2.rows[0], table3.rows[0])
def test_limit_preserves_rows(self):
table = Table(self.rows, self.column_names, self.column_types)
table2 = table.limit(2)
table3 = table2.limit(2)
self.assertIs(table.rows[0], table2.rows[0])
self.assertIs(table2.rows[0], table3.rows[0])
def test_compute_creates_rows(self):
table = Table(self.rows, self.column_names, self.column_types)
table2 = table.compute([
('new2', Formula(self.number_type, lambda r: r['one']))
])
table3 = table2.compute([
('new3', Formula(self.number_type, lambda r: r['one']))
])
self.assertIsNot(table.rows[0], table2.rows[0])
self.assertNotEqual(table.rows[0], table2.rows[0])
self.assertIsNot(table2.rows[0], table3.rows[0])
self.assertNotEqual(table2.rows[0], table3.rows[0])
self.assertSequenceEqual(table.rows[0], (1, 4, 'a'))
def test_rename_row_names(self):
table = Table(self.rows, self.column_names, self.column_types)
table2 = table.rename(row_names=['a', 'b', 'c'])
self.assertSequenceEqual(table2.row_names, ['a', 'b', 'c'])
self.assertSequenceEqual(table2.column_names, self.column_names)
self.assertIs(table.row_names, None)
self.assertSequenceEqual(table.column_names, self.column_names)
def test_rename_row_names_dict(self):
table = Table(self.rows, self.column_names, self.column_types, row_names=['a', 'b', 'c'])
table2 = table.rename(row_names={'b': 'd'})
self.assertSequenceEqual(table2.row_names, ['a', 'd', 'c'])
self.assertSequenceEqual(table2.column_names, self.column_names)
self.assertSequenceEqual(table.row_names, ['a', 'b', 'c'])
self.assertSequenceEqual(table.column_names, self.column_names)
def test_rename_column_names(self):
table = Table(self.rows, self.column_names, self.column_types)
table2 = table.rename(column_names=['d', 'e', 'f'])
self.assertIs(table2.row_names, None)
self.assertSequenceEqual(table2.column_names, ['d', 'e', 'f'])
self.assertIs(table.row_names, None)
self.assertSequenceEqual(table.column_names, self.column_names)
def test_rename_column_names_dict(self):
table = Table(self.rows, self.column_names, self.column_types)
table2 = table.rename(column_names={'two': 'second'})
self.assertIs(table2.row_names, None)
self.assertSequenceEqual(table2.column_names, ['one', 'second', 'three'])
self.assertIs(table.row_names, None)
self.assertSequenceEqual(table.column_names, self.column_names)
def test_rename_column_names_renames_row_values(self):
table = Table(self.rows, self.column_names, self.column_types)
new_column_names = ['d', 'e', 'f']
table2 = table.rename(column_names=new_column_names)
self.assertColumnNames(table2, new_column_names)
class TableHTMLParser(html_parser.HTMLParser):
"""
Parser for use in testing HTML rendering of tables.
"""
def __init__(self, *args, **kwargs):
html_parser.HTMLParser.__init__(self, *args, **kwargs)
self.has_table = False
self.has_thead = False
self.has_tbody = False
self.header_rows = []
self.body_rows = []
self._in_table = False
self._in_thead = False
self._in_tbody = False
self._in_cell = False
def handle_starttag(self, tag, attrs):
if tag == 'table':
self._in_table = True
return
if tag == 'thead':
self._in_thead = True
return
if tag == 'tbody':
self._in_tbody = True
return
if tag == 'tr':
self._current_row = []
return
if tag in ('td', 'th'):
self._in_cell = True
return
def handle_endtag(self, tag):
if tag == 'table':
if self._in_table:
self.has_table = True
self._in_table = False
return
if tag == 'thead':
if self._in_thead:
self.has_thead = True
self._in_thead = False
return
if tag == 'tbody':
if self._in_tbody:
self.has_tbody = True
self._in_tbody = False
return
if tag == 'tr':
if self._in_tbody:
self.body_rows.append(self._current_row)
elif self._in_thead:
self.header_rows.append(self._current_row)
return
if tag in ('td', 'th'):
self._in_cell = False
return
def handle_data(self, data):
if self._in_cell:
self._current_row.append(data)
return
class TestPrintHTML(AgateTestCase):
def setUp(self):
self.rows = (
(1, 4, 'a'),
(2, 3, 'b'),
(None, 2, u'👍')
)
self.number_type = Number()
self.text_type = Text()
self.column_names = ['one', 'two', 'three']
self.column_types = [self.number_type, self.number_type, self.text_type]
def test_print_html(self):
table = Table(self.rows, self.column_names, self.column_types)
table_html = six.StringIO()
table.print_html(output=table_html)
table_html = table_html.getvalue()
parser = TableHTMLParser()
parser.feed(table_html)
self.assertIs(parser.has_table, True)
self.assertIs(parser.has_tbody, True)
self.assertIs(parser.has_thead, True)
self.assertEqual(len(parser.header_rows), 1)
self.assertEqual(len(parser.body_rows), len(table.rows))
header_cols = parser.header_rows[0]
self.assertEqual(len(header_cols), len(table.column_names))
for i, column_name in enumerate(table.column_names):
self.assertEqual(header_cols[i], column_name)
for row_num, row in enumerate(table.rows):
html_row = parser.body_rows[row_num]
self.assertEqual(len(html_row), len(row))
for i, col in enumerate(row):
self.assertEqual(six.text_type(col), html_row[i])
| mit | 3,156,826,707,891,739,600 | 32.14176 | 122 | 0.564821 | false |
gkc1000/pyscf | pyscf/df/grad/rks.py | 1 | 5142 | #!/usr/bin/env python
#
# This code was copied from the data generation program of Tencent Alchemy
# project (https://github.com/tencent-alchemy).
#
#
# #
# # Copyright 2019 Tencent America LLC. All Rights Reserved.
# #
# # Licensed under the Apache License, Version 2.0 (the "License");
# # you may not use this file except in compliance with the License.
# # You may obtain a copy of the License at
# #
# # http://www.apache.org/licenses/LICENSE-2.0
# #
# # Unless required by applicable law or agreed to in writing, software
# # distributed under the License is distributed on an "AS IS" BASIS,
# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# # See the License for the specific language governing permissions and
# # limitations under the License.
# #
# # Author: Qiming Sun <[email protected]>
# #
import time
from pyscf import lib
from pyscf.lib import logger
from pyscf.grad import rks as rks_grad
from pyscf.df.grad import rhf as df_rhf_grad
def get_veff(ks_grad, mol=None, dm=None):
'''Coulomb + XC functional
'''
if mol is None: mol = ks_grad.mol
if dm is None: dm = ks_grad.base.make_rdm1()
t0 = (time.clock(), time.time())
mf = ks_grad.base
ni = mf._numint
if ks_grad.grids is not None:
grids = ks_grad.grids
else:
grids = mf.grids
if grids.coords is None:
grids.build(with_non0tab=True)
if mf.nlc != '':
raise NotImplementedError
#enabling range-separated hybrids
omega, alpha, hyb = ni.rsh_and_hybrid_coeff(mf.xc, spin=mol.spin)
mem_now = lib.current_memory()[0]
max_memory = max(2000, ks_grad.max_memory*.9-mem_now)
if ks_grad.grid_response:
exc, vxc = rks_grad.get_vxc_full_response(
ni, mol, grids, mf.xc, dm,
max_memory=max_memory, verbose=ks_grad.verbose)
logger.debug1(ks_grad, 'sum(grids response) %s', exc.sum(axis=0))
else:
exc, vxc = rks_grad.get_vxc(
ni, mol, grids, mf.xc, dm,
max_memory=max_memory, verbose=ks_grad.verbose)
t0 = logger.timer(ks_grad, 'vxc', *t0)
if abs(hyb) < 1e-10 and abs(alpha) < 1e-10:
vj = ks_grad.get_j(mol, dm)
vxc += vj
if ks_grad.auxbasis_response:
e1_aux = vj.aux
else:
vj, vk = ks_grad.get_jk(mol, dm)
if ks_grad.auxbasis_response:
vk_aux = vk.aux * hyb
vk *= hyb
if abs(omega) > 1e-10: # For range separated Coulomb operator
raise NotImplementedError
vk_lr = ks_grad.get_k(mol, dm, omega=omega)
vk += vk_lr * (alpha - hyb)
if ks_grad.auxbasis_response:
vk_aux += vk_lr.aux * (alpha - hyb)
vxc += vj - vk * .5
if ks_grad.auxbasis_response:
e1_aux = vj.aux - vk_aux * .5
if ks_grad.auxbasis_response:
logger.debug1(ks_grad, 'sum(auxbasis response) %s', e1_aux.sum(axis=0))
vxc = lib.tag_array(vxc, exc1_grid=exc, aux=e1_aux)
else:
vxc = lib.tag_array(vxc, exc1_grid=exc)
return vxc
class Gradients(rks_grad.Gradients):
def __init__(self, mf):
# Whether to include the response of DF auxiliary basis when computing
# nuclear gradients of J/K matrices
self.auxbasis_response = True
rks_grad.Gradients.__init__(self, mf)
get_jk = df_rhf_grad.get_jk
def get_j(self, mol=None, dm=None, hermi=0):
return self.get_jk(mol, dm, with_k=False)[0]
def get_k(self, mol=None, dm=None, hermi=0):
return self.get_jk(mol, dm, with_j=False)[1]
get_veff = get_veff
def extra_force(self, atom_id, envs):
if self.auxbasis_response:
e1 = rks_grad.Gradients.extra_force(self, atom_id, envs)
return e1 + envs['vhf'].aux[atom_id]
else:
return 0
Grad = Gradients
if __name__ == '__main__':
from pyscf import gto
from pyscf import dft
mol = gto.Mole()
mol.atom = [
['O' , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)] ]
mol.basis = '631g'
mol.build()
mf = dft.RKS(mol).density_fit(auxbasis='ccpvdz-jkfit')
mf.conv_tol = 1e-14
e0 = mf.scf()
g = Gradients(mf).set(auxbasis_response=False)
print(lib.finger(g.kernel()) - -0.04993147565973481)
g = Gradients(mf)
print(lib.finger(g.kernel()) - -0.04990283616418435)
# O 0.0000000000 -0.0000000000 0.0210278440
# H -0.0000000000 0.0282041778 -0.0105201841
# H -0.0000000000 -0.0282041778 -0.0105201841
g.grid_response = True
print(lib.finger(g.kernel()) - -0.04990623599165457)
# O 0.0000000000 -0.0000000000 0.0210353722
# H -0.0000000000 0.0282046127 -0.0105176861
# H -0.0000000000 -0.0282046127 -0.0105176861
mf.xc = 'b3lypg'
e0 = mf.kernel()
g = Gradients(mf)
print(lib.finger(g.kernel()) - -0.03562514802969775)
# O 0.0000000000 -0.0000000000 0.0121660845
# H 0.0000000000 0.0211156739 -0.0060869839
# H -0.0000000000 -0.0211156739 -0.0060869839
| apache-2.0 | -6,712,460,053,075,018,000 | 32.174194 | 79 | 0.600739 | false |
webgeodatavore/pyqgis-samples | gui/qgis-sample-QgsCollapsibleGroupBox.py | 1 | 1325 | # coding: utf-8
from PyQt4.QtCore import QRect
from PyQt4.QtGui import (QDialog, QFrame, QLineEdit, QScrollArea,
QSizePolicy, QVBoxLayout, QWidget)
from qgis.gui import QgsCollapsibleGroupBox
new_dialog = QDialog()
new_dialog.resize(200, 100)
scroll_area = QScrollArea(new_dialog)
scroll_area.setFrameShape(QFrame.NoFrame)
scroll_area.setFrameShadow(QFrame.Plain)
scroll_area.setWidgetResizable(True)
scroll_area.setGeometry(QRect(10, 20, 170, 70))
scrollAreaWidgetContents = QWidget()
scrollAreaWidgetContents.setGeometry(QRect(0, 0, 170, 70))
vertical_layout = QVBoxLayout(scrollAreaWidgetContents)
collapsible_group_box = QgsCollapsibleGroupBox(scrollAreaWidgetContents)
collapsible_group_box.setTitle('Collapsible')
sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(
collapsible_group_box.sizePolicy().hasHeightForWidth()
)
collapsible_group_box.setSizePolicy(sizePolicy)
collapsible_group_box.setChecked(False)
vbox_layout = QVBoxLayout(collapsible_group_box)
line_edit = QLineEdit(collapsible_group_box)
line_edit.setGeometry(QRect(10, 20, 110, 30))
vertical_layout.addWidget(collapsible_group_box)
scroll_area.setWidget(scrollAreaWidgetContents)
new_dialog.show()
| gpl-2.0 | 671,552,018,990,206,300 | 33.868421 | 72 | 0.806792 | false |
YannThorimbert/ThorPy-1.4.3 | thorpy/elements/inserter.py | 1 | 11544 | """Inserter"""
# ! to forbid other element to react during insert, they should not be in
# the same menu as the inserter, and maybe use _hide_mouse = True
from pygame import event, K_ESCAPE, K_RETURN, K_BACKSPACE, KEYDOWN, K_LEFT, K_RIGHT
from pygame.mouse import set_visible as mouse_set_visible
from pygame.key import set_repeat as key_set_repeat
from thorpy.elements.clickable import Clickable
from thorpy.elements._inserterutils._insertwriter import _InsertWriter
from thorpy.elements._inserterutils._cursor import _Cursor
from thorpy.painting.mousecursor import change_cursor
from thorpy.miscgui.reaction import Reaction
from thorpy.miscgui.keyer import Keyer
from thorpy.miscgui import constants, functions, parameters, style, painterstyle
class Inserter(Clickable):
"""Element fo text insertion."""
def __init__(self,
name="",
elements=None,
normal_params=None,
press_params=None,
value="",
size=(None, None),
namestyle=None,
varlink_func=None,
quit_on_click=False,
value_type=str):
"""Element fo text insertion.
<name>: text of the title before the inserter.
<value>: initial text inside the inserter.
<size>: if not (None,None), a 2-tuple specifying the size of the text
insertion zone.
<quit_on_click>: if True, make the inserter lose focus when mouse click
outside its area.
"""
namestyle=style.STYLE_INSERTER_NAME if namestyle is None else namestyle
if size[0] is None:
s0 = style.SIZE[0]
else:
s0 = size[0]
if size[1] is None:
s1 = style.Y_SMALL_SIZE
else:
s1 = size[1]
size = (s0, s1)
self.cursor = None
super(Inserter, self).__init__("", elements, normal_params,
press_params)
self._name_element = self._get_name_element(name, namestyle)
self.add_elements([self._name_element])
self._iwriter = _InsertWriter(value)
self._iwriter.finish()
self.add_elements([self._iwriter])
self.quit_on_click = quit_on_click
self._value_type = value_type
painter = functions.obtain_valid_painter(painterstyle.INSERTER_PAINTER,
color=style.DEF_COLOR2,
pressed=True,
size=(s0,s1))
self.set_painter(painter)
self.normal_params.polite_set("states hover",
[constants.STATE_NORMAL,
constants.STATE_PRESSED])
self.press_params.polite_set("states hover",
[constants.STATE_NORMAL,
constants.STATE_PRESSED])
self._activated = False
self._value = value
self._inserted = self._value
self._cursor_index = len(self._inserted)
reac_keypress = Reaction(KEYDOWN, self._reaction_keydown,
reac_name=constants.REAC_KEYPRESS)
self.add_reaction(reac_keypress)
self._keyer = Keyer()
self._hide_mouse = self.normal_params.params.get("hide mouse", False)
self._varlink_func = varlink_func
self.repeat_delay = parameters.KEY_DELAY
self.repeat_interval = parameters.KEY_INTERVAL
    def set_key_repeat(self, delay, interval):
"""Set delay to None for no repeat."""
self.repeat_delay = delay
self.repeat_interval = interval
def finish(self):
Clickable.finish(self)
# cursor is initialized in finish because _iwriter needs self.fusionner
# to initialize...
while not self._iwriter._is_small_enough(self._inserted) and self._inserted:
self._inserted = self._inserted[:-1]
self._iwriter.refresh_img()
self.cursor = _Cursor(self)
self.add_elements(list([self.cursor]))
self._refresh_pos()
self.cursor.finish()
self._name_element.user_func = self.enter
def _get_name_element(self, name, namestyle):
painter = functions.obtain_valid_painter(
painterstyle.INSERTER_NAME_PAINTER,
size=style.SIZE)
el = Clickable(name)
el.set_painter(painter)
if namestyle:
el.set_style(namestyle)
el.finish()
return el
def unblit(self, rect=None):
self._name_element.unblit(rect)
Clickable.unblit(self, rect)
def _hover(self):
Clickable._hover(self)
change_cursor(constants.CURSOR_TEXT)
def _unhover(self):
if not self._activated:
Clickable._unhover(self)
change_cursor(constants.CURSOR_NORMAL)
def transp_blit(self):
a = self.get_oldest_children_ancester()
r = self.get_storer_rect()
a.unblit(r)
a.partial_blit(None, r)
def _reaction_keydown(self, pygame_event):
if self._activated:
if pygame_event.type == KEYDOWN:
if pygame_event.key == K_ESCAPE:
self.exit()
elif pygame_event.key == K_RETURN: # way to exit saving insertion
self._value = self._inserted
self.exit()
functions.debug_msg("'" + self._inserted + "'", " inserted")
elif pygame_event.key == K_BACKSPACE:
if self._cursor_index > 0:
before = self._inserted[0:self._cursor_index-1]
after = self._inserted[self._cursor_index:]
self._inserted = before + after
self._cursor_index -= 1
self._urbu()
# if this is a modifier, the next char will be handled by the
# keyer...
elif pygame_event.key == K_LEFT:
if self._cursor_index > 1:
self._cursor_index -= 1
self._urbu()
elif pygame_event.key == K_RIGHT:
if self._cursor_index < len(self._inserted):
self._cursor_index += 1
self._urbu()
elif not pygame_event.key in self._keyer.modifiers:
char = self._keyer.get_char_from_key(pygame_event.key)
before = self._inserted[0:self._cursor_index]
after = self._inserted[self._cursor_index:]
new_word = before + char + after
if self._iwriter._is_small_enough(new_word):
self._inserted = new_word
self._cursor_index += 1
self._urbu()
def _urbu(self, graphical=True):
"""Unblit, Refresh cursor pos, Blit, Update.
Returns True if the text img has been refreshed.
"""
## a = self.get_oldest_children_ancester()
## r = self.get_storer_rect()
## a.unblit(r)
if graphical:
self.unblit()
txt_refreshed = self._refresh_cursor_pos() # refreshes iwriter's img!
## a.partial_blit(None, r)
if graphical:
self.blit()
self.update()
return txt_refreshed
def _reaction_press(self, pygame_event):
Clickable._reaction_press(self, pygame_event)
if self.current_state_key == constants.STATE_PRESSED:
self.enter()
elif self._activated:
if not self.quit_on_click:
self._value = self._inserted
self.exit()
def enter(self):
functions.debug_msg("Entering inserter ", self)
if self.repeat_delay is not None:
key_set_repeat(self.repeat_delay, self.repeat_interval)
if self._hide_mouse:
mouse_set_visible(False)
self._activated = True
self.cursor._activated = True
def exit(self):
key_set_repeat(parameters.KEY_DELAY, parameters.KEY_INTERVAL)
if self._activated:
functions.debug_msg("Leaving inserter ", self)
self._inserted = self._value
self._urbu()
mouse_set_visible(True)
self.cursor.exit()
self._activated = False
event_quit = event.Event(constants.THORPY_EVENT,
id=constants.EVENT_INSERT,
el=self,
value=self._value)
event.post(event_quit)
if self._varlink_func:
self._varlink_func(self._value)
def _refresh_cursor_pos(self):
"""Refresh position of the cursor. Used when inserted text changes.
Also refreshes iwriter's image! Is used through self._urbu().
Returns True if the text img has been refreshed.
"""
txt_refreshed = True
if self._iwriter.refresh_img() == -1: # text too large
txt_refreshed = False
self._inserted = self._inserted[:-1]
pos = self._iwriter._get_cursor_pos()
self.cursor.set_topleft(pos)
return txt_refreshed
def _refresh_pos(self):
"""Refresh position of the whole element."""
self._iwriter._refresh_pos()
l = self.get_fus_topleft()[0]
(x, y) = self.get_fus_center()
l -= self._name_element.get_fus_size()[0] + style.NAME_SPACING
self._name_element.set_center((None, y))
self._name_element.set_topleft((l, None))
def get_storer_rect(self):
return self.get_family_rect(constants.STATE_NORMAL)
def get_value(self):
try:
return self._value_type(self._inserted)
except ValueError:
functions.debug_msg("type of self._inserted is not " + \
str(self._value_type))
return self._value_type()
def set_value(self, value, refresh_draw=False):
if self._iwriter._is_small_enough(value):
self._inserted = value
self._cursor_index = len(value)
self._urbu(graphical=refresh_draw)
else:
raise Exception("Cannot insert value in inserter:", value)
def set_font_color(self, color, state=None, center_title=True):
"""set font color for a given state"""
Clickable.set_font_color(self, color, state, center_title)
self._name_element.set_font_color(color, state, center_title)
def set_font_size(self, size, state=None, center_title=True):
"""set font color for a given state"""
Clickable.set_font_size(self, size, state, center_title)
self._name_element.set_font_size(size, state, center_title)
def set_font(self, fontname, state=None, center_title=True):
"""set font for a given state"""
        Clickable.set_font(self, fontname, state, center_title)
self.set_hovered_states(self._states_hover)
def set_font_effects(self, biu, state=None, center=True, preserve=False):
"""biu = tuple : (bold, italic, underline)"""
        Clickable.set_font_effects(self, biu, state, center, preserve)
self._name_element.set_font_effects(biu, state, center, preserve)
def get_help_rect(self):
return self._name_element.get_help_rect()
def get_text(self):
return self._name_element.get_text()
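
# Illustrative usage sketch (not part of the original module): a minimal way to
# put an Inserter on screen with thorpy. The Application/Box/Menu calls below
# follow the usual thorpy pattern but are assumptions, not code from this file.
#
#   import thorpy
#   application = thorpy.Application(size=(300, 200), caption="Inserter demo")
#   inserter = Inserter(name="Your name:", value="type here")
#   inserter.finish()
#   box = thorpy.Box(elements=[inserter])
#   menu = thorpy.Menu(box)
#   menu.play()                      # user types, presses RETURN to validate
#   print(inserter.get_value())      # the inserted text
#   application.quit()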
| mit | -3,377,540,179,149,502,000 | 39.791519 | 84 | 0.557432 | false |
nextgis/ngq_compulink | qgis-installer/customization-conf/plugins/ru_geocoder/rb_result_renderer.py | 1 | 2612 | """
/***************************************************************************
RuGeocoder
A QGIS plugin
Geocode your csv files to shp
-------------------
begin : 2012-02-20
copyright : (C) 2012 by Nikulin Evgeniy
email : nikulin.e at gmail
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtGui import QColor
from qgis.gui import QgsRubberBand
from qgis.core import QGis, QgsRectangle, QgsCoordinateReferenceSystem, QgsCoordinateTransform
class RubberBandResultRenderer():
def __init__(self, iface):
self.iface = iface
self.rb = QgsRubberBand(self.iface.mapCanvas(), QGis.Point)
self.rb.setColor(QColor('magenta'))
self.rb.setIconSize(12)
self.srs_wgs84 = QgsCoordinateReferenceSystem(4326)
self.transformation = QgsCoordinateTransform(self.srs_wgs84, self.srs_wgs84)
def show_point(self, point, center=False):
#check srs
if self.need_transform():
point = self.transform_point(point)
self.rb.addPoint(point)
if center:
self.center_to_point(point)
def clear(self):
self.rb.reset(QGis.Point)
def need_transform(self):
return self.iface.mapCanvas().mapRenderer().destinationCrs().postgisSrid() != 4326
def transform_point(self, point):
dest_srs_id = self.iface.mapCanvas().mapRenderer().destinationCrs().srsid()
self.transformation.setDestCRSID(dest_srs_id)
try:
return self.transformation.transform(point)
except:
print 'Error on transform!' # DEBUG! need message???
return
def center_to_point(self, point):
canvas = self.iface.mapCanvas()
new_extent = QgsRectangle(canvas.extent())
new_extent.scale(1, point)
canvas.setExtent(new_extent)
canvas.refresh()
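
# Illustrative usage sketch (assumption, not part of the plugin): how geocoding
# code might drive this renderer inside QGIS. "iface" is the interface object
# QGIS hands to every plugin; the coordinates below are arbitrary examples.
#
#   from qgis.core import QgsPoint
#   renderer = RubberBandResultRenderer(iface)
#   renderer.show_point(QgsPoint(30.52, 50.45), center=True)  # mark and recenter
#   renderer.clear()                                          # remove the marker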
| gpl-2.0 | -404,471,387,088,012,400 | 37.985075 | 94 | 0.498469 | false |
bieschke/nuffle | lib/python/cherrypy/test/test_combinedfilters.py | 1 | 2482 | """
Copyright (c) 2004, CherryPy Team ([email protected])
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the CherryPy Team nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import gzip, StringIO
import cherrypy
europoundUnicode = u'\x80\xa3'
class Root:
def index(self):
yield u"Hello,"
yield u"world"
yield europoundUnicode
index.exposed = True
cherrypy.root = Root()
cherrypy.config.update({
'server.logToScreen': False,
'server.environment': 'production',
'gzipFilter.on': True,
'encodingFilter.on': True,
})
import helper
class CombinedFiltersTest(helper.CPWebCase):
def testCombinedFilters(self):
expectedResult = (u"Hello,world" + europoundUnicode).encode('utf-8')
zbuf = StringIO.StringIO()
zfile = gzip.GzipFile(mode='wb', fileobj=zbuf, compresslevel=9)
zfile.write(expectedResult)
zfile.close()
self.getPage("/", headers=[("Accept-Encoding", "gzip")])
self.assertInBody(zbuf.getvalue()[:3])
if __name__ == '__main__':
helper.testmain()
| gpl-2.0 | -4,207,088,237,905,795,600 | 37.184615 | 81 | 0.72361 | false |
erykoff/longdouble_conv | longdouble_conv/longdouble_lib.py | 1 | 1908 | """
longdouble_conv
Copyright (C) 2013 Eli Rykoff, SLAC. erykoff at gmail dot com
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import numpy as np
from . import _longdouble_pywrap
def string2longdouble(_string):
return _longdouble_pywrap.string2longdouble(_string)
def longdouble2string(_longdouble,n=19):
return _longdouble_pywrap.longdouble2string(_longdouble,n)
def doubledouble2longdouble(_doubledouble):
return _longdouble_pywrap.doubledouble2longdouble(_doubledouble)
def longdouble2doubledouble(_longdouble):
return _longdouble_pywrap.longdouble2doubledouble(_longdouble)
def string2doubledouble(_string):
_longdouble = string2longdouble(_string)
return longdouble2doubledouble(_longdouble)
def doubledouble2string(_doubledouble):
_longdouble = doubledouble2longdouble(_doubledouble)
return longdouble2string(_longdouble)
def strings2longdoubles(_strings,array=True):
# I had intended to have this in the c code but this is much simpler, if a
# bit slower in those cases where you need a list rather than a numpy
# array.
arr = _longdouble_pywrap.strings2longdoubles(_strings)
if (not array):
return arr.tolist()
else :
return arr
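
# Illustrative round-trip sketch (not in the original module). The values are
# arbitrary and it assumes the compiled _longdouble_pywrap extension is built.
#
#   from longdouble_conv import longdouble_lib as ll
#   ld = ll.string2longdouble("0.1234567890123456789")
#   print(ll.longdouble2string(ld))           # back to a 19-digit string
#   dd = ll.longdouble2doubledouble(ld)       # (high, low) pair of doubles
#   print(ll.doubledouble2string(dd))
#   arr = ll.strings2longdoubles(["1.5", "2.5"])  # numpy array of longdoubles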
| gpl-2.0 | -7,293,097,500,366,783,000 | 33.690909 | 78 | 0.748428 | false |
itslukej/zirc | zirc/test.py | 1 | 2395 | from six import print_
from .event import Event
from .wrappers import connection_wrapper
from . import errors, util
class TestCase(object):
def start(self, log):
print_("Starting...\n", flush=True)
log = log.split("\n")
for line in log:
try:
event = Event(line)
print_("Parsed line '{0}'".format(line), flush=True)
except Exception:
raise errors.InvalidLine(line)
args = {"event": event, "bot": self, "irc": connection_wrapper(self), "args": " ".join(event.arguments).split(" ")[1:]}
args.update({k: getattr(event, k) for k in dir(event) if not k.startswith("__") and not k.endswith("__")})
if hasattr(self, "on_all"):
print_("Attempting to run on_all...", flush=True)
util.function_argument_call(self.on_all, args)()
text_type_func_name = "on_" + event.text_type.lower()
if hasattr(self, text_type_func_name):
print_("Attempting to run {0}".format(text_type_func_name), flush=True)
util.function_argument_call(getattr(self, text_type_func_name), args)()
raw_type_func_name = "on_" + event.type.lower()
if raw_type_func_name != text_type_func_name:
if hasattr(self, raw_type_func_name):
print_("Attempting to run {0}".format(raw_type_func_name), flush=True)
util.function_argument_call(getattr(self, raw_type_func_name), args)()
if event.type == "PING":
self.send("PONG :{0}".format(" ".join(event.arguments)))
print_("Done!", flush=True)
def send(self, data):
print_("RESPONSE: '{0}'\n".format(data), flush=True)
if hasattr(self, "on_send"):
self.on_send(data)
def reply(self, event, message, background=None, rainbow=False, style=None):
if event.target == 'zIRC-test':
self.privmsg(event.source.nick, message, background=background, rainbow=rainbow, style=style)
else:
self.privmsg(event.target, message, background=background, rainbow=rainbow, style=style)
def privmsg(self, channel, message, background=None, rainbow=False, style=None):
del background
del rainbow
del style
self.send("PRIVMSG {0} :{1}".format(channel, message))
| gpl-3.0 | 3,439,073,094,418,084,400 | 42.545455 | 131 | 0.577871 | false |
amaudy/django-report-builder | report_builder/views.py | 1 | 32211 | from django.contrib.contenttypes.models import ContentType
from django.core import exceptions
from django.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.decorators import permission_required
from django.db.models.fields.related import ReverseManyRelatedObjectsDescriptor
from django.forms.models import inlineformset_factory
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response, redirect, get_object_or_404, render
from django.template import RequestContext
from report_builder.models import Report, DisplayField, FilterField, Format
from report_builder.utils import javascript_date_format, duplicate
from django.utils.decorators import method_decorator
from django.views.generic.edit import CreateView, UpdateView
from django.views.generic import ListView
from django import forms
import datetime
import time
import re
from decimal import Decimal
from numbers import Number
from types import BooleanType
import copy
from dateutil import parser
class ReportForm(forms.ModelForm):
class Meta:
model = Report
fields = ['name', 'distinct', 'root_model']
class ReportEditForm(forms.ModelForm):
class Meta:
model = Report
fields = ['name', 'distinct', 'description',]
widgets = {
'description': forms.TextInput(attrs={'style': 'width:99%;'}),
}
class DisplayFieldForm(forms.ModelForm):
class Meta:
model = DisplayField
widgets = {
'path': forms.HiddenInput(),
'path_verbose': forms.TextInput(attrs={'readonly':'readonly'}),
'field_verbose': forms.TextInput(attrs={'readonly':'readonly'}),
'field': forms.HiddenInput(),
'width': forms.TextInput(attrs={'class':'small_input'}),
'total': forms.CheckboxInput(attrs={'class':'small_input'}),
'sort': forms.TextInput(attrs={'class':'small_input'}),
}
class FilterFieldForm(forms.ModelForm):
class Meta:
model = FilterField
widgets = {
'path': forms.HiddenInput(),
'path_verbose': forms.TextInput(attrs={'readonly':'readonly'}),
'field_verbose': forms.TextInput(attrs={'readonly':'readonly'}),
'field': forms.HiddenInput(),
'filter_type': forms.Select(attrs={'onchange':'check_filter_type(event.target)'})
}
def __init__(self, *args, **kwargs):
super(FilterFieldForm, self).__init__(*args, **kwargs)
# override the filter_value field with the models native ChoiceField
if self.instance.choices:
self.fields['filter_value'].widget = forms.Select(choices=self.instance.choices)
if 'DateField' in self.instance.field_verbose or 'DateTimeField' in self.instance.field_verbose:
widget = self.fields['filter_value'].widget
widget.attrs['class'] = 'datepicker'
widget.attrs['data-date-format'] = javascript_date_format(settings.DATE_FORMAT)
class ReportCreateView(CreateView):
form_class = ReportForm
template_name = 'report_new.html'
def get_relation_fields_from_model(model_class):
relation_fields = []
all_fields_names = model_class._meta.get_all_field_names()
for field_name in all_fields_names:
field = model_class._meta.get_field_by_name(field_name)
if field[3] or not field[2] or hasattr(field[0], 'related'):
field[0].field_name = field_name
relation_fields += [field[0]]
return relation_fields
def get_direct_fields_from_model(model_class):
direct_fields = []
all_fields_names = model_class._meta.get_all_field_names()
for field_name in all_fields_names:
field = model_class._meta.get_field_by_name(field_name)
# Direct, not m2m, not FK
if field[2] and not field[3] and field[0].__class__.__name__ != "ForeignKey":
direct_fields += [field[0]]
return direct_fields
def get_custom_fields_from_model(model_class):
""" django-custom-fields support
"""
if 'custom_field' in settings.INSTALLED_APPS:
from custom_field.models import CustomField
try:
content_type = ContentType.objects.get(model=model_class._meta.module_name,app_label=model_class._meta.app_label)
except ContentType.DoesNotExist:
content_type = None
custom_fields = CustomField.objects.filter(content_type=content_type)
return custom_fields
def get_properties_from_model(model_class):
properties = []
for attr_name, attr in dict(model_class.__dict__).iteritems():
if type(attr) == property:
properties.append(dict(label=attr_name, name=attr_name.strip('_').replace('_',' ')))
return sorted(properties)
def filter_property(filter_field, value):
filter_type = filter_field.filter_type
filter_value = filter_field.filter_value
filtered = True
#TODO: i10n
WEEKDAY_INTS = {
'monday': 0,
'tuesday': 1,
'wednesday': 2,
'thursday': 3,
'friday': 4,
'saturday': 5,
'sunday': 6,
}
#TODO instead of catch all, deal with all cases
    # Example: 'a' < 2 is a valid python comparison,
    # but 2 < '1' yields True! Not intuitive for humans.
try:
if filter_type == 'exact' and str(value) == filter_value:
filtered = False
if filter_type == 'iexact' and str(value).lower() == str(filter_value).lower():
filtered = False
if filter_type == 'contains' and filter_value in value:
filtered = False
if filter_type == 'icontains' and str(filter_value).lower() in str(value).lower():
filtered = False
if filter_type == 'in' and value in filter_value:
filtered = False
# convert dates and datetimes to timestamps in order to compare digits and date/times the same
if isinstance(value, datetime.datetime) or isinstance(value, datetime.date):
value = str(time.mktime(value.timetuple()))
try:
filter_value_dt = parser.parse(filter_value)
filter_value = str(time.mktime(filter_value_dt.timetuple()))
except ValueError:
pass
if filter_type == 'gt' and Decimal(value) > Decimal(filter_value):
filtered = False
if filter_type == 'gte' and Decimal(value) >= Decimal(filter_value):
filtered = False
if filter_type == 'lt' and Decimal(value) < Decimal(filter_value):
filtered = False
if filter_type == 'lte' and Decimal(value) <= Decimal(filter_value):
filtered = False
if filter_type == 'startswith' and str(value).startswith(str(filter_value)):
filtered = False
if filter_type == 'istartswith' and str(value).lower().startswith(str(filter_value)):
filtered = False
if filter_type == 'endswith' and str(value).endswith(str(filter_value)):
filtered = False
if filter_type == 'iendswith' and str(value).lower().endswith(str(filter_value)):
filtered = False
if filter_type == 'range' and value in [int(x) for x in filter_value]:
filtered = False
        if filter_type == 'week_day' and WEEKDAY_INTS.get(str(filter_value).lower()) == value.weekday():
filtered = False
if filter_type == 'isnull' and value == None:
filtered = False
if filter_type == 'regex' and re.search(filter_value, value):
filtered = False
if filter_type == 'iregex' and re.search(filter_value, value, re.I):
filtered = False
except:
pass
if filter_field.exclude:
return not filtered
return filtered
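
# Illustrative sketch (assumption, not in the original file): filter_property
# receives a FilterField and the value of a property pulled off a model
# instance, and returns True when the row should be dropped. The field values
# below are hypothetical.
#
#   ff = FilterField(filter_type='icontains', filter_value='smith', exclude=False)
#   filter_property(ff, 'John Smith')   # -> False, the row is kept
#   filter_property(ff, 'Jane Doe')     # -> True, the row is filtered out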
@staff_member_required
def ajax_get_related(request):
""" Get related model and fields
    Requires GET variables 'model' and 'field'
Returns the model the field belongs to
"""
field_name = request.GET['field']
model = ContentType.objects.get(pk=request.GET['model']).model_class()
field = model._meta.get_field_by_name(field_name)
path = request.GET['path']
path_verbose = request.GET['path_verbose']
if field[2]:
# Direct field
new_model = field[0].related.parent_model()
else:
# Indirect related field
new_model = field[0].model()
new_fields = get_relation_fields_from_model(new_model)
model_ct = ContentType.objects.get_for_model(new_model)
if path_verbose:
path_verbose += "::"
path_verbose += field[0].name
path += field_name
path += '__'
return render_to_response('report_builder/report_form_related_li.html', {
'model_ct': model_ct,
'related_fields': new_fields,
'path': path,
'path_verbose': path_verbose,
}, RequestContext(request, {}),)
@staff_member_required
def ajax_get_fields(request):
""" Get fields and properties for a particular model
"""
field_name = request.GET.get('field')
model = ContentType.objects.get(pk=request.GET['model']).model_class()
path = request.GET['path']
path_verbose = request.GET.get('path_verbose')
properties = get_properties_from_model(model)
custom_fields = get_custom_fields_from_model(model)
root_model = model.__name__.lower()
if field_name == '':
return render_to_response('report_builder/report_form_fields_li.html', {
'fields': get_direct_fields_from_model(model),
'properties': properties,
'custom_fields': custom_fields,
'root_model': root_model,
}, RequestContext(request, {}),)
field = model._meta.get_field_by_name(field_name)
if path_verbose:
path_verbose += "::"
# TODO: need actual model name to generate choice list (not pluralized field name)
# - maybe store this as a separate value?
if field[3] and hasattr(field[0], 'm2m_reverse_field_name'):
path_verbose += field[0].m2m_reverse_field_name()
else:
path_verbose += field[0].name
path += field_name
path += '__'
if field[2]:
# Direct field
new_model = field[0].related.parent_model
path_verbose = new_model.__name__.lower()
else:
# Indirect related field
new_model = field[0].model
path_verbose = new_model.__name__.lower()
fields = get_direct_fields_from_model(new_model)
custom_fields = get_custom_fields_from_model(new_model)
properties = get_properties_from_model(new_model)
return render_to_response('report_builder/report_form_fields_li.html', {
'fields': fields,
'custom_fields': custom_fields,
'properties': properties,
'path': path,
'path_verbose': path_verbose,
'root_model': root_model,
}, RequestContext(request, {}),)
@staff_member_required
def ajax_get_choices(request):
path_verbose = request.GET.get('path_verbose')
label = request.GET.get('label')
root_model = request.GET.get('root_model')
choices = FilterField().get_choices(path_verbose or root_model, label)
select_widget = forms.Select(choices=[('','---------')] + list(choices))
options_html = select_widget.render_options([], [0])
return HttpResponse(options_html)
@staff_member_required
def ajax_get_formats(request):
choices = Format.objects.values_list('pk', 'name')
select_widget = forms.Select(choices=[('','---------')] + list(choices))
options_html = select_widget.render_options([], [0])
return HttpResponse(options_html)
def get_model_from_path_string(root_model, path):
""" Return a model class for a related model
root_model is the class of the initial model
path is like foo__bar where bar is related to foo
"""
for path_section in path.split('__'):
if path_section:
field = root_model._meta.get_field_by_name(path_section)
if field[2]:
root_model = field[0].related.parent_model()
else:
root_model = field[0].model
return root_model
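
# Illustrative sketch (assumption): with a hypothetical Book model that has a
# ForeignKey "author" pointing at Author,
#
#   get_model_from_path_string(Book, 'author__')   # -> the Author model
#   get_model_from_path_string(Book, '')           # -> Book, no traversal
#
# the returned model is what the permission checks in report_to_list run against.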
def sort_helper(x, sort_key):
# TODO: explain what's going on here - I think this is meant to deal with
# null comparisons for datetimes?
if x[sort_key] == None:
result = datetime.date(datetime.MINYEAR, 1, 1)
else:
result = x[sort_key]
return result.lower() if isinstance(result, basestring) else result
def report_to_list(report, user, preview=False, queryset=None):
""" Create list from a report with all data filtering
    preview: Return only the first 50 rows
    queryset: Provide objects for the list, instead of running filters
    Returns a list of rows and a message describing any issues
"""
message= ""
model_class = report.root_model.model_class()
if queryset != None:
objects = report.add_aggregates(queryset)
else:
try:
objects = report.get_query()
except exceptions.ValidationError, e:
message += "Validation Error: {0!s}. This probably means something is wrong with the report's filters.".format(e)
return [], message
# Display Values
display_field_paths = []
property_list = {}
custom_list = {}
display_totals = {}
def append_display_total(display_totals, display_field, display_field_key):
if display_field.total:
display_totals[display_field_key] = {'val': Decimal('0.00')}
for i, display_field in enumerate(report.displayfield_set.all()):
model = get_model_from_path_string(model_class, display_field.path)
if user.has_perm(model._meta.app_label + '.change_' + model._meta.module_name) \
or user.has_perm(model._meta.app_label + '.view_' + model._meta.module_name) \
or not model:
# TODO: clean this up a bit
display_field_key = display_field.path + display_field.field
if '[property]' in display_field.field_verbose:
property_list[i] = display_field_key
append_display_total(display_totals, display_field, display_field_key)
elif '[custom' in display_field.field_verbose:
custom_list[i] = display_field_key
append_display_total(display_totals, display_field, display_field_key)
elif display_field.aggregate == "Avg":
display_field_key += '__avg'
display_field_paths += [display_field_key]
append_display_total(display_totals, display_field, display_field_key)
elif display_field.aggregate == "Max":
display_field_key += '__max'
display_field_paths += [display_field_key]
append_display_total(display_totals, display_field, display_field_key)
elif display_field.aggregate == "Min":
display_field_key += '__min'
display_field_paths += [display_field_key]
append_display_total(display_totals, display_field, display_field_key)
elif display_field.aggregate == "Count":
display_field_key += '__count'
display_field_paths += [display_field_key]
append_display_total(display_totals, display_field, display_field_key)
elif display_field.aggregate == "Sum":
display_field_key += '__sum'
display_field_paths += [display_field_key]
append_display_total(display_totals, display_field, display_field_key)
else:
display_field_paths += [display_field_key]
append_display_total(display_totals, display_field, display_field_key)
else:
message += "You don't have permission to " + display_field.name
try:
if user.has_perm(report.root_model.app_label + '.change_' + report.root_model.model) \
or user.has_perm(report.root_model.app_label + '.view_' + report.root_model.model):
def increment_total(display_field_key, display_totals, val):
if display_totals.has_key(display_field_key):
# Booleans are Numbers - blah
if isinstance(val, Number) and not isinstance(val, BooleanType):
# do decimal math for all numbers
display_totals[display_field_key]['val'] += Decimal(str(val))
else:
display_totals[display_field_key]['val'] += Decimal('1.00')
# get pk for primary and m2m relations in order to retrieve objects
# for adding properties to report rows
display_field_paths.insert(0, 'pk')
m2m_relations = []
for position, property_path in property_list.iteritems():
property_root = property_path.split('__')[0]
root_class = report.root_model.model_class()
property_root_class = getattr(root_class, property_root)
if type(property_root_class) == ReverseManyRelatedObjectsDescriptor:
display_field_paths.insert(1, '%s__pk' % property_root)
m2m_relations.append(property_root)
values_and_properties_list = []
filtered_report_rows = []
group = None
for df in report.displayfield_set.all():
if df.group:
group = df.path + df.field
break
if group:
filtered_report_rows = report.add_aggregates(objects.values_list(group))
else:
values_list = objects.values_list(*display_field_paths)
if not group:
for row in values_list:
row = list(row)
obj = report.root_model.model_class().objects.get(pk=row.pop(0))
#related_objects
remove_row = False
values_and_properties_list.append(row)
# filter properties (remove rows with excluded properties)
property_filters = report.filterfield_set.filter(
field_verbose__contains='[property]'
)
for property_filter in property_filters:
root_relation = property_filter.path.split('__')[0]
if root_relation in m2m_relations:
pk = row[0]
if pk is not None:
# a related object exists
m2m_obj = getattr(obj, root_relation).get(pk=pk)
val = reduce(getattr, [property_filter.field], m2m_obj)
else:
val = None
else:
val = reduce(getattr, (property_filter.path + property_filter.field).split('__'), obj)
if filter_property(property_filter, val):
remove_row = True
values_and_properties_list.pop()
break
if not remove_row:
# increment totals for fields
for i, field in enumerate(display_field_paths[1:]):
if field in display_totals.keys():
increment_total(field, display_totals, row[i])
for position, display_property in property_list.iteritems():
relations = display_property.split('__')
root_relation = relations[0]
if root_relation in m2m_relations:
pk = row.pop(0)
if pk is not None:
# a related object exists
m2m_obj = getattr(obj, root_relation).get(pk=pk)
val = reduce(getattr, relations[1:], m2m_obj)
else:
val = None
else:
val = reduce(getattr, relations, obj)
values_and_properties_list[-1].insert(position, val)
increment_total(display_property, display_totals, val)
for position, display_custom in custom_list.iteritems():
val = obj.get_custom_value(display_custom)
values_and_properties_list[-1].insert(position, val)
increment_total(display_custom, display_totals, val)
filtered_report_rows += [values_and_properties_list[-1]]
if preview and len(filtered_report_rows) == 50:
break
sort_fields = report.displayfield_set.filter(sort__gt=0).order_by('-sort').\
values_list('position', 'sort_reverse')
for sort_field in sort_fields:
filtered_report_rows = sorted(
filtered_report_rows,
key=lambda x: sort_helper(x, sort_field[0]-1),
reverse=sort_field[1]
)
values_and_properties_list = filtered_report_rows
else:
values_and_properties_list = []
message = "Permission Denied on %s" % report.root_model.name
# add choice list display and display field formatting
choice_lists = {}
display_formats = {}
final_list = []
for df in report.displayfield_set.all():
if df.choices:
df_choices = df.choices_dict
# Insert blank and None as valid choices
df_choices[''] = ''
df_choices[None] = ''
choice_lists.update({df.position: df_choices})
if df.display_format:
display_formats.update({df.position: df.display_format})
for row in values_and_properties_list:
# add display totals for grouped result sets
# TODO: dry this up, duplicated logic in non-grouped total routine
if group:
# increment totals for fields
for i, field in enumerate(display_field_paths[1:]):
if field in display_totals.keys():
increment_total(field, display_totals, row[i])
row = list(row)
for position, choice_list in choice_lists.iteritems():
row[position-1] = choice_list[row[position-1]]
for position, display_format in display_formats.iteritems():
# convert value to be formatted into Decimal in order to apply
# numeric formats
try:
value = Decimal(row[position-1])
except:
value = row[position-1]
# Try to format the value, let it go without formatting for ValueErrors
try:
row[position-1] = display_format.string.format(value)
except ValueError:
row[position-1] = value
final_list.append(row)
values_and_properties_list = final_list
if display_totals:
display_totals_row = []
fields_and_properties = list(display_field_paths[1:])
for position, value in property_list.iteritems():
fields_and_properties.insert(position, value)
for i, field in enumerate(fields_and_properties):
if field in display_totals.keys():
display_totals_row += [display_totals[field]['val']]
else:
display_totals_row += ['']
# add formatting to display totals
for df in report.displayfield_set.all():
if df.display_format:
try:
value = Decimal(display_totals_row[df.position-1])
except:
value = display_totals_row[df.position-1]
display_totals_row[df.position-1] = df.display_format.string.\
format(value)
if display_totals:
values_and_properties_list = (
values_and_properties_list + [
['TOTALS'] + (len(fields_and_properties) - 1) * ['']
] + [display_totals_row]
)
except exceptions.FieldError:
message += "Field Error. If you are using the report builder then you found a bug!"
message += "If you made this in admin, then you probably did something wrong."
values_and_properties_list = None
return values_and_properties_list, message
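
# Illustrative sketch (assumption, not in the original file): report_to_list
# returns plain rows plus a message string, e.g. for a two-column report
#
#   rows, message = report_to_list(report, request.user, preview=True)
#   # rows    -> [['Jane', 27], ['Jim', 24], ...]
#   # message -> '' unless a permission or validation problem was hit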
@staff_member_required
def ajax_preview(request):
""" This view is intended for a quick preview useful when debugging
    reports. It limits output to 50 objects.
"""
report = get_object_or_404(Report, pk=request.POST['report_id'])
objects_list, message = report_to_list(report, request.user, preview=True)
return render_to_response('report_builder/html_report.html', {
'report': report,
'objects_dict': objects_list,
'message': message
}, RequestContext(request, {}),)
class ReportUpdateView(UpdateView):
""" This view handles the edit report builder
It includes attached formsets for display and criteria fields
"""
model = Report
form_class = ReportEditForm
success_url = './'
@method_decorator(permission_required('report_builder.change_report'))
def dispatch(self, request, *args, **kwargs):
return super(ReportUpdateView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super(ReportUpdateView, self).get_context_data(**kwargs)
model_class = self.object.root_model.model_class()
model_ct = ContentType.objects.get_for_model(model_class)
properties = get_properties_from_model(model_class)
custom_fields = get_custom_fields_from_model(model_class)
direct_fields = get_direct_fields_from_model(model_class)
relation_fields = get_relation_fields_from_model(model_class)
DisplayFieldFormset = inlineformset_factory(
Report,
DisplayField,
extra=0,
can_delete=True,
form=DisplayFieldForm)
FilterFieldFormset = inlineformset_factory(
Report,
FilterField,
extra=0,
can_delete=True,
form=FilterFieldForm)
if self.request.POST:
ctx['field_list_formset'] = DisplayFieldFormset(self.request.POST, instance=self.object)
ctx['field_filter_formset'] = FilterFieldFormset(self.request.POST, instance=self.object, prefix="fil")
else:
ctx['field_list_formset'] = DisplayFieldFormset(instance=self.object)
ctx['field_filter_formset'] = FilterFieldFormset(instance=self.object, prefix="fil")
ctx['related_fields'] = relation_fields
ctx['fields'] = direct_fields
ctx['custom_fields'] = custom_fields
ctx['properties'] = properties
ctx['model_ct'] = model_ct
ctx['root_model'] = model_ct.model
return ctx
def form_valid(self, form):
context = self.get_context_data()
field_list_formset = context['field_list_formset']
field_filter_formset = context['field_filter_formset']
if field_list_formset.is_valid() and field_filter_formset.is_valid():
self.object = form.save()
field_list_formset.report = self.object
field_list_formset.save()
field_filter_formset.report = self.object
field_filter_formset.save()
self.object.check_report_display_field_positions()
return HttpResponseRedirect(self.get_success_url())
else:
return self.render_to_response(self.get_context_data(form=form))
@staff_member_required
def download_xlsx(request, pk, queryset=None):
""" Download the full report in xlsx format
Why xlsx? Because there is no decent ods library for python and xls has limitations
queryset: predefined queryset to bypass filters
"""
import cStringIO as StringIO
from openpyxl.workbook import Workbook
from openpyxl.writer.excel import save_virtual_workbook
from openpyxl.cell import get_column_letter
import re
report = get_object_or_404(Report, pk=pk)
wb = Workbook()
ws = wb.worksheets[0]
ws.title = report.name
filename = re.sub(r'\W+', '', report.name) + '.xlsx'
i = 0
for field in report.displayfield_set.all():
cell = ws.cell(row=0, column=i)
cell.value = field.name
cell.style.font.bold = True
ws.column_dimensions[get_column_letter(i+1)].width = field.width
i += 1
objects_list, message = report_to_list(report, request.user, queryset=queryset)
for row in objects_list:
try:
ws.append(row)
except ValueError as e:
ws.append([e.message])
except:
ws.append(['Unknown Error'])
myfile = StringIO.StringIO()
myfile.write(save_virtual_workbook(wb))
response = HttpResponse(
myfile.getvalue(),
content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
response['Content-Disposition'] = 'attachment; filename=%s' % filename
response['Content-Length'] = myfile.tell()
return response
@staff_member_required
def ajax_add_star(request, pk):
""" Star or unstar report for user
"""
report = get_object_or_404(Report, pk=pk)
user = request.user
if user in report.starred.all():
added = False
report.starred.remove(request.user)
else:
added = True
report.starred.add(request.user)
return HttpResponse(added)
@staff_member_required
def create_copy(request, pk):
""" Copy a report including related fields """
report = get_object_or_404(Report, pk=pk)
new_report = duplicate(report, changes=(
('name', '{} (copy)'.format(report.name)),
('user_created', request.user),
('user_modified', request.user),
))
    # duplicate() does not copy related objects, so copy them over manually
for display in report.displayfield_set.all():
new_display = copy.copy(display)
new_display.pk = None
new_display.report = new_report
new_display.save()
for report_filter in report.filterfield_set.all():
new_filter = copy.copy(report_filter)
new_filter.pk = None
new_filter.report = new_report
new_filter.save()
return redirect(new_report)
@staff_member_required
def export_to_report(request):
""" Export objects (by ID and content type) to an existing or new report
    In effect this runs the report with its display fields. It ignores
    the report's filters and instead filters on the provided IDs. It can be
    selected as a global admin action.
"""
admin_url = request.GET.get('admin_url', '/')
ct = ContentType.objects.get_for_id(request.GET['ct'])
ids = request.GET['ids'].split(',')
number_objects = len(ids)
reports = Report.objects.filter(root_model=ct).order_by('-modified')
if 'download' in request.GET:
report = get_object_or_404(Report, pk=request.GET['download'])
queryset = ct.model_class().objects.filter(pk__in=ids)
return download_xlsx(request, report.id, queryset=queryset)
return render(request, 'report_builder/export_to_report.html', {
'object_list': reports,
'admin_url': admin_url,
'number_objects': number_objects,
'model': ct.model_class()._meta.verbose_name,
})
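
# Hedged sketch (not part of the original module): one way to expose export_to_report
# as a global admin action. The action function name and the reverse() URL name
# 'report_builder_export' are illustrative assumptions.
def export_selected_to_report_action(modeladmin, request, queryset):
    """ Redirect the admin changelist selection to export_to_report """
    from django.core.urlresolvers import reverse  # assumption: pre-2.0 Django import path
    ct = ContentType.objects.get_for_model(queryset.model)
    ids = ",".join(str(pk) for pk in queryset.values_list('pk', flat=True))
    return HttpResponseRedirect('%s?ct=%s&ids=%s&admin_url=%s' % (
        reverse('report_builder_export'), ct.pk, ids, request.get_full_path()))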
| bsd-3-clause | 3,142,864,098,546,205,700 | 41.327201 | 125 | 0.58955 | false |
skarlekar/chehara | objectpath/core/interpreter.py | 1 | 20142 | #!/usr/bin/env python
# This file is part of ObjectPath released under AGPL v3 license.
# Copyright (C) 2010-2014 Adrian Kalbarczyk
import sys, re
from .parser import parse
from objectpath.core import *
import objectpath.utils.colorify as color # pylint: disable=W0614
from objectpath.utils import flatten, filter_dict, timeutils, skip
from objectpath.utils.json_ext import py2JSON
from objectpath.core import ITER_TYPES, generator, chain
from objectpath.utils.debugger import Debugger
EPSILON=0.0000000000000001 #this is used in float comparison
EXPR_CACHE={}
# setting external modules to 0, thus enabling lazy loading. 0 ensures that Pythonic types are never matched.
# this way is efficient because if statement is fast and once loaded these variables are pointing to libraries.
ObjectId=generateID=calendar=escape=escapeDict=unescape=unescapeDict=0
class Tree(Debugger):
def __init__(self,obj,cfg=None):
if not cfg:
cfg={}
self.D=cfg.get("debug",False)
self.setData(obj)
self.current=self.node=None
if self.D: super(Tree, self).__init__()
def setData(self,obj):
if type(obj) in ITER_TYPES+[dict]:
self.data=obj
def compile(self,expr):
if expr in EXPR_CACHE:
return EXPR_CACHE[expr]
ret=EXPR_CACHE[expr]=parse(expr,self.D)
return ret
def execute(self,expr):
D=self.D
if D: self.start("Tree.execute")
TYPES=[str,int,float,bool,generator,chain]
try:
TYPES+=[long]
except NameError:
pass
# TODO change to yield?
def exe(node):
"""
node[0] - operator name
node[1:] - params
"""
if D: self.start("executing node '%s'", node)
type_node=type(node)
if node is None or type_node in TYPES:
return node
elif type_node is list:
return (exe(n) for n in node)
elif type_node is dict:
ret={}
for i in node.items():
ret[exe(i[0])]=exe(i[1])
return ret
op=node[0]
if op=="or":
if D: self.debug("%s or %s", node[1],node[2])
return exe(node[1]) or exe(node[2])
elif op=="and":
if D: self.debug("%s and %s", node[1],node[2])
return exe(node[1]) and exe(node[2])
elif op=="+":
if len(node)>2:
fst=exe(node[1])
snd=exe(node[2])
if None in (fst,snd):
return fst or snd
typefst=type(fst)
typesnd=type(snd)
if typefst is dict:
try:
fst.update(snd)
except Exception:
if type(snd) is not dict:
raise ProgrammingError("Can't add value of type %s to %s" % (color.bold(PY_TYPES_MAP.get(type(snd).__name__, type(snd).__name__)), color.bold("object")))
return fst
if typefst is list and typesnd is list:
if D: self.debug("both sides are lists, returning '%s'",fst+snd)
return fst+snd
if typefst in ITER_TYPES or typesnd in ITER_TYPES:
if typefst not in ITER_TYPES:
fst=[fst]
elif typesnd not in ITER_TYPES:
snd=[snd]
if D: self.debug("at least one side is generator and other is iterable, returning chain")
return chain(fst,snd)
if typefst in NUM_TYPES:
try:
return fst+snd
except Exception:
return fst+float(snd)
if typefst in STR_TYPES or typesnd in STR_TYPES:
if D: self.info("doing string comparison '%s' is '%s'",fst,snd)
if sys.version_info.major < 3:
if typefst is unicode:
fst=fst.encode("utf-8")
if typesnd is unicode:
snd=snd.encode("utf-8")
return str(fst)+str(snd)
try:
timeType=timeutils.datetime.time
if typefst is timeType and typesnd is timeType:
return timeutils.addTimes(fst,snd)
except Exception:
pass
if D: self.debug("standard addition, returning '%s'",fst+snd)
return fst + snd
else:
return exe(node[1])
elif op=="-":
if len(node)>2:
fst=exe(node[1])
snd=exe(node[2])
try:
return fst-snd
except Exception:
typefst=type(fst)
typesnd=type(snd)
timeType=timeutils.datetime.time
if typefst is timeType and typesnd is timeType:
return timeutils.subTimes(fst,snd)
else:
return - exe(node[1])
elif op=="*":
return exe(node[1]) * exe(node[2])
elif op=="%":
return exe(node[1]) % exe(node[2])
elif op=="/":
return exe(node[1]) / float(exe(node[2]))
elif op==">":
if D: self.debug("%s > %s", node[1],node[2])
return exe(node[1]) > exe(node[2])
elif op=="<":
return exe(node[1]) < exe(node[2])
elif op==">=":
return exe(node[1]) >= exe(node[2])
elif op=="<=":
return exe(node[1]) <= exe(node[2])
# TODO this algorithm produces 3 for 1<2<3 and should be true
# elif op in "<=>=":
# fst=exe(node[1])
# snd=exe(node[2])
# if op==">":
# return fst > snd and snd or False
# elif op=="<":
# return fst < snd and snd or False
# elif op==">=":
# return fst >= snd and snd or False
# elif op=="<=":
# return fst <= snd and snd or False
elif op=="not":
fst=exe(node[1])
if D: self.debug("doing not '%s'",fst)
return not fst
elif op=="in":
fst=exe(node[1])
snd=exe(node[2])
if D: self.debug("doing '%s' in '%s'",node[1],node[2])
if type(fst) in ITER_TYPES and type(snd) in ITER_TYPES:
return any(x in max(fst,snd,key=len) for x in min(fst,snd,key=len))
return exe(node[1]) in exe(node[2])
elif op=="not in":
fst=exe(node[1])
snd=exe(node[2])
if D: self.debug("doing '%s' not in '%s'",node[1],node[2])
if type(fst) in ITER_TYPES and type(snd) in ITER_TYPES:
return not any(x in max(fst,snd,key=len) for x in min(fst,snd,key=len))
return exe(node[1]) not in exe(node[2])
elif op in ("is","is not"):
if D: self.debug("found operator '%s'",op)
# try:
fst=exe(node[1])
# except Exception as e:
# if D: self.debug("NOT ERROR! Can't execute node[1] '%s', error: '%s'. Falling back to orginal value.",node[1],str(e))
# fst=node[1]
# try:
snd=exe(node[2])
# except Exception as e:
# if D: self.debug("NOT ERROR! Can't execute node[2] '%s', error: '%s'. Falling back to orginal value.",node[2],str(e))
# snd=node[2]
if op == "is" and fst == snd:
return True
# this doesn't work for 3 is not '3'
# if op == "is not" and fst != snd:
# return True
typefst=type(fst)
typesnd=type(snd)
if D: self.debug("type fst: '%s', type snd: '%s'",typefst,typesnd)
if typefst in STR_TYPES:
if D: self.info("doing string comparison '\"%s\" is \"%s\"'",fst,snd)
ret=fst==str(snd)
elif typefst is float:
if D: self.info("doing float comparison '%s is %s'",fst,snd)
ret=abs(fst-float(snd))<EPSILON
elif typefst is int:
if D: self.info("doing integer comparison '%s is %s'",fst,snd)
ret=fst==int(snd)
elif typefst is list and typesnd is list:
if D: self.info("doing array comparison '%s' is '%s'",fst,snd)
ret=fst==snd
elif typefst is dict and typesnd is dict:
if D: self.info("doing object comparison '%s' is '%s'",fst,snd)
ret=fst==snd
# else:
# try:
# global ObjectId
# if not ObjectId:
# from bson.objectid import ObjectId
# if typefst is ObjectId or typesnd is ObjectId:
# if D: self.info("doing MongoDB objectID comparison '%s' is '%s'",fst,snd)
# ret=str(fst)==str(snd)
# else:
# if D: self.info("doing standard comparison '%s' is '%s'",fst,snd)
# ret=fst is snd
# except Exception:
# pass
if op=="is not":
if D: self.info("'is not' found. Returning %s",not ret)
return not ret
else:
if D: self.info("returning '%s' is '%s'='%s'",fst,snd,ret)
return ret
elif op=="re":
return re.compile(exe(node[1]))
elif op=="matches":
return not not re.match(exe(node[1]), exe(node[2]))
# elif op=="(literal)":
# fstLetter=node[1][0]
# if fstLetter is "'":
# return node[1][1:-1]
# elif fstLetter.isdigit:
# return int(node[1])
elif op=="(root)": # this is $
return self.data
# elif op=="(node)":# this is !
# if D: self.debug("returning node %s",self.node)
# return self.node
elif op=="(current)": # this is @
if D: self.debug("returning current node %s", self.current)
return self.current
elif op=="name":
return node[1]
elif op==".":
fst=node[1]
if type(fst) is tuple:
fst=exe(fst)
typefst=type(fst)
if D: self.debug(color.op(".")+" left is '%s'", fst)
# try:
if node[2][0] == "*":
if D: self.end(color.op(".")+" returning '%s'", typefst in ITER_TYPES and fst or [fst])
return fst # typefst in ITER_TYPES and fst or [fst]
# except:
# pass
snd=exe(node[2])
if D: self.debug(color.op(".")+" right is '%s'",snd)
if typefst in ITER_TYPES:
if D: self.debug(color.op(".")+" filtering %s by %s",color.bold(fst),color.bold(snd))
if type(snd) in ITER_TYPES:
return filter_dict(fst, list(snd))
else:
# if D: self.debug(list(fst))
return (e[snd] for e in fst if type(e) is dict and snd in e)
try:
if D: self.end(color.op(".")+" returning '%s'",fst.get(snd))
return fst.get(snd)
except Exception:
if isinstance(fst,object):
try:
return fst.__getattribute__(snd)
except Exception:
pass
if D: self.end(color.op(".")+" returning '%s'", color.bold(fst))
return fst
elif op=="..":
fst=flatten(exe(node[1]))
if node[2][0]=="*":
if D: self.debug(color.op("..")+" returning '%s'", color.bold(fst))
return fst
# reduce objects to selected attributes
snd=exe(node[2])
if D: self.debug(color.op("..")+" finding all %s in %s", color.bold(snd), color.bold(fst))
if type(snd) in ITER_TYPES:
ret=filter_dict(fst, list(snd))
if D: self.debug(color.op("..")+" returning %s",color.bold(ret))
return ret
else:
ret=chain(*(type(x) in ITER_TYPES and x or [x] for x in (e[snd] for e in fst if snd in e)))
# print list(chain(*(type(x) in ITER_TYPES and x or [x] for x in (e[snd] for e in fst if snd in e))))
if D: self.debug(color.op("..")+" returning %s",color.bold(ret))
return ret
elif op=="[":
len_node=len(node)
# TODO move it to tree generation phase
				if len_node == 1: # empty list
if D: self.debug("returning an empty list")
return []
				if len_node == 2: # list - preserved to catch possible event of leaving it as '[' operator
if D: self.debug("doing list mapping")
return [exe(x) for x in node[1]]
				if len_node == 3: # selector used []
fst=exe(node[1])
# check against None
if not fst:
return fst
selector=node[2]
if D: self.debug("found '%s' selector. executing on %s", color.bold(selector),color.bold(fst))
selectorIsTuple=type(selector) is tuple
					if selectorIsTuple and selector[0] == "[":
nodeList=[]
nodeList_append=nodeList.append
for i in fst:
if D: self.debug("setting self.current to %s",color.bold(i))
self.current=i
nodeList_append(exe((selector[0],exe(selector[1]),exe(selector[2]))))
if D: self.debug("returning %s objects: %s", color.bold(len(nodeList)),color.bold(nodeList))
return nodeList
if selectorIsTuple and selector[0] == "(current)":
						if D: self.warning(color.bold("$.*[@]")+" is equivalent to "+color.bold("$.*")+"!")
return fst
if selectorIsTuple and selector[0] in SELECTOR_OPS:
if D: self.debug("found %s operator in selector", color.bold(selector[0]))
if type(fst) is dict:
fst=[fst]
# TODO move it to tree building phase
if type(selector[1]) is tuple and selector[1][0]=="name":
selector=(selector[0],selector[1][1],selector[2])
selector0=selector[0]
selector1=selector[1]
selector2=selector[2]
def exeSelector(fst):
for i in fst:
if D: self.debug("setting self.current to %s", color.bold(i))
self.current=i
if selector0=="fn":
yield exe(selector)
elif type(selector1) in STR_TYPES:
try:
if exe((selector0,i[selector1],selector2)):
yield i
if D: self.debug("appended")
if D: self.debug("discarded")
except Exception as e:
if D: self.debug("discarded, Exception: %s",color.bold(e))
else:
try:
# TODO optimize an event when @ is not used. exe(selector1) can be cached
if exe((selector0,exe(selector1),exe(selector2))):
yield i
if D: self.debug("appended")
if D: self.debug("discarded")
except Exception:
if D: self.debug("discarded")
if D: self.debug("returning '%s' objects: '%s'", color.bold(len(nodeList)), color.bold(nodeList))
return exeSelector(fst)
self.current=fst
snd=exe(node[2])
typefst=type(fst)
if typefst in [tuple]+ITER_TYPES+STR_TYPES:
typesnd=type(snd)
# nodes[N]
if typesnd in NUM_TYPES or typesnd is str and snd.isdigit():
n=int(snd)
if D: self.info("getting %sth element from '%s'", color.bold(n), color.bold(fst))
if typefst in (generator,chain):
if n>0:
return skip(fst,n)
elif n==0:
return next(fst)
else:
fst=list(fst)
else:
try:
return fst[n]
except (IndexError, TypeError):
return None
# $.*['string']==$.string
if type(snd) in STR_TYPES:
return exe((".",fst,snd))
else:
# $.*[@.string] - bad syntax, but allowed
return snd
else:
try:
if D: self.debug("returning %s", color.bold(fst[snd]))
return fst[snd]
except KeyError:
# CHECK - is it ok to do that or should it be ProgrammingError?
if D: self.debug("returning an empty list")
return []
raise ProgrammingError("Wrong usage of "+color.bold("[")+" operator")
elif op=="fn":
# Built-in functions
fnName=node[1]
args=None
try:
args=[exe(x) for x in node[2:]]
except IndexError:
if D: self.debug("NOT ERROR: can't map '%s' with '%s'",node[2:],exe)
# arithmetic
if fnName=="sum":
args=args[0]
if type(args) in NUM_TYPES:
return args
return sum((x for x in args if type(x) in NUM_TYPES))
elif fnName=="max":
args=args[0]
if type(args) in NUM_TYPES:
return args
return max((x for x in args if type(x) in NUM_TYPES))
elif fnName=="min":
args=args[0]
if type(args) in NUM_TYPES:
return args
return min((x for x in args if type(x) in NUM_TYPES))
elif fnName=="avg":
args=args[0]
if type(args) in NUM_TYPES:
return args
if type(args) not in ITER_TYPES:
raise Exception("Argument for avg() is not an array")
else:
args=list(args)
try:
return sum(args)/float(len(args))
except TypeError:
args=[x for x in args if type(x) in NUM_TYPES]
self.warning("Some items in array were ommited")
return sum(args)/float(len(args))
elif fnName=="round":
return round(*args)
# casting
elif fnName=="int":
return int(args[0])
elif fnName=="float":
return float(args[0])
elif fnName=="str":
return str(py2JSON(args[0]))
elif fnName in ("list","array"):
try:
a=args[0]
except IndexError:
return []
targs=type(a)
if targs is timeutils.datetime.datetime:
return timeutils.date2list(a)+timeutils.time2list(a)
if targs is timeutils.datetime.date:
return timeutils.date2list(a)
if targs is timeutils.datetime.time:
return timeutils.time2list(a)
return list(a)
# string
elif fnName=="upper":
return args[0].upper()
elif fnName=="lower":
return args[0].lower()
elif fnName=="capitalize":
return args[0].capitalize()
elif fnName=="title":
return args[0].title()
elif fnName=="split":
return args[0].split(*args[1:])
elif fnName=="slice":
if args and type(args[1]) not in ITER_TYPES:
raise ExecutionError("Wrong usage of slice(STRING, ARRAY). Second argument is not an array but %s."%color.bold(type(args[1]).__name__))
try:
pos=list(args[1])
if type(pos[0]) in ITER_TYPES:
if D: self.debug("run slice() for a list of slicers")
return (args[0][x[0]:x[1]] for x in pos)
return args[0][pos[0]:pos[1]]
except IndexError:
if len(args)!=2:
raise ProgrammingError("Wrong usage of slice(STRING, ARRAY). Provided %s argument, should be exactly 2."%len(args))
elif fnName=="escape":
global escape,escapeDict
if not escape:
from objectpath.utils import escape, escapeDict
return escape(args[0],escapeDict)
elif fnName=="unescape":
global unescape,unescapeDict
if not unescape:
from objectpath.utils import unescape, unescapeDict
return unescape(args[0],unescapeDict)
elif fnName=="replace":
if sys.version_info.major < 3 and type(args[0]) is unicode:
args[0]=args[0].encode("utf8")
return str.replace(args[0],args[1],args[2])
# TODO this should be supported by /regex/
# elif fnName=="REsub":
# return re.sub(args[1],args[2],args[0])
elif fnName=="sort":
if len(args)>1:
key=args[1]
a={"key":lambda x: x.get(key, 0)}
else:
a={}
args=args[0]
if D: self.debug("doing sort on '%s'",args)
try:
return sorted(args,**a)
except TypeError:
return args
elif fnName=="reverse":
args=args[0]
try:
args.reverse()
return args
except TypeError:
return args
elif fnName=="map":
return map(lambda x: exe(("fn",args[0],x)), args[1])
elif fnName in ("count","len"):
args=args[0]
if args in (True,False,None):
return args
if type(args) in ITER_TYPES:
return len(list(args))
return len(args)
elif fnName=="join":
try:
joiner=args[1]
except Exception:
joiner=""
try:
return joiner.join(args[0])
except TypeError:
try:
return joiner.join(map(str,args[0]))
except Exception:
return args[0]
# time
elif fnName in ("now","age","time","date","dateTime"):
if fnName=="now":
return timeutils.now()
if fnName=="date":
return timeutils.date(args)
if fnName=="time":
return timeutils.time(args)
if fnName=="dateTime":
return timeutils.dateTime(args)
# TODO move lang to localize() entirely!
if fnName=="age":
a={}
if len(args)>1:
a["reference"]=args[1]
if len(args)>2:
a["lang"]=args[2]
return list(timeutils.age(args[0],**a))
elif fnName=="toMillis":
args=args[0]
if args.utcoffset() is not None:
args=args-args.utcoffset() # pylint: disable=E1103
global calendar
if not calendar:
import calendar
return int(calendar.timegm(args.timetuple()) * 1000 + args.microsecond / 1000)
elif fnName=="localize":
if type(args[0]) is timeutils.datetime.datetime:
return timeutils.UTC2local(*args)
# polygons
elif fnName=="area":
def segments(p):
p=list(map(lambda x: x[0:2],p))
return zip(p, p[1:] + [p[0]])
return 0.5 * abs(sum(x0*y1 - x1*y0
for ((x0, y0), (x1, y1)) in segments(args[0])))
# misc
elif fnName=="keys":
try:
return list(args[0].keys())
except AttributeError:
raise ExecutionError("Argument is not "+color.bold("object")+" but %s in keys()"%color.bold(type(args[0]).__name__))
elif fnName=="type":
ret=type(args[0])
if ret in ITER_TYPES:
return "array"
if ret is dict:
return "object"
return ret.__name__
else:
raise ProgrammingError("Function "+color.bold(fnName)+" does not exist.")
else:
return node
D=self.D
if type(expr) in STR_TYPES:
tree=self.compile(expr)
		elif type(expr) not in (tuple,list,dict):
			return expr
		else:
			# assume an already compiled expression tree was passed in
			tree=expr
ret=exe(tree)
if D: self.end("Tree.execute with: '%s'", ret)
return ret
def __str__(self):
return "TreeObject()"
def __repr__(self):
return self.__str__()
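
# Hedged usage sketch (not part of the original module); the sample document and
# query below are illustrative only.
if __name__ == "__main__":
	_doc = {"store": {"book": [{"title": "A", "price": 8}, {"title": "B", "price": 12}]}}
	_tree = Tree(_doc)
	# titles of books priced under 10; execute() returns a generator for selector queries
	print(list(_tree.execute("$.store.book[@.price < 10].title")))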
| mit | -2,233,151,904,477,407,200 | 31.698052 | 161 | 0.59577 | false |
sunlightlabs/sunlight-rfid-doorman | acl.py | 1 | 3455 | #!/home/pi/.virtualenvs/sunlight_rfid_doorman/bin/python
import pickle
import datetime
import time
import itertools
from functools import wraps
import errno
import os
import signal
import redis
import gspread
from settings import *
ACL_KEY = 'sunlight-doorman-acl'
LOG_KEY = 'sunlight-doorman-log'
class TimeoutError(Exception):
pass
def timeout(seconds=10, error_message=os.strerror(errno.ETIME)):
def decorator(func):
def _handle_timeout(signum, frame):
raise TimeoutError(error_message)
def wrapper(*args, **kwargs):
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(seconds)
try:
result = func(*args, **kwargs)
finally:
signal.alarm(0)
return result
return wraps(func)(wrapper)
return decorator
@timeout(10)
def refresh_access_control_list(gc=None):
if gc is None:
gc = gspread.login(SPREADSHEET_USER, SPREADSHEET_PASSWORD)
sh = gc.open(SPREADSHEET_NAME)
worksheet = sh.worksheet(SPREADSHEET_WORKSHEET)
key_cells = worksheet.col_values(1)
email_cells = worksheet.col_values(2)
active_cells = worksheet.col_values(3)
acl = {}
for (i, (key, email, active)) in enumerate(itertools.izip(key_cells, email_cells, active_cells)):
if i==0:
continue
if active.upper().strip()=='Y':
acl[key.strip()] = email.strip()
r = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=0)
r.set(ACL_KEY, pickle.dumps(acl))
def get_access_control_list():
r = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=0)
if not r.exists(ACL_KEY):
return False
else:
return pickle.loads(r.get(ACL_KEY))
def log(status):
r = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=0)
r.rpush(LOG_KEY, pickle.dumps(status))
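
# Hedged example (not part of the original module) of how a badge-reader loop might
# consult the cached ACL and record a scan; the tag_id argument and the 'DENIED'
# placeholder are illustrative assumptions.
def is_authorized(tag_id):
    acl = get_access_control_list()
    if acl is False:
        # cache is cold -- pull the spreadsheet once, then re-read
        refresh_access_control_list()
        acl = get_access_control_list() or {}
    email = acl.get(tag_id)
    # store_log() expects (timestamp, value, value) triples
    log((time.time(), tag_id, email if email else 'DENIED'))
    return email is not None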
def _log_worksheet_name(timestamp):
dt = datetime.datetime.fromtimestamp(float(timestamp))
return 'log - %d/%d' % (dt.month, dt.year)
@timeout(30)
def store_log(gc=None):
if gc is None:
gc = gspread.login(SPREADSHEET_USER, SPREADSHEET_PASSWORD)
# open spreadsheet
ss = gc.open(SPREADSHEET_NAME)
# load log entries out of redis
log_items = []
r = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=0)
while True:
log_item = r.lpop(LOG_KEY)
if log_item is None:
break
log_items.append(pickle.loads(log_item))
# assemble data by month
log_worksheets = {}
for l in log_items:
timestamp = l[0]
worksheet_name = _log_worksheet_name(timestamp)
if not log_worksheets.has_key(worksheet_name):
log_worksheets[worksheet_name] = {'log_items': []}
log_worksheets[worksheet_name]['log_items'].append(l)
# store log entries
for lw in log_worksheets:
# create monthly worksheets as necessary
try:
ws = ss.worksheet(lw)
ws_offset = len(ws.col_values(1)) + 1
except:
ws = ss.add_worksheet(title=lw, rows="10000", cols="3")
ws_offset = 1
# store log items
cell_list = ws.range('A%(begin)d:C%(end)d' % {'begin': ws_offset, 'end': ws_offset + len(log_worksheets[lw]['log_items']) - 1})
for (i, log_item) in enumerate(log_worksheets[lw]['log_items']):
cell_list[(3*i) + 0].value = datetime.datetime.fromtimestamp(float(log_item[0])).isoformat()
cell_list[(3*i) + 1].value = log_item[1]
cell_list[(3*i) + 2].value = log_item[2]
ws.update_cells(cell_list)
if __name__ == '__main__':
gc = gspread.login(SPREADSHEET_USER, SPREADSHEET_PASSWORD)
refresh_access_control_list(gc)
store_log(gc)
| mit | 3,621,352,978,966,756,400 | 25.992188 | 129 | 0.675543 | false |
adamfisk/littleshoot-client | server/appengine/customRegistration/forms.py | 1 | 3346 | # -*- coding: utf-8 -*-
from django import forms
from django.contrib.auth.models import User
from django.core.files.uploadedfile import UploadedFile
from django.utils.translation import ugettext_lazy as _, ugettext as __
#from myapp.models import Person, File, Contract
from ragendja.auth.models import UserTraits
from ragendja.forms import FormWithSets, FormSetField
from registration.forms import RegistrationForm, RegistrationFormUniqueEmail
from registration.models import RegistrationProfile
import logging
class UserRegistrationForm(forms.ModelForm):
logging.info("Creating UserRegistrationForm")
username = forms.RegexField(regex=r'^\w+$', max_length=30,
label=_(u'Username'))
email = forms.EmailField(widget=forms.TextInput(attrs=dict(maxlength=75)),
label=_(u'Email address'))
password1 = forms.CharField(widget=forms.PasswordInput(render_value=False),
label=_(u'Password'))
password2 = forms.CharField(widget=forms.PasswordInput(render_value=False),
label=_(u'Password (again)'))
def clean_username(self):
"""
Validate that the username is alphanumeric and is not already
in use.
"""
user = User.get_by_key_name("key_"+self.cleaned_data['username'].lower())
if user and user.is_active:
raise forms.ValidationError(__(u'This username is already taken. Please choose another.'))
return self.cleaned_data['username']
def clean(self):
"""
Verifiy that the values entered into the two password fields
match. Note that an error here will end up in
``non_field_errors()`` because it doesn't apply to a single
field.
"""
if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:
if self.cleaned_data['password1'] != self.cleaned_data['password2']:
raise forms.ValidationError(__(u'You must type the same password each time'))
return self.cleaned_data
def save(self, domain_override=""):
"""
Create the new ``User`` and ``RegistrationProfile``, and
returns the ``User``.
This is essentially a light wrapper around
``RegistrationProfile.objects.create_inactive_user()``,
feeding it the form data and a profile callback (see the
documentation on ``create_inactive_user()`` for details) if
supplied.
"""
new_user = RegistrationProfile.objects.create_inactive_user(
username=self.cleaned_data['username'],
password=self.cleaned_data['password1'],
email=self.cleaned_data['email'],
domain_override=domain_override)
self.instance = new_user
return super(UserRegistrationForm, self).save()
def clean_email(self):
"""
Validate that the supplied email address is unique for the
site.
"""
email = self.cleaned_data['email'].lower()
if User.all().filter('email =', email).filter(
'is_active =', True).count(1):
raise forms.ValidationError(__(u'This email address is already in use. Please supply a different email address.'))
return email
class Meta:
model = User
exclude = UserTraits.properties().keys()
| gpl-2.0 | 5,029,836,949,590,700,000 | 38.364706 | 126 | 0.646145 | false |
akarol/cfme_tests | cfme/scripting/ipyshell.py | 1 | 1660 | # -*- coding: utf-8 -*-
import click
import sys
from IPython.terminal.interactiveshell import TerminalInteractiveShell
IMPORTS = [
'from cfme.utils import conf',
'from fixtures.pytest_store import store',
'from cfme.utils.appliance.implementations.ui import navigate_to',
'from cfme.utils import providers',
]
@click.command(help="Open an IPython shell")
@click.option('--no-quickstart', is_flag=True)
def main(no_quickstart):
"""Use quickstart to ensure we have correct env, then execute imports in ipython and done."""
if not no_quickstart:
from . import quickstart
quickstart.main(quickstart.parser.parse_args(['--mk-virtualenv', sys.prefix]))
print('Welcome to IPython designed for running CFME QE code.')
ipython = TerminalInteractiveShell.instance()
for code_import in IMPORTS:
print('> {}'.format(code_import))
ipython.run_cell(code_import)
from cfme.utils.path import conf_path
custom_import_path = conf_path.join('miq_python_startup.py')
if custom_import_path.exists():
with open(custom_import_path.strpath, 'r') as custom_import_file:
custom_import_code = custom_import_file.read()
print('Importing custom code:\n{}'.format(custom_import_code.strip()))
ipython.run_cell(custom_import_code)
else:
print(
'You can create your own python file with imports you use frequently. '
'Just create a conf/miq_python_startup.py file in your repo. '
'This file can contain arbitrary python code that is executed in this context.')
ipython.interact()
if __name__ == '__main__':
main()
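
# Illustrative example (an assumption, not part of this module) of what a
# conf/miq_python_startup.py could contain -- any plain Python is executed in the
# shell's namespace at startup:
#
#     import re, json
#     from cfme.utils import conf as my_conf
#     print('custom startup imports loaded')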
| gpl-2.0 | 3,850,137,921,904,999,000 | 37.604651 | 97 | 0.677711 | false |
emory-libraries/eulexistdb | eulexistdb/__init__.py | 1 | 1348 | # file eulexistdb/__init__.py
#
# Copyright 2010,2011 Emory University Libraries
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interact with `eXist-db`_ XML databases.
This package provides classes to ease interaction with eXist XML databases.
It contains the following modules:
* :mod:`eulexistdb.db` -- Connect to the database and query
* :mod:`eulexistdb.query` -- Query :class:`~eulxml.xmlmap.XmlObject`
models from eXist with semantics like a Django_ QuerySet
.. _eXist-db: http://exist.sourceforge.net/
.. _Django: http://www.djangoproject.com/
"""
__version_info__ = (0, 21, 1, None)
# Dot-connect all but the last. Last is dash-connected if not None.
__version__ = '.'.join([str(i) for i in __version_info__[:-1]])
if __version_info__[-1] is not None:
__version__ += ('-%s' % (__version_info__[-1],))
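# For example (illustrative): (0, 21, 1, None) -> '0.21.1', while (0, 22, 0, 'dev') -> '0.22.0-dev'.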
| apache-2.0 | 3,653,169,226,399,467,500 | 36.444444 | 76 | 0.701039 | false |
cnchanghai/ctest | prtmon.py | 1 | 3402 | #!/usr/bin/env python
#coding:utf-8
import requests
import time
import sys
import os
from bs4 import BeautifulSoup as bss
import threading
import smtplib
from email.mime.text import MIMEText
from email.header import Header
mylock = threading.Lock()
global prtnum
prtnum=dict()
## Define the 2420 printer class
class th24(threading.Thread):
def __init__(self,ip):
threading.Thread.__init__(self)
self.ip = ip
def run(self):
global prtnum
try:
url = 'http://' + self.ip + '/hp/device/this.LCDispatcher?nav=hp.Usage'
r = requests.get(url, verify=False)
content = r.content
soup = bss(content, 'lxml')
taa = soup.findAll('table', 'hpTable')[-1]
tab = taa.findAll('span', 'hpPageText')[-1].text
mylock.acquire()
prtnum[self.ip]=tab
mylock.release()
except:
mylock.acquire()
prtnum[self.ip] = 0
mylock.release()
## Define the 3015 printer class
class th35(threading.Thread):
def __init__(self,ip):
threading.Thread.__init__(self)
self.ip = ip
def run(self):
global prtnum
try:
url = 'https://' + self.ip + '/hp/device/this.LCDispatcher?nav=hp.Usage'
r = requests.get(url, verify=False)
content = r.content
soup = bss(content, 'html5lib')
taa = soup.findAll('table', id='tbl-1847')[-1]
tab = taa.findAll('div', 'hpPageText')[-1].text
mylock.acquire()
prtnum[self.ip] = tab
mylock.release()
except:
mylock.acquire()
prtnum[self.ip] = 0
mylock.release()
def sendmms():
global prtnum
dt=time.strftime('%Y-%m-%d',time.localtime(time.time()))
sender = '[email protected]'
receivers = ['[email protected]']
day = dt.split('-')[-1]
if int(day)==5:
receivers.append('[email protected]')
receivers.append('[email protected]')
    f=open(mulu+'log/'+dt+'.txt','w')  # open the log file
    mail_msg = '<h3>'+dt+' printer usage report</h3><br/><table style="width:360px;" border="1" cellspacing="0" cellpadding="0">'
for(k ,v) in prtnum.items():
kvs='<tr style="heigh:60px;"><td style="width:180px;">'+str(k)+'</td><td style="width:180px;">'+str(v)+'</td></tr>'
mail_msg+=kvs
        f.write(str(k)+' '+str(v)+'\n')  # write to the log file
f.close()
mail_msg+='</table>'
message = MIMEText(mail_msg, 'html', 'utf-8')
message['From'] = Header("unreply", 'utf-8')
    subject = 'Daily printer monitoring'
message['Subject'] = Header(subject, 'utf-8')
try:
smtpObj = smtplib.SMTP('10.65.1.134')
smtpObj.sendmail(sender, receivers, message.as_string())
print "邮件发送成功"
except smtplib.SMTPException:
print "Error: 无法发送邮件"
if __name__=="__main__":
thlist2=[]
thlist3=[]
mulu=os.path.split(sys.argv[0])[0]+'/'
ip2=open(mulu+'ip2.txt','r')
ip3=open(mulu+'ip3.txt','r')
for ip in ip2:
th=th24(ip.strip())
th.start()
thlist2.append(th)
for ip in ip3:
th=th35(ip.strip())
th.start()
thlist3.append(th)
ip2.close()
ip3.close()
for th in thlist2:
th.join()
for th in thlist3:
th.join()
sendmms()
| apache-2.0 | 2,923,097,582,591,195,600 | 29.054545 | 123 | 0.554749 | false |
tomsilver/nupic | nupic/encoders/logenc.py | 1 | 10933 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import math
import numpy
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.data.fieldmeta import FieldMetaType
from nupic.encoders.base import Encoder, EncoderResult
from nupic.encoders.scalar import ScalarEncoder
class LogEncoder(Encoder):
"""
This class wraps the ScalarEncoder class.
A Log encoder represents a floating point value on a logarithmic scale.
valueToEncode = log10(input)
w -- number of bits to set in output
minval -- minimum input value. must be greater than 0. Lower values are
reset to this value
maxval -- maximum input value (input is strictly less if periodic == True)
periodic -- If true, then the input value "wraps around" such that minval =
              maxval. For a periodic value, the input must be strictly less than
maxval, otherwise maxval is a true upper bound.
Exactly one of n, radius, resolution must be set. "0" is a special
value that means "not set".
n -- number of bits in the representation (must be > w)
radius -- inputs separated by more than this distance in log space will have
non-overlapping representations
resolution -- The minimum change in scaled value needed to produce a change
in encoding. This should be specified in log space. For
example, the scaled values 10 and 11 will be distinguishable
in the output. In terms of the original input values, this
means 10^1 (1) and 10^1.1 (1.25) will be distinguishable.
name -- an optional string which will become part of the description
verbosity -- level of debugging output you want the encoder to provide.
clipInput -- if true, non-periodic inputs smaller than minval or greater
than maxval will be clipped to minval/maxval
forced -- (default False), if True, skip some safety checks
"""
def __init__(self,
w=5,
minval=1e-07,
maxval=10000,
periodic=False,
n=0,
radius=0,
resolution=0,
name="log",
verbosity=0,
clipInput=True,
forced=False):
# Lower bound for log encoding near machine precision limit
lowLimit = 1e-07
# Limit minval as log10(0) is undefined.
if minval < lowLimit:
minval = lowLimit
# Check that minval is still lower than maxval
if not minval < maxval:
raise ValueError("Max val must be larger than min val or the lower limit "
"for this encoder %.7f" % lowLimit)
self.encoders = None
self.verbosity = verbosity
# Scale values for calculations within the class
self.minScaledValue = math.log10(minval)
self.maxScaledValue = math.log10(maxval)
if not self.maxScaledValue > self.minScaledValue:
raise ValueError("Max val must be larger, in log space, than min val.")
self.clipInput = clipInput
self.minval = minval
self.maxval = maxval
self.encoder = ScalarEncoder(w=w,
minval=self.minScaledValue,
maxval=self.maxScaledValue,
periodic=False,
n=n,
radius=radius,
resolution=resolution,
verbosity=self.verbosity,
clipInput=self.clipInput,
forced=forced)
self.width = self.encoder.getWidth()
self.description = [(name, 0)]
self.name = name
# This list is created by getBucketValues() the first time it is called,
# and re-created whenever our buckets would be re-arranged.
self._bucketValues = None
############################################################################
def getWidth(self):
return self.width
############################################################################
def getDescription(self):
return self.description
############################################################################
def getDecoderOutputFieldTypes(self):
"""
Encoder class virtual method override
"""
return (FieldMetaType.float, )
############################################################################
def _getScaledValue(self, inpt):
"""
Convert the input, which is in normal space, into log space
"""
if inpt == SENTINEL_VALUE_FOR_MISSING_DATA:
return None
else:
val = inpt
if val < self.minval:
val = self.minval
elif val > self.maxval:
val = self.maxval
scaledVal = math.log10(val)
return scaledVal
############################################################################
def getBucketIndices(self, inpt):
"""
See the function description in base.py
"""
# Get the scaled value
scaledVal = self._getScaledValue(inpt)
if scaledVal is None:
return [None]
else:
return self.encoder.getBucketIndices(scaledVal)
############################################################################
def encodeIntoArray(self, inpt, output):
"""
See the function description in base.py
"""
# Get the scaled value
scaledVal = self._getScaledValue(inpt)
if scaledVal is None:
output[0:] = 0
else:
self.encoder.encodeIntoArray(scaledVal, output)
if self.verbosity >= 2:
print "input:", inpt, "scaledVal:", scaledVal, "output:", output
print "decoded:", self.decodedToStr(self.decode(output))
############################################################################
def decode(self, encoded, parentFieldName=''):
"""
See the function description in base.py
"""
# Get the scalar values from the underlying scalar encoder
(fieldsDict, fieldNames) = self.encoder.decode(encoded)
if len(fieldsDict) == 0:
return (fieldsDict, fieldNames)
# Expect only 1 field
assert(len(fieldsDict) == 1)
# Convert each range into normal space
(inRanges, inDesc) = fieldsDict.values()[0]
outRanges = []
for (minV, maxV) in inRanges:
outRanges.append((math.pow(10, minV),
math.pow(10, maxV)))
# Generate a text description of the ranges
desc = ""
numRanges = len(outRanges)
for i in xrange(numRanges):
if outRanges[i][0] != outRanges[i][1]:
desc += "%.2f-%.2f" % (outRanges[i][0], outRanges[i][1])
else:
desc += "%.2f" % (outRanges[i][0])
if i < numRanges-1:
desc += ", "
# Return result
if parentFieldName != '':
fieldName = "%s.%s" % (parentFieldName, self.name)
else:
fieldName = self.name
return ({fieldName: (outRanges, desc)}, [fieldName])
############################################################################
def getBucketValues(self):
"""
See the function description in base.py
"""
# Need to re-create?
if self._bucketValues is None:
scaledValues = self.encoder.getBucketValues()
self._bucketValues = []
for scaledValue in scaledValues:
value = math.pow(10, scaledValue)
self._bucketValues.append(value)
return self._bucketValues
############################################################################
def getBucketInfo(self, buckets):
"""
See the function description in base.py
"""
scaledResult = self.encoder.getBucketInfo(buckets)[0]
scaledValue = scaledResult.value
value = math.pow(10, scaledValue)
return [EncoderResult(value=value, scalar=value,
encoding = scaledResult.encoding)]
############################################################################
def topDownCompute(self, encoded):
"""
See the function description in base.py
"""
scaledResult = self.encoder.topDownCompute(encoded)[0]
scaledValue = scaledResult.value
value = math.pow(10, scaledValue)
return EncoderResult(value=value, scalar=value,
encoding = scaledResult.encoding)
############################################################################
def closenessScores(self, expValues, actValues, fractional=True):
"""
See the function description in base.py
"""
# Compute the percent error in log space
if expValues[0] > 0:
expValue = math.log10(expValues[0])
else:
expValue = self.minScaledValue
if actValues [0] > 0:
actValue = math.log10(actValues[0])
else:
actValue = self.minScaledValue
if fractional:
err = abs(expValue - actValue)
pctErr = err / (self.maxScaledValue - self.minScaledValue)
pctErr = min(1.0, pctErr)
closeness = 1.0 - pctErr
else:
err = abs(expValue - actValue)
closeness = err
#print "log::", "expValue:", expValues[0], "actValue:", actValues[0], \
# "closeness", closeness
#import pdb; pdb.set_trace()
return numpy.array([closeness])
@classmethod
def read(cls, proto):
encoder = object.__new__(cls)
encoder.verbosity = proto.verbosity
encoder.minScaledValue = proto.minScaledValue
encoder.maxScaledValue = proto.maxScaledValue
encoder.clipInput = proto.clipInput
encoder.minval = proto.minval
encoder.maxval = proto.maxval
encoder.encoder = ScalarEncoder.read(proto.encoder)
encoder.name = proto.name
encoder.width = encoder.encoder.getWidth()
encoder.description = [(encoder.name, 0)]
encoder._bucketValues = None
return encoder
def write(self, proto):
proto.verbosity = self.verbosity
proto.minScaledValue = self.minScaledValue
proto.maxScaledValue = self.maxScaledValue
proto.clipInput = self.clipInput
proto.minval = self.minval
proto.maxval = self.maxval
self.encoder.write(proto.encoder)
proto.name = self.name
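
# Hedged usage sketch (not part of the original module); the encoder parameters and
# the sample value below are illustrative only.
if __name__ == "__main__":
  _enc = LogEncoder(w=5, resolution=0.1, minval=1.0, maxval=10000, forced=True)
  _out = numpy.zeros(_enc.getWidth(), dtype="uint8")
  _enc.encodeIntoArray(100.0, _out)  # 100 sits at log10(100) == 2 on the scaled axis
  print "bucket info near 100:", _enc.getBucketInfo(_enc.getBucketIndices(100.0))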
| gpl-3.0 | -7,972,900,606,985,096,000 | 32.64 | 80 | 0.58017 | false |
jtrobec/pants | src/python/pants/core_tasks/targets_help.py | 1 | 1872 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from colors import blue, cyan, green
from pants.help.build_dictionary_info_extracter import BuildDictionaryInfoExtracter
from pants.task.console_task import ConsoleTask
class TargetsHelp(ConsoleTask):
"""List available target types."""
@classmethod
def register_options(cls, register):
super(TargetsHelp, cls).register_options(register)
register('--details', help='Show details about this target type.')
def console_output(self, targets):
buildfile_aliases = self.context.build_file_parser.registered_aliases()
extracter = BuildDictionaryInfoExtracter(buildfile_aliases)
alias = self.get_options().details
if alias:
target_types = buildfile_aliases.target_types_by_alias.get(alias)
if target_types:
tti = next(x for x in extracter.get_target_type_info() if x.build_file_alias == alias)
yield blue('\n{}\n'.format(tti.description))
yield blue('{}('.format(alias))
for arg in extracter.get_target_args(list(target_types)[0]):
default = green('(default: {})'.format(arg.default) if arg.has_default else '')
yield '{:<30} {}'.format(
cyan(' {} = ...,'.format(arg.name)),
' {}{}{}'.format(arg.description, ' ' if arg.description else '', default))
yield blue(')')
else:
yield 'No such target type: {}'.format(alias)
else:
for tti in extracter.get_target_type_info():
description = tti.description or '<Add description>'
yield '{} {}'.format(cyan('{:>30}:'.format(tti.build_file_alias)), description)
| apache-2.0 | -4,069,224,662,935,813,000 | 39.695652 | 94 | 0.663996 | false |
ua-snap/downscale | snap_scripts/data_requests/extract_profiles_keith_dot_precip_cru_ts40.py | 1 | 5743 | # extraction for Keith Cunningham -- Glitter Gulch DOT
def make_gdf():
df = pd.DataFrame({'name':['Long Lake','Glitter Gulch'],
'lat':[61.8,63.76],'lon':[-148.2,-148.9]})
df['geometry'] = df.apply(lambda x: Point(x.lon, x.lat), axis=1)
shp = gpd.GeoDataFrame( df, crs={'init':'epsg:4326'}, geometry='geometry')
return shp.to_crs( epsg=3338 )
def list_data( base_dir ):
files = glob.glob( os.path.join( cur_path, '*.tif' ) )
df = pd.DataFrame([ os.path.basename(fn).split('.')[0].split('_')[-2:] for fn in files ], columns=['month','year'])
df['fn'] = files
return df.sort_values(['year','month']).reset_index(drop=True)
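
# rasterize() below calls transform_from_latlon(), which is not defined or imported in
# this script; this is a sketch of the usual Affine-based helper it is assumed to be,
# valid for 1d, evenly spaced latitude / longitude arrays.
def transform_from_latlon( lat, lon ):
	''' build an affine transform from 1d lat / lon arrays (assumed evenly spaced) '''
	from affine import Affine
	lat = np.asarray( lat )
	lon = np.asarray( lon )
	trans = Affine.translation( lon[0], lat[0] )
	scale = Affine.scale( lon[1] - lon[0], lat[1] - lat[0] )
	return trans * scale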
def rasterize( shapes, coords, latitude='lat', longitude='lon', fill=None, **kwargs ):
'''
Rasterize a list of (geometry, fill_value) tuples onto the given
xarray coordinates. This only works for 1d latitude and longitude
arrays.
ARGUMENTS:
----------
shapes = [list] of tuples of (shapely.geom, fill_value)
coords = [dict] of named 1d latitude and longitude arrays.
latitude = [str] name of latitude key. default:'latitude'
longitude = [str] name of longitude key. default:'longitude'
fill = fill_value
RETURNS:
--------
	numpy.ndarray (the xarray.DataArray wrapping below is commented out)
'''
from rasterio import features
import xarray as xr
if fill == None:
fill = np.nan
transform = transform_from_latlon( coords[ latitude ], coords[ longitude ] )
out_shape = ( len( coords[ latitude ] ), len( coords[ longitude ] ) )
raster = features.rasterize( shapes, out_shape=out_shape,
fill=fill, transform=transform,
dtype=float, **kwargs )
# spatial_coords = {latitude: coords[latitude], longitude: coords[longitude]}
# return xr.DataArray(raster, coords=spatial_coords, dims=(latitude, longitude))
return raster
def make_mask( fn ):
with rasterio.open(fn) as rst:
meta = rst.meta.copy()
arr = np.empty_like(rst.read(1))
# build the shape data we need in EPSG:3338
shp = make_gdf()
pts = shp.geometry.tolist()
pts = [ (i,count+1) for count,i in enumerate(pts) ]
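	# NOTE: when run as a script, the __main__ block's "from rasterio.features import rasterize"
	# rebinds the global name, so rasterio's rasterize() is what this call actually resolves to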
return rasterize( pts, fill=0, out=arr, transform=meta['transform'], all_touched=True, dtype='float32' )
def open_raster( fn, band=1 ):
with rasterio.open(fn) as rst:
arr = rst.read(band)
return arr
def extract_values( files, mask ):
pool = mp.Pool(64)
f = partial(open_raster, band=1)
arr = np.array(pool.map( f, files ))
pool.close()
pool.join()
pool = None; del pool
mask_vals = np.unique(mask[mask > 0])
out = dict()
for mask_val in mask_vals:
ind = zip(*np.where(mask == mask_val))
for i,j in ind:
out.update({ mask_val:arr[:,i,j] })
del arr
return out
if __name__ == '__main__':
import os, glob, rasterio, itertools
import pandas as pd
import numpy as np
import rasterio
from rasterio.features import rasterize
from shapely.geometry import Point
import geopandas as gpd
from functools import partial
import multiprocessing as mp
base_dir = '/workspace/Shared/Tech_Projects/DeltaDownscaling/project_data/downscaled'
output_dir = '/workspace/Shared/Tech_Projects/DeltaDownscaling/project_data/project_data_delivery/Keith_DOT_extractions'
template_fn = '/workspace/Shared/Tech_Projects/DeltaDownscaling/project_data/downscaled/5ModelAvg/historical/tasmin/tasmin_mean_C_ar5_5ModelAvg_historical_08_2004.tif'
models = ['CRU-TS40']
scenarios = ['historical']
variables = ['pr']
# make mask
mask = make_mask( template_fn )
output_dict = dict()
for model, scenario, variable in itertools.product(models, scenarios, variables):
cur_path = os.path.join(base_dir,model,scenario,variable)
files_df = list_data( cur_path ) # these are sorted
decade_grouper = files_df.apply(lambda x:str(x.year)[:3], axis=1)
file_groups = [ j.fn.tolist() for i,j in files_df.groupby( decade_grouper ) ]
out = [ extract_values( files, mask ) for files in file_groups ]
out_df = pd.concat([ pd.DataFrame(i) for i in out ]) # stack the file groups chronologically
out_key = '{}_{}_{}'.format( model, scenario, variable )
output_dict[ out_key ] = out_df
print( 'completed:{}'.format(out_key) )
# # future index
# future_dates = pd.date_range('2006-01','2101-01',freq='M')
# future_dates = [ [str(i.year), str(i.month)] for i in future_dates ]
# future_dates = [ '-'.join([y,'0'+m]) if len(m) == 1 else '-'.join([y,m]) for y,m in future_dates ]
# historical index -- data needs slicing...
historical_dates = pd.date_range('1901-01','2016-01',freq='M')
historical_dates = [[str(i.month), str(i.year)] for i in historical_dates ]
historical_dates = [ '-'.join([y,'0'+m]) if len(m) == 1 else '-'.join([y,m]) for y,m in historical_dates ]
# make data frames
# df1_future, df2_future = [pd.DataFrame({key:np.array(output_dict[key][i]) for key in output_dict if 'historical' not in key }, index=future_dates) for i in [1,2]]
df1_historical, df2_historical = [pd.DataFrame({key:np.array(output_dict[key][i])[-len(historical_dates):] for key in output_dict if 'historical' in key }, index=historical_dates) for i in [1,2]]
# dump them to disk
naming_lookup = {1:'LongLake', 2:'GlitterGulch'}
# df1_future_fn = 'precipitation_cmip5_allmodels_allscenarios_futures_2006-2100_LongLake_AK.csv'
# df2_future_fn = 'precipitation_cmip5_allmodels_allscenarios_futures_2006-2100_GlitterGulch_AK.csv'
df1_historical_fn = 'precipitation_cru_ts40_allmodels_allscenarios_historical_1901-2015_LongLake_AK.csv'
df2_historical_fn = 'precipitation_cru_ts40_allmodels_allscenarios_historical_1901-2015_GlitterGulch_AK.csv'
# df1_future.to_csv( os.path.join( output_dir, df1_future_fn), sep=',' )
# df2_future.to_csv( os.path.join( output_dir, df2_future_fn), sep=',' )
df1_historical.to_csv( os.path.join( output_dir, df1_historical_fn), sep=',' )
df2_historical.to_csv( os.path.join( output_dir, df2_historical_fn), sep=',' )
| mit | 8,534,226,147,032,637,000 | 40.316547 | 195 | 0.692495 | false |
LordGaav/rsscat | rsscat/threads.py | 1 | 1992 | # Copyright (c) 2012 Nick Douma < n.douma [at] nekoconeko . nl >
#
# This file is part of rsscat.
#
# rsscat is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# rsscat is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with rsscat. If not, see <http://www.gnu.org/licenses/>.
import rsscat
import threading
class Threads(object):
thread_list = {}
def __init__(self):
self.logger = rsscat.getLogger(__name__)
def registerThread(self, name, thread):
if not isinstance(thread, threading.Thread):
self.logger.error("Thread {0} is not actually a Thread!".format(name))
raise Exception("Thread {0} is not actually a Thread!".format(name))
if name in self.thread_list:
self.logger.error("Thread {0} already registered!".format(name))
raise Exception("Thread {0} already registered!".format(name))
self.thread_list[name] = thread
self.logger.debug("Registered thread {0}".format(name))
return thread
def getThreads(self):
return self.thread_list.keys()
def getThread(self, name):
if not name in self.thread_list:
self.logger.error("Thread {0} is not registered!".format(name))
raise Exception("Thread {0} is not registered!".format(name))
return self.thread_list[name]
def unregisterThread(self, name):
if not name in self.thread_list:
self.logger.error("Thread {0} is not registered!".format(name))
raise Exception("Thread {0} is not registered!".format(name))
del self.thread_list[name]
self.logger.debug("Unregistered thread {0}".format(name))
| gpl-3.0 | 1,484,509,439,365,768,700 | 33.947368 | 73 | 0.708333 | false |
cpcloud/numpy | numpy/core/setup.py | 3 | 42420 | from __future__ import division, print_function
import imp
import os
import sys
import shutil
import pickle
import copy
import warnings
import re
from os.path import join
from numpy.distutils import log
from distutils.dep_util import newer
from distutils.sysconfig import get_config_var
from setup_common import *
# Set to True to enable multiple file compilations (experimental)
ENABLE_SEPARATE_COMPILATION = (os.environ.get('NPY_SEPARATE_COMPILATION', "1") != "0")
# Set to True to enable relaxed strides checking. This (mostly) means
# that `strides[dim]` is ignored if `shape[dim] == 1` when setting flags.
NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "1") != "0")
# XXX: ugly, we use a class to avoid calling twice some expensive functions in
# config.h/numpyconfig.h. I don't see a better way because distutils forces
# config.h generation inside an Extension class, and as such sharing
# configuration information between extensions is not easy.
# Using a pickled-based memoize does not work because config_cmd is an instance
# method, which cPickle does not like.
#
# Use pickle in all cases, as cPickle is gone in python3 and the difference
# in time is only in build. -- Charles Harris, 2013-03-30
class CallOnceOnly(object):
def __init__(self):
self._check_types = None
self._check_ieee_macros = None
self._check_complex = None
def check_types(self, *a, **kw):
if self._check_types is None:
out = check_types(*a, **kw)
self._check_types = pickle.dumps(out)
else:
out = copy.deepcopy(pickle.loads(self._check_types))
return out
def check_ieee_macros(self, *a, **kw):
if self._check_ieee_macros is None:
out = check_ieee_macros(*a, **kw)
self._check_ieee_macros = pickle.dumps(out)
else:
out = copy.deepcopy(pickle.loads(self._check_ieee_macros))
return out
def check_complex(self, *a, **kw):
if self._check_complex is None:
out = check_complex(*a, **kw)
self._check_complex = pickle.dumps(out)
else:
out = copy.deepcopy(pickle.loads(self._check_complex))
return out
PYTHON_HAS_UNICODE_WIDE = True
def pythonlib_dir():
"""return path where libpython* is."""
if sys.platform == 'win32':
return os.path.join(sys.prefix, "libs")
else:
return get_config_var('LIBDIR')
def is_npy_no_signal():
"""Return True if the NPY_NO_SIGNAL symbol must be defined in configuration
header."""
return sys.platform == 'win32'
def is_npy_no_smp():
"""Return True if the NPY_NO_SMP symbol must be defined in public
header (when SMP support cannot be reliably enabled)."""
# Python 2.3 causes a segfault when
# trying to re-acquire the thread-state
# which is done in error-handling
# ufunc code. NPY_ALLOW_C_API and friends
# cause the segfault. So, we disable threading
# for now.
if sys.version[:5] < '2.4.2':
nosmp = 1
else:
# Perhaps a fancier check is in order here.
# so that threads are only enabled if there
# are actually multiple CPUS? -- but
# threaded code can be nice even on a single
# CPU so that long-calculating code doesn't
# block.
try:
nosmp = os.environ['NPY_NOSMP']
nosmp = 1
except KeyError:
nosmp = 0
return nosmp == 1
def win32_checks(deflist):
from numpy.distutils.misc_util import get_build_architecture
a = get_build_architecture()
# Distutils hack on AMD64 on windows
print('BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' %
(a, os.name, sys.platform))
if a == 'AMD64':
deflist.append('DISTUTILS_USE_SDK')
# On win32, force long double format string to be 'g', not
# 'Lg', since the MS runtime does not support long double whose
# size is > sizeof(double)
if a == "Intel" or a == "AMD64":
deflist.append('FORCE_NO_LONG_DOUBLE_FORMATTING')
def check_math_capabilities(config, moredefs, mathlibs):
def check_func(func_name):
return config.check_func(func_name, libraries=mathlibs,
decl=True, call=True)
def check_funcs_once(funcs_name):
decl = dict([(f, True) for f in funcs_name])
st = config.check_funcs_once(funcs_name, libraries=mathlibs,
decl=decl, call=decl)
if st:
moredefs.extend([(fname2def(f), 1) for f in funcs_name])
return st
def check_funcs(funcs_name):
# Use check_funcs_once first, and if it does not work, test func per
# func. Return success only if all the functions are available
if not check_funcs_once(funcs_name):
# Global check failed, check func per func
for f in funcs_name:
if check_func(f):
moredefs.append((fname2def(f), 1))
return 0
else:
return 1
#use_msvc = config.check_decl("_MSC_VER")
if not check_funcs_once(MANDATORY_FUNCS):
        raise SystemError("One of the required functions to build numpy is not"
" available (the list is %s)." % str(MANDATORY_FUNCS))
# Standard functions which may not be available and for which we have a
# replacement implementation. Note that some of these are C99 functions.
# XXX: hack to circumvent cpp pollution from python: python put its
# config.h in the public namespace, so we have a clash for the common
# functions we test. We remove every function tested by python's
# autoconf, hoping their own test are correct
for f in OPTIONAL_STDFUNCS_MAYBE:
if config.check_decl(fname2def(f),
headers=["Python.h", "math.h"]):
OPTIONAL_STDFUNCS.remove(f)
check_funcs(OPTIONAL_STDFUNCS)
for h in OPTIONAL_HEADERS:
if config.check_func("", decl=False, call=False, headers=[h]):
moredefs.append((fname2def(h).replace(".", "_"), 1))
for tup in OPTIONAL_INTRINSICS:
headers = None
if len(tup) == 2:
f, args = tup
else:
f, args, headers = tup[0], tup[1], [tup[2]]
if config.check_func(f, decl=False, call=True, call_args=args,
headers=headers):
moredefs.append((fname2def(f), 1))
for dec, fn in OPTIONAL_FUNCTION_ATTRIBUTES:
if config.check_gcc_function_attribute(dec, fn):
moredefs.append((fname2def(fn), 1))
for fn in OPTIONAL_VARIABLE_ATTRIBUTES:
if config.check_gcc_variable_attribute(fn):
m = fn.replace("(", "_").replace(")", "_")
moredefs.append((fname2def(m), 1))
# C99 functions: float and long double versions
check_funcs(C99_FUNCS_SINGLE)
check_funcs(C99_FUNCS_EXTENDED)
def check_complex(config, mathlibs):
priv = []
pub = []
try:
if os.uname()[0] == "Interix":
warnings.warn("Disabling broken complex support. See #1365")
return priv, pub
except:
# os.uname not available on all platforms. blanket except ugly but safe
pass
# Check for complex support
st = config.check_header('complex.h')
if st:
priv.append(('HAVE_COMPLEX_H', 1))
pub.append(('NPY_USE_C99_COMPLEX', 1))
for t in C99_COMPLEX_TYPES:
st = config.check_type(t, headers=["complex.h"])
if st:
pub.append(('NPY_HAVE_%s' % type2def(t), 1))
def check_prec(prec):
flist = [f + prec for f in C99_COMPLEX_FUNCS]
decl = dict([(f, True) for f in flist])
if not config.check_funcs_once(flist, call=decl, decl=decl,
libraries=mathlibs):
for f in flist:
if config.check_func(f, call=True, decl=True,
libraries=mathlibs):
priv.append((fname2def(f), 1))
else:
priv.extend([(fname2def(f), 1) for f in flist])
check_prec('')
check_prec('f')
check_prec('l')
return priv, pub
def check_ieee_macros(config):
priv = []
pub = []
macros = []
def _add_decl(f):
priv.append(fname2def("decl_%s" % f))
pub.append('NPY_%s' % fname2def("decl_%s" % f))
    # XXX: hack to circumvent cpp pollution from python: python puts its
    # config.h in the public namespace, so we have a clash for the common
    # functions we test. We remove every function tested by python's
    # autoconf, hoping their own tests are correct
_macros = ["isnan", "isinf", "signbit", "isfinite"]
for f in _macros:
py_symbol = fname2def("decl_%s" % f)
already_declared = config.check_decl(py_symbol,
headers=["Python.h", "math.h"])
if already_declared:
if config.check_macro_true(py_symbol,
headers=["Python.h", "math.h"]):
pub.append('NPY_%s' % fname2def("decl_%s" % f))
else:
macros.append(f)
# Normally, isnan and isinf are macro (C99), but some platforms only have
# func, or both func and macro version. Check for macro only, and define
# replacement ones if not found.
# Note: including Python.h is necessary because it modifies some math.h
# definitions
for f in macros:
st = config.check_decl(f, headers = ["Python.h", "math.h"])
if st:
_add_decl(f)
return priv, pub
def check_types(config_cmd, ext, build_dir):
private_defines = []
public_defines = []
# Expected size (in number of bytes) for each type. This is an
# optimization: those are only hints, and an exhaustive search for the size
# is done if the hints are wrong.
expected = {}
expected['short'] = [2]
expected['int'] = [4]
expected['long'] = [8, 4]
expected['float'] = [4]
expected['double'] = [8]
expected['long double'] = [8, 12, 16]
expected['Py_intptr_t'] = [4, 8]
expected['PY_LONG_LONG'] = [8]
expected['long long'] = [8]
expected['off_t'] = [4, 8]
# Check we have the python header (-dev* packages on Linux)
result = config_cmd.check_header('Python.h')
if not result:
raise SystemError(
"Cannot compile 'Python.h'. Perhaps you need to "\
"install python-dev|python-devel.")
res = config_cmd.check_header("endian.h")
if res:
private_defines.append(('HAVE_ENDIAN_H', 1))
public_defines.append(('NPY_HAVE_ENDIAN_H', 1))
# Check basic types sizes
for type in ('short', 'int', 'long'):
res = config_cmd.check_decl("SIZEOF_%s" % sym2def(type), headers = ["Python.h"])
if res:
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), "SIZEOF_%s" % sym2def(type)))
else:
res = config_cmd.check_type_size(type, expected=expected[type])
if res >= 0:
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % type)
for type in ('float', 'double', 'long double'):
already_declared = config_cmd.check_decl("SIZEOF_%s" % sym2def(type),
headers = ["Python.h"])
res = config_cmd.check_type_size(type, expected=expected[type])
if res >= 0:
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
if not already_declared and not type == 'long double':
private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % type)
# Compute size of corresponding complex type: used to check that our
# definition is binary compatible with C99 complex type (check done at
# build time in npy_common.h)
complex_def = "struct {%s __x; %s __y;}" % (type, type)
res = config_cmd.check_type_size(complex_def, expected=2*expected[type])
if res >= 0:
public_defines.append(('NPY_SIZEOF_COMPLEX_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % complex_def)
for type in ('Py_intptr_t', 'off_t'):
res = config_cmd.check_type_size(type, headers=["Python.h"],
library_dirs=[pythonlib_dir()],
expected=expected[type])
if res >= 0:
private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % type)
# We check declaration AND type because that's how distutils does it.
if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']):
res = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h'],
library_dirs=[pythonlib_dir()],
expected=expected['PY_LONG_LONG'])
if res >= 0:
private_defines.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))
public_defines.append(('NPY_SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % 'PY_LONG_LONG')
res = config_cmd.check_type_size('long long',
expected=expected['long long'])
if res >= 0:
#private_defines.append(('SIZEOF_%s' % sym2def('long long'), '%d' % res))
public_defines.append(('NPY_SIZEOF_%s' % sym2def('long long'), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % 'long long')
if not config_cmd.check_decl('CHAR_BIT', headers=['Python.h']):
raise RuntimeError(
"Config wo CHAR_BIT is not supported"\
", please contact the maintainers")
return private_defines, public_defines
def check_mathlib(config_cmd):
# Testing the C math library
mathlibs = []
mathlibs_choices = [[], ['m'], ['cpml']]
mathlib = os.environ.get('MATHLIB')
if mathlib:
mathlibs_choices.insert(0, mathlib.split(','))
for libs in mathlibs_choices:
if config_cmd.check_func("exp", libraries=libs, decl=True, call=True):
mathlibs = libs
break
else:
raise EnvironmentError("math library missing; rerun "
"setup.py after setting the "
"MATHLIB env variable")
return mathlibs
def visibility_define(config):
"""Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty
string)."""
if config.check_compiler_gcc4():
return '__attribute__((visibility("hidden")))'
else:
return ''
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration, dot_join
from numpy.distutils.system_info import get_info, default_lib_dirs
config = Configuration('core', parent_package, top_path)
local_dir = config.local_path
codegen_dir = join(local_dir, 'code_generators')
if is_released(config):
warnings.simplefilter('error', MismatchCAPIWarning)
# Check whether we have a mismatch between the set C API VERSION and the
# actual C API VERSION
check_api_version(C_API_VERSION, codegen_dir)
generate_umath_py = join(codegen_dir, 'generate_umath.py')
n = dot_join(config.name, 'generate_umath')
generate_umath = imp.load_module('_'.join(n.split('.')),
open(generate_umath_py, 'U'), generate_umath_py,
('.py', 'U', 1))
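    # Clarifying note: generate_umath.py lives in code_generators/, which is not an
    # importable package, so it is loaded by file path via imp.load_module; the
    # resulting module object is used later by generate_umath_c() below.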
header_dir = 'include/numpy' # this is relative to config.path_in_package
cocache = CallOnceOnly()
def generate_config_h(ext, build_dir):
target = join(build_dir, header_dir, 'config.h')
d = os.path.dirname(target)
if not os.path.exists(d):
os.makedirs(d)
if newer(__file__, target):
config_cmd = config.get_config_cmd()
log.info('Generating %s', target)
# Check sizeof
moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir)
# Check math library and C99 math funcs availability
mathlibs = check_mathlib(config_cmd)
moredefs.append(('MATHLIB', ','.join(mathlibs)))
check_math_capabilities(config_cmd, moredefs, mathlibs)
moredefs.extend(cocache.check_ieee_macros(config_cmd)[0])
moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[0])
# Signal check
if is_npy_no_signal():
moredefs.append('__NPY_PRIVATE_NO_SIGNAL')
# Windows checks
if sys.platform=='win32' or os.name=='nt':
win32_checks(moredefs)
# Inline check
inline = config_cmd.check_inline()
# Check whether we need our own wide character support
if not config_cmd.check_decl('Py_UNICODE_WIDE', headers=['Python.h']):
PYTHON_HAS_UNICODE_WIDE = True
else:
PYTHON_HAS_UNICODE_WIDE = False
if ENABLE_SEPARATE_COMPILATION:
moredefs.append(('ENABLE_SEPARATE_COMPILATION', 1))
if NPY_RELAXED_STRIDES_CHECKING:
moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))
# Get long double representation
if sys.platform != 'darwin':
rep = check_long_double_representation(config_cmd)
if rep in ['INTEL_EXTENDED_12_BYTES_LE',
'INTEL_EXTENDED_16_BYTES_LE',
'MOTOROLA_EXTENDED_12_BYTES_BE',
'IEEE_QUAD_LE', 'IEEE_QUAD_BE',
'IEEE_DOUBLE_LE', 'IEEE_DOUBLE_BE',
'DOUBLE_DOUBLE_BE', 'DOUBLE_DOUBLE_LE']:
moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1))
else:
raise ValueError("Unrecognized long double format: %s" % rep)
# Py3K check
if sys.version_info[0] == 3:
moredefs.append(('NPY_PY3K', 1))
# Generate the config.h file from moredefs
target_f = open(target, 'w')
for d in moredefs:
if isinstance(d, str):
target_f.write('#define %s\n' % (d))
else:
target_f.write('#define %s %s\n' % (d[0], d[1]))
# define inline to our keyword, or nothing
target_f.write('#ifndef __cplusplus\n')
if inline == 'inline':
target_f.write('/* #undef inline */\n')
else:
target_f.write('#define inline %s\n' % inline)
target_f.write('#endif\n')
# add the guard to make sure config.h is never included directly,
# but always through npy_config.h
target_f.write("""
#ifndef _NPY_NPY_CONFIG_H_
#error config.h should never be included directly, include npy_config.h instead
#endif
""")
target_f.close()
print('File:', target)
target_f = open(target)
print(target_f.read())
target_f.close()
print('EOF')
else:
mathlibs = []
target_f = open(target)
for line in target_f:
s = '#define MATHLIB'
if line.startswith(s):
value = line[len(s):].strip()
if value:
mathlibs.extend(value.split(','))
target_f.close()
        # Ugly: this can be called within a library and not an extension,
        # in which case there is no libraries attribute (and none is
        # needed).
if hasattr(ext, 'libraries'):
ext.libraries.extend(mathlibs)
incl_dir = os.path.dirname(target)
if incl_dir not in config.numpy_include_dirs:
config.numpy_include_dirs.append(incl_dir)
return target
def generate_numpyconfig_h(ext, build_dir):
"""Depends on config.h: generate_config_h has to be called before !"""
# put private include directory in build_dir on search path
        # allows using code generation in headers
config.add_include_dirs(join(build_dir, "src", "private"))
target = join(build_dir, header_dir, '_numpyconfig.h')
d = os.path.dirname(target)
if not os.path.exists(d):
os.makedirs(d)
if newer(__file__, target):
config_cmd = config.get_config_cmd()
log.info('Generating %s', target)
# Check sizeof
ignored, moredefs = cocache.check_types(config_cmd, ext, build_dir)
if is_npy_no_signal():
moredefs.append(('NPY_NO_SIGNAL', 1))
if is_npy_no_smp():
moredefs.append(('NPY_NO_SMP', 1))
else:
moredefs.append(('NPY_NO_SMP', 0))
mathlibs = check_mathlib(config_cmd)
moredefs.extend(cocache.check_ieee_macros(config_cmd)[1])
moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[1])
if ENABLE_SEPARATE_COMPILATION:
moredefs.append(('NPY_ENABLE_SEPARATE_COMPILATION', 1))
if NPY_RELAXED_STRIDES_CHECKING:
moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))
            # Check whether we can use inttypes (C99) formats
if config_cmd.check_decl('PRIdPTR', headers = ['inttypes.h']):
moredefs.append(('NPY_USE_C99_FORMATS', 1))
# visibility check
hidden_visibility = visibility_define(config_cmd)
moredefs.append(('NPY_VISIBILITY_HIDDEN', hidden_visibility))
# Add the C API/ABI versions
moredefs.append(('NPY_ABI_VERSION', '0x%.8X' % C_ABI_VERSION))
moredefs.append(('NPY_API_VERSION', '0x%.8X' % C_API_VERSION))
# Add moredefs to header
target_f = open(target, 'w')
for d in moredefs:
if isinstance(d, str):
target_f.write('#define %s\n' % (d))
else:
target_f.write('#define %s %s\n' % (d[0], d[1]))
# Define __STDC_FORMAT_MACROS
target_f.write("""
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS 1
#endif
""")
target_f.close()
# Dump the numpyconfig.h header to stdout
print('File: %s' % target)
target_f = open(target)
print(target_f.read())
target_f.close()
print('EOF')
config.add_data_files((header_dir, target))
return target
def generate_api_func(module_name):
def generate_api(ext, build_dir):
script = join(codegen_dir, module_name + '.py')
sys.path.insert(0, codegen_dir)
try:
m = __import__(module_name)
log.info('executing %s', script)
h_file, c_file, doc_file = m.generate_api(os.path.join(build_dir, header_dir))
finally:
del sys.path[0]
config.add_data_files((header_dir, h_file),
(header_dir, doc_file))
return (h_file,)
return generate_api
generate_numpy_api = generate_api_func('generate_numpy_api')
generate_ufunc_api = generate_api_func('generate_ufunc_api')
config.add_include_dirs(join(local_dir, "src", "private"))
config.add_include_dirs(join(local_dir, "src"))
config.add_include_dirs(join(local_dir))
config.add_data_files('include/numpy/*.h')
config.add_include_dirs(join('src', 'npymath'))
config.add_include_dirs(join('src', 'multiarray'))
config.add_include_dirs(join('src', 'umath'))
config.add_include_dirs(join('src', 'npysort'))
config.add_define_macros([("HAVE_NPY_CONFIG_H", "1")])
config.add_define_macros([("_FILE_OFFSET_BITS", "64")])
config.add_define_macros([('_LARGEFILE_SOURCE', '1')])
config.add_define_macros([('_LARGEFILE64_SOURCE', '1')])
config.numpy_include_dirs.extend(config.paths('include'))
deps = [join('src', 'npymath', '_signbit.c'),
join('include', 'numpy', '*object.h'),
'include/numpy/fenv/fenv.c',
'include/numpy/fenv/fenv.h',
join(codegen_dir, 'genapi.py'),
]
    # Don't install fenv unless we need it.
if sys.platform == 'cygwin':
config.add_data_dir('include/numpy/fenv')
#######################################################################
# dummy module #
#######################################################################
# npymath needs the config.h and numpyconfig.h files to be generated, but
# build_clib cannot handle generate_config_h and generate_numpyconfig_h
    # (don't ask). Because C libs are built before extensions, we have to
# explicitly add an extension which has generate_config_h and
# generate_numpyconfig_h as sources *before* adding npymath.
config.add_extension('_dummy',
sources = [join('src', 'dummymodule.c'),
generate_config_h,
generate_numpyconfig_h,
generate_numpy_api]
)
#######################################################################
# npymath library #
#######################################################################
subst_dict = dict([("sep", os.path.sep), ("pkgname", "numpy.core")])
def get_mathlib_info(*args):
# Another ugly hack: the mathlib info is known once build_src is run,
# but we cannot use add_installed_pkg_config here either, so we only
        # update the substitution dictionary during npymath build
config_cmd = config.get_config_cmd()
# Check that the toolchain works, to fail early if it doesn't
# (avoid late errors with MATHLIB which are confusing if the
# compiler does not work).
st = config_cmd.try_link('int main(void) { return 0;}')
if not st:
raise RuntimeError("Broken toolchain: cannot link a simple C program")
mlibs = check_mathlib(config_cmd)
posix_mlib = ' '.join(['-l%s' % l for l in mlibs])
msvc_mlib = ' '.join(['%s.lib' % l for l in mlibs])
subst_dict["posix_mathlib"] = posix_mlib
subst_dict["msvc_mathlib"] = msvc_mlib
npymath_sources = [join('src', 'npymath', 'npy_math.c.src'),
join('src', 'npymath', 'ieee754.c.src'),
join('src', 'npymath', 'npy_math_complex.c.src'),
join('src', 'npymath', 'halffloat.c')]
config.add_installed_library('npymath',
sources=npymath_sources + [get_mathlib_info],
install_dir='lib')
config.add_npy_pkg_config("npymath.ini.in", "lib/npy-pkg-config",
subst_dict)
config.add_npy_pkg_config("mlib.ini.in", "lib/npy-pkg-config",
subst_dict)
#######################################################################
# npysort library #
#######################################################################
# This library is created for the build but it is not installed
npysort_sources=[join('src', 'npysort', 'quicksort.c.src'),
join('src', 'npysort', 'mergesort.c.src'),
join('src', 'npysort', 'heapsort.c.src'),
join('src', 'private', 'npy_partition.h.src'),
join('src', 'npysort', 'selection.c.src'),
join('src', 'private', 'npy_binsearch.h.src'),
join('src', 'npysort', 'binsearch.c.src'),
]
config.add_library('npysort',
sources=npysort_sources,
include_dirs=[])
#######################################################################
# multiarray module #
#######################################################################
# Multiarray version: this function is needed to build foo.c from foo.c.src
# when foo.c is included in another file and as such not in the src
# argument of build_ext command
def generate_multiarray_templated_sources(ext, build_dir):
from numpy.distutils.misc_util import get_cmd
subpath = join('src', 'multiarray')
sources = [join(local_dir, subpath, 'scalartypes.c.src'),
join(local_dir, subpath, 'arraytypes.c.src'),
join(local_dir, subpath, 'nditer_templ.c.src'),
join(local_dir, subpath, 'lowlevel_strided_loops.c.src'),
join(local_dir, subpath, 'einsum.c.src')]
        # numpy.distutils generates .c from .c.src in weird directories; we have
        # to add them there as they depend on the build_dir
config.add_include_dirs(join(build_dir, subpath))
cmd = get_cmd('build_src')
cmd.ensure_finalized()
cmd.template_sources(sources, ext)
multiarray_deps = [
join('src', 'multiarray', 'arrayobject.h'),
join('src', 'multiarray', 'arraytypes.h'),
join('src', 'multiarray', 'array_assign.h'),
join('src', 'multiarray', 'buffer.h'),
join('src', 'multiarray', 'calculation.h'),
join('src', 'multiarray', 'common.h'),
join('src', 'multiarray', 'convert_datatype.h'),
join('src', 'multiarray', 'convert.h'),
join('src', 'multiarray', 'conversion_utils.h'),
join('src', 'multiarray', 'ctors.h'),
join('src', 'multiarray', 'descriptor.h'),
join('src', 'multiarray', 'getset.h'),
join('src', 'multiarray', 'hashdescr.h'),
join('src', 'multiarray', 'iterators.h'),
join('src', 'multiarray', 'mapping.h'),
join('src', 'multiarray', 'methods.h'),
join('src', 'multiarray', 'multiarraymodule.h'),
join('src', 'multiarray', 'nditer_impl.h'),
join('src', 'multiarray', 'numpymemoryview.h'),
join('src', 'multiarray', 'number.h'),
join('src', 'multiarray', 'numpyos.h'),
join('src', 'multiarray', 'refcount.h'),
join('src', 'multiarray', 'scalartypes.h'),
join('src', 'multiarray', 'sequence.h'),
join('src', 'multiarray', 'shape.h'),
join('src', 'multiarray', 'ucsnarrow.h'),
join('src', 'multiarray', 'usertypes.h'),
join('src', 'private', 'lowlevel_strided_loops.h'),
join('include', 'numpy', 'arrayobject.h'),
join('include', 'numpy', '_neighborhood_iterator_imp.h'),
join('include', 'numpy', 'npy_endian.h'),
join('include', 'numpy', 'arrayscalars.h'),
join('include', 'numpy', 'noprefix.h'),
join('include', 'numpy', 'npy_interrupt.h'),
join('include', 'numpy', 'npy_3kcompat.h'),
join('include', 'numpy', 'npy_math.h'),
join('include', 'numpy', 'halffloat.h'),
join('include', 'numpy', 'npy_common.h'),
join('include', 'numpy', 'npy_os.h'),
join('include', 'numpy', 'utils.h'),
join('include', 'numpy', 'ndarrayobject.h'),
join('include', 'numpy', 'npy_cpu.h'),
join('include', 'numpy', 'numpyconfig.h'),
join('include', 'numpy', 'ndarraytypes.h'),
join('include', 'numpy', 'npy_1_7_deprecated_api.h'),
join('include', 'numpy', '_numpyconfig.h.in'),
        # add library sources as distutils does not consider library
        # dependencies
] + npysort_sources + npymath_sources
multiarray_src = [
join('src', 'multiarray', 'alloc.c'),
join('src', 'multiarray', 'arrayobject.c'),
join('src', 'multiarray', 'arraytypes.c.src'),
join('src', 'multiarray', 'array_assign.c'),
join('src', 'multiarray', 'array_assign_scalar.c'),
join('src', 'multiarray', 'array_assign_array.c'),
join('src', 'multiarray', 'buffer.c'),
join('src', 'multiarray', 'calculation.c'),
join('src', 'multiarray', 'common.c'),
join('src', 'multiarray', 'convert.c'),
join('src', 'multiarray', 'convert_datatype.c'),
join('src', 'multiarray', 'conversion_utils.c'),
join('src', 'multiarray', 'ctors.c'),
join('src', 'multiarray', 'datetime.c'),
join('src', 'multiarray', 'datetime_strings.c'),
join('src', 'multiarray', 'datetime_busday.c'),
join('src', 'multiarray', 'datetime_busdaycal.c'),
join('src', 'multiarray', 'descriptor.c'),
join('src', 'multiarray', 'dtype_transfer.c'),
join('src', 'multiarray', 'einsum.c.src'),
join('src', 'multiarray', 'flagsobject.c'),
join('src', 'multiarray', 'getset.c'),
join('src', 'multiarray', 'hashdescr.c'),
join('src', 'multiarray', 'item_selection.c'),
join('src', 'multiarray', 'iterators.c'),
join('src', 'multiarray', 'lowlevel_strided_loops.c.src'),
join('src', 'multiarray', 'mapping.c'),
join('src', 'multiarray', 'methods.c'),
join('src', 'multiarray', 'multiarraymodule.c'),
join('src', 'multiarray', 'nditer_templ.c.src'),
join('src', 'multiarray', 'nditer_api.c'),
join('src', 'multiarray', 'nditer_constr.c'),
join('src', 'multiarray', 'nditer_pywrap.c'),
join('src', 'multiarray', 'number.c'),
join('src', 'multiarray', 'numpymemoryview.c'),
join('src', 'multiarray', 'numpyos.c'),
join('src', 'multiarray', 'refcount.c'),
join('src', 'multiarray', 'sequence.c'),
join('src', 'multiarray', 'shape.c'),
join('src', 'multiarray', 'scalarapi.c'),
join('src', 'multiarray', 'scalartypes.c.src'),
join('src', 'multiarray', 'usertypes.c'),
join('src', 'multiarray', 'ucsnarrow.c')]
if not ENABLE_SEPARATE_COMPILATION:
multiarray_deps.extend(multiarray_src)
multiarray_src = [join('src', 'multiarray', 'multiarraymodule_onefile.c')]
multiarray_src.append(generate_multiarray_templated_sources)
config.add_extension('multiarray',
sources = multiarray_src +
[generate_config_h,
generate_numpyconfig_h,
generate_numpy_api,
join(codegen_dir, 'generate_numpy_api.py'),
join('*.py')],
depends = deps + multiarray_deps,
libraries = ['npymath', 'npysort'])
#######################################################################
# umath module #
#######################################################################
# umath version: this function is needed to build foo.c from foo.c.src
# when foo.c is included in another file and as such not in the src
# argument of build_ext command
def generate_umath_templated_sources(ext, build_dir):
from numpy.distutils.misc_util import get_cmd
subpath = join('src', 'umath')
sources = [
join(local_dir, subpath, 'loops.h.src'),
join(local_dir, subpath, 'loops.c.src'),
join(local_dir, subpath, 'simd.inc.src')]
        # numpy.distutils generates .c from .c.src in weird directories; we have
        # to add them there as they depend on the build_dir
config.add_include_dirs(join(build_dir, subpath))
cmd = get_cmd('build_src')
cmd.ensure_finalized()
cmd.template_sources(sources, ext)
def generate_umath_c(ext, build_dir):
target = join(build_dir, header_dir, '__umath_generated.c')
dir = os.path.dirname(target)
if not os.path.exists(dir):
os.makedirs(dir)
script = generate_umath_py
if newer(script, target):
f = open(target, 'w')
f.write(generate_umath.make_code(generate_umath.defdict,
generate_umath.__file__))
f.close()
return []
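        # Note: returning an empty list keeps __umath_generated.c out of the
        # extension's compiled sources; the umath module sources pull it in via
        # an #include instead of compiling it separately.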
umath_src = [
join('src', 'umath', 'umathmodule.c'),
join('src', 'umath', 'reduction.c'),
join('src', 'umath', 'funcs.inc.src'),
join('src', 'umath', 'simd.inc.src'),
join('src', 'umath', 'loops.h.src'),
join('src', 'umath', 'loops.c.src'),
join('src', 'umath', 'ufunc_object.c'),
join('src', 'umath', 'ufunc_type_resolution.c')]
umath_deps = [
generate_umath_py,
join('src', 'multiarray', 'common.h'),
join('src', 'umath', 'simd.inc.src'),
join(codegen_dir, 'generate_ufunc_api.py'),
join('src', 'private', 'ufunc_override.h')] + npymath_sources
if not ENABLE_SEPARATE_COMPILATION:
umath_deps.extend(umath_src)
umath_src = [join('src', 'umath', 'umathmodule_onefile.c')]
umath_src.append(generate_umath_templated_sources)
umath_src.append(join('src', 'umath', 'funcs.inc.src'))
umath_src.append(join('src', 'umath', 'simd.inc.src'))
config.add_extension('umath',
sources = umath_src +
[generate_config_h,
generate_numpyconfig_h,
generate_umath_c,
generate_ufunc_api],
depends = deps + umath_deps,
libraries = ['npymath'],
)
#######################################################################
# scalarmath module #
#######################################################################
config.add_extension('scalarmath',
sources = [join('src', 'scalarmathmodule.c.src'),
join('src', 'private', 'scalarmathmodule.h.src'),
generate_config_h,
generate_numpyconfig_h,
generate_numpy_api,
generate_ufunc_api],
depends = deps + npymath_sources,
libraries = ['npymath'],
)
#######################################################################
# _dotblas module #
#######################################################################
# Configure blasdot
blas_info = get_info('blas_opt', 0)
#blas_info = {}
def get_dotblas_sources(ext, build_dir):
if blas_info:
if ('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', []):
return None # dotblas needs ATLAS, Fortran compiled blas will not be sufficient.
return ext.depends[:1]
return None # no extension module will be built
config.add_extension('_dotblas',
sources = [get_dotblas_sources],
depends = [join('blasdot', '_dotblas.c'),
join('blasdot', 'cblas.h'),
],
include_dirs = ['blasdot'],
extra_info = blas_info
)
#######################################################################
# umath_tests module #
#######################################################################
config.add_extension('umath_tests',
sources = [join('src', 'umath', 'umath_tests.c.src')])
#######################################################################
# custom rational dtype module #
#######################################################################
config.add_extension('test_rational',
sources = [join('src', 'umath', 'test_rational.c.src')])
#######################################################################
# struct_ufunc_test module #
#######################################################################
config.add_extension('struct_ufunc_test',
sources = [join('src', 'umath', 'struct_ufunc_test.c.src')])
#######################################################################
# multiarray_tests module #
#######################################################################
config.add_extension('multiarray_tests',
sources = [join('src', 'multiarray', 'multiarray_tests.c.src')])
#######################################################################
# operand_flag_tests module #
#######################################################################
config.add_extension('operand_flag_tests',
sources = [join('src', 'umath', 'operand_flag_tests.c.src')])
config.add_data_dir('tests')
config.add_data_dir('tests/data')
config.make_svn_version_py()
return config
if __name__=='__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
| bsd-3-clause | 3,789,421,310,767,423,500 | 40.916996 | 97 | 0.523102 | false |
texuf/whiskeynode | examples/friendsoffriends.py | 1 | 3757 |
'''
to run in python terminal:
python -c "execfile('examples/friendsoffriends.py')"
'''
from examples.helpers import Nameable
from random import random
from whiskeynode import WhiskeyNode
from whiskeynode.db import db
from whiskeynode.edges import Edge
from whiskeynode.terminals import outbound_node, bidirectional_list, inbound_list
'''
This is an example of finding friends of friends. The query is fairly awkward because our
bidirectional friends terminal isn't directed, so we have to search for both inbound and outbound relationships.
'''
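# Illustrative note (added for clarity, not part of the original example): each
# friendship is stored as an Edge document shaped roughly like
#   {'name': 'friends',
#    'outboundCollection': 'example_friendsoffriends_users',
#    'outboundId': <one user's ObjectId>, 'inboundId': <the other user's ObjectId>}
# and because the terminal is bidirectional either user may sit on the outbound side,
# which is why the query in __main__ below needs both $and branches.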
class User(WhiskeyNode, Nameable):
COLLECTION_NAME = 'example_friendsoffriends_users'
COLLECTION = db[COLLECTION_NAME]
FIELDS = {
'name':unicode,
}
@classmethod
def init_terminals(cls):
cls.TERMINALS = {
'friends': bidirectional_list(User),
}
if __name__ == '__main__':
print '\n===Friends of Friends Example===\n'
users = [
User.from_name('George Carlin'),
User.from_name('Tom Waits'),
User.from_name('Bubba'),
User.from_name('George Harison'),
User.from_name('Montell Williams'),
User.from_name('George Clooney'),
User.from_name('Kevin Bacon'),
]
previous_user = None
for user in users:
if previous_user:
previous_user.friends.append(user)
previous_user = user
for user in users:
print '%s is friends with: ' % user.name, [x.name for x in user.friends]
map(lambda x:x.save(), users)
user_a = users[0]
user_b = users[-1]
friend_ids = [user_a._id]
count = 0
    # Breadth-first expansion: start from George's friends and keep adding friends-of-friends until Kevin's id is returned
while(True):
#get friends
friends_of_friend_ids = Edge.COLLECTION.find({
'$or':[
{
'$and':[
{
'name':'friends',
'outboundCollection':User.COLLECTION_NAME,
'outboundId':{'$in':friend_ids},
},
{
'name':'friends',
'outboundCollection':User.COLLECTION_NAME,
'inboundId':{'$nin':friend_ids},
}
]
},
{
'$and':[
{
'name':'friends',
'outboundCollection':User.COLLECTION_NAME,
'inboundId':{'$in':friend_ids},
},
{
'name':'friends',
'outboundCollection':User.COLLECTION_NAME,
'outboundId':{'$nin':friend_ids},
}
]
}
]
}).distinct('inboundId')
if len(friends_of_friend_ids) == 0:
print '%s and %s are not connected' % (user_a.name, user_b.name)
break
if user_b._id in friends_of_friend_ids:
            print 'Found %s and %s are separated by %d relationships' % (user_a.name, user_b.name, count + 1)
break
else:
count = count + 1
friend_ids = friend_ids + friends_of_friend_ids
| apache-2.0 | -5,178,659,917,334,447,000 | 30.308333 | 133 | 0.461538 | false |
akusok/website-ibc | wibc_old/wibc_config.py | 1 | 3538 | """Setup for an IBC experiment.
Contains all configs from the application scheme.
Experiments should differ by this setup only.
Can create HDF5-based or fast online experiments.
"""
__author__ = "Anton Akusok"
__license__ = "PSFL"
__version__ = "0.0.1"
class IBCConfig(object):
"""Global parameters for WIC system.
Import this with some simpler name, like "conf".
"""
_dir = "/data/r0/spiiras_train/"
_ibc = "/home/akusoka1/WIBC/src/"
_maxc = 12 # maximum amount of different classes
_raw_dir = _dir + "raw_img/"
_ws_descr = _dir + "wsdescr.txt"
# m02: choose either option, the other gets ""
_mode = "hdf5" # its either "hdf5" or something else
_hdf5 = _dir + "spiiras_train.h5"
_img_data = _dir + "imgdata.pkl"
# m03: img_preprocessor
_img_dir = _dir + "images/"
_min_size = 2400
_max_dim = 500
_jpeg_quality = 95
# m04: get_descriptors
_temp_dir = "/run/shm/"
_descr_extractor = "csift"
_max_reg = 10000 # maximum number of regions
_cD_bin = _ibc + "sift/colorDescriptor"
# m05: get_centroids
_C_file = _dir + "C.pkl"
# must correspond to the given _maxc !!!
# contains: C["C"] = centroids
# C["L_majority"] = majority labels
# C["L_soft"] = soft labels
# m06: run_nn
_nn_count = 10
_nn_batch = 100
# m08: elm_classifier
_train_size = 10000
_val_size = 10000
_test_size = 20000
_neurons = 50
_elm_rep = 100 # ELM re-train repetitions for validation
_elm_param = _dir + "ELM.pkl"
# saving results
_f_out = _dir + "out_test_border.txt"
# multiprocessing config
_nr_wrk = 10 # a good guess is the number of cores - 1
_qsize = 300 # maximum size of a queue, to limit memory consumption
_host = _dir + "hostname.txt"
_port = 50763
_key = "IBC"
_show_progress = True
##########################################################################
# neighborhood matrix config
_nm_hdf5 = _dir + "descr_clust.h5" # HDF5 file storing neighbours info
_nm_descr = _dir + "descr.pkl" # cPickle'd array of descriptors
_nm_N = 1100000 # amount of descriptors for neighbourhood matrix calc
_nm_K = 1000 # amount of nearest neighbours kept
_nm_batch = 100000 # batch size
classes = {} # table of classes in number notations
classes["Adult"] = 0
classes["Alcohol"] = 1
classes["Cults"] = 2
classes["Dating"] = 3
classes["Drugs"] = 4
classes["Gambling"] = 5
classes["Hate"] = 6
classes["Tobacco"] = 7
classes["Violence"] = 8
classes["Weapons"] = 9
classes["Innocent"] = 10
classes["Advertisement"] = 11
@staticmethod
def get_SIFT_command(img_file, temp_file):
max_reg = 10000 # limiting number of local features per one image
command = ('./colorDescriptor %s --detector harrislaplace '
'--descriptor csift --output %s --outputFormat binary '
'--keepLimited %d > /dev/null'
% (img_file, temp_file, max_reg))
return command
@staticmethod
def file_name(classN, nr_in_class, ext):
"""Build standard image or region file name.
Allows to change the name pattern as desired (keep same arguments).
"""
fname = "i%02d_%08d.%s" % (classN, nr_in_class, ext)
return fname
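# Example usage (illustrative only; assumes the module is imported under a short
# alias as suggested in the class docstring):
#   from wibc_config import IBCConfig as conf
#   conf.file_name(3, 42, "jpg")                              # -> "i03_00000042.jpg"
#   cmd = conf.get_SIFT_command("/run/shm/img.jpg", "/run/shm/out.bin")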
| gpl-2.0 | -4,770,503,214,068,694,000 | 27.532258 | 78 | 0.559073 | false |
yakudzam/Flask-Disqus | flask_disqus/flask_disqus.py | 1 | 7437 | import json
import base64
import hmac
import hashlib
import time
from flask import render_template_string, g
class Disqus(object):
def __init__(self, app=None):
if app:
self.app = app
self.init_app(app)
def __call__(self, app, *args, **kwargs):
pass
def init_app(self, app):
        # Note: app.config.get() returns None when a key is missing, and str(None)
        # is the truthy string 'None', so convert to str only after the presence
        # checks or the guards below could never fire.
        self.DISQUS_SECRET_KEY = app.config.get('DISQUS_SECRET_KEY')
        if not self.DISQUS_SECRET_KEY:
            return "<p>You need to set DISQUS_SECRET_KEY before you can use SSO</p>"
        self.DISQUS_SECRET_KEY = str(self.DISQUS_SECRET_KEY)
        self.DISQUS_PUBLIC_KEY = app.config.get('DISQUS_PUBLIC_KEY')
        if not self.DISQUS_PUBLIC_KEY:
            return "<p>You need to set DISQUS_PUBLIC_KEY before you can use SSO</p>"
        self.DISQUS_PUBLIC_KEY = str(self.DISQUS_PUBLIC_KEY)
self.init_template_tags(app)
def init_template_tags(self, app):
@app.context_processor
def _disqus_context_processor():
def disqus_dev():
"""
                Return the HTML/js code to enable Disqus comments on a local
                development server, using the app's SERVER_NAME as the disqus_url.
"""
disqus_url = app.config.get("SERVER_NAME")
template = """
{% if disqus_url %}
<script type="text/javascript">
var disqus_developer = 1;
var disqus_url = '{{ disqus_url }}';
</script>
{% endif %}
"""
return render_template_string(template, disqus_url=disqus_url)
def disqus_show_comments(shortname=''):
"""
Return the HTML code to display DISQUS comments.
"""
shortname = str(self.app.config.get('DISQUS_WEBSITE_SHORTNAME', shortname))
template = """
<div id="disqus_thread"></div>
<script type="text/javascript">
/* <![CDATA[ */
{% block config_variables %}
var disqus_shortname = '{{ shortname }}';
{{ config|safe}}
{% endblock %}
/* * * DON'T EDIT BELOW THIS LINE * * */
(function() {
var dsq = document.createElement('script'); dsq.type = 'text/javascript'; dsq.async = true;
dsq.src = '//' + disqus_shortname + '.disqus.com/embed.js';
(document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(dsq);
})();
/* ]]> */
</script>
<noscript>Please enable JavaScript to view the <a href="//disqus.com/?ref_noscript">comments powered by Disqus.</a></noscript>
<a href="//disqus.com" class="dsq-brlink">blog comments powered by <span class="logo-disqus">Disqus</span></a>
"""
return render_template_string(template, shortname=shortname, config='')
def disqus_sso():
"""
Return the HTML/js code to enable DISQUS SSO - so logged in users on
            your site can be logged in to Disqus seamlessly.
"""
user = g.user
if user.is_anonymous():
return ""
# create a JSON packet of our data attributes
data = json.dumps({
'id': user.id,
'username': user.username,
'email': user.email,
})
# encode the data to base64
message = base64.b64encode(data.encode('utf-8'))
# generate a timestamp for signing the message
timestamp = int(time.time())
key = self.DISQUS_SECRET_KEY.encode('utf-8')
msg = ('%s %s' % (message, timestamp)).encode('utf-8')
digestmod = hashlib.sha1
# generate our hmac signature
sig = hmac.HMAC(key, msg, digestmod).hexdigest()
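                # Disqus expects remote_auth_s3 to be the three space-separated
                # fields "<base64 payload> <HMAC-SHA1 hex digest> <timestamp>",
                # which is exactly what the template below interpolates.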
template = """
<script type="text/javascript">
var disqus_config = function() {
this.page.remote_auth_s3 = "{{ message }} {{ sig }} {{ timestamp }}";
this.page.api_key = "{{ pub_key }}";
}
</script>
"""
return render_template_string(template, message=message, timestamp=timestamp, sig=sig,
pub_key=self.DISQUS_PUBLIC_KEY)
def disqus_recent_comments(shortname='', num_items=5, excerpt_length=200, hide_avatars=0, avatar_size=32):
"""
Return the HTML/js code which shows recent comments.
"""
shortname = str(app.config.get('DISQUS_WEBSITE_SHORTNAME', shortname))
template = """
<div id="dsq-recent-comments" class="dsq-widget">
<script type="text/javascript">
{% block config_variables %}
var disqus_shortname = '{{ shortname }}';
{{ config|safe }}
{% endblock %}
</script>
<script src='//{{ shortname }}.disqus.com/recent_comments_widget.js?num_items={{ num_items }}&hide_avatars={{ hide_avatars }}&avatar_size={{ avatar_size }}&excerpt_length={{ excerpt_length }}'>
</script>
</div>
"""
params = {'shortname': shortname, 'num_items': num_items, 'hide_avatars': hide_avatars,
'avatar_size': avatar_size, 'excerpt_length': excerpt_length, 'config': ''}
return render_template_string(template, **params)
def disqus_num_replies(shortname=''):
"""
Return the HTML/js code which transforms links that end with an
#disqus_thread anchor into the threads comment count.
"""
shortname = str(app.config.get('DISQUS_WEBSITE_SHORTNAME', shortname))
template = """
<script type="text/javascript">
{% block config_variables %}
var disqus_shortname = '{{ shortname }}';
{{ config|safe }}
{% endblock %}
/* * * DON'T EDIT BELOW THIS LINE * * */
(function () {
var s = document.createElement('script'); s.async = true;
s.type = 'text/javascript';
s.src = '//' + disqus_shortname + '.disqus.com/count.js';
(document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s);
}());
</script>
"""
params = {
'shortname': shortname,
'config': "",
}
return render_template_string(template, **params)
return {
'disqus_dev': disqus_dev,
'disqus_show_comments': disqus_show_comments,
'disqus_sso': disqus_sso,
'disqus_recent_comments': disqus_recent_comments,
'disqus_num_replies': disqus_num_replies
}
| apache-2.0 | -2,756,522,641,323,273,000 | 43.532934 | 213 | 0.476267 | false |
updownlife/multipleK | bin/fast_multipleK/batchQuery.py | 1 | 2371 | #!/usr/bin/env python
klength = 8
startk = 9
endk = 25
querynum = 1000
theta = -1
patternId = '0'
import os
import sys
import time
import timeit
from optparse import OptionParser
WORKING_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)),"../../")
read_file = WORKING_DIR + 'data/sample/read.fasta'
#read_file = WORKING_DIR + 'data/multipleK/read_test.fasta'
#read_file = WORKING_DIR + 'data/multipleK/homo_read.fastq'
#read_file = WORKING_DIR + 'data/multipleK/random_read.fastq'
#read_file = WORKING_DIR + 'data/multipleK/read_CP65671.fasta'
#read_file = WORKING_DIR + 'data/multipleK/read_CP65671_200.fasta'
#read_file = WORKING_DIR + 'data/multipleK/read_NC32798.fasta '
#read_file = WORKING_DIR + 'data/multipleK/read_NZ_CP007569.fasta'
#read_file = WORKING_DIR + 'data/multipleK/reads_ecoli.fasta'
#read_file = WORKING_DIR + 'data/multipleK/read_NC_005106.4.fasta'
query_file = WORKING_DIR + 'data/multipleK/boxquery'
result_file = WORKING_DIR + 'data/multipleK/result'
print "\n****** compile bond-tree ******\n"
os.chdir(WORKING_DIR + 'src/')
# modify kmer length in code file dim.h and then compile bondtree
dim_file = open("dim.h", 'w')
dim_file.write("const int DIM = %s ;" % klength)
dim_file.close()
os.system("make");
for i in range(startk, endk+1):
print "\n****** multiple K query k=%s K=%s ******\n" % (klength, i)
print "\n******generate random box query******\n"
os.chdir(WORKING_DIR)
cmd = 'python bin/random_boxquery/random_fast_boxquery.py --num '+ str(querynum) +' --klength '+ str(i) +' --output '+ query_file + str(i) +' --read '+ read_file +' --boxsize 1 --pattern ' + patternId
os.system(cmd)
print "\n****** do box query k=" + str(klength) + " K=" + str(i)
os.chdir(WORKING_DIR + 'src/')
#cmd = 'python queryK.py -k '+ klength + ' -K '+ str(i) + ' -b ' + query_file + str(i) + ' -o '+ result_file + str(i) + ' --threshold ' + threshold
cmd = 'python optQueryK.py -k '+ str(klength) + ' -K '+ str(i) + ' -b ' + query_file + str(i) + ' -o '+ result_file + str(i) + " -t " + str(theta)
os.system(cmd)
print "\n****** alignment ******\n"
cmd = 'python ../bin/align_kmer_read/fast_align_kmer_read.py --readsfile ' + read_file + ' --resultsfile ' + result_file + str(i) + ' --queryfile ' + query_file + str(i)
os.system(cmd)
print "\n\n\n***** Done ******\n\n\n"
| gpl-2.0 | 5,775,841,275,727,426,000 | 40.596491 | 207 | 0.628427 | false |
Noah-Huppert/py-i2c-register | setup.py | 1 | 1407 | from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
try:
import pypandoc
long_description = pypandoc.convert(path.join(here, "README.md"), 'rst')
except (IOError, ImportError) as e:
raise Exception("Failed to convert README.md to rst format, error: {}".format(e))
setup(
name="py-i2c-register",
version="0.0.8",
description="Python wrapper library around the common I2C controller register pattern.",
long_description=long_description,
url="https://github.com/Noah-Huppert/py-i2c-register",
author="Noah Huppert",
author_email="[email protected]",
license="MIT",
# List of: https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# Project maturity
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
"Development Status :: 3 - Alpha",
# Intended audience
"Intended Audience :: Developers",
"Topic :: Software Development :: Libraries :: Python Modules",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
],
keywords="library i2c registers",
packages=find_packages(exclude=["contrib", "docs", "tests"]),
install_requires=[],
package_data={},
data_files=[
("", ["README.md"])
],
entry_points={}
)
| mit | 2,582,732,402,775,944,000 | 27.14 | 92 | 0.63113 | false |
AunShiLord/sympy | sympy/series/tests/test_series.py | 1 | 4998 | from sympy import sin, cos, exp, E, series, oo, S, Derivative, O, Integral, \
Function, log, sqrt, Symbol, Subs, pi, symbols
from sympy.abc import x, y, n, k
from sympy.utilities.pytest import raises
from sympy.series.gruntz import calculate_series
def test_sin():
e1 = sin(x).series(x, 0)
e2 = series(sin(x), x, 0)
assert e1 == e2
def test_cos():
e1 = cos(x).series(x, 0)
e2 = series(cos(x), x, 0)
assert e1 == e2
def test_exp():
e1 = exp(x).series(x, 0)
e2 = series(exp(x), x, 0)
assert e1 == e2
def test_exp2():
e1 = exp(cos(x)).series(x, 0)
e2 = series(exp(cos(x)), x, 0)
assert e1 == e2
def test_issue_5223():
assert series(1, x) == 1
assert next(S(0).lseries(x)) == 0
assert cos(x).series() == cos(x).series(x)
raises(ValueError, lambda: cos(x + y).series())
raises(ValueError, lambda: x.series(dir=""))
assert (cos(x).series(x, 1) -
cos(x + 1).series(x).subs(x, x - 1)).removeO() == 0
e = cos(x).series(x, 1, n=None)
assert [next(e) for i in range(2)] == [cos(1), -((x - 1)*sin(1))]
e = cos(x).series(x, 1, n=None, dir='-')
assert [next(e) for i in range(2)] == [cos(1), (1 - x)*sin(1)]
# the following test is exact so no need for x -> x - 1 replacement
assert abs(x).series(x, 1, dir='-') == x
assert exp(x).series(x, 1, dir='-', n=3).removeO() == \
E - E*(-x + 1) + E*(-x + 1)**2/2
D = Derivative
assert D(x**2 + x**3*y**2, x, 2, y, 1).series(x).doit() == 12*x*y
assert next(D(cos(x), x).lseries()) == D(1, x)
assert D(
exp(x), x).series(n=3) == D(1, x) + D(x, x) + D(x**2/2, x) + O(x**3)
assert Integral(x, (x, 1, 3), (y, 1, x)).series(x) == -4 + 4*x
assert (1 + x + O(x**2)).getn() == 2
assert (1 + x).getn() is None
assert ((1/sin(x))**oo).series() == oo
logx = Symbol('logx')
assert ((sin(x))**y).nseries(x, n=1, logx=logx) == \
exp(y*logx) + O(x*exp(y*logx), x)
assert sin(1/x).series(x, oo, n=5) == 1/x - 1/(6*x**3) + O(x**(-5), (x, oo))
assert abs(x).series(x, oo, n=5, dir='+') == x
assert abs(x).series(x, -oo, n=5, dir='-') == -x
assert abs(-x).series(x, oo, n=5, dir='+') == x
assert abs(-x).series(x, -oo, n=5, dir='-') == -x
assert exp(x*log(x)).series(n=3) == \
1 + x*log(x) + x**2*log(x)**2/2 + O(x**3*log(x)**3)
# XXX is this right? If not, fix "ngot > n" handling in expr.
p = Symbol('p', positive=True)
assert exp(sqrt(p)**3*log(p)).series(n=3) == \
1 + p**S('3/2')*log(p) + O(p**3*log(p)**3)
assert exp(sin(x)*log(x)).series(n=2) == 1 + x*log(x) + O(x**2*log(x)**2)
def test_issue_3978():
f = Function('f')
assert f(x).series(x, 0, 3, dir='-') == \
f(0) + x*Subs(Derivative(f(x), x), (x,), (0,)) + \
x**2*Subs(Derivative(f(x), x, x), (x,), (0,))/2 + O(x**3)
assert f(x).series(x, 0, 3) == \
f(0) + x*Subs(Derivative(f(x), x), (x,), (0,)) + \
x**2*Subs(Derivative(f(x), x, x), (x,), (0,))/2 + O(x**3)
assert f(x**2).series(x, 0, 3) == \
f(0) + x**2*Subs(Derivative(f(x), x), (x,), (0,)) + O(x**3)
assert f(x**2+1).series(x, 0, 3) == \
f(1) + x**2*Subs(Derivative(f(x), x), (x,), (1,)) + O(x**3)
class TestF(Function):
pass
assert TestF(x).series(x, 0, 3) == TestF(0) + \
x*Subs(Derivative(TestF(x), x), (x,), (0,)) + \
x**2*Subs(Derivative(TestF(x), x, x), (x,), (0,))/2 + O(x**3)
from sympy.series.acceleration import richardson, shanks
from sympy import Sum, Integer
def test_acceleration():
e = (1 + 1/n)**n
assert round(richardson(e, n, 10, 20).evalf(), 10) == round(E.evalf(), 10)
A = Sum(Integer(-1)**(k + 1) / k, (k, 1, n))
assert round(shanks(A, n, 25).evalf(), 4) == round(log(2).evalf(), 4)
assert round(shanks(A, n, 25, 5).evalf(), 10) == round(log(2).evalf(), 10)
def test_issue_5852():
assert series(1/cos(x/log(x)), x, 0) == 1 + x**2/(2*log(x)**2) + \
5*x**4/(24*log(x)**4) + O(x**6)
def test_issue_4583():
assert cos(1 + x + x**2).series(x, 0, 5) == cos(1) - x*sin(1) + \
x**2*(-sin(1) - cos(1)/2) + x**3*(-cos(1) + sin(1)/6) + \
x**4*(-11*cos(1)/24 + sin(1)/2) + O(x**5)
def test_issue_6318():
eq = (1/x)**(S(2)/3)
assert (eq + 1).as_leading_term(x) == eq
def test_x_is_base_detection():
eq = (x**2)**(S(2)/3)
assert eq.series() == x**(S(4)/3)
def test_sin_power():
e = sin(x)**1.2
assert calculate_series(e, x) == x**1.2
def test_issue_7203():
assert series(cos(x), x, pi, 3) == \
-1 + (x - pi)**2/2 + O((x - pi)**3, (x, pi))
def test_exp_product_positive_factors():
a, b = symbols('a, b', positive=True)
x = a * b
assert series(exp(x), x, n=8) == 1 + a*b + a**2*b**2/2 + \
a**3*b**3/6 + a**4*b**4/24 + a**5*b**5/120 + a**6*b**6/720 + \
a**7*b**7/5040 + O(a**8*b**8, a, b)
def test_issue_8805():
assert series(1, n=8) == 1
| bsd-3-clause | 1,150,461,149,742,997,500 | 31.454545 | 80 | 0.492597 | false |
ericmjonas/Sim2600 | sim2600/sim2600Console.py | 1 | 17332 | # Copyright (c) 2014 Greg James, Visual6502.org
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os, struct
from array import array
import params
from sim6502 import Sim6502
from simTIA import SimTIA
from emuPIA import EmuPIA
class Sim2600Console:
def __init__(self, romFilePath, sim6502factory=Sim6502,
simTIAfactory= SimTIA):
self.sim6507 = sim6502factory()
self.simTIA = simTIAfactory()
self.emuPIA = EmuPIA()
self.rom = array('B', [0] * 4096)
self.bankSwitchROMOffset = 0
self.programLen = 0
self.loadProgram(romFilePath)
self.sim6507.resetChip()
# The 6507's IRQ and NMI are connected to the supply voltage
# Setting them to 'pulled high' will keep them high.
self.sim6507.setPulledHigh(self.sim6507.getWireIndex('IRQ'))
self.sim6507.setPulledHigh(self.sim6507.getWireIndex('NMI'))
self.sim6507.recalcWireNameList(['IRQ', 'NMI'])
# TIA CS1 is always high. !CS2 is always grounded
self.simTIA.setPulledHigh(self.simTIA.getWireIndex('CS1'))
self.simTIA.setPulledLow(self.simTIA.getWireIndex('CS2'))
self.simTIA.recalcWireNameList(['CS1','CS2'])
# We're running an Atari 2600 program, so set memory locations
# for the console's switches and joystick state.
# Console switches:
# d3 set to 1 for color (vs B&W),
# d1 select set to 1 for 'switch not pressed'
# d0 set to 1 switch
self.writeMemory(0x0282, 0x0B, True)
# No joystick motion
# joystick trigger buttons read on bit 7 of INPT4 and INPT5 of TIA
self.writeMemory(0x0280, 0xFF, True)
# Memory is mapped as follows:
# 0x00 - 0x2C write to TIA
# 0x30 - 0x3D read from TIA
# 0x80 - 0xFF PIA RAM (128 bytes), also mapped to 0x0180 - 0x01FF for the stack
# 0280 - 0297 PIA i/o ports and timer
# F000 - FFFF Cartridge memory, 4kb
# We handle 2k, 4k, and 8k cartridges, but only handle the bank switching
# operations used by Asteroids: write to 0xFFF8 or 0xFFF9
#
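    # Worked examples of the decode implemented in readMemory/writeMemory below
    # (illustrative only): reads of 0x0080 and 0x0180 both hit emuPIA.ram[0]
    # (the 128-byte RAM is mirrored for the stack page), 0x0284 returns the PIA
    # timer value kept up to date in advanceOneHalfClock, and 0xFFFC reads
    # rom[0x0FFC + bankSwitchROMOffset] (the reset vector).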
def readMemory(self, addr):
if addr > 0x02FF and addr < 0x8000:
estr = 'ERROR: 6507 ROM reading addr from 0x1000 to 0x1FFF: 0x%X'%addr
print(estr)
return 0
data = 0
if (addr >= 0x80 and addr <= 0xFF) or (addr >= 0x180 and addr <= 0x1FF):
data = self.emuPIA.ram[(addr & 0xFF) - 0x80]
elif addr >= 0x0280 and addr <= 0x0297:
data = self.emuPIA.iot[addr - 0x0280]
elif addr >= 0xF000 or \
(addr >= 0xD000 and addr <= 0xDFFF and self.programLen == 8192):
data = self.rom[addr - 0xF000 + self.bankSwitchROMOffset]
elif addr >= 0x30 and addr <= 0x3D:
# This is a read from the TIA where the value is
# controlled by the TIA data bus bits 6 and 7 drive-low
# and drive-high gates: DB6_drvLo, DB6_drvHi, etc.
# This is handled below, so no need for anything here
pass
elif addr <= 0x2C or (addr >= 0x100 and addr <= 0x12C):
# This happens all the time, usually at startup when
# setting data at all writeable addresses to 0.
msg = 'CURIOUS: Attempt to read from TIA write-only address 0x%4.4X'%(addr)
#print(msg)
else:
# This can happen when the 6507 is coming out of RESET.
# It sets the first byte of the address bus, issues a read,
# then sets the second byte, and issues another read to get
# the correct reset vector.
msg = 'WARNING: Unhandled address in readMemory: 0x%4.4X'%(addr)
print(msg)
cpu = self.sim6507
tia = self.simTIA
if cpu.isHigh(cpu.padIndSYNC):
for wireIndex in tia.dataBusDrivers:
if tia.isHigh(wireIndex):
estr = 'ERROR: TIA driving DB when 6502 fetching ' + \
'instruction at addr 0x%X'%(addr)
print(estr)
else:
if tia.isHigh(tia.indDB6_drvLo):
data = data & (0xFF ^ (1<<6))
if tia.isHigh(tia.indDB6_drvHi):
data = data | (1<<6)
if tia.isHigh(tia.indDB7_drvLo):
data = data & (0xFF ^ (1<<7))
if tia.isHigh(tia.indDB7_drvHi):
data = data | (1<<7)
if addr & 0x200 and addr < 0x2FF:
print('6507 READ [0x%X]: 0x%X'%(addr, data))
cpu.setDataBusValue(data)
cpu.recalcWireList(cpu.dataBusPads)
return data
def writeMemory(self, addr, byteValue, setup=False):
cpu = self.sim6507
tia = self.simTIA
pia = self.emuPIA
if cpu.isLow(cpu.padReset) and not setup:
print('Skipping 6507 write during reset. addr: 0x%X'%(addr))
return
if addr >= 0xF000 and not setup:
if self.programLen == 8192:
if addr == 0xFFF9:
# switch to bank 0 which starts at 0xD000
self.bankSwitchROMOffset = 0x2000
elif addr == 0xFFF8:
self.bankSwitchROMOffset = 0x1000
else:
                estr = 'ERROR: 6507 writing to ROM space addr ' + \
                       '0x%4.4X data 0x%2.2X '%(addr, byteValue)
if addr >= 0xFFF4 and addr <= 0xFFFB:
estr += 'This is likely a bank switch strobe we have not implemented'
elif addr >= 0xF000 and addr <= 0xF07F:
estr += 'This is likely a cartridge RAM write we have not implemented'
raise RuntimeError(estr)
# 6502 shouldn't write to where we keep the console switches
if (addr == 0x282 or addr == 0x280) and not setup:
estr = 'ERROR: 6507 writing to console or joystick switches ' + \
'addr 0x%4.4X data 0x%2.2X'%(addr,byteValue)
print(estr)
return
if addr < 0x280:
msg = '6507 WRITE to [0x%4.4X]: 0x%2.2X at 6507 halfclock %d'% \
(addr, byteValue, cpu.halfClkCount)
print(msg)
if (addr >= 0x80 and addr <= 0xFF) or (addr >= 0x180 and addr <= 0x1FF):
pia.ram[(addr & 0xFF) - 0x80] = byteValue
elif addr >= 0x0280 and addr <= 0x0297:
pia.iot[addr - 0x0280] = byteValue
period = None
if addr == 0x294:
period = 1
elif addr == 0x295:
period = 8
elif addr == 0x296:
period = 64
elif addr == 0x297:
period = 1024
if period != None:
pia.timerPeriod = period
# initial value for timer read from data bus
                pia.timerValue = cpu.getDataBusValue()
pia.timerClockCount = 0
pia.timerFinished = False
#elif addr <= 0x2C:
# # Remember what we wrote to the TIA write-only address
# # This is only for bookeeping and debugging and is not
# # used for simulation.
# self.simTIA.lastControlValue[addr] = byteValue
def loadProgramBytes(self, progByteList, baseAddr, setResetVector):
pch = baseAddr >> 8
pcl = baseAddr & 0xFF
print('loadProgramBytes base addr $%2.2X%2.2X'%(pch,pcl))
romDuplicate = 1
programLen = len(progByteList)
self.programLen = programLen
if not programLen in [2048, 4096, 8192]:
estr = 'No support for program byte list of length %d'%(programLen)
raise RuntimeError(estr)
if programLen == 2048:
# Duplicate ROM contents so it fills all of 0xF000 - 0xFFFF
romDuplicate = 2
elif programLen == 8192:
self.bankSwitchROMOffset = 0x1000
self.rom = array('B', progByteList * romDuplicate)
if setResetVector == True:
print("Setting program's reset vector to program's base address")
self.writeMemory(0xFFFC, pcl, True)
self.writeMemory(0xFFFD, pch, True)
else:
pcl = self.readMemory(0xFFFA)
pch = self.readMemory(0xFFFB)
print("NMI vector: %X %X"%(pch, pcl))
pcl = self.readMemory(0xFFFC)
pch = self.readMemory(0xFFFD)
print("Reset vector: %X %X"%(pch, pcl))
pcl = self.readMemory(0xFFFE)
pch = self.readMemory(0xFFFF)
print("IRQ/BRK vector: %X %X"%(pch, pcl))
def loadProgram(self, programFilePath):
if not os.path.exists(programFilePath):
            estr = 'ERROR: Could not find program "%s"'%(programFilePath) + \
                   ' from current dir %s'%(os.getcwd())
raise RuntimeError(estr)
print('Setting 6502 program to ROM image %s'%(programFilePath))
self.programFilePath = programFilePath
# load ROM from file
of = open (programFilePath, 'rb')
byteStr = of.read()
of.close()
program = []
progHex = ''
count = 0
for byte in byteStr:
intVal = struct.unpack ('1B', byte)[0]
progHex += '%2.2X '%intVal
count += 1
if count == 8:
progHex += ' '
elif count == 16:
progHex += '\n'
count = 0
program.append (intVal)
baseAddr = 0xF000
if len(byteStr) == 8192:
print('Loading 8kb ROM starting from 0x%X'%baseAddr)
elif len(byteStr) == 2048:
baseAddr = 0xF800
print('Loading 2kb ROM starting from 0x%X'%baseAddr)
self.loadProgramBytes(program, baseAddr, False)
def updateDataBus(self):
cpu = self.sim6507
tia = self.simTIA
# transfer 6507 data bus to TIA
# TIA DB0-DB5 are pure inputs
# TIA DB6 and DB7 can be driven high or low by the TIA
# TIA CS3 or CS0 high inhibits tia from driving db6 and db7
i = 0
numPads = len(cpu.dataBusPads)
while i < numPads:
dbPadHigh = cpu.isHigh(cpu.dataBusPads[i])
tia.setPulled(tia.dataBusPads[i], dbPadHigh)
i += 1
tia.recalcWireList(tia.dataBusPads)
hidrv = False
for wireInd in tia.dataBusDrivers:
if tia.isHigh(wireInd):
hidrv = True
break
if hidrv:
# 6502 SYNC is HIGH when its fetching instruction, so make sure
# our DB is not being written to by the TIA at this time
if cpu.isHigh(cpu.padIndSYNC):
estr = 'ERROR: TIA driving DB when 6502 fetching instruction'
#report.add (estr)
print(estr)
def advanceOneHalfClock(self): #D circuitSim6502, circuitSimTIA, emuPIA):
cpu = self.sim6507
tia = self.simTIA
pia = self.emuPIA
# Set all TIA inputs to be pulled high. These aren't updated to
# reflect any joystick or console switch inputs, but they could be.
# To give the sim those inputs, you could check the sim halfClkCount,
# and when it hits a certain value or range of values, set whatever
# ins you like to low or high.
# Here, we make an arbitrary choice to set the pads to be pulled
# high for 10 half clocks. After this, they should remain pulled
# high, so choosing 10 half clocks or N > 0 half clocks makes no
# difference.
if tia.halfClkCount < 10:
for wireIndex in tia.inputPads:
tia.setPulledHigh(wireIndex)
tia.recalcWireList(tia.inputPads)
tia.setPulledHigh(tia.padIndDEL)
tia.recalcWire(tia.padIndDEL)
# TIA 6x45 control ROM will change when R/W goes HI to LOW only if
# the TIA CLK2 is LOW, so update R/W first, then CLK2.
# R/W is high when 6502 is reading, low when 6502 is writing
tia.setPulled(tia.padIndRW, cpu.isHigh(cpu.padIndRW))
tia.recalcWire(tia.padIndRW)
addr = cpu.getAddressBusValue()
# Transfer the state of the 6507 simulation's address bus
# to the corresponding address inputs of the TIA simulation
for i, tiaWireIndex in enumerate(tia.addressBusPads):
padValue = cpu.isHigh(cpu.addressBusPads[i])
if cpu.isHigh(cpu.addressBusPads[i]):
tia.setHigh(tiaWireIndex)
else:
tia.setLow(tiaWireIndex)
tia.recalcWireList(tia.addressBusPads)
# 6507 AB7 goes to TIA CS3 and PIA CS1
# 6507 AB12 goes to TIA CS0 and PIA CS0, but which 6502 AB line is it?
# 6507 AB12, AB11, AB10 are not connected externally, so 6507 AB12 is
# 6502 AB15
#
# TODO: return changed/unchanged from setHigh, setLow to decide to recalc
if addr > 0x7F:
# It's not a TIA address, so set TIA CS3 high
# Either CS3 high or CS0 high should disable TIA from writing
tia.setHigh(tia.padIndCS3)
tia.setHigh(tia.padIndCS0)
else:
# It is a TIA addr from 0x00 to 0x7F, so set CS3 and CS0 low
tia.setLow(tia.padIndCS3)
tia.setLow(tia.padIndCS0)
tia.recalcWireList(tia.padIndsCS0CS3)
self.updateDataBus()
# Advance the TIA 2nd input clock that is controlled
# by the 6507's clock generator.
tia.setPulled(tia.padIndCLK2, cpu.isHigh(cpu.padIndCLK1Out))
tia.recalcWire(tia.padIndCLK2)
#print('TIA sim num wires added to groups %d, num ant %d'%
# (tia.numAddWireToGroup, tia.numAddWireTransistor))
tia.clearSimStats()
# Advance TIA 'CLK0' by one half clock
tia.setPulled(tia.padIndCLK0, not tia.isHigh(tia.padIndCLK0))
tia.recalcWire(tia.padIndCLK0)
tia.halfClkCount += 1
# This is a good place to record the TIA and 6507 (6502)
# state if you want to capture something like a logic
# analyzer trace.
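        # A minimal capture sketch (assumes a self.trace list created by the
        # caller; the calls used below all exist elsewhere in this file):
        #   self.trace.append((tia.halfClkCount,
        #                      cpu.getAddressBusValue(),
        #                      cpu.getDataBusValue(),
        #                      tia.isHigh(tia.padIndPH0)))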
# Transfer bits from TIA pads to 6507 pads
# TIA RDY and 6507 RDY are pulled high through external resistor, so pull
# the pad low if the TIA RDY_lowCtrl is on.
cpu.setPulled(cpu.padIndRDY, not tia.isHigh(tia.indRDY_lowCtrl))
cpu.recalcWire(cpu.padIndRDY)
# TIA sends a clock to the 6507. Propagate this clock from the
# TIA simulation to the 6507 simulation.
clkTo6507IsHigh = tia.isHigh(tia.padIndPH0)
if clkTo6507IsHigh != cpu.isHigh(cpu.padIndCLK0):
# Emulate the PIA timer
# Here at Visual6502.org, we're building a gate-level model
# of the PIA, but it's not ready yet.
pia = self.emuPIA
if clkTo6507IsHigh:
            # When it's reached its end, it counts down from 0xFF every clock
# (every time the input clock is high, it advances)
if pia.timerFinished:
pia.timerValue -= 1
if pia.timerValue < 0:
# Assume it doesn't wrap around
pia.timerValue = 0
else:
pia.timerClockCount += 1
if pia.timerClockCount >= pia.timerPeriod:
# decrement interval counter
pia.timerValue -= 1
pia.timerClockCount = 0
if pia.timerValue < 0:
pia.timerFinished = True
pia.timerValue = 0xFF
# Advance the 6502 simulation 1 half clock cycle
if clkTo6507IsHigh:
cpu.setPulledHigh(cpu.padIndCLK0)
else:
cpu.setPulledLow(cpu.padIndCLK0)
# Put PIA count value into memory so 6507 can read it
# like a regular memory read.
self.writeMemory(0x284, pia.timerValue)
cpu.recalcWire(cpu.padIndCLK0)
cpu.halfClkCount += 1
addr = cpu.getAddressBusValue()
if cpu.isHigh(cpu.padIndCLK0):
if cpu.isLow(cpu.padIndRW):
data = cpu.getDataBusValue()
self.writeMemory(addr, data)
else:
# 6507's CLK0 is low
if cpu.isHigh(cpu.padIndRW):
self.readMemory(addr)
| cc0-1.0 | 4,285,064,449,283,350,500 | 38.480638 | 85 | 0.587064 | false |
google/dqm | backend/dqm/checks/check_non_useful_parameters.py | 1 | 2448 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dqm.check_bricks import (
Check,
DataType,
Parameter,
Platform,
Result,
ResultField,
Theme,
)
from dqm.helpers import analytics
class CheckNonUsefulParameters(Check):
"""Detect tracked URLs containing non useful parameters.
GIVEN
A site URL
WHEN
      A query to GA Reporting API v4, filtering on ("ga:pagePath" dimension
      contains "msclkid=" OR "fbclid=" OR "token=" OR "vid=" OR "cid="
      OR "mt_*=" OR "efid=") returns 1 or more results
THEN
The URL should be flagged as potentially non-useful.
"""
title = 'Non useful URI parameters'
description = """
Detect tracked URLs containing non useful parameters, such as msclkid, fbclid,
token, vid, cid or mt_*.
"""
platform = Platform.Ga
theme = Theme.Trustful
parameters = [
Parameter(name='viewId', data_type=DataType.STRING, delegate=True),
Parameter(name='startDate', data_type=DataType.DATE, delegate=True),
Parameter(name='endDate', data_type=DataType.DATE, delegate=True),
Parameter(name='blackList',
title='Parameters blacklist',
data_type=DataType.LIST,
default=['msclkid', 'fbclid', 'token', 'vid', 'cid']),
]
result_fields = [
ResultField(name='url', title='URL', data_type=DataType.STRING),
ResultField(name='param', title='Parameter name', data_type=DataType.STRING)
]
def run(self, params):
params = self.validate_values(params)
black_list = params['blackList']
urls = analytics.get_url_parameters(
view_id=params['viewId'],
start_date=params['startDate'],
end_date=params['endDate'])
# TODO: add "mt_*="
problems = []
for url in urls:
for p in url['params']:
if p in black_list:
problems.append({
'url': url['url'],
'param': p,
})
return Result(success=not problems, payload=problems) | apache-2.0 | 4,856,807,815,189,841,000 | 30 | 80 | 0.671977 | false |
krzysztof/zenodo | zenodo/modules/records/config.py | 1 | 3258 | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2015 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Configuration for Zenodo Records."""
from __future__ import absolute_import, print_function
from flask_babelex import gettext
from speaklater import make_lazy_gettext
_ = make_lazy_gettext(lambda: gettext)
ZENODO_RELATION_RULES = {
'f1000research': [{
'prefix': '10.12688/f1000research',
'relation': 'isCitedBy',
'scheme': 'doi',
'text': 'Published in',
'image': 'img/f1000research.jpg',
}],
'inspire': [{
'prefix': 'http://inspirehep.net/record/',
'relation': 'isSupplementedBy',
'scheme': 'url',
'text': 'Available in',
'image': 'img/inspirehep.png',
}],
'briefideas': [{
'prefix': 'http://beta.briefideas.org/',
'relation': 'isIdenticalTo',
'scheme': 'url',
'text': 'Published in',
'image': 'img/briefideas.png',
}],
'zenodo': [{
'prefix': 'https://github.com',
'relation': 'isSupplementTo',
'scheme': 'url',
'text': 'Available in',
'image': 'img/github.png',
}, {
'prefix': '10.1109/JBHI',
'relation': 'isCitedBy',
'scheme': 'doi',
'text': 'Published in',
'image': 'img/ieee.jpg',
}],
}
ZENODO_COMMUNITY_BRANDING = [
'biosyslit',
'lory',
]
ZENODO_RELATION_TYPES = [
('isCitedBy', _('Cited by')),
('cites', _('Cites')),
('isSupplementTo', _('Supplement to')),
('isSupplementedBy', _('Supplementary material')),
('references', _('References')),
('isReferencedBy', _('Referenced by')),
('isNewVersionOf', _('Previous versions')),
('isPreviousVersionOf', _('New versions')),
('isPartOf', _('Part of')),
('hasPart', _('Has part')),
('isDocumentedBy', _('Documented by')),
('documents', _('Documents')),
('compiles', _('Compiles')),
('isCompiledBy', _('Compiled by')),
('isIdenticalTo', _('Identical to')),
]
ZENODO_LOCAL_DOI_PREFIXES = []
ZENODO_DOIID4RECID = {
7468: 7448,
7458: 7457,
7467: 7447,
7466: 7446,
7465: 7464,
7469: 7449,
7487: 7486,
7482: 7481,
7484: 7483,
}
"""Mapping of recids to the id used in generated DOIs.
Wrong DOIs were minted for a short period in 2013 due to mistake in the legacy
system.
"""
| gpl-2.0 | -8,156,797,694,020,821,000 | 28.089286 | 78 | 0.611111 | false |
cedwards/Nova | _modules/oscap.py | 1 | 2187 | # -*- coding: utf-8 -*-
'''
OpenSCAP scanner execution module.
:maintainer: Christer Edwards ([email protected])
:maturity: 20160430
:platform: Linux
:requires: SaltStack
:upstream: http://open-scap.org
This execution module uses the openSCAP scanner utility and an argument of an
XML guide. The returned data should be a dictionary of the cmd output.
On CentOS the packages are: openscap-scanner openscap
Configurable options would be:
show_success: True/False
This version requires the file be stored in /root/ (because I'm being lazy).
Afterwards the command is run as:
.. code-block:: shell
wget http://www.redhat.com/security/data/oval/com.redhat.rhsa-RHELX.xml
salt centos\* oscap.scan salt://com.redhat.rhsa-RHELX.xml
Roadmap:
* top.nova mapping for feed profiles
* performance improvements
* feed-type via args (oval vs xccdf) / autodetection
* support ubuntu, debian, centos, rhel, suse
* support already exists for FreeBSD (via pkg audit)
* cmd output or results.xml parsing and custom reporting
'''
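# Example of the dictionary returned by scan() (values are illustrative and
# follow the parsing logic implemented below):
#
#   {'Vulnerabilities': [
#       'RHSA-20151623 : https://rhn.redhat.com/errata/RHSA-2015-1623.html']}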
from __future__ import absolute_import
# Import python libs
import logging
from salt import utils
__virtualname__ = 'oscap'
log = logging.getLogger(__name__)
_OSCAP = utils.which('oscap')
def __virtual__():
    '''
    Compatible with Linux & requires oscap binary
    '''
    # Only load this module when the oscap binary is actually available.
    if not _OSCAP:
        return False
    return __virtualname__
def scan(filename):
'''
scan function
'''
    if not filename.startswith('salt://'):
        filename = 'salt://' + filename
    cached_source = __salt__['cp.cache_file'](filename)
ret = {'Vulnerabilities': []}
cmd = '{0} oval eval {1}'.format(_OSCAP, cached_source)
salt_ret = __salt__['cmd.run_all'](cmd, python_shell=False)
items = salt_ret['stdout'].split('\n')
for item in items:
if 'true' in item:
if 'rhsa' in item:
rhsa = item.split(':')[3]
year = item.split(':')[3][:4]
num = item.split(':')[3][4:]
url = 'https://rhn.redhat.com/errata/RHSA-' + year + '-' + num + '.html'
ret['Vulnerabilities'].append('RHSA-' + rhsa + ' : ' + url)
return ret
| apache-2.0 | -2,844,935,003,297,489 | 25.349398 | 88 | 0.639232 | false |
F5Networks/f5-common-python | f5/bigip/contexts.py | 1 | 4118 | # coding=utf-8
#
# Copyright 2014 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import logging
from f5.sdk_exception import TransactionSubmitException
class TransactionContextManager(object):
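    """Context manager that wraps a BIG-IP iControl REST transaction.

    Minimal usage sketch (the management-root attribute names below are
    illustrative only; adapt them to your own objects)::

        tx = mgmt.tm.transactions.transaction
        with TransactionContextManager(tx) as bigip:
            bigip.tm.ltm.pools.pool.create(name='pool1', partition='Common')
        # On exit the transaction is committed, or rolled back by the server
        # if the commit fails.
    """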
def __init__(self, transaction, validate_only=False):
"""Initialize a new Transaction context
        Args:
            transaction (Transaction): The transaction object that this
                context manager creates, populates and commits.
            validate_only (bool): Will not commit the transaction, but only
                validate whether it would succeed.
Attributes:
transaction (Transaction): The transaction object that was sent
to the context manager
validate_only (bool): Specifies whether the transaction should
commit itself upon `__exit__` or whether the commands in the
transaction should just be checked to make sure they don't
raise an error.
bigip (dict): A reference to the dictionary containing the BIG-IP
mgmt_root
icr (iControlRESTSession): A reference to the dictionary
containing the iControl REST session
original_headers (dict): A deep copy of all the headers that were
originally part of the iControl REST session. A copy is needed
so that we can revert back to them after the transaction has
been committed, since the only way to commit the transaction
is to set the X-F5-REST-Coordination-Id to the value of the
transaction ID of the transaction.
"""
self.transaction = transaction
self.validate_only = validate_only
self.bigip = transaction._meta_data['bigip']
self.icr = self.bigip._meta_data['icr_session']
self.original_headers = copy.deepcopy(self.icr.session.headers)
def __enter__(self):
"""Begins a new transaction context
When a transaction begins, this method will automatically be called
to set up the transaction.
Transaction IDs are automatically retrieved for you and the
appropriate headers are set so that operations in the Transaction
Context will reference the transaction.
Headers are preserved so that after you exit the transaction, you will
be able to use the API object as you normally would.
"""
self.transaction = self.transaction.create()
self.icr.session.headers.update({
'X-F5-REST-Coordination-Id': str(self.transaction.transId)
})
return self.bigip
def __exit__(self, exc_type, exc_value, exc_tb):
"""Commit a transaction upon Context Manager exit
Upon exit, the transaction will attempt to be committed. If the commit
fails, the transaction will automatically be rolled back by the server
performing the transaction.
:param exc_type: The type of exception raised
:param exc_value: Value of the exception raised
:param exc_tb: Traceback
NOTE: If the context exits without an exception, all three of the
parameters will be None
:returns: void
"""
self.icr.session.headers = dict()
if exc_tb is None:
try:
self.transaction.modify(state="VALIDATING",
validateOnly=self.validate_only)
except Exception as e:
logging.debug(e)
raise TransactionSubmitException(e)
finally:
self.icr.session.headers = self.original_headers
self.icr.session.headers = self.original_headers
| apache-2.0 | -7,644,202,878,223,179,000 | 38.980583 | 78 | 0.647644 | false |
openstack/openstack-ansible-lxc_hosts | doc/source/conf.py | 1 | 9923 | #!/usr/bin/env python3
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'openstackdocstheme',
'sphinx.ext.autodoc',
'sphinxcontrib.rsvgconverter',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
author = 'OpenStack-Ansible Contributors'
category = 'Miscellaneous'
copyright = '2014-2016, OpenStack-Ansible Contributors'
description = 'OpenStack-Ansible deploys OpenStack environments using Ansible.'
project = 'OpenStack-Ansible'
role_name = 'lxc_hosts'
target_name = 'openstack-ansible-' + role_name
title = 'OpenStack-Ansible Documentation: ' + role_name + ' role'
# openstackdocstheme options
openstackdocs_repo_name = 'openstack/' + target_name
openstackdocs_pdf_link = True
openstackdocs_bug_project = project.lower()
openstackdocs_bug_tag = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = target_name + '-docs'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'doc-' + target_name + '.tex',
title.replace("_", r"\_"), author, 'manual'),
]
latex_use_xindy = False
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, target_name,
title, [author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, target_name,
title, author, project,
description, category),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for PDF output --------------------------------------------------
pdf_documents = [
(master_doc, target_name,
title, author)
]
locale_dirs = ['locale/']
| apache-2.0 | 4,027,649,898,609,721,300 | 31.857616 | 79 | 0.700091 | false |
nansencenter/nansat | nansat/mappers/mapper_amsre_uham_leadfraction.py | 1 | 1565 | #-------------------------------------------------------------------------------
# Name: mapper_amsre_UHAM_lead_fraction.py
# Purpose:
#
# Author: Morten Wergeland Hansen
# Modified: Morten Wergeland Hansen
#
# Created: 18.02.2015
# Last modified:24.02.2015 09:26
# Copyright: (c) NERSC
# License:
#-------------------------------------------------------------------------------
import datetime
from osgeo import gdal, osr
from nansat.nsr import NSR
from nansat.vrt import VRT
from nansat.exceptions import WrongMapperError
class Mapper(VRT):
def __init__(self, filename, gdalDataset, gdalMetadata, **kwargs):
title_correct = False
if not gdalMetadata:
raise WrongMapperError
for key, val in list(gdalMetadata.items()):
if 'title' in key:
                if val != 'Daily AMSR-E Arctic lead area fraction [in percent]':
                    raise WrongMapperError
                title_correct = True
if not title_correct:
raise WrongMapperError
# initiate VRT for the NSIDC 10 km grid
self._init_from_dataset_params(1216, 1792, (-3850000, 6250, 0.0, 5850000, 0.0, -6250),
NSR(3411).wkt)
src = {
'SourceFilename': 'NETCDF:"%s":lf' % filename,
'SourceBand': 1,
}
dst = {
'name': 'leadFraction',
'long_name': 'AMSRE sea ice lead fraction',
}
self.create_band(src, dst)
self.dataset.FlushCache()
| gpl-3.0 | -8,877,827,143,350,893,000 | 28.528302 | 94 | 0.513099 | false |
ReconOS/reconos | tools/_pypack/reconos/scripts/info.py | 1 | 1406 | import logging
import argparse
log = logging.getLogger(__name__)
def get_cmd(prj):
return "info"
def get_call(prj):
return info_cmd
def get_parser(prj):
parser = argparse.ArgumentParser("info", description="""
        Prints information regarding the active project.
""")
return parser
def info_cmd(args):
info(args)
def info(args):
prj = args.prj
print("-" * 40)
print("ReconOS Project '" + prj.name + "'")
print(" Board".ljust(20) + str(prj.impinfo.board))
print(" Reference Design".ljust(20) + prj.impinfo.design)
print(" Part".ljust(20) + prj.impinfo.part)
print(" Operating System".ljust(20) + prj.impinfo.os)
print(" Xilinx Tools".ljust(20) + ",".join(prj.impinfo.xil))
print(" CFlags".ljust(20) + prj.impinfo.cflags)
print(" LdFlags".ljust(20) + prj.impinfo.ldflags)
print("-" * 40)
print("Clocks:")
for c in prj.clocks:
print(" " + (c.name + "*" if c == prj.clock else "").ljust(18) + "[freq=" + str(c.freq // 1000000) + "MHz]")
print("Slots:")
for s in prj.slots:
print(" " + s.name.ljust(18) + "[id=" + str(s.id) + ",clk=" + s.clock.name + "]")
print("Resources:")
for r in prj.resources:
print(" " + r.name.ljust(18) + "[id=" + str(r.id) + ",type=" + r.type + ",args=" + str(r.args) + ",group=" + r.group + "]")
print("Threads:")
for t in prj.threads:
print(" " + t.name.ljust(18) + "[slots=" + str(t.slots) + ",resources=" + str(t.resources) + "]") | gpl-2.0 | -1,182,149,986,920,027,400 | 30.977273 | 126 | 0.613798 | false |
m8ttyB/socorro | socorro/unittest/submitter/test_submitter_app.py | 1 | 16998 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
import time
from configman.dotdict import DotDict
import mock
import pytest
from socorro.submitter.submitter_app import (
SubmitterApp,
SubmitterFileSystemWalkerSource,
)
from socorro.external.crashstorage_base import Redactor
from socorro.unittest.testbase import TestCase
def sequencer(*args):
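    """Build a stub callable returning the supplied values one per call.

    Values are popped from the end of ``args`` (so they come back in reverse
    order) and ``None`` is returned once they are exhausted.
    """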
list_of_args = list(args)
def foo(*fargs, **fkwargs):
try:
return list_of_args.pop()
except IndexError:
return None
return foo
def generator_for_sequence(*args):
list_of_args = list(args)
def foo(*fargs, **fkwargs):
try:
yield list_of_args.pop()
except IndexError:
return
return foo
class TestSubmitterFileSystemWalkerSource(TestCase):
def get_standard_config(self):
config = DotDict()
config.search_root = None
config.dump_suffix = '.dump'
config.dump_field = "upload_file_minidump"
config.redactor_class = Redactor
config.forbidden_keys = Redactor.required_config.forbidden_keys.default
config.logger = mock.MagicMock()
return config
def test_setup(self):
config = self.get_standard_config()
sub_walker = SubmitterFileSystemWalkerSource(config)
assert sub_walker.config == config
assert sub_walker.config.logger == config.logger
def test_get_raw_crash(self):
config = self.get_standard_config()
sub_walker = SubmitterFileSystemWalkerSource(config)
raw = ('{"name":"Gabi", ''"submitted_timestamp":"%d"}' % time.time())
fake_raw_crash = DotDict(json.loads(raw))
mocked_get_raw_crash = mock.Mock(return_value=fake_raw_crash)
sub_walker.get_raw_crash = mocked_get_raw_crash
path_tuple = ['6611a662-e70f-4ba5-a397-69a3a2121129.dump',
'6611a662-e70f-4ba5-a397-69a3a2121129.flash1.dump',
'6611a662-e70f-4ba5-a397-69a3a2121129.flash2.dump',
]
raw_crash = sub_walker.get_raw_crash(path_tuple)
assert isinstance(raw_crash, DotDict)
assert raw_crash['name'] == 'Gabi'
def test_get_raw_dumps_as_files(self):
config = self.get_standard_config()
sub_walker = SubmitterFileSystemWalkerSource(config)
dump_pathnames = (
'6611a662-e70f-4ba5-a397-69a3a2121129',
(
'raw_crash_file',
'/some/path/6611a662-e70f-4ba5-a397-69a3a2121129.dump',
'/some/path/6611a662-e70f-4ba5-a397-69a3a2121129.flash1.dump',
'/some/path/6611a662-e70f-4ba5-a397-69a3a2121129.flash2.dump',
),
)
raw_dumps_files = sub_walker.get_raw_dumps_as_files(dump_pathnames)
dump_names = {
'upload_file_minidump': '/some/path/6611a662-e70f-4ba5-a397-69a3a2121129.dump',
'flash1': '/some/path/6611a662-e70f-4ba5-a397-69a3a2121129.flash1.dump',
'flash2': '/some/path/6611a662-e70f-4ba5-a397-69a3a2121129.flash2.dump'
}
assert isinstance(raw_dumps_files, dict)
assert raw_dumps_files == dump_names
def test_new_crashes(self):
sequence = [
(
'./',
'6611a662-e70f-4ba5-a397-69a3a2121129.json',
'./6611a662-e70f-4ba5-a397-69a3a2121129.json',
),
(
'./',
'6611a662-e70f-4ba5-a397-69a3a2121129.upload.dump',
'./6611a662-e70f-4ba5-a397-69a3a2121129.upload.dump',
),
(
'./',
'7611a662-e70f-4ba5-a397-69a3a2121129.json',
'./7611a662-e70f-4ba5-a397-69a3a2121129.json',
),
(
'./',
'7611a662-e70f-4ba5-a397-69a3a2121129.other.dump',
'./7611a662-e70f-4ba5-a397-69a3a2121129.other.dump',
),
(
'./',
'7611a662-e70f-4ba5-a397-69a3a2121129.other.txt',
'./7611a662-e70f-4ba5-a397-69a3a2121129.other.txt',
),
(
'./',
'8611a662-e70f-4ba5-a397-69a3a2121129.json',
'./8611a662-e70f-4ba5-a397-69a3a2121129.json',
)
]
def findFileGenerator_mock_method(root, method):
for x in sequence:
if method(x):
yield x
def listdir_mock_method(a_path):
for x in sequence:
yield x[1]
config = self.get_standard_config()
expected = [
(
((
'6611a662-e70f-4ba5-a397-69a3a2121129',
[
'./6611a662-e70f-4ba5-a397-69a3a2121129.json',
'./6611a662-e70f-4ba5-a397-69a3a2121129.upload.dump'
],
), ),
{}
),
(
((
'7611a662-e70f-4ba5-a397-69a3a2121129',
[
'./7611a662-e70f-4ba5-a397-69a3a2121129.json',
'./7611a662-e70f-4ba5-a397-69a3a2121129.other.dump'
],
), ),
{}
),
(
((
'8611a662-e70f-4ba5-a397-69a3a2121129',
[
'./8611a662-e70f-4ba5-a397-69a3a2121129.json'
]
), ),
{}
),
]
find_patch_path = 'socorro.submitter.submitter_app.findFileGenerator'
with mock.patch(
find_patch_path,
new_callable=lambda: findFileGenerator_mock_method
):
listdir_patch_path = 'socorro.submitter.submitter_app.listdir'
with mock.patch(
listdir_patch_path,
new_callable=lambda: listdir_mock_method
):
sub_walker = SubmitterFileSystemWalkerSource(config)
result = [x for x in sub_walker.new_crashes()]
assert result == expected
class TestSubmitterApp(TestCase):
def get_standard_config(self):
config = DotDict()
config.source = DotDict()
mocked_source_crashstorage = mock.Mock()
mocked_source_crashstorage.id = 'mocked_source_crashstorage'
config.source.crashstorage_class = mock.Mock(
return_value=mocked_source_crashstorage
)
config.destination = DotDict()
mocked_destination_crashstorage = mock.Mock()
mocked_destination_crashstorage.id = 'mocked_destination_crashstorage'
config.destination.crashstorage_class = mock.Mock(
return_value=mocked_destination_crashstorage
)
config.producer_consumer = DotDict()
mocked_producer_consumer = mock.Mock()
mocked_producer_consumer.id = 'mocked_producer_consumer'
config.producer_consumer.producer_consumer_class = mock.Mock(
return_value=mocked_producer_consumer
)
config.producer_consumer.number_of_threads = float(1)
config.new_crash_source = DotDict()
config.new_crash_source.new_crash_source_class = None
config.submitter = DotDict()
config.submitter.delay = 0
config.submitter.dry_run = False
config.number_of_submissions = "all"
config.logger = mock.MagicMock()
return config
def get_new_crash_source_config(self):
config = DotDict()
config.source = DotDict()
mocked_source_crashstorage = mock.Mock()
mocked_source_crashstorage.id = 'mocked_source_crashstorage'
config.source.crashstorage_class = mock.Mock(
return_value=mocked_source_crashstorage
)
config.destination = DotDict()
mocked_destination_crashstorage = mock.Mock()
mocked_destination_crashstorage.id = 'mocked_destination_crashstorage'
config.destination.crashstorage_class = mock.Mock(
return_value=mocked_destination_crashstorage
)
config.producer_consumer = DotDict()
mocked_producer_consumer = mock.Mock()
mocked_producer_consumer.id = 'mocked_producer_consumer'
config.producer_consumer.producer_consumer_class = mock.Mock(
return_value=mocked_producer_consumer
)
config.producer_consumer.number_of_threads = float(1)
config.new_crash_source = DotDict()
mocked_new_crash_source = mock.Mock()
mocked_new_crash_source.id = 'mocked_new_crash_source'
config.new_crash_source.new_crash_source_class = mock.Mock(
return_value=mocked_new_crash_source
)
config.submitter = DotDict()
config.submitter.delay = 0
config.submitter.dry_run = False
config.number_of_submissions = "all"
config.logger = mock.MagicMock()
return config
def test_setup(self):
config = self.get_standard_config()
sub = SubmitterApp(config)
assert sub.config == config
assert sub.config.logger == config.logger
def test_transform(self):
config = self.get_standard_config()
sub = SubmitterApp(config)
sub._setup_source_and_destination()
crash_id = '86b58ff2-9708-487d-bfc4-9dac32121214'
fake_raw_crash = DotDict()
mocked_get_raw_crash = mock.Mock(return_value=fake_raw_crash)
sub.source.get_raw_crash = mocked_get_raw_crash
fake_dump = {'upload_file_minidump': 'fake dump'}
mocked_get_raw_dumps_as_files = mock.Mock(return_value=fake_dump)
sub.source.get_raw_dumps_as_files = mocked_get_raw_dumps_as_files
sub.destination.save_raw_crash = mock.Mock()
sub.transform(crash_id)
sub.source.get_raw_crash.assert_called_with(crash_id)
sub.source.get_raw_dumps_as_files.assert_called_with(crash_id)
sub.destination.save_raw_crash_with_file_dumps.assert_called_with(
fake_raw_crash,
fake_dump,
crash_id
)
def test_source_iterator(self):
# Test with number of submissions equal to all
# It raises StopIterations after all the elements were called
config = self.get_standard_config()
config.number_of_submissions = "all"
sub = SubmitterApp(config)
sub._setup_source_and_destination()
sub._setup_task_manager()
sub.source.new_crashes = lambda: iter([1, 2, 3])
itera = sub.source_iterator()
assert itera.next() == ((1,), {})
assert itera.next() == ((2,), {})
assert itera.next() == ((3,), {})
with pytest.raises(StopIteration):
itera.next()
# Test with number of submissions equal to forever
        # It never raises StopIteration
config = self.get_standard_config()
config.number_of_submissions = "forever"
sub = SubmitterApp(config)
sub._setup_source_and_destination()
sub._setup_task_manager()
itera = sub.source_iterator()
sub.source.new_crashes = lambda: iter([1, 2, 3])
assert itera.next() == ((1,), {})
assert itera.next() == ((2,), {})
assert itera.next() == ((3,), {})
assert itera.next() == ((1,), {})
assert itera.next() == ((2,), {})
assert itera.next() == ((3,), {})
# Test with number of submissions equal to an integer > number of items
        # It raises StopIteration after some number of elements have been consumed
config = self.get_standard_config()
config.number_of_submissions = "5"
sub = SubmitterApp(config)
sub._setup_source_and_destination()
sub._setup_task_manager()
itera = sub.source_iterator()
sub.source.new_crashes = lambda: iter([1, 2, 3])
assert itera.next() == ((1,), {})
assert itera.next() == ((2,), {})
assert itera.next() == ((3,), {})
assert itera.next() == ((1,), {})
assert itera.next() == ((2,), {})
with pytest.raises(StopIteration):
itera.next()
# Test with number of submissions equal to an integer < number of items
        # It raises StopIteration after some number of elements have been consumed
config = self.get_standard_config()
config.number_of_submissions = "1"
sub = SubmitterApp(config)
sub._setup_source_and_destination()
sub._setup_task_manager()
itera = sub.source_iterator()
sub.source.new_crashes = lambda: iter([1, 2, 3])
assert itera.next() == ((1,), {})
with pytest.raises(StopIteration):
itera.next()
def test_new_crash_source_iterator(self):
# Test with number of submissions equal to all
        # It raises StopIteration after all the elements have been consumed
config = self.get_new_crash_source_config()
config.number_of_submissions = "all"
sub = SubmitterApp(config)
sub._setup_source_and_destination()
sub._setup_task_manager()
config.new_crash_source.new_crash_source_class.return_value \
.new_crashes = lambda: iter([1, 2, 3])
itera = sub.source_iterator()
assert itera.next() == ((1,), {})
assert itera.next() == ((2,), {})
assert itera.next() == ((3,), {})
with pytest.raises(StopIteration):
itera.next()
# Test with number of submissions equal to forever
        # It never raises StopIteration
config = self.get_new_crash_source_config()
config.number_of_submissions = "forever"
sub = SubmitterApp(config)
sub._setup_source_and_destination()
sub._setup_task_manager()
itera = sub.source_iterator()
# setup a fake iter using two form of the data to ensure it deals
# with both forms correctly.
config.new_crash_source.new_crash_source_class.return_value \
.new_crashes = lambda: iter([1, ((2, ), {}), 3])
assert itera.next() == ((1,), {})
assert itera.next() == ((2,), {})
assert itera.next() == ((3,), {})
assert itera.next() == ((1,), {})
assert itera.next() == ((2,), {})
assert itera.next() == ((3,), {})
# Test with number of submissions equal to an integer > number of items
        # It raises StopIteration after some number of elements have been consumed
config = self.get_new_crash_source_config()
config.number_of_submissions = "5"
sub = SubmitterApp(config)
sub._setup_source_and_destination()
sub._setup_task_manager()
itera = sub.source_iterator()
def _iter():
return iter([((1, ), {'finished_func': (1,)}), 2, 3])
config.new_crash_source.new_crash_source_class.return_value.new_crashes = _iter
assert itera.next() == ((1,), {'finished_func': (1,)})
assert itera.next() == ((2,), {})
assert itera.next() == ((3,), {})
assert itera.next() == ((1,), {'finished_func': (1,)})
assert itera.next() == ((2,), {})
with pytest.raises(StopIteration):
itera.next()
# Test with number of submissions equal to an integer < number of items
        # It raises StopIteration after some number of elements have been consumed
config = self.get_new_crash_source_config()
config.number_of_submissions = "1"
sub = SubmitterApp(config)
sub._setup_source_and_destination()
sub._setup_task_manager()
itera = sub.source_iterator()
config.new_crash_source.new_crash_source_class.return_value \
.new_crashes = lambda: iter([1, 2, 3])
assert itera.next() == ((1,), {})
with pytest.raises(StopIteration):
itera.next()
# Test with number of submissions equal to an integer < number of items
        # AND the new_crashes iter returning an (args, kwargs) form rather
        # than a crash_id
        # It raises StopIteration after some number of elements have been consumed
config = self.get_new_crash_source_config()
config.number_of_submissions = "2"
sub = SubmitterApp(config)
sub._setup_source_and_destination()
sub._setup_task_manager()
itera = sub.source_iterator()
item1 = (((1, ['./1.json', './1.dump', './1.other.dump']), ), {})
item2 = (((2, ['./2.json', './1.dump']), ), {})
config.new_crash_source.new_crash_source_class.return_value \
.new_crashes = lambda: iter([item1, item2])
assert itera.next() == item1
assert itera.next() == item2
with pytest.raises(StopIteration):
itera.next()
| mpl-2.0 | 7,488,123,512,661,564,000 | 34.63522 | 91 | 0.572597 | false |
jeanfeydy/lddmm-ot | LDDMM_Python/lddmm_python/modules/io/level_lines.py | 1 | 1478 | from numpy import *
from skimage.measure import find_contours
from scipy import misc
from scipy.ndimage.filters import gaussian_filter
from scipy.interpolate import interp1d
from ..manifolds.curves import Curve
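# Helper routines used below: arclength_param returns the cumulative
# arc-length parameter of a polyline (one value per vertex, starting at 0),
# arclength returns its total length, and resample redistributes a polyline
# into `npoints` samples that are approximately equispaced in arc length,
# together with an (npoints-1, 2) array of segment connectivities.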
def arclength_param(line) :
vel = line[1:, :] - line[:-1, :]
vel = sqrt(sum( vel ** 2, 1 ))
return hstack( ( [0], cumsum( vel, 0 ) ) )
def arclength(line) :
return arclength_param(line)[-1]
def resample(line, npoints) :
s = arclength_param(line)
f = interp1d(s, line, kind = 'linear', axis = 0, assume_sorted = True)
t = linspace(0, s[-1], npoints)
p = f(t)
connec = vstack( (arange(0, len(p) - 1), arange(1, len(p)) ) ).T
return (p, connec)
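# level_curves: load an image file, normalize it to [0, 1], smooth it with a
# Gaussian filter and extract its iso-contours at the requested level with
# skimage's find_contours; the contour lines are then resampled so that the
# total number of points is roughly `npoints` and returned as a single Curve.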
def level_curves(fname, npoints, smoothing = 10, level = 0.5) :
# Find the contour lines
img = misc.imread(fname, flatten = True) # Grayscale
img = img.T[:, ::-1]
img = img / 255.
img = gaussian_filter(img, smoothing, mode='nearest')
lines = find_contours(img, level)
# Compute the sampling ratio
lengths = []
for line in lines :
lengths.append( arclength(line) )
lengths = array(lengths)
points_per_line = ceil( npoints * lengths / sum(lengths) )
# Interpolate accordingly
points = []
connec = []
index_offset = 0
for ppl, line in zip(points_per_line, lines) :
(p, c) = resample(line, ppl)
points.append(p)
connec.append(c + index_offset)
index_offset += len(p)
points = vstack(points)
connec = vstack(connec)
return Curve(points.ravel(), connec, 2) # Dimension 2 !
| mit | -2,916,201,283,628,472,300 | 25.854545 | 71 | 0.665538 | false |
danielelinaro/dynclamp | python/lcg/prc.py | 1 | 17634 | #!/usr/bin/env python
# Script to compute the PRC using LCG
#
import os
import sys
import numpy as np
import getopt
import subprocess as sub
import lcg
import time
from lcg.entities import (H5Recorder, LIFNeuron,
RealNeuron, Waveform,
Constant, ConstantFromFile,
SobolDelay,VariableDelayConnection,
EventCounter, FrequencyEstimator,
PeriodicTrigger, PID)
usage = '''Usage: {0} mode [option <value>]
Script to compute the PRC using direct or indirect methods.
The modes are:
steps - current steps are used to evoke repetitive firing
this is necessary when the cell does not fire spontaneously
fclamp - a PID controller is used to keep a constant firing rate
fclamp-sobol - the fclamp protocol is combined with the sobol sequence
allowing to optimally sample the phase space.
noise - a noisy current is injected to compute the PRC indirectly
General options:
-n number of repetitions (trials)
-i interval between repetitions
-I input channel
-O output channel
-F sampling frequency ({1} default)
-H baseline current
--dryrun
--model
Direct PRC options:
-a amplitude of the perturbation (150pA default; square wave)
-d duration of the perturbation (1ms default)
-f perturbation frequency (2.5 Hz or number (6) of spikes in Sobol mode)
STEPS mode:
-A amplitude of the step (default 200pA)
-D duration of the step (3s)
-S perturbation delay (1s default)
FCLAMP mode:
-P proportional gain
-G integrative gain
-T Frequency estimator time constant (0.1)
-t Target firing frequency (30Hz)
-D Duration of the trial (48s)
FCLAMP-SOBOL mode:
-D Number of stimulations per trial
-S offset of the sobol sequence so that we don't start
sampling always the same points
Indirect PRC; NOISE mode options:
-D duration of the noisy waveform (default 30s)
-A amplitude of the noise (0pA)
-S standard deviation of the noise (50pA)
-T time constant of the noise (20ms)
Examples:
lcg-prc fclamp-sobol -D 20 -f 20 -n 1 --model --no-kernel -H 220 -t 20 -n 10
lcg-prc fclamp -D 20 -f 2.5 -n 10 --model --no-kernel -H 220 -t 40 -n 10
lcg-prc noise -n 2 -t 60 -A 200 --model --no-kernel
'''.format(os.path.basename(sys.argv[0]), os.environ['SAMPLING_RATE'])
env = lambda x:os.environ[x]
def parse_prc_options(mode,opts):
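    """Build the option dictionary for the requested mode.

    Mode-specific defaults are filled in first and are then overridden by the
    command-line switches parsed with getopt (see the usage string above).
    """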
defaults = {'ai':env('AI_CHANNEL'),
                'ao':env('AO_CHANNEL'),
'holding':0.0,
'ntrials':10,
'tpre':1,
'tpost':1,
'intreps':0.05,
'srate':env('SAMPLING_RATE'),
'dryrun':False,
'kernel':True,
'model':False}
if mode in ('steps','fclamp','fclamp-sobol'):
defaults.update({'pert_amp':150,
'pert_dur':1,
'pert_waveform':'dc',
'pert_freq':2.5,
})
if mode == 'steps':
defaults.update({'step_amp':200,
'step_dur':3,
'intreps':5,
'ntrials':150,
'step_pdelay':1})
if 'fclamp' in mode:
defaults.update({'gi':0.001,
'gp':0.1,
'gd':0,
'Ftau':0.1,
'trial_dur':48,
'target_freq':30,
'ntrials':10,
'intreps':0})
if 'sobol' in mode :
defaults.update({'sobol_offset':0,
'pert_freq':6})
else:
defaults.update({'ou_amp':0,
'ou_std':50,
'ou_tau':20,
'ou_dur':30,
'ntrials':1})
options = defaults.copy()
for o,a in opts:
if o == '-I':
options['ai'] = [int(i) for i in a.split(',')]
elif o == '-O':
options['ao'] = [int(i) for i in a.split(',')]
elif o == '-F':
options['srate'] = [float(a)]
elif o == '-H':
options['holding'] = float(a)
elif o == '-i':
options['intreps'] = float(a)
elif o == '-n':
options['ntrials'] = int(a)
elif o == '-a':
options['pert_amp'] = float(a)
elif o == '-d':
options['pert_dur'] = float(a)
elif o == '-f':
options['pert_freq'] = float(a)
elif o == '-t':
options['target_freq'] = float(a)
elif o == '-A':
if mode == 'steps':
options['step_amp'] = float(a)
else :
options['ou_amp'] = float(a)
elif o == '-D':
if mode == 'steps':
options['step_dur'] = float(a)
elif 'fclamp' in mode:
options['trial_dur'] = float(a)
else:
options['ou_dur'] = float(a)
elif o == '-S':
if mode == 'steps':
options['step_pdelay'] = float(a)
elif 'sobol' in mode:
options['sobol_offset'] = float(a)
else:
options['ou_std'] = float(a)
elif o == '-T':
if 'fclamp' in mode:
options['Ftau'] = float(a)
else:
options['ou_tau'] = float(a)
elif o == '-P':
options['gp'] = float(a)
elif o == '-G':
options['gi'] = float(a)
elif o == '--model':
options['model'] = True
elif o == '--no-kernel':
options['kernel'] = False
elif o == '--dryrun':
options['dryrun'] = True
elif o in ['help','--help','-h']:
print(usage)
sys.exit(1)
else:
print('Unknown option...')
print(usage)
sys.exit(1)
print options
return options
modes = ['steps','fclamp','fclamp-sobol','noise']
switches = 'hn:i:I:O:F:H:a:d:f:t:A:D:P:G:T:S:'
long_switches = ['help','no-kernel','dryrun','model']
def build_steps_config(opts, config,lastValueFilename):
# Insert periodic trigger
baselineName = 'baseline.stim'
config.add_entity(PeriodicTrigger(3, (2),
delay = opts['step_pdelay'],
tend = opts['step_dur'] + opts['tpre'],
frequency = opts['pert_freq']))
config.add_entity(Waveform(4,(0,1),filename=baselineName, units = 'pA'))
sys.argv = ['lcg-stimgen','-o',baselineName,
'dc', '-d',str(opts['tpre']),'--','0',
'dc', '-d',str(opts['step_dur']),'--',str(opts['step_amp']),
'dc', '-d',str(opts['tpost']),'--','0']
lcg.stimgen.main()
# Holding current (use a ConstantFromFile)
config.add_entity(ConstantFromFile(id=len(config._entities),
connections=[0,1],
filename=lastValueFilename,
units='pA'))
config.set_tend(opts['tpre']+opts['step_dur']+opts['tpost'])
def build_fclamp_config(opts, config,lastValueFilename):
config.add_entity(PeriodicTrigger(3, (2),
frequency = opts['pert_freq']))
config.add_entity(PID(4,(0,1),
baseline=0.0,
gi=opts['gi'],
gp=opts['gp'],
gd=opts['gd']))
config.add_entity(Constant(id=5,
connections=[0,4],
value=opts['target_freq'],
units='Hz'))
config.add_entity(FrequencyEstimator(6,(0,4),
tau = opts['Ftau'],
initial_frequency= opts['target_freq']))
config.add_entity(ConstantFromFile(id=len(config._entities),
connections=[0,1],
filename=lastValueFilename,
units='pA'))
# Fix RealNeuronConnections
config._add_connections(config._entities[1],[6])
config.set_tend(opts['trial_dur'])
def build_fclamp_sobol_config(opts, config, lastValueFilename):
config.add_entity(PID(3,(0,1),
baseline=0.0,
gi=opts['gi'],
gp=opts['gp'],
gd=opts['gd']))
config.add_entity(Constant(id=4,
connections=[0,3],
value=opts['target_freq'],
units='Hz'))
config.add_entity(FrequencyEstimator(5,(0,3,6),
tau = opts['Ftau'],
initial_frequency= opts['target_freq']))
config.add_entity(SobolDelay(6,connections=(7),start_sample=opts['sobol_offset']))
config.add_entity(VariableDelayConnection(7,connections=[2]))
config.add_entity(EventCounter(8,connections=[7],
max_count=3,auto_reset=False))
config.add_entity(EventCounter(9,connections=[3,5],
max_count=2,event_to_send='TOGGLE',
auto_reset=False))
config.add_entity(EventCounter(10,connections=[3,5],
max_count=5,event_to_send='TOGGLE',
auto_reset=False))
config.add_entity(EventCounter(11,connections=[8,9,10,12],
max_count=opts['pert_freq'],event_to_send='RESET',
auto_reset=True))
config.add_entity(EventCounter(12,connections=[],
max_count=opts['trial_dur'],event_to_count='RESET',
event_to_send='STOPRUN',
auto_reset=True))
config.add_entity(ConstantFromFile(id=len(config._entities),
connections=[0,1],
filename=lastValueFilename,
units='pA'))
# Fix RealNeuronConnections
config._add_connections(config._entities[1],[5,8,9,10,11])
config.set_tend(opts['trial_dur'])
def build_noise_config(opts, config, waveformName,lastValueFilename):
config.add_entity(Waveform(id=2,
connections=[0,1],
filename=waveformName,
units='pA'))
sys.argv = ['lcg-stimgen','-o',waveformName,
'dc', '-d',str(opts['tpre']),'--','0',
'ou', '-d',str(opts['ou_dur']),'--',
str(opts['ou_amp']),str(opts['ou_std']),str(opts['ou_tau']),
'dc', '-d',str(opts['tpost']),'--','0']
lcg.stimgen.main()
config.add_entity(ConstantFromFile(id=len(config._entities),
connections=[0,1],
filename=lastValueFilename,
units='pA'))
config.set_tend(opts['tpre']+opts['ou_dur']+opts['tpost'])
def run_kernel(opts):
kernel_cmd = ('lcg-kernel -I {0} -O {1} -F {2} -H {3}'.format(opts['ai'],
opts['ao'],
opts['srate'],
opts['holding']))
print('''
Going to run the kernel protocol.
    The cell should not be firing spontaneously.
-> Use the amplifier to hyperpolarize the cell if necessary and press [ENTER].
''')
raw_input()
    if opts['dryrun'] or opts['model']:
print(kernel_cmd)
else:
sub.call(kernel_cmd,shell=True)
print('''
-> Release hyperpolarization and press [ENTER].
''')
raw_input()
def insert_model(config,holding,lastValueFilename):
config.add_entity(LIFNeuron(1, (0), C=0.08, tau=0.0075,
tarp=0.0014, Er=-65.2,
E0=-70, Vth=-50, Iext=0,
holdLastValue=True,
holdLastValueFilename=lastValueFilename))
def hold_cell(opts,lastValueFilename):
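    """Prepare holding of the cell at opts['holding'] pA.

    In model mode the holding value is simply written to the last-value file
    and an empty command string is returned; otherwise an XML configuration
    driving a RealNeuron with a Constant source is written to disk and the
    corresponding lcg-experiment command line is returned.
    """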
print('Holding the cell with {0} pA'.format(opts['holding']))
if opts['model']:
with open(lastValueFilename,'w') as fd:
fd.write('%e'%(opts['holding']))
return ''
else:
# Then use a RealNeuron to inject into the cell
tmp = lcg.XMLConfigurationFile(opts['srate'], 0.01)
insertRealNeuron(tmp, opts, [], lastValueFilename)
tmp.add_entity(Constant(id=2,
connections=[1],
value=opts['holding'],
units='pA'))
tmpName = 'holdNeuron.xml'
tmp.write(tmpName)
return 'lcg-experiment -c {0}'.format(tmpName)
def insertRealNeuron(config, opts, connections,lastValueFilename):
config.add_entity(RealNeuron(id=1,
connections=connections,
spikeThreshold=-20,
V0=-65,
deviceFile=env('COMEDI_DEVICE'),
inputSubdevice=env('AI_SUBDEVICE'),
outputSubdevice=env('AO_SUBDEVICE'),
readChannel=opts['ai'],
writeChannel=opts['ao'],
inputConversionFactor=env('AI_CONVERSION_FACTOR_CC'),
outputConversionFactor=env('AO_CONVERSION_FACTOR_CC'),
inputRange=env('RANGE'),
reference=env('GROUND_REFERENCE'),
holdLastValue=True,
holdLastValueFilename=lastValueFilename,
kernelFile='kernel.dat'))
def main():
if len(sys.argv) < 2 or sys.argv[1] in ('-h','--help','help'):
print(usage)
sys.exit(0)
mode = sys.argv[1]
if not mode in modes:
print('''Unknown mode: {0}; try instead
one of the following: {1}'''.format(mode,
', '.join(modes)))
sys.exit(1)
try:
opts,args = getopt.getopt(sys.argv[2:], switches,long_switches)
except getopt.GetoptError, err:
print(err)
print(usage)
sys.exit(1)
opts = parse_prc_options(mode,opts)
configName = 'prc.xml'
waveformName = 'prc.stim'
lastValueFilename = 'prc.lastValue'
hold_time = 3
duration = 0
config = lcg.XMLConfigurationFile(opts['srate'], duration)
config.add_entity(H5Recorder(id=0,
connections=[],
compress=True))
if opts['model']:
insert_model(config,opts['holding'],lastValueFilename)
else:
insertRealNeuron(config, opts, [0], lastValueFilename)
# Perturbation waveform
if not mode == 'noise':
config.add_entity(Waveform(id=2,
connections=[0,1],
filename=waveformName,
triggered=True,
units='pA'))
sys.argv = ['lcg-stimgen','-o',waveformName,
opts['pert_waveform'],
'-d',str(opts['pert_dur']*1e-3),
'--',str(opts['pert_amp']),
'dc','-d','0.0001','0']
lcg.stimgen.main()
if mode == 'steps':
build_steps_config(opts, config,lastValueFilename)
elif mode == 'fclamp':
build_fclamp_config(opts, config,lastValueFilename)
elif mode == 'fclamp-sobol':
build_fclamp_sobol_config(opts, config,lastValueFilename)
else: # Then it is for the WSTA
build_noise_config(opts, config, waveformName,lastValueFilename)
# Run or dry-run?
trial_cmd = 'lcg-experiment -c {0}'.format(configName)
if opts['dryrun']:
run = lambda x: sys.stdout.write(x+'\n')
else:
run = lambda x: sub.call(x, shell=True)
# Initialization
    if opts['kernel'] and not opts['model']:
run_kernel(opts)
run(hold_cell(opts,lastValueFilename))
if not opts['dryrun'] and not opts['holding'] == 0:
print('Sleeping {0}...'.format(hold_time))
time.sleep(hold_time)
print('Going to run {0} trials'.format(opts['ntrials']))
for ii in range(opts['ntrials']):
if 'sobol' in mode:
startSample = opts['sobol_offset'] + int(ii*opts['trial_dur'])
# This should be the SobolDelay entity; parameter StartSample
# Otherwise things are going to go wrong.
config._entities[6][2][0].text = str(startSample)
config.write(configName)
run(trial_cmd)
time.sleep(opts['intreps'])
if __name__ == '__main__':
main()
| gpl-3.0 | 9,198,043,673,436,689,000 | 39.352403 | 87 | 0.476806 | false |
intelxed/xed | pysrc/enc2gen.py | 1 | 194883 | #!/usr/bin/env python3
# -*- python -*-
#BEGIN_LEGAL
#
#Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#END_LEGAL
# This is the "fast" encoder generator known as "enc2".
from __future__ import print_function
import os
import sys
import copy
import re
import argparse
import itertools
import collections
import traceback
import find_dir # finds mbuild and adds it to sys.path
import mbuild
import codegen
import read_xed_db
import gen_setup
import enc2test
import enc2argcheck
from enc2common import *
def get_fname(depth=1): # default is current caller
#return sys._getframe(depth).f_code.co_name
return traceback.extract_stack(None, depth+1)[0][2]
gpr_nt_widths_dict = {}
# list indexed by OSZ (o16,o32,o64)
gpr_nt_widths_dict['GPRv_SB'] = [16,32,64]
gpr_nt_widths_dict['GPRv_R'] = [16,32,64]
gpr_nt_widths_dict['GPRv_B'] = [16,32,64]
gpr_nt_widths_dict['GPRz_R'] = [16,32,32]
gpr_nt_widths_dict['GPRz_B'] = [16,32,32]
gpr_nt_widths_dict['GPRy_R'] = [32,32,64]
gpr_nt_widths_dict['GPRy_B'] = [32,32,64]
gpr_nt_widths_dict['GPR8_R'] = [8,8,8]
gpr_nt_widths_dict['GPR8_B'] = [8,8,8]
gpr_nt_widths_dict['GPR8_SB'] = [8,8,8]
gpr_nt_widths_dict['GPR16_R'] = [16,16,16]
gpr_nt_widths_dict['GPR16_B'] = [16,16,16]
gpr_nt_widths_dict['GPR32_B'] = [32,32,32]
gpr_nt_widths_dict['GPR32_R'] = [32,32,32]
gpr_nt_widths_dict['GPR64_B'] = [64,64,64]
gpr_nt_widths_dict['GPR64_R'] = [64,64,64]
gpr_nt_widths_dict['VGPR32_B'] = [32,32,32]
gpr_nt_widths_dict['VGPR32_R'] = [32,32,32]
gpr_nt_widths_dict['VGPR32_N'] = [32,32,32]
gpr_nt_widths_dict['VGPRy_N'] = [32,32,64]
gpr_nt_widths_dict['VGPR64_B'] = [64,64,64]
gpr_nt_widths_dict['VGPR64_R'] = [64,64,64]
gpr_nt_widths_dict['VGPR64_N'] = [64,64,64]
gpr_nt_widths_dict['A_GPR_R' ] = 'ASZ-SIZED-GPR' # SPECIAL
gpr_nt_widths_dict['A_GPR_B' ] = 'ASZ-SIZED-GPR'
# everything else is not typically used in scalable way. look at other
# operand.
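# oc2_widths_dict: effective width in bits for the oc2 width codes, indexed
# like the table above by effective operand size (o16, o32, o64).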
oc2_widths_dict = {}
oc2_widths_dict['v'] = [16,32,64]
oc2_widths_dict['y'] = [32,32,64]
oc2_widths_dict['z'] = [16,32,32]
oc2_widths_dict['b'] = [8,8,8]
oc2_widths_dict['w'] = [16,16,16]
oc2_widths_dict['d'] = [32,32,32]
oc2_widths_dict['q'] = [64,64,64]
enc_fn_prefix = "xed_enc"
arg_reg_type = 'xed_reg_enum_t '
var_base = 'base'
arg_base = 'xed_reg_enum_t ' + var_base
var_index = 'index'
arg_index = 'xed_reg_enum_t ' + var_index
var_indexx = 'index_xmm'
arg_indexx = 'xed_reg_enum_t ' + var_indexx
var_indexy = 'index_ymm'
arg_indexy = 'xed_reg_enum_t ' + var_indexy
var_indexz = 'index_zmm'
arg_indexz = 'xed_reg_enum_t ' + var_indexz
var_vsib_index_dct = { 'xmm': var_indexx,
'ymm': var_indexy,
'zmm': var_indexz }
var_scale = 'scale'
arg_scale = 'xed_uint_t ' + var_scale
var_disp8 = 'disp8'
arg_disp8 = 'xed_int8_t ' + var_disp8
var_disp16 = 'disp16'
arg_disp16 = 'xed_int16_t ' + var_disp16
var_disp32 = 'disp32'
arg_disp32 = 'xed_int32_t ' + var_disp32
var_disp64 = 'disp64'
arg_disp64 = 'xed_int64_t ' + var_disp64
var_request = 'r'
arg_request = 'xed_enc2_req_t* ' + var_request
var_reg0 = 'reg0'
arg_reg0 = 'xed_reg_enum_t ' + var_reg0
var_reg1 = 'reg1'
arg_reg1 = 'xed_reg_enum_t ' + var_reg1
var_reg2 = 'reg2'
arg_reg2 = 'xed_reg_enum_t ' + var_reg2
var_reg3 = 'reg3'
arg_reg3 = 'xed_reg_enum_t ' + var_reg3
var_reg4 = 'reg4'
arg_reg4 = 'xed_reg_enum_t ' + var_reg4
var_kmask = 'kmask'
arg_kmask = 'xed_reg_enum_t ' + var_kmask
var_kreg0 = 'kreg0'
arg_kreg0 = 'xed_reg_enum_t ' + var_kreg0
var_kreg1 = 'kreg1'
arg_kreg1 = 'xed_reg_enum_t ' + var_kreg1
var_kreg2 = 'kreg2'
arg_kreg2 = 'xed_reg_enum_t ' + var_kreg2
var_rcsae = 'rcsae'
arg_rcsae = 'xed_uint_t ' + var_rcsae
var_zeroing = 'zeroing'
arg_zeroing = 'xed_bool_t ' + var_zeroing
var_imm8 = 'imm8'
arg_imm8 = 'xed_uint8_t ' + var_imm8
var_imm8_2 = 'imm8_2'
arg_imm8_2 = 'xed_uint8_t ' + var_imm8_2
var_imm16 = 'imm16'
arg_imm16 = 'xed_uint16_t ' + var_imm16
var_imm16_2 = 'imm16_2'
arg_imm16_2 = 'xed_uint16_t ' + var_imm16_2
var_imm32 = 'imm32'
arg_imm32 = 'xed_uint32_t ' + var_imm32
var_imm64 = 'imm64'
arg_imm64 = 'xed_uint64_t ' + var_imm64
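# Naming convention for the definitions above: each var_* string is the C variable
# name used inside the generated encoder functions, and the matching arg_* string is
# the full C parameter declaration (type plus name) used in their signatures.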
def special_index_cases(ii):
if ii.avx512_vsib or ii.avx_vsib or ii.sibmem:
return True
return False
# if I wanted to prune the number of memory variants, I could set
# index_vals to just [True].
index_vals = [False,True]
def get_index_vals(ii):
global index_vals
if special_index_cases(ii):
return [True]
return index_vals
gprv_index_names = { 16:'gpr16_index', 32:'gpr32_index', 64:'gpr64_index'}
gprv_names = { 8:'gpr8', 16:'gpr16', 32:'gpr32', 64:'gpr64'} # added gpr8 for convenience
gpry_names = { 16:'gpr32', 32:'gpr32', 64:'gpr64'}
gprz_names = { 16:'gpr16', 32:'gpr32', 64:'gpr32'}
vl2names = { '128':'xmm', '256':'ymm', '512':'zmm',
'LIG':'xmm', 'LLIG':'xmm' }
vl2func_names = { '128':'128', '256':'256', '512':'512',
'LIG':'', 'LLIG':'' }
bits_to_widths = {8:'b', 16:'w', 32:'d', 64:'q' }
arg_immz_dct = { 0: '', 8: arg_imm8, 16: arg_imm16, 32: arg_imm32, 64: arg_imm32 }
var_immz_dct = { 0: '', 8: var_imm8, 16: var_imm16, 32: var_imm32, 64: var_imm32 }
arg_immz_meta = { 0: '', 8:'int8', 16: 'int16', 32: 'int32', 64: 'int32' }
arg_immv_dct = { 0: '', 8: arg_imm8, 16: arg_imm16, 32: arg_imm32, 64: arg_imm64 }
var_immv_dct = { 0: '', 8: var_imm8, 16: var_imm16, 32: var_imm32, 64: var_imm64 }
arg_immv_meta = { 0: '', 8:'int8', 16: 'int16', 32: 'int32', 64: 'int64' }
arg_dispv = { 8: arg_disp8, 16: arg_disp16, 32: arg_disp32, 64: arg_disp64 } # index by dispsz
var_dispv = { 8: var_disp8, 16:var_disp16, 32:var_disp32, 64:var_disp64 }
arg_dispz = { 16: arg_disp16, 32: arg_disp32, 64: arg_disp32 } # index by dispsz
tag_dispz = { 16: 'int16', 32: 'int32', 64: 'int32' } # index by dispsz
var_dispz = { 16:var_disp16, 32:var_disp32, 64:var_disp32 }
arg_dispv_meta = { 8:'int8', 16:'int16', 32:'int32', 64:'int64' }
widths_to_bits = {'b':8, 'w':16, 'd':32, 'q':64 }
widths_to_bits_y = {'w':32, 'd':32, 'q':64 }
widths_to_bits_z = {'w':16, 'd':32, 'q':32 }
# if I cut the number of displacements by removing 0, I would have to
# add some sort of gizmo to omit the displacement if the value of the
# displacement is 0, but then that creates a problem for people who
# want zero displacements for patching. I could also consider merging
# disp8 and disp16/32 and then choosing the smallest displacement that
# fits, but that also takes away control from the user.
def get_dispsz_list(env):
return [0,8,16] if env.asz == 16 else [0,8,32]
def get_osz_list(env):
return [16,32,64] if env.mode == 64 else [16,32]
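# Standard ModRM MOD-field encoding by displacement size:
# no displacement -> mod=0, disp8 -> mod=1, disp16/disp32 -> mod=2.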
_modvals = { 0: 0, 8: 1, 16: 2, 32: 2 } # index by dispsz
def get_modval(dispsz):
global _modvals
return _modvals[dispsz]
def _gen_opnds(ii): # generator
# filter out write-mask operands and suppressed operands
for op in ii.parsed_operands:
if op.lookupfn_name in [ 'MASK1', 'MASKNOT0']:
continue
if op.visibility == 'SUPPRESSED':
continue
if op.name == 'BCAST':
continue
yield op
def _gen_opnds_nomem(ii): # generator
# filter out write-mask operands and suppressed operands and memops
for op in ii.parsed_operands:
if op.name.startswith('MEM'):
continue
if op.lookupfn_name == 'MASK1':
continue
if op.lookupfn_name == 'MASKNOT0':
continue
if op.visibility == 'SUPPRESSED':
continue
if op.name == 'BCAST':
continue
yield op
def first_opnd(ii):
op = next(_gen_opnds(ii))
return op
def first_opnd_nonmem(ii):
op = next(_gen_opnds_nomem(ii))
return op
#def second_opnd(ii):
# for i,op in enumerate(_gen_opnds(ii)):
# if i==1:
# return op
def op_mask_reg(op):
return op_luf_start(op,'MASK')
def op_masknot0(op):
return op_luf_start(op,'MASKNOT0')
def op_scalable_v(op):
if op_luf_start(op,'GPRv'):
return True
if op.oc2 == 'v':
return True
return False
def op_gpr8(op):
if op_luf_start(op,'GPR8'):
return True
if op_reg(op) and op.oc2 == 'b':
return True
return False
def op_gpr16(op):
if op_luf_start(op,'GPR16'):
return True
if op_reg(op) and op.oc2 == 'w':
return True
return False
def op_seg(op):
return op_luf_start(op,'SEG')
def op_cr(op):
return op_luf_start(op,'CR')
def op_dr(op):
return op_luf_start(op,'DR')
def op_gprz(op):
return op_luf_start(op,'GPRz')
def op_gprv(op):
return op_luf_start(op,'GPRv')
def op_gpry(op):
return op_luf_start(op,'GPRy')
def op_vgpr32(op):
return op_luf_start(op,'VGPR32')
def op_vgpr64(op):
return op_luf_start(op,'VGPR64')
def op_gpr32(op):
return op_luf_start(op,'GPR32')
def op_gpr64(op):
return op_luf_start(op,'GPR64')
def op_ptr(op):
if 'PTR' in op.name:
return True
return False
def op_reg(op):
if 'REG' in op.name:
return True
return False
def op_mem(op):
if 'MEM' in op.name:
return True
return False
def op_agen(op): # LEA
if 'AGEN' in op.name:
return True
return False
def op_tmm(op):
if op.lookupfn_name:
if 'TMM' in op.lookupfn_name:
return True
return False
def op_xmm(op):
if op.lookupfn_name:
if 'XMM' in op.lookupfn_name:
return True
return False
def op_ymm(op):
if op.lookupfn_name:
if 'YMM' in op.lookupfn_name:
return True
return False
def op_zmm(op):
if op.lookupfn_name:
if 'ZMM' in op.lookupfn_name:
return True
return False
def op_mmx(op):
if op.lookupfn_name:
if 'MMX' in op.lookupfn_name:
return True
return False
def op_x87(op):
if op.lookupfn_name:
if 'X87' in op.lookupfn_name:
return True
elif (op.name.startswith('REG') and
op.lookupfn_name == None and
re.match(r'XED_REG_ST[0-7]',op.bits) ):
return True
return False
def one_scalable_gpr_and_one_mem(ii): # allows optional imm8,immz, one implicit specific reg
implicit,n,r,i = 0,0,0,0
for op in _gen_opnds(ii):
if op_mem(op):
n += 1
elif op_reg(op) and op_implicit_specific_reg(op):
implicit += 1
elif op_gprv(op): #or op_gpry(op):
r += 1
elif op_imm8(op) or op_immz(op):
i += 1
else:
return False
return n==1 and r==1 and i<=1 and implicit <= 1
def one_gpr_reg_one_mem_scalable(ii):
n,r = 0,0
for op in _gen_opnds(ii):
if op_agen(op) or (op_mem(op) and op.oc2 in ['v']):
n += 1
elif op_gprv(op):
r += 1
else:
return False
return n==1 and r==1
def one_gpr_reg_one_mem_zp(ii):
n,r = 0,0
for op in _gen_opnds(ii):
if op_mem(op) and op.oc2 in ['p','z']:
n += 1
elif op_gprz(op):
r += 1
else:
return False
return n==1 and r==1
def one_gpr_reg_one_mem_fixed(ii):
n,r = 0,0
for op in _gen_opnds(ii):
# FIXME: sloppy could bemixing b and d operands, for example
if op_mem(op) and op.oc2 in ['b', 'w', 'd', 'q','dq']:
n += 1
elif op_gpr8(op) or op_gpr16(op) or op_gpr32(op) or op_gpr64(op):
r += 1
else:
return False
return n==1 and r==1
simd_widths = ['b','w','xud', 'qq', 'dq', 'q', 'ps','pd', 'ss', 'sd', 'd', 'm384', 'm512', 'xuq', 'zd']
def one_xmm_reg_one_mem_fixed_opti8(ii): # allows gpr32, gpr64, mmx too
global simd_widths
i,r,n=0,0,0
for op in _gen_opnds(ii):
if op_mem(op) and op.oc2 in simd_widths:
n = n + 1
elif (op_xmm(op) or op_mmx(op) or op_gpr32(op) or op_gpr64(op)) and op.oc2 in simd_widths:
r = r + 1
elif op_imm8(op):
i = i + 1
else:
return False
return n==1 and r==1 and i<=1
def one_mem_common(ii): # b,w,d,q,dq, v, y, etc.
n = 0
for op in _gen_opnds(ii):
if op_mem(op) and op.oc2 in ['b','w','d','q','dq','v', 'y', 's',
'mem14','mem28','mem94','mem108',
'mxsave', 'mprefetch',
'mem16', 's64', 'mfpxenv',
'm384', 'm512' ]:
n = n + 1
else:
return False
return n==1
def is_gather_prefetch(ii):
if 'GATHER' in ii.attributes:
if 'PREFETCH' in ii.attributes:
return True
return False
def is_far_xfer_mem(ii):
if 'FAR_XFER' in ii.attributes:
for op in _gen_opnds(ii):
if op_mem(op) and op.oc2 in ['p','p2']:
return True
return False
def is_far_xfer_nonmem(ii):
p,i=0,0
if 'FAR_XFER' in ii.attributes:
for op in _gen_opnds(ii):
if op_ptr(op):
                p += 1
elif op_imm16(op):
i += 1
else:
return False
return True
return i==1 and p==1
def op_reg_invalid(op):
if op.bits and op.bits != '1':
if op.bits == 'XED_REG_INVALID':
return True
return False
def one_mem_common_one_implicit_gpr(ii):
'''memop can be b,w,d,q,dq, v, y, etc. with
GPR8 or GPRv'''
n,g = 0,0
for op in _gen_opnds(ii):
if op_mem(op) and op.oc2 in ['b','w','d','q','dq','v', 'y',
'mem14','mem28','mem94','mem108',
'mxsave', 'mprefetch' ]:
n += 1
elif op_reg(op) and op_implicit(op) and not op_reg_invalid(op):
# FIXME: could improve the accuracy by enforcing GPR. but
# not sure if that is strictly necessary. Encoding works...
g += 1
else:
return False
return n==1 and g==1
def one_mem_fixed_imm8(ii): # b,w,d,q,dq, etc.
n = 0
i = 0
for op in _gen_opnds(ii):
if op_mem(op) and op.oc2 in ['b','w','d','q','dq', 'v', 'y',
'mem14','mem28','mem94','mem108']:
n = n + 1
elif op_imm8(op):
i = i + 1
else:
return False
return n==1 and i==1
def one_mem_fixed_immz(ii): # b,w,d,q,dq, etc.
n = 0
i = 0
for op in _gen_opnds(ii):
if op_mem(op) and op.oc2 in ['b','w','d','q','dq', 'v', 'y',
'mem14','mem28','mem94','mem108']:
n = n + 1
elif op_immz(op):
i = i + 1
else:
return False
return n==1 and i==1
def two_gpr_one_scalable_one_fixed(ii):
f,v = 0,0
for op in _gen_opnds(ii):
if op_reg(op) and op_scalable_v(op):
v += 1
elif op_reg(op) and (op_gpr8(op) or op_gpr16(op) or op_gpr32(op)):
f += 1
else:
return False
return v==1 and f==1
def two_scalable_regs(ii): # allow optional imm8, immz, allow one implicit GPR
n,i,implicit = 0,0,0
for op in _gen_opnds(ii):
if op_reg(op) and op_scalable_v(op):
n += 1
elif op_reg(op) and op_implicit_specific_reg(op):
implicit += 1
elif op_imm8(op) or op_immz(op):
i += 1
else:
return False
return n==2 and i <= 1 and implicit <= 1
def op_implicit(op):
return op.visibility == 'IMPLICIT'
def op_implicit_or_suppressed(op):
return op.visibility in ['IMPLICIT','SUPPRESSED']
def one_x87_reg(ii):
n = 0
for op in _gen_opnds(ii):
if op_reg(op) and op_x87(op) and not op_implicit(op):
n = n + 1
else:
return False
return n==1
def two_x87_reg(ii): # one implicit
n = 0
implicit = 0
for op in _gen_opnds(ii):
if op_reg(op) and op_x87(op):
n = n + 1
if op_implicit(op):
implicit = implicit + 1
else:
return False
return n==2 and implicit == 1
def one_x87_implicit_reg_one_memop(ii):
mem,implicit_reg = 0,0
for op in _gen_opnds(ii):
if op_reg(op) and op_x87(op):
if op_implicit(op):
implicit_reg = implicit_reg + 1
else:
return False
elif op_mem(op):
mem = mem + 1
else:
return False
return mem==1 and implicit_reg==1
def zero_operands(ii):# allow all implicit regs
n = 0
for op in _gen_opnds(ii):
if op_implicit(op):
continue
n = n + 1
return n == 0
def one_implicit_gpr_imm8(ii):
'''this allows implicit operands'''
n = 0
for op in _gen_opnds(ii):
if op_imm8(op):
n = n + 1
elif op_implicit(op):
continue
else:
return False
return n == 1
def op_implicit_specific_reg(op):
if op.name.startswith('REG'):
if op.bits and op.bits.startswith('XED_REG_'):
return True
return False
def one_gprv_one_implicit(ii):
n,implicit = 0,0
for op in _gen_opnds(ii):
if op_gprv(op):
n += 1
elif op_implicit_specific_reg(op):
implicit += 1
else:
return False
return n == 1 and implicit == 1
def one_gpr8_one_implicit(ii):
n,implicit = 0,0
for op in _gen_opnds(ii):
if op_gpr8(op):
n += 1
elif op_implicit_specific_reg(op):
implicit += 1
else:
return False
return n == 1 and implicit == 1
def one_nonmem_operand(ii):
n = 0
for op in _gen_opnds(ii):
if op_mem(op):
return False
if op_implicit_or_suppressed(op): # for RCL/ROR etc with implicit imm8
continue
n = n + 1
return n == 1
def two_gpr8_regs(ii):
n = 0
for op in _gen_opnds(ii):
if op_reg(op) and op_gpr8(op):
n = n + 1
else:
return False
return n==2
def op_immz(op):
if op.name == 'IMM0':
if op.oc2 == 'z':
return True
return False
def op_immv(op):
if op.name == 'IMM0':
if op.oc2 == 'v':
return True
return False
def op_imm8(op):
if op.name == 'IMM0':
if op.oc2 == 'b':
if op_implicit_or_suppressed(op):
return False
return True
return False
def op_imm16(op):
if op.name == 'IMM0':
if op.oc2 == 'w':
return True
return False
def op_imm8_2(op):
if op.name == 'IMM1':
if op.oc2 == 'b':
return True
return False
def one_mmx_reg_imm8(ii):
n = 0
for i,op in enumerate(_gen_opnds(ii)):
if op_reg(op) and op_mmx(op):
n = n + 1
elif i == 1 and op_imm8(op):
continue
else:
return False
return n==1
def one_xmm_reg_imm8(ii): # also allows SSE4 2-imm8 instr
i,j,n=0,0,0
for op in _gen_opnds(ii):
if op_reg(op) and op_xmm(op):
n += 1
elif op_imm8(op):
i += 1
elif op_imm8_2(op):
j += 1
else:
return False
return n==1 and i==1 and j<=1
def two_xmm_regs_imm8(ii):
n = 0
for i,op in enumerate(_gen_opnds(ii)):
if op_reg(op) and op_xmm(op):
n = n + 1
elif i == 2 and op_imm8(op):
continue
else:
return False
return n==2
def gen_osz_list(mode, osz_list):
"""skip osz 64 outside of 64b mode"""
for osz in osz_list:
if mode != 64 and osz == 64:
continue
yield osz
def modrm_reg_first_operand(ii):
op = first_opnd(ii)
if op.lookupfn_name:
if op.lookupfn_name.endswith('_R'):
return True
if op.lookupfn_name.startswith('SEG'):
return True
return False
def emit_required_legacy_prefixes(ii,fo):
if ii.iclass.endswith('_LOCK'):
fo.add_code_eol('emit(r,0xF0)')
if ii.f2_required:
fo.add_code_eol('emit(r,0xF2)', 'required by instr')
if ii.f3_required:
fo.add_code_eol('emit(r,0xF3)', 'required by instr')
if ii.osz_required:
fo.add_code_eol('emit(r,0x66)', 'required by instr')
def emit_67_prefix(fo):
fo.add_code_eol('emit(r,0x67)', 'change EASZ')
def emit_required_legacy_map_escapes(ii,fo):
if ii.map == 1:
fo.add_code_eol('emit(r,0x0F)', 'escape map 1')
elif ii.map == 2:
fo.add_code_eol('emit(r,0x0F)', 'escape map 2')
fo.add_code_eol('emit(r,0x38)', 'escape map 2')
elif ii.map == 3:
fo.add_code_eol('emit(r,0x0F)', 'escape map 3')
fo.add_code_eol('emit(r,0x3A)', 'escape map 3')
elif ii.amd_3dnow_opcode:
fo.add_code_eol('emit(r,0x0F)', 'escape map 3dNOW')
fo.add_code_eol('emit(r,0x0F)', 'escape map 3dNOW')
def get_implicit_operand_name(op):
if op_implicit(op):
if op.name.startswith('REG'):
if op.bits and op.bits.startswith('XED_REG_'):
reg_name = re.sub('XED_REG_','',op.bits).lower()
return reg_name
elif op.lookupfn_name:
ntluf = op.lookupfn_name
return ntluf
elif op.name == 'IMM0' and op.type == 'imm_const' and op.bits == '1':
return 'one'
die("Unhandled implicit operand {}".format(op))
return None
def _gather_implicit_regs(ii):
names = []
for op in _gen_opnds(ii):
nm = get_implicit_operand_name(op)
if nm:
names.append(nm)
return names
def _implicit_reg_names(ii):
extra_names = _gather_implicit_regs(ii)
if extra_names:
extra_names = '_' + '_'.join( extra_names )
else:
extra_names = ''
return extra_names
def emit_vex_prefix(env, ii, fo, register_only=False):
if ii.map == 1 and ii.rexw_prefix != '1':
        # if any of x,b are set, need c4, else we can use c5
        # performance: we know statically if something is register-only,
        # in which case we can avoid testing rexx.
if env.mode == 64:
if register_only:
fo.add_code('if (get_rexb(r))')
else:
fo.add_code('if (get_rexx(r) || get_rexb(r))')
fo.add_code_eol(' emit_vex_c4(r)')
fo.add_code('else')
fo.add_code_eol(' emit_vex_c5(r)')
else:
fo.add_code_eol('emit_vex_c5(r)')
else:
fo.add_code_eol('emit_vex_c4(r)')
def emit_opcode(ii,fo):
if ii.amd_3dnow_opcode:
return # handled later. See add_enc_func()
opcode = "0x{:02X}".format(ii.opcode_base10)
fo.add_code_eol('emit(r,{})'.format(opcode),
'opcode')
def create_modrm_byte(ii,fo):
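    '''Set any MODRM mod/reg/rm fields that are hard-coded by the instruction
    record and return True if a MODRM byte must be emitted for this form.'''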
mod,reg,rm = 0,0,0
modrm_required = False
if ii.mod_required:
if ii.mod_required in ['unspecified']:
pass
elif ii.mod_required in ['00/01/10']:
            modrm_required = True
else:
mod = ii.mod_required
modrm_required = True
if ii.reg_required:
if ii.reg_required in ['unspecified']:
pass
else:
reg = ii.reg_required
modrm_required = True
if ii.rm_required:
if ii.rm_required in ['unspecified']:
pass
else:
rm = ii.rm_required
modrm_required = True
if modrm_required:
modrm = (mod << 6) | (reg<<3) | rm
fo.add_comment('MODRM = 0x{:02x}'.format(modrm))
if mod: # ZERO INIT OPTIMIZATION
fo.add_code_eol('set_mod(r,{})'.format(mod))
if reg: # ZERO INIT OPTIMIZATION
fo.add_code_eol('set_reg(r,{})'.format(reg))
if rm: # ZERO INIT OPTIMIZATION
fo.add_code_eol('set_rm(r,{})'.format(rm))
return modrm_required
numbered_function_creators = collections.defaultdict(int)
def dump_numbered_function_creators():
global numbered_function_creators
for k,val in sorted(numbered_function_creators.items(),
key=lambda x: x[1]):
print("NUMBERED FN CREATORS: {:5d} {:30s}".format(val,k))
numbered_functions = 0
def make_function_object(env, ii, fname, return_value='void', asz=None):
'''Create function object. Augment function name for conventions '''
global numbered_functions
global numbered_function_creators
if 'AMDONLY' in ii.attributes:
fname += '_amd'
if ii.space == 'evex':
fname += '_e'
    # Distinguish the 16/32b mode register-only functions to avoid
    # name collisions. The stuff that references memory has an
    # "_a"+env.asz suffix. The non-memory stuff can still have name
    # collisions. To avoid those collisions, I append _md16 or _md32
    # to the function names.
if asz:
fname += '_a{}'.format(asz)
elif env.mode in [16,32]:
fname += '_md{}'.format(env.mode)
if fname in env.function_names:
numbered_functions += 1
t = env.function_names[fname] + 1
env.function_names[fname] = t
fname = '{}_vr{}'.format(fname,t)
numbered_function_creators[get_fname(2)] += 1
#msge("Numbered function name for: {} from {}".format(fname, get_fname(2)))
else:
env.function_names[fname] = 0
fo = codegen.function_object_t(fname, return_value, dll_export=True)
if ii.iform:
fo.add_comment(ii.iform)
return fo
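# Note: as an illustrative (assumed) example of the suffixing above, an evex-space
# form built for 16b mode with register-only operands would get "_e" and "_md16"
# appended, and a later name collision would add a "_vr<N>" suffix.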
def make_opnd_signature(env, ii, using_width=None, broadcasting=False, special_xchg=False):
'''This is the heart of the naming conventions for the encode
functions. If using_width is present, it is used for GPRv and
GPRy operations to specify a width. '''
global vl2func_names, widths_to_bits, widths_to_bits_y, widths_to_bits_z
def _translate_rax_name(w):
rax_names = { 16: 'ax', 32:'eax', 64:'rax' }
osz = _translate_width_int(w)
return rax_names[osz]
def _translate_eax_name(w):
eax_names = { 16: 'ax', 32:'eax', 64:'eax' }
osz = _translate_width_int(w)
return eax_names[osz]
def _translate_r8_name(w):
# uppercase to try to differentiate r8 (generic 8b reg) from R8 64b reg
r8_names = { 16: 'R8W', 32:'R8D', 64:'R8' }
osz = _translate_width_int(w)
return r8_names[osz]
def _translate_width_int(w):
if w in [8,16,32,64]:
return w
return widths_to_bits[w]
def _translate_width(w):
return str(_translate_width_int(w))
def _translate_width_y(w):
if w in [32,64]:
return str(w)
elif w == 16:
return '32'
return str(widths_to_bits_y[w])
def _translate_width_z(w):
if w in [16,32]:
return str(w)
elif w == 64:
return '32'
return str(widths_to_bits_z[w])
def _convert_to_osz(w):
if w in [16,32,64]:
return w
elif w in widths_to_bits:
return widths_to_bits[w]
else:
die("Cannot convert {}".format(w) )
s = []
for op in _gen_opnds(ii):
if op_implicit(op):
nm = get_implicit_operand_name(op)
if nm in ['OrAX'] and using_width:
s.append( _translate_rax_name(using_width) )
elif nm in ['OeAX'] and using_width:
s.append( _translate_eax_name(using_width) )
else:
s.append(nm)
continue
# for the modrm-less MOV instr
if op.name.startswith('BASE'):
continue
if op.name.startswith('INDEX'):
continue
if op_tmm(op):
s.append('t')
elif op_xmm(op):
s.append('x')
elif op_ymm(op):
s.append('y')
elif op_zmm(op):
s.append('z')
elif op_mask_reg(op):
s.append('k')
elif op_vgpr32(op):
s.append('r32')
elif op_vgpr64(op):
s.append('r64') #FIXME something else
elif op_gpr8(op):
s.append('r8')
elif op_gpr16(op):
s.append('r16')
elif op_gpr32(op):
s.append('r32')
elif op_gpr64(op):
s.append('r64') #FIXME something else
elif op_gprv(op):
if special_xchg:
s.append(_translate_r8_name(using_width))
else:
s.append('r' + _translate_width(using_width))
elif op_gprz(op):
s.append('r' + _translate_width_z(using_width))
elif op_gpry(op):
s.append('r' + _translate_width_y(using_width))
elif op_agen(op):
s.append('m') # somewhat of a misnomer
elif op_mem(op):
if op.oc2 == 'b':
s.append('m8')
elif op.oc2 == 'w':
s.append('m16')
elif op.oc2 == 'd':
s.append('m32')
elif op.oc2 == 'q':
s.append('m64')
elif op.oc2 == 'ptr': # sibmem
s.append('mptr')
#elif op.oc2 == 'dq': don't really want to start decorating the wider memops
# s.append('m128')
elif op.oc2 == 'v' and using_width:
s.append('m' + _translate_width(using_width))
elif op.oc2 == 'y' and using_width:
s.append('m' + _translate_width_y(using_width))
elif op.oc2 == 'z' and using_width:
s.append('m' + _translate_width_z(using_width))
else:
osz = _convert_to_osz(using_width) if using_width else 0
if op.oc2 == 'tv' or op.oc2.startswith('tm'):
bits = 'tv'
elif op.oc2 == 'vv':
# read_xed_db figures out the memop width for
# no-broadcast and broadcasting cases for EVEX
# memops.
if broadcasting:
bits = ii.element_size
else:
bits = ii.memop_width
else:
bits = env.mem_bits(op.oc2, osz)
if bits == '0':
die("OC2FAIL: {}: oc2 {} osz {} -> {}".format(ii.iclass, op.oc2, osz, bits))
s.append('m{}'.format(bits))
# add the index reg width for sparse ops (scatter,gather)
if ii.avx_vsib:
s.append(ii.avx_vsib[0])
if ii.avx512_vsib:
s.append(ii.avx512_vsib[0])
elif op_imm8(op):
s.append('i8')
elif op_immz(op):
if using_width:
s.append('i' + _translate_width_z(using_width))
else:
s.append('i')
elif op_immv(op):
if using_width:
s.append('i' + _translate_width(using_width))
else:
s.append('i')
elif op_imm16(op):
s.append('i16')
elif op_imm8_2(op):
s.append('i') #FIXME something else?
elif op_x87(op):
s.append('sti') # FIXME: or 'x87'?
elif op_mmx(op):
s.append('mm') # FIXME: or "mmx"? "mm" is shorter.
elif op_cr(op):
s.append('cr')
elif op_dr(op):
s.append('dr')
elif op_seg(op):
s.append('seg')
elif op.name in ['REG0','REG1'] and op_luf(op,'OrAX'):
if using_width:
s.append( _translate_rax_name(using_width) )
else:
s.append('r') # FIXME something else?
else:
die("Unhandled operand {}".format(op))
if ii.space in ['evex']:
if ii.rounding_form:
s.append( 'rc' )
elif ii.sae_form:
s.append( 'sae' )
if ii.space in ['evex','vex']:
if 'KMASK' not in ii.attributes:
vl = vl2func_names[ii.vl]
if vl:
s.append(vl)
return "_".join(s)
def create_legacy_one_scalable_gpr(env,ii,osz_values,oc2):
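    '''Emit encoder functions for a legacy instruction whose register operand is a
    scalable GPR, one function per reachable effective operand size in osz_values.'''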
global enc_fn_prefix, arg_request, arg_reg0, var_reg0, gprv_names
for osz in osz_values:
if env.mode != 64 and osz == 64:
continue
special_xchg = False
if ii.partial_opcode:
if ii.rm_required != 'unspecified':
if ii.iclass == 'XCHG':
if env.mode != 64:
continue
                    # This is a strange XCHG that takes r8w, r8d or r8
                    # depending on the EOSZ. REX.B and 64b mode are required, obviously.
                    # And no register specifier is required.
special_xchg = True
opsig = make_opnd_signature(env, ii, osz, special_xchg=special_xchg)
fname = "{}_{}_{}".format(enc_fn_prefix,
ii.iclass.lower(),
opsig)
fo = make_function_object(env,ii,fname)
fo.add_comment("created by create_legacy_one_scalable_gpr")
if special_xchg:
fo.add_comment("special xchg using R8W/R8D/R8")
fo.add_arg(arg_request,'req')
if not special_xchg:
fo.add_arg(arg_reg0, gprv_names[osz])
emit_required_legacy_prefixes(ii,fo)
rex_forced = False
if special_xchg:
fo.add_code_eol('set_rexb(r,1)')
rex_forced = True
if env.mode == 64 and osz == 16:
if ii.eosz == 'osznot16':
warn("SKIPPING 16b version for: {} / {}".format(ii.iclass, ii.iform))
continue # skip 16b version for this instruction
fo.add_code_eol('emit(r,0x66)')
elif env.mode == 64 and osz == 32 and ii.default_64b == True:
continue # not encodable
elif env.mode == 64 and osz == 64 and ii.default_64b == False:
if ii.eosz == 'osznot64':
warn("SKIPPING 64b version for: {} / {}".format(ii.iclass, ii.iform))
continue # skip 64b version for this instruction
fo.add_code_eol('set_rexw(r)')
rex_forced = True
elif env.mode == 32 and osz == 16:
if ii.eosz == 'osznot16':
warn("SKIPPING 16b version for: {} / {}".format(ii.iclass, ii.iform))
continue # skip 16b version for this instruction
fo.add_code_eol('emit(r,0x66)')
elif env.mode == 16 and osz == 32:
fo.add_code_eol('emit(r,0x66)')
if modrm_reg_first_operand(ii):
f1, f2 = 'reg','rm'
else:
f1, f2 = 'rm','reg'
# if f1 is rm then we handle partial opcodes farther down
if f1 == 'reg' or not ii.partial_opcode:
fo.add_code_eol('enc_modrm_{}_gpr{}(r,{})'.format(f1, osz, var_reg0))
if f2 == 'reg':
if ii.reg_required != 'unspecified':
fo.add_code_eol('set_reg(r,{})'.format(ii.reg_required))
else:
if ii.rm_required != 'unspecified':
fo.add_code_eol('set_rm(r,{})'.format(ii.rm_required))
if ii.partial_opcode:
if ii.rm_required == 'unspecified':
op = first_opnd(ii)
if op_luf(op,'GPRv_SB'):
fo.add_code_eol('enc_srm_gpr{}(r,{})'.format(osz, var_reg0))
else:
warn("NOT HANDLING SOME PARTIAL OPCODES YET: {} / {} / {}".format(ii.iclass, ii.iform, op))
ii.encoder_skipped = True
return
else:
# we have some XCHG opcodes encoded as partial register
# instructions but have fixed RM fields.
fo.add_code_eol('set_srm(r,{})'.format(ii.rm_required))
#dump_fields(ii)
#die("SHOULD NOT HAVE A VALUE FOR PARTIAL OPCODES HERE {} / {}".format(ii.iclass, ii.iform))
emit_rex(env, fo, rex_forced)
emit_required_legacy_map_escapes(ii,fo)
if ii.partial_opcode:
emit_partial_opcode_variable_srm(ii,fo)
else:
emit_opcode(ii,fo)
emit_modrm(fo)
add_enc_func(ii,fo)
def add_enc_func(ii,fo):
# hack to cover AMD 3DNOW wherever they are created...
if ii.amd_3dnow_opcode:
fo.add_code_eol('emit_u8(r,{})'.format(ii.amd_3dnow_opcode), 'amd 3dnow opcode')
dbg(fo.emit())
ii.encoder_functions.append(fo)
def create_legacy_one_imm_scalable(env,ii, osz_values):
'''just an imm-z (or IMM-v)'''
global enc_fn_prefix, arg_request
for osz in osz_values:
opsig = make_opnd_signature(env,ii,osz)
fname = "{}_m{}_{}_{}".format(enc_fn_prefix, env.mode, ii.iclass.lower(), opsig)
fo = make_function_object(env,ii,fname)
fo.add_comment("created by create_legacy_one_imm_scalable")
fo.add_arg(arg_request,'req')
add_arg_immv(fo,osz)
if ii.has_modrm:
die("NOT REACHED")
if env.mode != 16 and osz == 16:
fo.add_code_eol('emit(r,0x66)')
elif env.mode == 16 and osz == 32:
fo.add_code_eol('emit(r,0x66)')
if not ii.default_64b:
die("Only DF64 here for now")
emit_required_legacy_prefixes(ii,fo)
emit_required_legacy_map_escapes(ii,fo)
emit_opcode(ii,fo)
emit_immv(fo,osz)
add_enc_func(ii,fo)
def create_legacy_one_gpr_fixed(env,ii,width_bits):
global enc_fn_prefix, arg_request, gprv_names
opsig = make_opnd_signature(env,ii,width_bits)
fname = "{}_{}_{}".format(enc_fn_prefix, ii.iclass.lower(), opsig)
fo = make_function_object(env,ii,fname)
fo.add_comment("created by create_legacy_one_gpr_fixed")
fo.add_arg(arg_request,'req')
fo.add_arg(arg_reg0, gprv_names[width_bits])
if width_bits not in [8,16,32,64]:
die("SHOULD NOT REACH HERE")
fo.add_code_eol('set_mod(r,{})'.format(3))
if modrm_reg_first_operand(ii):
f1,f2 = 'reg', 'rm'
else:
f1,f2 = 'rm', 'reg'
fo.add_code_eol('enc_modrm_{}_gpr{}(r,{})'.format(f1,width_bits, var_reg0))
if f2 == 'reg':
if ii.reg_required != 'unspecified':
fo.add_code_eol('set_reg(r,{})'.format(ii.reg_required))
else:
if ii.rm_required != 'unspecified':
fo.add_code_eol('set_rm(r,{})'.format(ii.rm_required))
if env.mode == 64 and width_bits == 64 and ii.default_64b == False:
fo.add_code_eol('set_rexw(r)')
emit_required_legacy_prefixes(ii,fo)
if env.mode == 64:
fo.add_code_eol('emit_rex_if_needed(r)')
emit_required_legacy_map_escapes(ii,fo)
emit_opcode(ii,fo)
emit_modrm(fo)
add_enc_func(ii,fo)
def create_legacy_relbr(env,ii):
global enc_fn_prefix, arg_request
op = first_opnd(ii)
if op.oc2 == 'b':
osz_values = [8]
elif op.oc2 == 'd':
osz_values = [32]
elif op.oc2 == 'z':
osz_values = [16,32]
else:
die("Unhandled relbr width for {}: {}".format(ii.iclass, op.oc2))
for osz in osz_values:
fname = "{}_{}_o{}".format(enc_fn_prefix, ii.iclass.lower(), osz)
fo = make_function_object(env,ii,fname)
fo.add_comment("created by create_legacy_relbr")
fo.add_arg(arg_request,'req')
add_arg_disp(fo,osz)
#if ii.iclass in ['JCXZ','JECXZ','JRCXZ']:
if ii.easz != 'aszall':
if env.mode == 64 and ii.easz == 'a32':
emit_67_prefix(fo)
elif env.mode == 32 and ii.easz == 'a16':
emit_67_prefix(fo)
elif env.mode == 16 and ii.easz == 'a32':
emit_67_prefix(fo)
if op.oc2 == 'z':
if env.mode in [32,64] and osz == 16:
fo.add_code_eol('emit(r,0x66)')
elif env.mode == 16 and osz == 32:
fo.add_code_eol('emit(r,0x66)')
modrm_required = create_modrm_byte(ii,fo)
emit_required_legacy_prefixes(ii,fo)
emit_required_legacy_map_escapes(ii,fo)
emit_opcode(ii,fo)
if modrm_required:
emit_modrm(fo)
if osz == 8:
fo.add_code_eol('emit_i8(r,{})'.format(var_disp8))
elif osz == 16:
fo.add_code_eol('emit_i16(r,{})'.format(var_disp16))
elif osz == 32:
fo.add_code_eol('emit_i32(r,{})'.format(var_disp32))
add_enc_func(ii,fo)
def create_legacy_one_imm_fixed(env,ii):
global enc_fn_prefix, arg_request
fname = "{}_{}".format(enc_fn_prefix,
ii.iclass.lower())
fo = make_function_object(env,ii,fname)
fo.add_comment("created by create_legacy_one_imm_fixed")
op = first_opnd(ii)
fo.add_arg(arg_request,'req')
if op.oc2 == 'b':
fo.add_arg(arg_imm8,'int8')
elif op.oc2 == 'w':
fo.add_arg(arg_imm16,'int16')
else:
die("not handling imm width {}".format(op.oc2))
modrm_required = create_modrm_byte(ii,fo)
emit_required_legacy_prefixes(ii,fo)
emit_required_legacy_map_escapes(ii,fo)
emit_opcode(ii,fo)
if modrm_required:
emit_modrm(fo)
if op.oc2 == 'b':
fo.add_code_eol('emit(r,{})'.format(var_imm8))
elif op.oc2 == 'w':
fo.add_code_eol('emit_i16(r,{})'.format(var_imm16))
add_enc_func(ii,fo)
def create_legacy_one_implicit_reg(env,ii,imm8=False):
global enc_fn_prefix, arg_request, arg_imm8, var_imm8
opsig = make_opnd_signature(env,ii)
fname = "{}_{}_{}".format(enc_fn_prefix,
ii.iclass.lower(),
opsig)
fo = make_function_object(env,ii,fname)
fo.add_comment("created by create_legacy_one_implicit_reg")
fo.add_arg(arg_request,'req')
if imm8:
fo.add_arg(arg_imm8,'int8')
modrm_required = create_modrm_byte(ii,fo)
emit_required_legacy_prefixes(ii,fo)
emit_required_legacy_map_escapes(ii,fo)
emit_opcode(ii,fo)
if modrm_required:
emit_modrm(fo)
if imm8:
fo.add_code_eol('emit(r,{})'.format(var_imm8))
add_enc_func(ii,fo)
def create_legacy_one_nonmem_opnd(env,ii):
# GPRv, GPR8, GPR16, RELBR(b,z), implicit fixed reg, GPRv_SB, IMM0(w,b)
op = first_opnd(ii)
if op.name == 'RELBR':
create_legacy_relbr(env,ii)
elif op.name == 'IMM0':
if op.oc2 in ['b','w','d','q']:
create_legacy_one_imm_fixed(env,ii)
elif op.oc2 == 'z':
create_legacy_one_imm_scalable(env,ii,[16,32])
else:
warn("Need to handle {} in {}".format(
op, "create_legacy_one_nonmem_opnd"))
elif op.lookupfn_name:
if op.lookupfn_name.startswith('GPRv'):
create_legacy_one_scalable_gpr(env,ii,[16,32,64],'v')
elif op.lookupfn_name.startswith('GPRy'):
create_legacy_one_scalable_gpr(env,ii,[32,64],'y')
elif op.lookupfn_name.startswith('GPR8'):
create_legacy_one_gpr_fixed(env,ii,8)
elif op.lookupfn_name.startswith('GPR16'):
create_legacy_one_gpr_fixed(env,ii,16)
elif op.lookupfn_name.startswith('GPR32'):
create_legacy_one_gpr_fixed(env,ii,32)
elif op.lookupfn_name.startswith('GPR64'):
create_legacy_one_gpr_fixed(env,ii,64)
elif op_implicit(op) and op.name.startswith('REG'):
create_legacy_one_implicit_reg(env,ii,imm8=False)
else:
warn("Need to handle {} in {}".format(
op, "create_legacy_one_nonmem_opnd"))
def scalable_implicit_operands(ii):
for op in _gen_opnds(ii):
if op_luf(op,'OeAX'):
return True
return False
def create_legacy_zero_operands_scalable(env,ii):
    # FIXME 2020-06-06: IN and OUT are the only two instr with OeAX()
    # operands. I should write more general code to recognize that
    # only 16/32 are accessible.
if ii.iclass in ['IN','OUT']:
osz_list = [16,32]
for osz in osz_list:
opsig = make_opnd_signature(env,ii,osz)
if opsig:
opsig += '_'
fname = "{}_{}_{}o{}".format(enc_fn_prefix,
ii.iclass.lower(),
opsig,
osz) # FIXME:osz
fo = make_function_object(env,ii,fname)
fo.add_comment("created by create_legacy_zero_operands_scalable")
fo.add_arg(arg_request,'req')
modrm_required = create_modrm_byte(ii,fo)
if env.mode in [32,64] and osz == 16:
fo.add_code_eol('emit(r,0x66)')
if env.mode == 16 and osz == 32:
fo.add_code_eol('emit(r,0x66)')
emit_required_legacy_prefixes(ii,fo)
emit_required_legacy_map_escapes(ii,fo)
if ii.partial_opcode:
die("NOT HANDLING PARTIAL OPCODES YET in create_legacy_zero_operands_scalable")
emit_opcode(ii,fo)
if modrm_required:
emit_modrm(fo)
add_enc_func(ii,fo)
def create_legacy_zero_operands(env,ii): # allows all implicit too
global enc_fn_prefix, arg_request
if env.mode == 64 and ii.easz == 'a16':
# cannot do 16b addressing in 64b mode...so skip these!
ii.encoder_skipped = True
return
if scalable_implicit_operands(ii):
create_legacy_zero_operands_scalable(env,ii)
return
opsig = make_opnd_signature(env,ii)
if opsig:
opsig = '_' + opsig
fname = "{}_{}{}".format(enc_fn_prefix,
ii.iclass.lower(),
opsig)
if ii.easz in ['a16','a32','a64']:
fname = fname + '_' + ii.easz
if ii.eosz in ['o16','o32','o64']:
fname = fname + '_' + ii.eosz
fo = make_function_object(env,ii,fname)
fo.add_comment("created by create_legacy_zero_operands")
fo.add_arg(arg_request,'req')
modrm_required = create_modrm_byte(ii,fo)
# twiddle ASZ if specified
if env.mode == 64 and ii.easz == 'a32':
emit_67_prefix(fo)
elif env.mode == 32 and ii.easz == 'a16':
emit_67_prefix(fo)
elif env.mode == 16 and ii.easz == 'a32':
emit_67_prefix(fo)
# twiddle OSZ
rexw_forced=False
if not ii.osz_required:
if env.mode == 64 and ii.eosz == 'o16':
fo.add_code_eol('emit(r,0x66)')
elif env.mode == 64 and ii.eosz == 'o32' and ii.default_64b == True:
return # skip this one. cannot do 32b osz in 64b mode if default to 64b
elif env.mode == 64 and ii.eosz == 'o64' and ii.default_64b == False:
rexw_forced = True
fo.add_code_eol('set_rexw(r)')
elif env.mode == 32 and ii.eosz == 'o16':
fo.add_code_eol('emit(r,0x66)')
        elif env.mode == 16 and ii.eosz == 'o32':
fo.add_code_eol('emit(r,0x66)')
elif ii.eosz == 'oszall': # works in any OSZ. no prefixes required
pass
elif env.mode == 64 and ii.eosz == 'osznot64':
return
elif ii.eosz == 'osznot16':
pass
emit_required_legacy_prefixes(ii,fo)
if rexw_forced:
fo.add_code_eol('emit_rex(r)')
emit_required_legacy_map_escapes(ii,fo)
if ii.partial_opcode:
if ii.rm_required != 'unspecified':
emit_partial_opcode_fixed_srm(ii,fo)
else:
warn("NOT HANDLING SOME PARTIAL OPCODES YET: {} / {}".format(ii.iclass, ii.iform))
ii.encoder_skipped = True
return
else:
emit_opcode(ii,fo)
if modrm_required:
emit_modrm(fo)
add_enc_func(ii,fo)
def two_fixed_gprs(ii):
width = None
    n = 0 # count of the GPR16/GPR32/GPR64 operands we encounter
c = 0 # operand count, avoid stray stuff
for op in _gen_opnds(ii):
c += 1
for w in [16,32,64]:
if op_luf_start(op,'GPR{}'.format(w)):
if not width:
width = w
n += 1
elif width != w:
return False
else:
n += 1
return width and n == 2 and c == 2
def get_gpr_opsz_code(op):
if op_luf_start(op,'GPR8'):
return 'rb'
if op_luf_start(op,'GPR16'):
return 'rw'
if op_luf_start(op,'GPR32'):
return 'rd'
if op_luf_start(op,'GPR64'):
return 'rq'
if op_luf_start(op,'GPRv'):
return 'rv'
if op_luf_start(op,'GPRy'):
return 'ry'
else:
die("Unhandled GPR width: {}".format(op))
def create_legacy_two_gpr_one_scalable_one_fixed(env,ii):
global enc_fn_prefix, arg_request, arg_reg0, arg_reg1
opsz_to_bits = { 'rb':8, 'rw':16, 'rd':32, 'rq':64 }
osz_list = get_osz_list(env)
opnds = []
opsz_codes =[]
for op in _gen_opnds(ii):
opnds.append(op)
opsz_codes.append( get_gpr_opsz_code(op) )
for osz in osz_list:
opsig = make_opnd_signature(env,ii,osz)
fname = "{}_{}_{}".format(enc_fn_prefix,
ii.iclass.lower(),
opsig) # "".join(opsz_codes), osz)
fo = make_function_object(env,ii,fname)
fo.add_comment("created by create_legacy_two_gpr_one_scalable_one_fixed")
fo.add_arg(arg_request,'req')
opnd_types = get_opnd_types(env,ii,osz)
fo.add_arg(arg_reg0, opnd_types[0])
fo.add_arg(arg_reg1, opnd_types[1])
emit_required_legacy_prefixes(ii,fo)
if not ii.osz_required:
if osz == 16 and env.mode != 16:
# add a 66 prefix outside of 16b mode, to create 16b osz
fo.add_code_eol('emit(r,0x66)')
if osz == 32 and env.mode == 16:
                # add a 66 prefix inside 16b mode to create 32b osz
fo.add_code_eol('emit(r,0x66)')
rexw_forced = cond_emit_rexw(env,ii,fo,osz)
if modrm_reg_first_operand(ii):
f1, f2 = 'reg','rm'
else:
f1, f2 = 'rm','reg'
if opsz_codes[0] in ['rv','ry']:
op0_bits = osz
else:
op0_bits = opsz_to_bits[opsz_codes[0]]
        fo.add_code_eol('enc_modrm_{}_gpr{}(r,{})'.format(f1,op0_bits,var_reg0))
if opsz_codes[1] in ['rv','ry']:
op1_bits = osz
else:
op1_bits = opsz_to_bits[opsz_codes[1]]
fo.add_code_eol('enc_modrm_{}_gpr{}(r,{})'.format(f2,op1_bits,var_reg1))
emit_rex(env,fo,rexw_forced)
emit_required_legacy_map_escapes(ii,fo)
if ii.partial_opcode:
die("NOT HANDLING PARTIAL OPCODES YET: {} / {}".format(ii.iclass, ii.iform))
else:
emit_opcode(ii,fo)
emit_modrm(fo)
add_enc_func(ii,fo)
def create_legacy_two_fixed_gprs(env,ii):
op = first_opnd(ii)
if op_luf_start(op,'GPR16'):
create_legacy_two_scalable_regs(env,ii,[16])
elif op_luf_start(op,'GPR32'):
create_legacy_two_scalable_regs(env,ii,[32])
elif op_luf_start(op,'GPR64'):
create_legacy_two_scalable_regs(env,ii,[64])
else:
die("NOT REACHED")
def create_legacy_two_scalable_regs(env, ii, osz_list):
"""Allows optional imm8,immz"""
global enc_fn_prefix, arg_request, arg_reg0, arg_reg1
global arg_imm8, var_imm8
extra_names = _implicit_reg_names(ii) # for NOPs only (FIXME: not used!?)
if modrm_reg_first_operand(ii):
opnd_order = {0:'reg', 1:'rm'}
else:
opnd_order = {1:'reg', 0:'rm'}
var_regs = [var_reg0, var_reg1]
arg_regs = [arg_reg0, arg_reg1]
    # We have some funky NOPs that come through here that have been
    # redefined for CET. They were two-operand, but one operand is now
    # fixed via a MODRM.REG restriction, and some have a MODRM.RM
    # restriction as well, leaving no real operands. For those funky NOPs,
    # we remove the corresponding operands. I *think* the REX.R and
    # REX.B bits don't matter.
s = []
fixed = {'reg':False, 'rm':False}
nop_opsig = None
if ii.iclass == 'NOP' and ii.iform in [ 'NOP_MEMv_GPRv_0F1C',
'NOP_GPRv_GPRv_0F1E' ]:
if ii.reg_required != 'unspecified':
s.append('reg{}'.format(ii.reg_required))
fixed['reg']=True
if ii.rm_required != 'unspecified':
s.append('rm{}'.format(ii.rm_required))
fixed['rm']=True
if s:
nop_opsig = "".join(s)
for osz in gen_osz_list(env.mode,osz_list):
if nop_opsig:
fname = "{}_{}{}_{}_o{}".format(enc_fn_prefix,
ii.iclass.lower(),
extra_names,
nop_opsig,osz)
else:
opsig = make_opnd_signature(env,ii,osz)
fname = "{}_{}_{}".format(enc_fn_prefix,
ii.iclass.lower(),
opsig)
fo = make_function_object(env,ii,fname)
fo.add_comment("created by create_legacy_two_scalable_regs")
fo.add_arg(arg_request,'req')
opnd_types = get_opnd_types(env,ii,osz)
for i in [0,1]:
if not fixed[opnd_order[i]]:
fo.add_arg(arg_regs[i], opnd_types[i])
if ii.has_imm8:
fo.add_arg(arg_imm8,'int8')
elif ii.has_immz:
add_arg_immz(fo,osz)
emit_required_legacy_prefixes(ii,fo)
if not ii.osz_required:
if osz == 16 and env.mode != 16:
if ii.iclass not in ['ARPL']: # FIXME: make a generic property default16b or something...
# add a 66 prefix outside of 16b mode, to create 16b osz
fo.add_code_eol('emit(r,0x66)')
if osz == 32 and env.mode == 16:
                # add a 66 prefix inside 16b mode to create 32b osz
fo.add_code_eol('emit(r,0x66)')
rexw_forced = cond_emit_rexw(env,ii,fo,osz)
if ii.mod_required == 3:
fo.add_code_eol('set_mod(r,3)')
for i in [0,1]:
if not fixed[opnd_order[i]]:
fo.add_code_eol('enc_modrm_{}_gpr{}(r,{})'.format(opnd_order[i],osz,var_regs[i]))
for slot in ['reg','rm']:
if fixed[slot]:
if slot == 'reg':
fo.add_code_eol('set_reg(r,{})'.format(ii.reg_required))
else:
fo.add_code_eol('set_rm(r,{})'.format(ii.rm_required))
emit_rex(env,fo,rexw_forced)
emit_required_legacy_map_escapes(ii,fo)
if ii.partial_opcode:
die("NOT HANDLING PARTIAL OPCODES YET: {} / {}".format(ii.iclass, ii.iform))
else:
emit_opcode(ii,fo)
emit_modrm(fo)
cond_emit_imm8(ii,fo)
if ii.has_immz:
emit_immz(fo,osz)
add_enc_func(ii,fo)
def create_legacy_two_gpr8_regs(env, ii):
global enc_fn_prefix, arg_request, arg_reg0, arg_reg1
opsig = make_opnd_signature(env,ii)
fname = "{}_{}_{}".format(enc_fn_prefix,
ii.iclass.lower(),
opsig)
fo = make_function_object(env,ii,fname)
fo.add_comment("created by create_legacy_two_gpr8_regs")
fo.add_arg(arg_request,'req')
fo.add_arg(arg_reg0,'gpr8')
fo.add_arg(arg_reg1,'gpr8')
emit_required_legacy_prefixes(ii,fo)
if modrm_reg_first_operand(ii):
f1, f2 = 'reg','rm'
else:
f1, f2 = 'rm','reg'
fo.add_code_eol('enc_modrm_{}_gpr8(r,{})'.format(f1,var_reg0))
fo.add_code_eol('enc_modrm_{}_gpr8(r,{})'.format(f2,var_reg1))
if env.mode == 64:
fo.add_code_eol('emit_rex_if_needed(r)')
emit_required_legacy_map_escapes(ii,fo)
if ii.partial_opcode:
die("NOT HANDLING PARTIAL OPCODES YET: {} / {}".format(ii.iclass, ii.iform))
else:
emit_opcode(ii,fo)
emit_modrm(fo)
add_enc_func(ii,fo)
def add_arg_disp(fo,dispsz):
global arg_dispv, arg_dispv_meta
fo.add_arg(arg_dispv[dispsz], arg_dispv_meta[dispsz])
def add_arg_immz(fo,osz):
global arg_immz_dct, arg_immz_meta
fo.add_arg(arg_immz_dct[osz], arg_immz_meta[osz])
def add_arg_immv(fo,osz):
global arg_immv_dct, arg_immv_meta
fo.add_arg(arg_immv_dct[osz], arg_immv_meta[osz])
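# EVEX.LL' vector-length encoding: 0 -> 128b (xmm), 1 -> 256b (ymm), 2 -> 512b (zmm).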
vlmap = { 'xmm': 0, 'ymm': 1, 'zmm': 2 }
def set_evexll_vl(ii,fo,vl):
global vlmap
if not ii.rounding_form and not ii.sae_form:
fo.add_code_eol('set_evexll(r,{})'.format(vlmap[vl]),
'VL={}'.format(ii.vl))
def emit_immz(fo,osz):
global var_immz_dct
emit_width_immz = { 16:16, 32:32, 64:32 }
fo.add_code_eol('emit_i{}(r,{})'.format(emit_width_immz[osz],
var_immz_dct[osz]))
def emit_immv(fo,osz):
global var_immv_dct
emit_width_immv = {8:8, 16:16, 32:32, 64:64 }
fo.add_code_eol('emit_u{}(r,{})'.format(emit_width_immv[osz],
var_immv_dct[osz]))
def emit_disp(fo,dispsz):
global var_dispv
fo.add_code_eol('emit_i{}(r,{})'.format(dispsz,
var_dispv[dispsz]))
def cond_emit_imm8(ii,fo):
global var_imm8, var_imm8_2
if ii.has_imm8:
fo.add_code_eol('emit(r,{})'.format(var_imm8))
if ii.has_imm8_2:
fo.add_code_eol('emit(r,{})'.format(var_imm8_2))
def cond_add_imm_args(ii,fo):
global arg_imm8, arg_imm8_2
if ii.has_imm8:
fo.add_arg(arg_imm8,'int8')
if ii.has_imm8_2:
fo.add_arg(arg_imm8_2,'int8')
def emit_rex(env, fo, rex_forced):
if env.mode == 64:
if rex_forced:
fo.add_code_eol('emit_rex(r)')
else:
fo.add_code_eol('emit_rex_if_needed(r)')
def get_opnd_types_short(ii):
types= []
for op in _gen_opnds(ii):
if op.oc2:
types.append(op.oc2)
elif op_luf_start(op,'GPRv'):
types.append('v')
elif op_luf_start(op,'GPRz'):
types.append('z')
elif op_luf_start(op,'GPRy'):
types.append('y')
else:
die("Unhandled op type {}".format(op))
return types
def get_reg_type_fixed(op):
'''return a type suitable for use in an enc_modrm function'''
if op_gpr32(op):
return 'gpr32'
elif op_gpr64(op):
return 'gpr64'
elif op_xmm(op):
return 'xmm'
elif op_ymm(op):
return 'ymm'
elif op_mmx(op):
return 'mmx'
die("UNHANDLED OPERAND TYPE {}".format(op))
orax = { 16:'ax', 32:'eax', 64:'rax' }
oeax = { 16:'ax', 32:'eax', 64:'eax' }
def get_opnd_types(env, ii, osz=0):
"""Create meta-data about operands that can be used for generating
testing content."""
global orax, oeax
s = []
for op in _gen_opnds(ii):
if op_luf_start(op,'GPRv'):
if osz == 0:
die("Need OSZ != 0")
s.append('gpr{}'.format(osz))
elif op_luf_start(op,'GPRy'):
if osz == 0:
die("Need OSZ != 0")
s.append('gpr{}'.format(osz if osz > 16 else 32))
elif op_luf_start(op,'GPRz'):
if osz == 0:
die("Need OSZ != 0")
s.append('gpr{}'.format(osz if osz < 64 else 32))
elif op_luf_start(op,'OrAX'):
if osz == 0:
die("Need OSZ != 0")
s.append(orax[osz])
        elif op_luf_start(op,'OeAX'):
if osz == 0:
die("Need OSZ != 0")
s.append(oeax[osz])
elif op_luf_start(op,'ArAX'):
s.append(orax[env.asz])
elif op_immz(op):
if osz == 0:
die("Need OSZ != 0")
s.append('imm{}'.format(osz if osz < 64 else 32))
elif op_immv(op):
if osz == 0:
die("Need OSZ != 0")
s.append('imm{}'.format(osz))
elif op_luf_start(op, 'A_GPR'):
s.append('gpr{}'.format(env.asz))
elif op_implicit_specific_reg(op):
pass # ignore
elif op_tmm(op):
s.append('tmm')
elif op_xmm(op):
s.append('xmm')
elif op_ymm(op):
s.append('ymm')
elif op_zmm(op):
s.append('zmm')
elif op_vgpr32(op):
s.append('gpr32')
elif op_vgpr64(op):
s.append('gpr64')
elif op_gpr32(op):
s.append('gpr32')
elif op_gpr64(op):
s.append('gpr64')
elif op_gpr8(op):
s.append('gpr8')
elif op_gpr16(op):
s.append('gpr16')
elif op_mem(op):
s.append('mem')
elif op_agen(op): # LEA
s.append('agen')
elif op_imm8(op):
s.append('int8')
elif op_imm16(op):
s.append('int16')
elif op_imm8_2(op):
s.append('int8')
elif op_mmx(op):
s.append('mmx')
elif op_cr(op):
s.append('cr')
elif op_dr(op):
s.append('dr')
elif op_seg(op):
s.append('seg')
elif op_masknot0(op): # must be before generic mask test below
s.append('kreg!0')
elif op_mask_reg(op):
s.append('kreg')
else:
die("Unhandled operand {}".format(op))
return s
def two_fixed_regs_opti8(ii): # also allows 2-imm8 SSE4 instr
j,i,d,q,m,x=0,0,0,0,0,0
for op in _gen_opnds(ii):
if op_imm8(op):
i += 1
elif op_imm8_2(op):
j += 1
elif op_gpr32(op):
d += 1
elif op_gpr64(op):
q += 1
elif op_mmx(op):
m += 1
elif op_xmm(op):
x += 1
else:
return False
if i>=2 or j>=2:
return False
    total = d + q + m + x
    return total == 2 # 1+1 or 2+0...either is fine
def create_legacy_two_fixed_regs_opti8(env,ii):
'''Two regs and optional imm8. Regs can be gpr32,gpr64,xmm,mmx, and
they can be different from one another'''
global enc_fn_prefix, arg_request
global arg_reg0, var_reg0
global arg_reg1, var_reg1
opnd_sig = make_opnd_signature(env,ii)
fname = "{}_{}_{}".format(enc_fn_prefix,
ii.iclass.lower(),
opnd_sig)
fo = make_function_object(env,ii,fname)
fo.add_comment("created by create_legacy_two_fixed_regs_opti8")
fo.add_arg(arg_request,'req')
opnd_types = get_opnd_types(env,ii)
fo.add_arg(arg_reg0, opnd_types[0])
fo.add_arg(arg_reg1, opnd_types[1])
cond_add_imm_args(ii,fo)
emit_required_legacy_prefixes(ii,fo)
if modrm_reg_first_operand(ii):
locations = ['reg', 'rm']
else:
locations = ['rm', 'reg']
regs = [ var_reg0, var_reg1]
rexw_forced = cond_emit_rexw(env,ii,fo,osz=0) # legit
fo.add_code_eol('set_mod(r,3)')
for i,op in enumerate(_gen_opnds(ii)):
if op_imm8(op):
break
reg_type = get_reg_type_fixed(op)
fo.add_code_eol('enc_modrm_{}_{}(r,{})'.format(locations[i], reg_type, regs[i]))
emit_rex(env,fo,rexw_forced)
emit_required_legacy_map_escapes(ii,fo)
emit_opcode(ii,fo)
emit_modrm(fo)
cond_emit_imm8(ii,fo)
add_enc_func(ii,fo)
def create_legacy_one_mmx_reg_imm8(env,ii):
global enc_fn_prefix, arg_request
global arg_reg0, var_reg0
global arg_imm8, var_imm8
opsig = make_opnd_signature(env,ii)
fname = "{}_{}_{}".format(enc_fn_prefix,
ii.iclass.lower(),
opsig)
fo = make_function_object(env,ii,fname)
fo.add_comment("created by create_legacy_one_mmx_reg_imm8")
fo.add_arg(arg_request,'req')
fo.add_arg(arg_reg0, 'mmx')
cond_add_imm_args(ii,fo)
emit_required_legacy_prefixes(ii,fo)
if modrm_reg_first_operand(ii):
f1, f2 = 'reg','rm'
else:
f1, f2 = 'rm','reg'
fo.add_code_eol('enc_modrm_{}_mmx(r,{})'.format(f1,var_reg0))
fo.add_code_eol('set_mod(r,3)')
if f2 == 'reg':
if ii.reg_required != 'unspecified':
fo.add_code_eol('set_reg(r,{})'.format(ii.reg_required))
else:
if ii.rm_required != 'unspecified':
fo.add_code_eol('set_rm(r,{})'.format(ii.rm_required))
emit_required_legacy_map_escapes(ii,fo)
emit_opcode(ii,fo)
emit_modrm(fo)
cond_emit_imm8(ii,fo)
add_enc_func(ii,fo)
def create_legacy_one_xmm_reg_imm8(env,ii):
'''also handles 2 imm8 SSE4 instr'''
global enc_fn_prefix, arg_request
global arg_reg0, var_reg0
global arg_imm8, var_imm8
opsig = make_opnd_signature(env,ii)
fname = "{}_{}_{}".format(enc_fn_prefix,
ii.iclass.lower(),
opsig)
fo = make_function_object(env,ii,fname)
fo.add_comment("created by create_legacy_one_xmm_reg_imm8")
fo.add_arg(arg_request,'req')
fo.add_arg(arg_reg0,'xmm')
cond_add_imm_args(ii,fo)
emit_required_legacy_prefixes(ii,fo)
if modrm_reg_first_operand(ii):
f1, f2 = 'reg','rm'
else:
f1, f2 = 'rm','reg'
fo.add_code_eol('enc_modrm_{}_xmm(r,{})'.format(f1,var_reg0))
fo.add_code_eol('set_mod(r,3)')
if f2 == 'reg':
if ii.reg_required != 'unspecified':
fo.add_code_eol('set_reg(r,{})'.format(ii.reg_required))
else:
if ii.rm_required != 'unspecified':
fo.add_code_eol('set_rm(r,{})'.format(ii.rm_required))
if env.mode == 64:
fo.add_code_eol('emit_rex_if_needed(r)')
emit_required_legacy_map_escapes(ii,fo)
emit_opcode(ii,fo)
emit_modrm(fo)
cond_emit_imm8(ii,fo)
add_enc_func(ii,fo)
def create_legacy_two_x87_reg(env,ii):
global enc_fn_prefix, arg_request, arg_reg0, var_reg0
opsig = make_opnd_signature(env,ii)
fname = "{}_{}_{}".format(enc_fn_prefix,
ii.iclass.lower(),
opsig)
fo = make_function_object(env,ii,fname)
fo.add_comment("created by create_legacy_two_x87_reg")
fo.add_arg(arg_request,'req')
fo.add_arg(arg_reg0,'x87')
emit_required_legacy_prefixes(ii,fo)
fo.add_code_eol('set_mod(r,3)')
if ii.reg_required == 'unspecified':
die("Need a value for MODRM.REG in x87 encoding")
fo.add_code_eol('set_reg(r,{})'.format(ii.reg_required))
fo.add_code_eol('enc_modrm_rm_x87(r,{})'.format(var_reg0))
emit_required_legacy_map_escapes(ii,fo)
emit_opcode(ii,fo)
emit_modrm(fo)
add_enc_func(ii,fo)
def create_legacy_one_x87_reg(env,ii):
global enc_fn_prefix, arg_request, arg_reg0, var_reg0
opsig = make_opnd_signature(env,ii)
fname = "{}_{}_{}".format(enc_fn_prefix,
ii.iclass.lower(),
opsig)
fo = make_function_object(env,ii,fname)
fo.add_comment("created by create_legacy_one_x87_reg")
fo.add_arg(arg_request,'req')
fo.add_arg(arg_reg0,'x87')
emit_required_legacy_prefixes(ii,fo)
if ii.mod_required == 3:
fo.add_code_eol('set_mod(r,3)')
else:
die("FUNKY MOD on x87 op: {}".format(ii.mod_required))
if ii.reg_required == 'unspecified':
die("Need a value for MODRM.REG in x87 encoding")
fo.add_code_eol('set_reg(r,{})'.format(ii.reg_required))
fo.add_code_eol('enc_modrm_rm_x87(r,{})'.format(var_reg0))
emit_required_legacy_map_escapes(ii,fo)
emit_opcode(ii,fo)
emit_modrm(fo)
add_enc_func(ii,fo)
def gpr8_imm8(ii):
reg,imm=0,0
for i,op in enumerate(_gen_opnds(ii)):
if i == 0:
if op.name == 'REG0' and op_luf_start(op,'GPR8'):
reg = reg + 1
else:
return False
elif i == 1:
if op.name == 'IMM0' and op.oc2 == 'b':
if op_implicit_or_suppressed(op):
return False
imm = imm + 1
else:
return False
else:
return False
return reg == 1 and imm == 1
def gprv_imm8(ii):
reg,imm=0,0
for i,op in enumerate(_gen_opnds(ii)):
if i == 0:
if op.name == 'REG0' and op_luf_start(op,'GPRv'):
reg = reg + 1
else:
return False
elif i == 1:
if op.name == 'IMM0' and op.oc2 == 'b':
if op_implicit_or_suppressed(op):
return False
imm = imm + 1
else:
return False
else:
return False
return reg == 1 and imm == 1
def gprv_immz(ii):
for i,op in enumerate(_gen_opnds(ii)):
if i == 0:
if op.name == 'REG0' and op_luf_start(op,'GPRv'):
continue
else:
return False
elif i == 1:
if op_immz(op):
continue
else:
return False
else:
return False
return True
def gprv_immv(ii):
for i,op in enumerate(_gen_opnds(ii)):
if i == 0:
if op.name == 'REG0' and op_luf_start(op,'GPRv'):
continue
else:
return False
elif i == 1:
if op_immv(op):
continue
else:
return False
else:
return False
return True
def orax_immz(ii):
for i,op in enumerate(_gen_opnds(ii)):
if i == 0:
if op.name == 'REG0' and op_luf(op,'OrAX'):
continue
else:
return False
elif i == 1:
if op_immz(op):
continue
else:
return False
else:
return False
return True
def op_luf(op,s):
if op.lookupfn_name:
if op.lookupfn_name == s:
return True
return False
def op_luf_start(op,s):
if op.lookupfn_name:
if op.lookupfn_name.startswith(s):
return True
return False
def gprv_implicit_orax(ii):
for i,op in enumerate(_gen_opnds(ii)):
if i == 0:
if op.name == 'REG0' and op_luf(op,'GPRv_SB'):
continue
else:
return False
elif i == 1:
if op.name == 'REG1' and op_luf(op,'OrAX'):
continue
else:
return False
else:
return False
return True
def create_legacy_gpr_imm8(env,ii,width_list):
'''gpr8 or gprv with imm8. nothing fancy'''
global enc_fn_prefix, arg_request, arg_reg0, var_reg0, arg_imm8, var_imm8, gprv_names
for osz in gen_osz_list(env.mode,width_list):
opsig = make_opnd_signature(env,ii,osz)
fname = "{}_{}_{}".format(enc_fn_prefix,
ii.iclass.lower(),
opsig)
fo = make_function_object(env,ii,fname)
fo.add_comment("created by create_legacy_gpr_imm8")
fo.add_arg(arg_request,'req')
fo.add_arg(arg_reg0, gprv_names[osz])
fo.add_arg(arg_imm8,'int8')
emit_required_legacy_prefixes(ii,fo)
if osz == 16 and env.mode != 16:
# add a 66 prefix outside of 16b mode, to create 16b osz
fo.add_code_eol('emit(r,0x66)')
elif osz == 32 and env.mode == 16:
            # add a 66 prefix inside 16b mode to create 32b osz
fo.add_code_eol('emit(r,0x66)')
elif ii.default_64b and osz == 32: # never happens
continue
rexw_forced = cond_emit_rexw(env,ii,fo,osz)
if ii.partial_opcode:
fo.add_code_eol('enc_srm_gpr{}(r,{})'.format(osz, var_reg0))
else:
if modrm_reg_first_operand(ii):
f1, f2 = 'reg','rm'
else:
f1, f2 = 'rm','reg'
fo.add_code_eol('enc_modrm_{}_gpr{}(r,{})'.format(f1,osz,var_reg0))
if f2 == 'reg':
if ii.reg_required != 'unspecified':
fo.add_code_eol('set_reg(r,{})'.format(ii.reg_required))
emit_rex(env,fo,rexw_forced)
emit_required_legacy_map_escapes(ii,fo)
if ii.partial_opcode:
emit_partial_opcode_variable_srm(ii,fo)
else:
emit_opcode(ii,fo)
emit_modrm(fo)
fo.add_code_eol('emit(r,{})'.format(var_imm8))
add_enc_func(ii,fo)
def create_legacy_gprv_immz(env,ii):
global enc_fn_prefix, arg_request, gprv_names, arg_reg0, var_reg0
width_list = get_osz_list(env)
for osz in width_list:
opsig = make_opnd_signature(env,ii,osz)
fname = "{}_{}_{}".format(enc_fn_prefix,
ii.iclass.lower(),
opsig)
fo = make_function_object(env,ii,fname)
fo.add_comment("created by create_legacy_gprv_immz")
fo.add_arg(arg_request,'req')
fo.add_arg(arg_reg0, gprv_names[osz])
add_arg_immz(fo,osz)
emit_required_legacy_prefixes(ii,fo)
if osz == 16 and env.mode != 16:
# add a 66 prefix outside of 16b mode, to create 16b osz
fo.add_code_eol('emit(r,0x66)')
if osz == 32 and env.mode == 16:
            # add a 66 prefix inside 16b mode to create 32b osz
fo.add_code_eol('emit(r,0x66)')
elif ii.default_64b and osz == 32: # never happens
continue
rexw_forced = cond_emit_rexw(env,ii,fo,osz)
if modrm_reg_first_operand(ii):
f1, f2 = 'reg','rm'
else:
f1, f2 = 'rm','reg'
fo.add_code_eol('enc_modrm_{}_gpr{}(r,{})'.format(f1,osz,var_reg0))
if f2 == 'reg':
if ii.reg_required != 'unspecified':
fo.add_code_eol('set_reg(r,{})'.format(ii.reg_required))
else:
if ii.rm_required != 'unspecified':
fo.add_code_eol('set_rm(r,{})'.format(ii.rm_required))
emit_rex(env,fo,rexw_forced)
emit_required_legacy_map_escapes(ii,fo)
emit_opcode(ii,fo)
emit_modrm(fo)
emit_immz(fo,osz)
add_enc_func(ii,fo)
def create_legacy_orax_immz(env,ii):
"""Handles OrAX+IMMz. No MODRM byte"""
global enc_fn_prefix, arg_request
global arg_imm16
global arg_imm32
width_list = get_osz_list(env)
for osz in width_list:
opsig = make_opnd_signature(env,ii,osz)
fname = "{}_{}_{}".format(enc_fn_prefix,
ii.iclass.lower(),
opsig)
fo = make_function_object(env,ii,fname)
fo.add_comment("created by create_legacy_orax_immz")
fo.add_arg(arg_request,'req')
opnd_types = get_opnd_types(env,ii,osz)
# no need to mention the implicit OrAX arg... we don't use it for anything
#fo.add_arg(arg_reg0,opnd_types[0])
add_arg_immz(fo,osz)
emit_required_legacy_prefixes(ii,fo)
if osz == 16 and env.mode != 16:
# add a 66 prefix outside of 16b mode, to create 16b osz
fo.add_code_eol('emit(r,0x66)')
elif osz == 32 and env.mode == 16:
            # add a 66 prefix inside 16b mode to create 32b osz
fo.add_code_eol('emit(r,0x66)')
elif ii.default_64b and osz == 32: # never happens
continue
rexw_forced = cond_emit_rexw(env,ii,fo,osz)
emit_rex(env,fo,rexw_forced)
emit_required_legacy_map_escapes(ii,fo)
emit_opcode(ii,fo)
emit_immz(fo,osz)
add_enc_func(ii,fo)
def create_legacy_gprv_immv(env,ii,imm=False):
"""Handles GPRv_SB-IMMv partial reg opcodes and GPRv_SB+OrAX implicit"""
global enc_fn_prefix, arg_request, gprv_names
global arg_reg0, var_reg0
global arg_imm16, var_imm16
global arg_imm32, var_imm32
global arg_imm64, var_imm64
width_list = get_osz_list(env)
for osz in width_list:
opsig = make_opnd_signature(env,ii,osz)
fname = "{}_{}_{}".format(enc_fn_prefix,
ii.iclass.lower(),
opsig)
fo = make_function_object(env,ii,fname)
fo.add_comment("created by create_legacy_gprv_immv")
fo.add_arg(arg_request,'req')
fo.add_arg(arg_reg0, gprv_names[osz])
if imm:
add_arg_immv(fo,osz)
emit_required_legacy_prefixes(ii,fo)
if osz == 16 and env.mode != 16:
# add a 66 prefix outside of 16b mode, to create 16b osz
fo.add_code_eol('emit(r,0x66)')
elif osz == 32 and env.mode == 16:
            # add a 66 prefix inside 16b mode to create 32b osz
fo.add_code_eol('emit(r,0x66)')
elif ii.default_64b and osz == 32: # never happens
continue
rexw_forced = cond_emit_rexw(env,ii,fo,osz)
        # We know this is an SRM partial-opcode instr
if not ii.partial_opcode:
die("Expecting partial opcode instruction in create_legacy_gprv_immv")
op = first_opnd(ii)
if op_luf(op,'GPRv_SB'):
fo.add_code_eol('enc_srm_gpr{}(r,{})'.format(osz, var_reg0))
else:
die("NOT REACHED")
emit_rex(env,fo,rexw_forced)
emit_required_legacy_map_escapes(ii,fo)
emit_partial_opcode_variable_srm(ii,fo)
if imm:
emit_immv(fo,osz)
add_enc_func(ii,fo)
def emit_partial_opcode_variable_srm(ii,fo):
opcode = "0x{:02X}".format(ii.opcode_base10)
fo.add_code_eol('emit(r,{} | get_srm(r))'.format(opcode),
'partial opcode, variable srm')
def emit_partial_opcode_fixed_srm(ii,fo):
fixed_opcode_srm = ii.rm_required
opcode = "0x{:02X}".format(ii.opcode_base10)
fo.add_code_eol('emit(r,{} | {})'.format(opcode,fixed_opcode_srm),
'partial opcode, fixed srm')
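# Illustrative note (not generated output): "partial opcode" instructions fold a
# register number into the low 3 bits of the opcode byte instead of using a
# MODRM byte, e.g. PUSH r64 (0x50+rd) or MOV r32,imm32 (0xB8+rd). The
# variable-SRM emitter above ORs get_srm(r) into the opcode at encode time; the
# fixed-SRM emitter bakes in the rm_required value from the instruction record.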
memsig_idx_16 = { 0: 'bi',
8: 'bid8',
16: 'bid16' }
memsig_idx_32or64 = { 0: 'bis',
8: 'bisd8',
32: 'bisd32' }
memsig_noidx_16 = { 0: 'b',
8: 'bd8',
16: 'bd16' }
memsig_noidx_32or64 = { 0: 'b',
8: 'bd8',
32: 'bd32' }
memsig_str_16 = { True : memsig_idx_16, # indexed by use_index
False: memsig_noidx_16 }
memsig_str_32or64 = { True : memsig_idx_32or64, # indexed by use_index
False: memsig_noidx_32or64 }
def get_memsig(asz, using_indx, dispz):
global memsig_str_16
global memsig_str_32or64
if asz == 16:
return memsig_str_16[using_indx][dispz]
return memsig_str_32or64[using_indx][dispz]
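# Illustrative examples (derived from the tables above, not executed here):
#   get_memsig(64, True,  8)  -> 'bisd8'   (base + index + scale + disp8)
#   get_memsig(16, False, 16) -> 'bd16'    (base + disp16, no index/scale)
# The resulting memory-addressing signature is spliced into the generated
# function names below, e.g. "{}_{}_{}_{}".format(enc_fn_prefix, iclass,
# opsig, memaddrsig).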
def add_memop_args(env, ii, fo, use_index, dispsz, immw=0, reg=-1, osz=0):
"""reg=-1 -> no reg opnds,
reg=0 -> first opnd is reg,
reg=1 -> 2nd opnd is reg.
AVX or AVX512 vsib moots the use_index value"""
global arg_reg0, arg_imm_dct
global arg_base, arg_index, arg_scale
global arg_disp8, arg_disp16, arg_disp32
opnd_types = get_opnd_types(env,ii,osz)
if reg == 0:
fo.add_arg(arg_reg0,opnd_types[0])
fo.add_arg(arg_base, gprv_names[env.asz])
if ii.avx_vsib:
fo.add_arg("{} {}".format(arg_reg_type, var_vsib_index_dct[ii.avx_vsib]),
ii.avx_vsib)
elif ii.avx512_vsib:
fo.add_arg("{} {}".format(arg_reg_type, var_vsib_index_dct[ii.avx512_vsib]),
ii.avx512_vsib)
elif use_index:
fo.add_arg(arg_index, gprv_index_names[env.asz])
if use_index or special_index_cases(ii):
if env.asz in [32,64]:
fo.add_arg(arg_scale, 'scale') # a32, a64
if dispsz != 0:
add_arg_disp(fo,dispsz)
if reg == 1:
fo.add_arg(arg_reg0, opnd_types[1])
if immw:
add_arg_immv(fo,immw)
def create_legacy_one_xmm_reg_one_mem_fixed(env,ii):
    '''allows xmm, mmx, gpr32, gpr64 regs and an optional imm8'''
global var_reg0
op = first_opnd(ii)
width = op.oc2
immw = 8 if ii.has_imm8 else 0
    regpos = 0 if modrm_reg_first_operand(ii) else 1 # regpos determines argument order
#opsig = 'rm' if regpos==0 else 'mr'
#if ii.has_imm8:
# opsig = opsig + 'i'
opsig = make_opnd_signature(env,ii)
gpr32,gpr64,xmm,mmx = False,False,False,False
for op in _gen_opnds(ii):
if op_mmx(op):
mmx=True
break
if op_xmm(op):
xmm=True
break
if op_gpr32(op):
gpr32=True
break
if op_gpr64(op):
gpr64=True
break
dispsz_list = get_dispsz_list(env)
ispace = itertools.product(get_index_vals(ii), dispsz_list)
for use_index, dispsz in ispace:
memaddrsig = get_memsig(env.asz, use_index, dispsz)
fname = "{}_{}_{}_{}".format(enc_fn_prefix,
ii.iclass.lower(),
opsig,
#width, # FIXME:osz, funky
memaddrsig)
fo = make_function_object(env,ii,fname, asz=env.asz)
fo.add_comment("created by create_legacy_one_xmm_reg_one_mem_fixed")
fo.add_arg(arg_request,'req')
add_memop_args(env, ii, fo, use_index, dispsz, immw, reg=regpos)
rexw_forced = False
if ii.eosz == 'o16' and env.mode in [32,64]:
fo.add_code_eol('emit(r,0x66)', 'xx: fixed width with 16b osz')
elif ii.eosz == 'o32' and env.mode == 16:
fo.add_code_eol('emit(r,0x66)')
elif (ii.eosz == 'o64' and env.mode == 64 and ii.default_64b == False) or ii.rexw_prefix == '1':
rexw_forced = True
fo.add_code_eol('set_rexw(r)', 'forced rexw on memop')
emit_required_legacy_prefixes(ii,fo)
mod = get_modval(dispsz)
if mod: # ZERO-INIT OPTIMIZATION
fo.add_code_eol('set_mod(r,{})'.format(mod))
# the sole reg is reg0 whether it is first or 2nd operand...
if xmm:
fo.add_code_eol('enc_modrm_reg_xmm(r,{})'.format(var_reg0))
elif mmx:
fo.add_code_eol('enc_modrm_reg_mmx(r,{})'.format(var_reg0))
elif gpr32:
fo.add_code_eol('enc_modrm_reg_gpr32(r,{})'.format(var_reg0))
elif gpr64:
fo.add_code_eol('enc_modrm_reg_gpr64(r,{})'.format(var_reg0))
else:
die("NOT REACHED")
encode_mem_operand(env, ii, fo, use_index, dispsz)
finish_memop(env, ii, fo, dispsz, immw, rexw_forced, space='legacy')
add_enc_func(ii,fo)
def get_reg_width(op):
if op_gpr8(op):
return 'b'
elif op_gpr16(op):
return 'w'
elif op_gpr32(op):
return 'd'
elif op_gpr64(op):
return 'q'
die("NOT REACHED")
def create_legacy_one_gpr_reg_one_mem_fixed(env,ii):
"""REGb-GPRb or GPRb-REGb also GPR32-MEMd, GPR64-MEMq or MEMdq and MEMw+GPR16"""
global var_reg0, widths_to_bits
dispsz_list = get_dispsz_list(env)
width = None
for i,op in enumerate(_gen_opnds(ii)):
if op_reg(op):
regn = i
width = get_reg_width(op)
break
    if width is None:
dump_fields(ii)
die("Bad search for width")
widths = [width]
opsig = make_opnd_signature(env,ii)
ispace = itertools.product(widths, get_index_vals(ii), dispsz_list)
for width, use_index, dispsz in ispace:
memaddrsig = get_memsig(env.asz, use_index, dispsz)
fname = "{}_{}_{}_{}".format(enc_fn_prefix,
ii.iclass.lower(),
opsig,
memaddrsig)
fo = make_function_object(env,ii,fname, asz=env.asz)
fo.add_comment("created by create_legacy_one_gpr_reg_one_mem_fixed")
fo.add_arg(arg_request,'req')
add_memop_args(env, ii, fo, use_index, dispsz, immw=0, reg=regn)
emit_required_legacy_prefixes(ii,fo)
mod = get_modval(dispsz)
if mod: # ZERO-INIT OPTIMIZATION
fo.add_code_eol('set_mod(r,{})'.format(mod))
# the sole reg is reg0 whether it is first or 2nd operand...
fo.add_code_eol('enc_modrm_reg_gpr{}(r,{})'.format(widths_to_bits[width],
var_reg0))
encode_mem_operand(env, ii, fo, use_index, dispsz)
osz=64 if width=='q' else 0
rexw_forced = cond_emit_rexw(env, ii, fo, osz)
immw=False
finish_memop(env, ii, fo, dispsz, immw, rexw_forced, space='legacy')
add_enc_func(ii,fo)
def create_legacy_one_gpr_reg_one_mem_scalable(env,ii):
"""GPRv-MEMv, MEMv-GPRv, GPRy-MEMv, MEMv-GPRy w/optional imm8 or immz. This
will work with anything that has one scalable register operand
and another fixed or scalable memory operand. """
# The GPRy stuff is not working yet
global arg_reg0, var_reg0, widths_to_bits, widths_to_bits_y
dispsz_list = get_dispsz_list(env)
op = first_opnd(ii)
widths = ['w','d']
if env.mode == 64:
widths.append('q')
gpry=False
for op in _gen_opnds(ii):
if op_gpry(op):
gpry=True
fixed_reg = False
if ii.iclass == 'NOP' and ii.iform.startswith('NOP_MEMv_GPRv_0F1C'):
if ii.reg_required != 'unspecified':
fixed_reg = True
immw = 8 if ii.has_imm8 else 0
ispace = itertools.product(widths, get_index_vals(ii), dispsz_list)
for width, use_index, dispsz in ispace:
opsig = make_opnd_signature(env,ii, width)
opnd_types_org = get_opnd_types(env,ii, osz_translate(width))
opnd_types = copy.copy(opnd_types_org)
if ii.has_immz:
immw = 16 if (width == 16 or width == 'w') else 32
memaddrsig = get_memsig(env.asz, use_index, dispsz)
fname = "{}_{}_{}_{}".format(enc_fn_prefix,
ii.iclass.lower(),
opsig,
memaddrsig)
fo = make_function_object(env,ii,fname, asz=env.asz)
fo.add_comment("created by create_legacy_one_gpr_reg_one_mem_scalable")
fo.add_arg(arg_request,'req')
for i,optype in enumerate(opnd_types_org):
if optype.startswith('gpr'):
if not fixed_reg:
fo.add_arg(arg_reg0, opnd_types.pop(0))
elif optype in ['mem', 'agen']:
add_memop_args(env, ii, fo, use_index, dispsz, immw=0, osz=osz_translate(width))
opnd_types.pop(0)
elif optype.startswith('int') or optype.startswith('imm'):
add_arg_immv(fo,immw)
opnd_types.pop(0) # imm8 is last so we technically can skip this pop
else:
die("UNHANDLED ARG {} in {}".format(optype, ii.iclass))
rexw_forced = False
if width == 'w' and env.mode != 16:
fo.add_code_eol('emit(r,0x66)')
elif width == 'd' and env.mode == 16:
fo.add_code_eol('emit(r,0x66)')
elif width == 'q' and ii.default_64b == False:
rexw_forced = True
fo.add_code_eol('set_rexw(r)', 'forced rexw on memop')
emit_required_legacy_prefixes(ii,fo)
mod = get_modval(dispsz)
if mod: # ZERO-INIT OPTIMIZATION
fo.add_code_eol('set_mod(r,{})'.format(mod))
if ii.reg_required != 'unspecified':
if ii.reg_required: # ZERO INIT OPTIMIZATION
fo.add_code_eol('set_reg(r,{})'.format(ii.reg_required),
'reg opcode extension')
else:
d=widths_to_bits_y if gpry else widths_to_bits
fo.add_code_eol('enc_modrm_reg_gpr{}(r,{})'.format(d[width],
var_reg0))
encode_mem_operand(env, ii, fo, use_index, dispsz)
finish_memop(env, ii, fo, dispsz, immw, rexw_forced=rexw_forced, space='legacy')
add_enc_func(ii,fo)
def create_legacy_far_xfer_nonmem(env,ii): # WRK
'''call far and jmp far via ptr+imm. BRDISPz + IMMw'''
    global var_immz_dct, arg_immz_dct, arg_immz_meta, var_imm16_2, arg_imm16_2
for osz in [16,32]:
fname = '{}_{}_o{}'.format(enc_fn_prefix,
ii.iclass.lower(),
osz)
fo = make_function_object(env,ii,fname, asz=env.asz)
fo.add_comment('created by create_legacy_far_xfer_nonmem')
fo.add_arg(arg_request,'req')
fo.add_arg(arg_immz_dct[osz],arg_immz_meta[osz])
fo.add_arg(arg_imm16_2,'int16')
if osz == 16 and env.mode != 16:
fo.add_code_eol('emit(r,0x66)')
elif osz == 32 and env.mode == 16:
fo.add_code_eol('emit(r,0x66)')
emit_required_legacy_prefixes(ii,fo)
emit_opcode(ii,fo)
emit_immz(fo,osz)
fo.add_code_eol('emit_i16(r,{})'.format(var_imm16_2))
add_enc_func(ii,fo)
def create_legacy_far_xfer_mem(env,ii):
'''call far and jmp far via memop. p has widths 4/6/6 bytes. p2 has 4/6/10 widths'''
p_widths = {16:4, 32:6, 64:6}
p2_widths = {16:4, 32:6, 64:10}
op = first_opnd(ii)
if op.oc2 == 'p2':
widths = p2_widths
elif op.oc2 == 'p':
widths = p_widths
else:
die("NOT REACHED")
osz_list = get_osz_list(env)
dispsz_list = get_dispsz_list(env)
ispace = itertools.product(osz_list, get_index_vals(ii), dispsz_list)
for osz, use_index, dispsz in ispace:
membytes = widths[osz]
memaddrsig = get_memsig(env.asz, use_index, dispsz)
fname = '{}_{}_m{}_{}'.format(enc_fn_prefix,
ii.iclass.lower(),
membytes*8,
memaddrsig)
fo = make_function_object(env,ii,fname, asz=env.asz)
fo.add_comment('created by create_legacy_far_xfer_mem')
fo.add_arg(arg_request,'req')
add_memop_args(env, ii, fo, use_index, dispsz)
rexw_forced = False
if osz == 16 and env.mode != 16:
fo.add_code_eol('emit(r,0x66)')
elif osz == 32 and env.mode == 16:
fo.add_code_eol('emit(r,0x66)')
elif osz == 64 and ii.default_64b == False:
rexw_forced = True
fo.add_code_eol('set_rexw(r)', 'forced rexw on memop')
emit_required_legacy_prefixes(ii,fo)
mod = get_modval(dispsz)
if mod: # ZERO-INIT OPTIMIZATION
fo.add_code_eol('set_mod(r,{})'.format(mod))
if ii.reg_required != 'unspecified':
if ii.reg_required != 0: # ZERO INIT OPTIMIZATION
fo.add_code_eol('set_reg(r,{})'.format(ii.reg_required))
encode_mem_operand(env, ii, fo, use_index, dispsz)
finish_memop(env, ii, fo, dispsz,
immw=0,
rexw_forced=rexw_forced,
space='legacy')
add_enc_func(ii,fo)
def osz_translate(width):
if width in ['w',16]:
return 16
elif width in ['d',32]:
return 32
elif width in ['q', 64]:
return 64
return 0
def create_legacy_one_mem_common(env,ii,imm=0):
"""Handles one memop, fixed or scalable."""
dispsz_list = get_dispsz_list(env)
op = first_opnd(ii)
if op.oc2 == 'v':
widths = [16,32]
if env.mode == 64:
widths.append(64)
elif op.oc2 == 'y':
widths = [32]
if env.mode == 64:
widths.append(64)
elif op.oc2 == 's':
widths = [16,32]
else:
widths = ['nominal'] # just something to get the loop going
immz_dict = { 16:16, 32:32, 64:32 }
for width in widths:
immw = 0
if imm == '8':
immw = 8
elif imm == 'z':
immw = immz_dict[width]
#fwidth = "_{}".format(width) if width not in ['b','w','d','q'] else ''
ispace = itertools.product(get_index_vals(ii), dispsz_list)
for use_index, dispsz in ispace:
memaddrsig = get_memsig(env.asz, use_index, dispsz)
if width != 'nominal':
opsig = make_opnd_signature(env, ii, width)
else:
opsig = make_opnd_signature(env, ii)
fname = '{}_{}_{}_{}'.format(enc_fn_prefix,
ii.iclass.lower(),
opsig,
memaddrsig)
fo = make_function_object(env,ii,fname, asz=env.asz)
fo.add_comment('created by create_legacy_one_mem_common')
fo.add_arg(arg_request,'req')
add_memop_args(env, ii, fo, use_index, dispsz, immw, osz=osz_translate(width))
rexw_forced = False
if op.oc2 in [ 'y','v', 's']: # handle scalable ops
if width == 16 and env.mode != 16:
fo.add_code_eol('emit(r,0x66)')
elif width == 32 and env.mode == 16:
fo.add_code_eol('emit(r,0x66)')
elif width == 64 and ii.default_64b == False:
rexw_forced = True
fo.add_code_eol('set_rexw(r)', 'forced rexw on memop')
else: # fixed width ops
if ii.eosz == 'o16' and env.mode in [32,64]:
fo.add_code_eol('emit(r,0x66)', 'xx: fixed width with 16b osz')
elif ii.eosz == 'o32' and env.mode == 16:
fo.add_code_eol('emit(r,0x66)')
elif (ii.eosz == 'o64' and env.mode == 64 and ii.default_64b == False) or ii.rexw_prefix == '1':
rexw_forced = True
fo.add_code_eol('set_rexw(r)', 'forced rexw on memop')
emit_required_legacy_prefixes(ii,fo)
mod = get_modval(dispsz)
if mod: # ZERO-INIT OPTIMIZATION
fo.add_code_eol('set_mod(r,{})'.format(mod))
if ii.reg_required != 'unspecified':
if ii.reg_required != 0: # ZERO INIT OPTIMIZATION
fo.add_code_eol('set_reg(r,{})'.format(ii.reg_required))
encode_mem_operand(env, ii, fo, use_index, dispsz)
finish_memop(env, ii, fo, dispsz, immw, rexw_forced, space='legacy')
add_enc_func(ii,fo)
def encode_mem_operand(env, ii, fo, use_index, dispsz):
global var_base, var_index, var_scale, memsig_idx_32or64, var_vsib_index_dct
# this may overwrite modrm.mod
memaddrsig = get_memsig(env.asz, use_index, dispsz)
if ii.avx_vsib:
memsig = memsig_idx_32or64[dispsz]
fo.add_code_eol('enc_avx_modrm_vsib_{}_{}_a{}(r,{},{},{})'.format(
ii.avx_vsib, memaddrsig, env.asz, var_base,
var_vsib_index_dct[ii.avx_vsib], var_scale))
elif ii.avx512_vsib:
memsig = memsig_idx_32or64[dispsz]
fo.add_code_eol('enc_avx512_modrm_vsib_{}_{}_a{}(r,{},{},{})'.format(
ii.avx512_vsib, memaddrsig, env.asz, var_base,
var_vsib_index_dct[ii.avx512_vsib], var_scale))
elif use_index:
if env.asz == 16: # no scale
fo.add_code_eol('enc_modrm_rm_mem_{}_a{}(r,{},{})'.format(
memaddrsig, env.asz, var_base, var_index))
else:
fo.add_code_eol('enc_modrm_rm_mem_{}_a{}(r,{},{},{})'.format(
memaddrsig, env.asz, var_base, var_index, var_scale))
else: # no index,scale
fo.add_code_eol('enc_modrm_rm_mem_{}_a{}(r,{})'.format(
memaddrsig, env.asz, var_base))
def finish_memop(env, ii, fo, dispsz, immw, rexw_forced=False, space='legacy'):
global var_disp8, var_disp16, var_disp32
if space == 'legacy':
emit_rex(env,fo,rexw_forced)
emit_required_legacy_map_escapes(ii,fo)
elif space =='evex':
fo.add_code_eol('emit_evex(r)')
emit_opcode(ii,fo)
emit_modrm(fo)
if special_index_cases(ii):
fo.add_code_eol('emit_sib(r)', 'for vsib/sibmem')
else:
fo.add_code('if (get_has_sib(r))')
fo.add_code_eol(' emit_sib(r)')
if space == 'evex':
if dispsz == 0:
            # if form has no displacement, then we sometimes have to
# add a zero displacement to create an allowed modrm/sib
# encoding.
emit_synthetic_disp(fo)
elif dispsz == 8:
fo.add_code_eol('emit_i8(r,{})'.format(var_disp8))
else:
emit_evex_disp(env,fo)
else:
if dispsz == 8:
fo.add_code_eol('emit_i8(r,{})'.format(var_disp8))
elif dispsz == 16:
fo.add_code_eol('emit_i16(r,{})'.format(var_disp16))
elif dispsz == 32:
fo.add_code_eol('emit_i32(r,{})'.format(var_disp32))
elif dispsz == 0 and env.asz != 16:
            # if form has no displacement, then we sometimes have to
# add a zero displacement to create an allowed modrm/sib
# encoding.
emit_synthetic_disp(fo)
if immw:
emit_immv(fo,immw)
def emit_modrm(fo):
fo.add_code_eol('emit_modrm(r)')
def emit_sib(fo):
fo.add_code('if (get_has_sib(r))')
fo.add_code_eol(' emit_sib(r)')
def emit_synthetic_disp(fo):
fo.add_code('if (get_has_disp8(r))')
fo.add_code_eol(' emit_i8(r,0)')
fo.add_code('else if (get_has_disp32(r))')
fo.add_code_eol(' emit_i32(r,0)')
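# Background note: a "synthetic" zero displacement is needed when the chosen
# base register has no mod=00/no-displacement encoding (classically rBP/r13 as
# a base, where mod=00 with rm/base=101 means disp32 or RIP-relative instead).
# The lower-level enc_modrm_rm_mem_* helpers are assumed to flag has_disp8 or
# has_disp32 in that case; this routine just emits the corresponding zero bytes.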
def add_evex_displacement_var(fo):
fo.add_code_eol('xed_int32_t use_displacement')
def chose_evex_scaled_disp(fo, ii, dispsz, broadcasting=False): # WIP
disp_fix = '16' if dispsz == 16 else ''
if ii.avx512_tuple == 'NO_SCALE':
memop_width_bytes = 1
elif broadcasting:
memop_width_bytes = ii.element_size // 8
else:
memop_width_bytes = ii.memop_width // 8
fo.add_code_eol('use_displacement = xed_chose_evex_scaled_disp{}(r, disp{}, {})'.format(
disp_fix,
dispsz,
memop_width_bytes))
def emit_evex_disp(env,fo):
fo.add_code('if (get_has_disp8(r))')
fo.add_code_eol(' emit_i8(r,use_displacement)')
if env.asz == 16:
fo.add_code('else if (get_has_disp16(r))')
fo.add_code_eol(' emit_i16(r,use_displacement)')
else:
fo.add_code('else if (get_has_disp32(r))')
fo.add_code_eol(' emit_i32(r,use_displacement)')
def mov_without_modrm(ii):
if ii.iclass == 'MOV' and not ii.has_modrm:
if 'UIMM' in ii.pattern: # avoid 0xB0/0xB8 related mov's
return False
return True
return False
def create_legacy_mov_without_modrm(env,ii):
    '''This is for 0xA0...0xA3 MOVs without MODRM'''
global enc_fn_prefix, arg_request, arg_reg0, bits_to_widths
# in XED, MEMDISPv is a misnomer - the displacement size is
# modulated by the EASZ! The actual width of the memory reference
# is OSZ modulated (66, rex.w) normally.
byte_width = False
for op in _gen_opnds(ii):
if op.oc2 and op.oc2 == 'b':
byte_width = True
break
if byte_width:
osz_list = [8]
else:
osz_list = get_osz_list(env)
disp_width = env.asz
for osz in osz_list:
opsig = make_opnd_signature(env,ii,osz)
fname = "{}_{}_{}_d{}".format(enc_fn_prefix,
ii.iclass.lower(),
opsig,
env.asz) # FIXME redundant with asz
fo = make_function_object(env,ii,fname,asz=env.asz)
fo.add_comment("created by create_legacy_mov_without_modrm")
fo.add_arg(arg_request,'req')
add_arg_disp(fo,disp_width)
# MEMDISPv is EASZ-modulated.
if disp_width == 16 and env.asz != 16:
emit_67_prefix(fo)
elif disp_width == 32 and env.asz != 32:
emit_67_prefix(fo)
rexw_forced = emit_legacy_osz(env,ii,fo,osz)
emit_rex(env, fo, rexw_forced)
emit_opcode(ii,fo)
emit_disp(fo,disp_width)
add_enc_func(ii,fo)
def is_enter_instr(ii):
return ii.iclass == 'ENTER' # imm0-w, imm1-b
def is_mov_seg(ii):
if ii.iclass in ['MOV']:
for op in _gen_opnds(ii):
if op_seg(op):
return True
return False
def is_mov_cr_dr(ii):
return ii.iclass in ['MOV_CR','MOV_DR']
def create_legacy_gprv_seg(env,ii,op_info):
global arg_reg_type, gprv_names
reg1 = 'seg'
osz_list = get_osz_list(env)
for osz in osz_list:
opsig = make_opnd_signature(env,ii,osz)
fname = "{}_{}_{}".format(enc_fn_prefix,
ii.iclass.lower(),
opsig)
fo = make_function_object(env,ii,fname)
fo.add_comment('created by create_legacy_gprv_seg')
fo.add_arg(arg_request,'req')
reg0 = gprv_names[osz]
fo.add_arg(arg_reg_type + reg0,'gpr{}'.format(osz))
fo.add_arg(arg_reg_type + reg1,'seg')
emit_required_legacy_prefixes(ii,fo)
if not ii.osz_required:
if osz == 16 and env.mode != 16:
# add a 66 prefix outside of 16b mode, to create 16b osz
fo.add_code_eol('emit(r,0x66)')
if osz == 32 and env.mode == 16:
# add a 66 prefix outside inside 16b mode to create 32b osz
fo.add_code_eol('emit(r,0x66)')
if osz == 64:
fo.add_code_eol('set_rexw(r)')
fo.add_code_eol('enc_modrm_{}_{}(r,{})'.format('rm',reg0,reg0))
fo.add_code_eol('enc_modrm_{}_{}(r,{})'.format('reg',op_info[1],reg1))
if osz == 64:
fo.add_code_eol('emit_rex(r)')
elif env.mode == 64:
fo.add_code_eol('emit_rex_if_needed(r)')
emit_required_legacy_map_escapes(ii,fo)
emit_opcode(ii,fo)
emit_modrm(fo)
add_enc_func(ii,fo)
def create_legacy_mem_seg(env,ii,op_info):
'''order varies: MEM-SEG or SEG-MEM'''
global arg_reg_type
dispsz_list = get_dispsz_list(env)
opnd_sig = make_opnd_signature(env,ii)
ispace = itertools.product(get_index_vals(ii), dispsz_list)
for use_index, dispsz in ispace:
memaddrsig = get_memsig(env.asz, use_index, dispsz)
fname = '{}_{}_{}_{}'.format(enc_fn_prefix,
ii.iclass.lower(),
opnd_sig,
memaddrsig)
fo = make_function_object(env,ii,fname, asz=env.asz)
fo.add_comment('created by create_legacy_mem_seg')
fo.add_arg(arg_request,'req')
for opi in op_info:
if opi == 'mem':
add_memop_args(env, ii, fo, use_index, dispsz)
elif opi == 'seg':
reg0 = 'seg'
fo.add_arg(arg_reg_type + reg0, 'seg')
else:
die("NOT REACHED")
emit_required_legacy_prefixes(ii,fo)
mod = get_modval(dispsz)
if mod: # ZERO-INIT OPTIMIZATION
fo.add_code_eol('set_mod(r,{})'.format(mod))
fo.add_code_eol('enc_modrm_reg_seg(r,{})'.format(reg0))
encode_mem_operand(env, ii, fo, use_index, dispsz)
finish_memop(env, ii, fo, dispsz, immw=0, rexw_forced=False, space='legacy')
add_enc_func(ii,fo)
def create_mov_seg(env,ii):
'''mov-seg. operand order varies'''
op_info=[] # for encoding the modrm fields
mem = False
scalable=False
for i,op in enumerate(_gen_opnds(ii)):
if op_gprv(op):
op_info.append('gprv')
scalable=True
elif op_gpr16(op):
op_info.append('gpr16')
elif op_seg(op):
op_info.append('seg')
elif op_mem(op):
mem=True
op_info.append('mem')
if op_info == ['gprv','seg']: # gprv, seg -- scalable, special handling
create_legacy_gprv_seg(env,ii,op_info)
return
elif op_info == ['mem','seg']: # mem,seg
create_legacy_mem_seg(env,ii,op_info)
return
elif op_info == ['seg','mem']: # seg,mem
create_legacy_mem_seg(env,ii,op_info)
return
elif op_info == ['seg','gpr16']: # seg,gpr16
opsig = 'SR' # handled below
else:
die("Unhandled mov-seg case")
fname = "{}_{}_{}".format(enc_fn_prefix, ii.iclass.lower(),opsig)
fo = make_function_object(env,ii,fname)
fo.add_comment("created by create_mov_seg")
fo.add_arg(arg_request,'req')
fo.add_arg('xed_reg_enum_t ' + op_info[0], 'seg')
fo.add_arg('xed_reg_enum_t ' + op_info[1], 'gpr16')
if modrm_reg_first_operand(ii):
f1, f2, = 'reg','rm'
else:
f1, f2, = 'rm','reg'
fo.add_code_eol('enc_modrm_{}_{}(r,{})'.format(f1, op_info[0], op_info[0]))
fo.add_code_eol('enc_modrm_{}_{}(r,{})'.format(f2, op_info[1], op_info[1]))
emit_required_legacy_prefixes(ii,fo)
if env.mode == 64:
fo.add_code_eol('emit_rex_if_needed(r)')
emit_required_legacy_map_escapes(ii,fo)
emit_opcode(ii,fo)
emit_modrm(fo)
add_enc_func(ii,fo)
def create_mov_cr_dr(env,ii):
'''mov-cr and mov-dr. operand order varies'''
op_info=[] # for encoding the modrm fields
for op in _gen_opnds(ii):
if op_gpr32(op):
op_info.append('gpr32')
elif op_gpr64(op):
op_info.append('gpr64')
elif op_cr(op):
op_info.append('cr')
elif op_dr(op):
op_info.append('dr')
opsig = make_opnd_signature(env,ii)
fname = "{}_{}_{}".format(enc_fn_prefix, ii.iclass.lower(),opsig)
fo = make_function_object(env,ii,fname)
fo.add_comment("created by create_mov_cr_dr")
fo.add_arg(arg_request,'req')
fo.add_arg('xed_reg_enum_t ' + op_info[0], op_info[0])
fo.add_arg('xed_reg_enum_t ' + op_info[1], op_info[1])
if modrm_reg_first_operand(ii):
f1, f2, = 'reg','rm'
else:
f1, f2, = 'rm','reg'
fo.add_code_eol('enc_modrm_{}_{}(r,{})'.format(f1, op_info[0], op_info[0]))
fo.add_code_eol('enc_modrm_{}_{}(r,{})'.format(f2, op_info[1], op_info[1]))
emit_required_legacy_prefixes(ii,fo)
if env.mode == 64:
fo.add_code_eol('emit_rex_if_needed(r)')
emit_required_legacy_map_escapes(ii,fo)
emit_opcode(ii,fo)
emit_modrm(fo)
add_enc_func(ii,fo)
def create_legacy_enter(env,ii):
'''These are 3 unusual instructions: enter and AMD SSE4a extrq, insrq'''
global arg_imm16, var_imm16
global arg_imm8_2, var_imm8_2
fname = "{}_{}".format(enc_fn_prefix, ii.iclass.lower())
fo = make_function_object(env,ii,fname)
fo.add_comment("created by create_legacy_enter")
fo.add_arg(arg_request,'req')
fo.add_arg(arg_imm16,'imm16')
fo.add_arg(arg_imm8_2,'imm8')
emit_required_legacy_prefixes(ii,fo)
emit_required_legacy_map_escapes(ii,fo)
emit_opcode(ii,fo)
fo.add_code_eol('emit_u16(r,{})'.format(var_imm16))
fo.add_code_eol('emit(r,{})'.format(var_imm8_2))
add_enc_func(ii,fo)
def is_mov_crc32(ii):
return ii.iclass == 'CRC32'
def is_lsl_regreg(ii):
if ii.iclass == 'LSL':
if not has_memop(ii):
return True
return False
def has_memop(ii):
for op in _gen_opnds(ii):
if op_mem(op):
return True
return False
def get_opnds(ii):
opnds = []
for op in _gen_opnds(ii):
opnds.append(op)
return opnds
def compute_widths_crc32(env,ii):
'''return a dict by osz of {op1-width,op2-width}. Also for LSL '''
opnd_types = get_opnd_types_short(ii)
if env.mode == 16:
if opnd_types == ['y','v']:
return { 16:{32,16}, 32:{32,32} }
elif opnd_types == ['y','b']:
return { 16:{32,8} }
elif opnd_types == ['v','z']:
return { 16:{16,16}, 32:{32,32} }
elif env.mode == 32:
if opnd_types == ['y','v']:
return { 16: {32,16}, 32:{32,32} }
elif opnd_types == ['y','b']:
return { 32:{32,8} }
elif opnd_types == ['v','z']:
return { 16:{16,16}, 32:{32,32} }
elif env.mode == 64:
if opnd_types == ['y','v']:
return { 16: {32,16}, 32:{32,32}, 64:{64,64} }
elif opnd_types == ['y','b']:
return { 32:{32,8}, 64:{64,8} }
elif opnd_types == ['v','z']:
return { 16:{16,16}, 32:{32,32}, 64:{64,32} }
die("not reached")
def create_legacy_crc32_mem(env,ii):
'''GPRy+MEMv or GPRy+MEMb'''
global gpry_names, arg_request, enc_fn_prefix
config = compute_widths_crc32(env,ii)
osz_list = list(config.keys())
dispsz_list = get_dispsz_list(env)
opnd_types = get_opnd_types_short(ii)
ispace = itertools.product(osz_list, get_index_vals(ii), dispsz_list)
for osz, use_index, dispsz in ispace:
#op_widths = config[osz]
opsig = make_opnd_signature(env,ii,osz)
memaddrsig = get_memsig(env.asz, use_index, dispsz)
fname = '{}_{}_{}_{}'.format(enc_fn_prefix,
ii.iclass.lower(),
opsig,
memaddrsig)
fo = make_function_object(env,ii,fname,asz=env.asz)
fo.add_comment("created by create_legacy_crc32_mem")
fo.add_arg(arg_request,'req')
op = first_opnd(ii)
if op.oc2 == 'y':
reg = gpry_names[osz]
fo.add_arg(arg_reg_type + reg, reg)
else:
die("NOT REACHED")
add_memop_args(env, ii, fo, use_index, dispsz, osz=osz)
rexw_forced = emit_legacy_osz(env,ii,fo,osz)
fo.add_code_eol('enc_modrm_reg_{}(r,{})'.format(reg, reg))
emit_required_legacy_prefixes(ii,fo)
mod = get_modval(dispsz)
if mod: # ZERO-INIT OPTIMIZATION
fo.add_code_eol('set_mod(r,{})'.format(mod))
encode_mem_operand(env, ii, fo, use_index, dispsz)
immw=0
finish_memop(env, ii, fo, dispsz, immw, rexw_forced, space='legacy')
add_enc_func(ii,fo)
def cond_emit_rexw(env,ii,fo,osz):
rexw_forced = False
if env.mode == 64:
if ii.rexw_prefix == '1':
rexw_forced = True
fo.add_code_eol('set_rexw(r)', 'required by instr')
elif osz == 64 and not ii.default_64b:
rexw_forced = True
fo.add_code_eol('set_rexw(r)', 'required by osz=64')
return rexw_forced
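# Illustrative note: REX.W is only ever forced in 64b mode, either because the
# pattern itself requires it (rexw_prefix == '1') or because the caller asked
# for osz=64 on an instruction that does not default to 64b operand size
# (e.g. ADD needs REX.W for a 64b osz, whereas PUSH/near-JMP default to 64b
# and do not).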
def emit_legacy_osz(env,ii,fo,osz):
if env.mode in [32,64] and osz == 16:
fo.add_code_eol('emit(r,0x66)','to set osz=16')
elif env.mode == 16 and osz == 32:
fo.add_code_eol('emit(r,0x66)','to set osz=32')
rexw_forced = cond_emit_rexw(env,ii,fo,osz)
return rexw_forced
def create_legacy_crc32_reg(env,ii):
'''CRC32-reg (GPRy-GPR{v,b}) and LSL (GPRv+GPRz)'''
global gprv_names, gpry_names, gprz_names
config = compute_widths_crc32(env,ii)
osz_list = list(config.keys())
opnd_types = get_opnd_types_short(ii)
for osz in osz_list:
opsig = make_opnd_signature(env,ii,osz)
fname = "{}_{}_{}".format(enc_fn_prefix, ii.iclass.lower(), opsig)
fo = make_function_object(env,ii,fname)
fo.add_comment("created by create_legacy_crc32_reg")
fo.add_arg(arg_request,'req')
reg_types_names =[]
for i,otype in enumerate(opnd_types):
if otype == 'y':
reg = gpry_names[osz]
elif otype == 'z':
reg = gprz_names[osz]
elif otype == 'b':
reg = 'gpr8'
elif otype == 'v':
reg = gprv_names[osz]
arg_name = '{}_{}'.format(reg,i)
fo.add_arg(arg_reg_type + arg_name, reg)
reg_types_names.append((reg,arg_name))
if modrm_reg_first_operand(ii):
modrm_order = ['reg','rm']
else:
modrm_order = ['rm','reg']
rexw_forced = emit_legacy_osz(env,ii,fo,osz)
for i,(reg,arg) in enumerate(reg_types_names):
fo.add_code_eol('enc_modrm_{}_{}(r,{})'.format(modrm_order[i], reg, arg))
emit_required_legacy_prefixes(ii,fo)
emit_rex(env, fo, rexw_forced)
emit_required_legacy_map_escapes(ii,fo)
emit_opcode(ii,fo)
emit_modrm(fo)
add_enc_func(ii,fo)
def create_legacy_crc32(env,ii):
'''CRC32 is really strange. First operand is GPRy. Second operand is GPRv or GPR8 or MEMv or MEMb
and bizarrely also LSL gprv+gprz'''
if has_memop(ii):
create_legacy_crc32_mem(env,ii)
else:
create_legacy_crc32_reg(env,ii)
def is_movdir64_or_enqcmd(ii):
return ii.iclass in [ 'MOVDIR64B', 'ENQCMD', 'ENQCMDS']
def create_legacy_movdir64_or_enqcmd(env,ii):
    '''MOVDIR64B and ENQCMD* are a little unusual. They have 2 memops, one
    addressed through an address-size-wide A_GPR_R and the other a normal
    memop.'''
global arg_request, enc_fn_prefix, gprv_names
ispace = itertools.product( get_index_vals(ii), get_dispsz_list(env))
for use_index, dispsz in ispace:
memaddrsig = get_memsig(env.asz, use_index, dispsz)
fname = '{}_{}_{}'.format(enc_fn_prefix,
ii.iclass.lower(),
memaddrsig)
fo = make_function_object(env,ii,fname,asz=env.asz)
fo.add_comment("created by create_legacy_movdir64")
fo.add_arg(arg_request,'req')
        reg = gpry_names[env.asz] # abuse the gpry names (indexed by asz here)
fo.add_arg(arg_reg_type + reg, reg)
add_memop_args(env, ii, fo, use_index, dispsz)
        # This operation is address-size modulated. In 64b mode, 64b
# addressing is the default. For non default 32b addressing in
# 64b mode, we need a 67 prefix.
if env.mode == 64 and env.asz == 32:
emit_67_prefix(fo)
# FIXME: REWORD COMMENT In 32b mode, we usually, but not always have
# 32b addressing. It is perfectly legit to have 32b mode with
# 16b addressing in which case a 67 is not needed. Same (other
# way around) for 16b mode. So we really do not need the 67
# prefix ever outside of 64b mode as users are expected to use
# the appropriate library for their addressing mode.
#
#elif env.mode == 32 and env.asz == 16:
# emit_67_prefix(fo)
#elif env.mode == 16 and asz == 32:
# emit_67_prefix(fo)
rexw_forced = False
fo.add_code_eol('enc_modrm_reg_{}(r,{})'.format(reg, reg))
emit_required_legacy_prefixes(ii,fo)
mod = get_modval(dispsz)
if mod: # ZERO-INIT OPTIMIZATION
fo.add_code_eol('set_mod(r,{})'.format(mod))
encode_mem_operand(env, ii, fo, use_index, dispsz)
immw=0
finish_memop(env, ii, fo, dispsz, immw, rexw_forced, space='legacy')
add_enc_func(ii,fo)
def is_umonitor(ii):
return ii.iclass == 'UMONITOR'
def create_legacy_umonitor(env,ii):
'''ASZ-based GPR_B.'''
global arg_request, enc_fn_prefix, gprv_names
fname = '{}_{}'.format(enc_fn_prefix,
ii.iclass.lower())
fo = make_function_object(env,ii,fname,asz=env.asz)
fo.add_comment("created by create_legacy_umonitor")
fo.add_arg(arg_request,'req')
    reg = gpry_names[env.asz] # abuse the gpry names (indexed by asz here)
fo.add_arg(arg_reg_type + reg, reg)
    # This operation is address-size modulated. In 64b mode, 64b
# addressing is the default. For non default 32b addressing in
# 64b mode, we need a 67 prefix.
if env.mode == 64 and env.asz == 32:
emit_67_prefix(fo)
# FIXME: REWORD COMMENT In 32b mode, we usually, but not always
# have 32b addressing. It is perfectly legit to have 32b mode
# with 16b addressing in which case a 67 is not needed. Same
# (other way around) for 16b mode. So we really do not need the 67
# prefix ever outside of 64b mode as users are expected to use the
# appropriate library for their addressing mode.
#
#elif env.mode == 32 and env.asz == 16:
# emit_67_prefix(fo)
#elif env.mode == 16 and asz == 32:
# emit_67_prefix(fo)
fo.add_code_eol('enc_modrm_rm_{}(r,{})'.format(reg, reg))
if ii.reg_required != 'unspecified':
if ii.reg_required: # ZERO INIT OPTIMIZATION
fo.add_code_eol('set_reg(r,{})'.format(ii.reg_required),
'reg opcode extension')
if ii.mod_required != 'unspecified':
if ii.mod_required: # ZERO INIT OPTIMIZATION
fo.add_code_eol('set_mod(r,{})'.format(ii.mod_required))
emit_required_legacy_prefixes(ii,fo)
emit_rex(env,fo,rex_forced=False)
emit_required_legacy_map_escapes(ii,fo)
emit_opcode(ii,fo)
emit_modrm(fo)
add_enc_func(ii,fo)
def is_ArAX_implicit(ii): # allows one implicit fixed reg
a,implicit_fixed=0,0
for op in _gen_opnds(ii):
if op_luf_start(op,'ArAX'):
a += 1
elif op_reg(op) and op_implicit_specific_reg(op):
implicit_fixed += 1
else:
return False
return a==1 and implicit_fixed <= 1
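# Note: "ArAX" denotes the accumulator selected by the effective address size
# (AX/EAX/RAX). This predicate matches forms with exactly one such operand,
# typically an implicitly addressed memory operand through rAX, plus at most
# one other implicit fixed register.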
def create_legacy_ArAX_implicit(env,ii):
global arg_request, enc_fn_prefix
fname = '{}_{}'.format(enc_fn_prefix,
ii.iclass.lower())
fo = make_function_object(env,ii,fname, asz=env.asz)
fo.add_comment("created by create_legacy_ArAX_implicit")
fo.add_arg(arg_request,'req')
    # This operation is address-size modulated. In 64b mode, 64b
# addressing is the default. For non default 32b addressing in
# 64b mode, we need a 67 prefix.
if env.mode == 64 and env.asz == 32:
emit_67_prefix(fo)
# FIXME: REWORD COMMENT In 32b mode, we usually, but not always
# have 32b addressing. It is perfectly legit to have 32b mode
# with 16b addressing in which case a 67 is not needed. Same
# (other way around) for 16b mode. So we really do not need the 67
# prefix ever outside of 64b mode as users are expected to use the
# appropriate library for their addressing mode.
#
#elif env.mode == 32 and env.asz == 16:
# emit_67_prefix(fo)
#elif env.mode == 16 and asz == 32:
# emit_67_prefix(fo)
if ii.reg_required != 'unspecified':
if ii.reg_required: # ZERO INIT OPTIMIZATION
fo.add_code_eol('set_reg(r,{})'.format(ii.reg_required),
'reg opcode extension')
if ii.rm_required != 'unspecified':
if ii.rm_required: # ZERO INIT OPTIMIZATION
fo.add_code_eol('set_rm(r,{})'.format(ii.rm_required),
'rm opcode extension')
if ii.mod_required != 'unspecified':
if ii.mod_required: # ZERO INIT OPTIMIZATION
fo.add_code_eol('set_mod(r,{})'.format(ii.mod_required))
emit_required_legacy_prefixes(ii,fo)
#emit_rex(env,fo,rex_forced=False)
emit_required_legacy_map_escapes(ii,fo)
emit_opcode(ii,fo)
emit_modrm(fo)
add_enc_func(ii,fo)
def _enc_legacy(env,ii):
if is_ArAX_implicit(ii): # must be before one_nonmem_operand and zero_operands
create_legacy_ArAX_implicit(env,ii)
    elif is_umonitor(ii): # must be before one_nonmem_operand and zero_operands
create_legacy_umonitor(env,ii)
elif zero_operands(ii):# allows all-implicit too
create_legacy_zero_operands(env,ii)
elif one_implicit_gpr_imm8(ii):
create_legacy_one_implicit_reg(env,ii,imm8=True)
elif mov_without_modrm(ii): # A0...A3, not B0,B8
create_legacy_mov_without_modrm(env,ii)
elif one_gpr_reg_one_mem_zp(ii):
create_legacy_one_gpr_reg_one_mem_scalable(env,ii)
elif one_gpr_reg_one_mem_scalable(ii):
create_legacy_one_gpr_reg_one_mem_scalable(env,ii)
elif one_scalable_gpr_and_one_mem(ii): # mem fixed or scalable, optional imm8,immz
        create_legacy_one_gpr_reg_one_mem_scalable(env,ii) # GPRy or GPRv with MEMv
elif one_gpr_reg_one_mem_fixed(ii):
create_legacy_one_gpr_reg_one_mem_fixed(env,ii)
elif two_gpr8_regs(ii):
create_legacy_two_gpr8_regs(env,ii)
elif two_scalable_regs(ii): # allow optional imm8,immz
create_legacy_two_scalable_regs(env,ii,[16,32,64])
elif two_gpr_one_scalable_one_fixed(ii):
create_legacy_two_gpr_one_scalable_one_fixed(env,ii)
elif two_fixed_gprs(ii):
create_legacy_two_fixed_gprs(env,ii)
elif one_xmm_reg_imm8(ii): # also SSE4 2-imm8 instr
create_legacy_one_xmm_reg_imm8(env,ii)
elif one_mmx_reg_imm8(ii):
create_legacy_one_mmx_reg_imm8(env,ii)
elif two_fixed_regs_opti8(ii):
create_legacy_two_fixed_regs_opti8(env,ii)
elif one_x87_reg(ii):
create_legacy_one_x87_reg(env,ii)
elif two_x87_reg(ii): # one implicit
create_legacy_two_x87_reg(env,ii)
elif one_x87_implicit_reg_one_memop(ii):
create_legacy_one_mem_common(env,ii,imm=0)
elif one_gprv_one_implicit(ii):
create_legacy_one_scalable_gpr(env, ii, [16,32,64], 'v')
elif one_gpr8_one_implicit(ii):
create_legacy_one_scalable_gpr(env, ii, [8], '8')
elif one_nonmem_operand(ii):
create_legacy_one_nonmem_opnd(env,ii) # branches out
elif gpr8_imm8(ii):
create_legacy_gpr_imm8(env,ii,[8])
elif gprv_imm8(ii):
create_legacy_gpr_imm8(env,ii,[16,32,64])
elif gprv_immz(ii):
create_legacy_gprv_immz(env,ii)
elif gprv_immv(ii):
create_legacy_gprv_immv(env,ii,imm=True)
elif gprv_implicit_orax(ii):
create_legacy_gprv_immv(env,ii,imm=False)
elif orax_immz(ii):
create_legacy_orax_immz(env,ii)
elif is_far_xfer_nonmem(ii):
create_legacy_far_xfer_nonmem(env,ii)
elif is_far_xfer_mem(ii):
create_legacy_far_xfer_mem(env,ii)
elif one_mem_common(ii): # b,w,d,q,dq, v,y
create_legacy_one_mem_common(env,ii,imm=0)
elif one_mem_common_one_implicit_gpr(ii): # b,w,d,q,dq, v,y
create_legacy_one_mem_common(env,ii,imm=0)
elif one_mem_fixed_imm8(ii):
create_legacy_one_mem_common(env,ii,imm='8')
elif one_mem_fixed_immz(ii):
create_legacy_one_mem_common(env,ii,imm='z')
elif one_xmm_reg_one_mem_fixed_opti8(ii): # allows gpr32, gpr64, mmx too
create_legacy_one_xmm_reg_one_mem_fixed(env,ii)
elif is_enter_instr(ii):
create_legacy_enter(env,ii)
elif is_mov_cr_dr(ii):
create_mov_cr_dr(env,ii)
elif is_mov_seg(ii):
create_mov_seg(env,ii)
elif is_mov_crc32(ii) or is_lsl_regreg(ii):
create_legacy_crc32(env,ii)
elif is_movdir64_or_enqcmd(ii):
create_legacy_movdir64_or_enqcmd(env,ii)
def several_xymm_gpr_imm8(ii): # optional imm8
i,x,y,d,q = 0,0,0,0,0
for op in _gen_opnds(ii):
if op_reg(op) and op_xmm(op):
x += 1
elif op_reg(op) and op_ymm(op):
y += 1
elif op_gpr32(op) or op_vgpr32(op):
d += 1
elif op_gpr64(op) or op_vgpr64(op):
q += 1
elif op_imm8(op):
i += 1
else:
return False
simd = x + y
gpr = d + q
if simd == 4 and gpr == 0:
return True
sum = simd + gpr
return simd <= 3 and gpr <= 3 and i<=1 and sum<=3 and sum>0
def several_xymm_gpr_mem_imm8(ii): # optional imm8
m,i,x,y,d,q,k = 0,0,0,0,0,0,0
for op in _gen_opnds(ii):
if op_mem(op):
m += 1
elif op_mask_reg(op):
k += 1
elif op_reg(op) and op_xmm(op):
x += 1
elif op_reg(op) and op_ymm(op):
y += 1
elif op_gpr32(op) or op_vgpr32(op):
d += 1
elif op_gpr64(op) or op_vgpr64(op):
q += 1
elif op_imm8(op):
i += 1
else:
return False
simd = x + y
gpr = d + q
if m==1 and simd == 4 and gpr == 0:
return True
sum = simd + gpr + k
return m==1 and simd <= 3 and gpr <= 3 and k <= 1 and i<=1 and sum<=3 and (sum>0 or m==1)
def two_ymm_and_mem(ii):
m,n = 0,0
for op in _gen_opnds(ii):
if op_reg(op) and op_ymm(op):
n += 1
elif op_mem(op):
m += 1
else:
return False
return n==2 and m==1
def set_vex_pp(ii,fo):
# XED encodes VEX_PREFIX=2,3 values for F2,F3 so they need to be recoded before emitting.
translate_pp_values = { 0:0, 1:1, 2:3, 3:2 }
vex_prefix = re.compile(r'VEX_PREFIX=(?P<prefix>[0123])')
m = vex_prefix.search(ii.pattern)
if m:
ppval = int(m.group('prefix'))
real_pp = translate_pp_values[ppval]
if real_pp:
fo.add_code_eol('set_vexpp(r,{})'.format(real_pp))
else:
die("Could not find the VEX.PP pattern")
def largest_vl_vex(ii): # and evex
vl = 0
for op in _gen_opnds(ii):
if op_xmm(op):
vl = vl | 1
elif op_ymm(op):
vl = vl | 2
elif op_zmm(op):
vl = vl | 4
if vl >= 4:
return 'zmm'
elif vl >= 2:
return 'ymm'
return 'xmm'
def get_type_size(op):
a = re.sub(r'_.*','',op.lookupfn_name)
a = re.sub('MASK','kreg',a)
return re.sub(r'^[Vv]','',a).lower()
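# Illustrative examples (assuming lookupfn names of the forms used elsewhere in
# this file, e.g. 'XMM_R3', 'VGPR32_N', 'MASK_R'):
#   'XMM_R3'   -> 'xmm'     (suffix stripped, lowercased)
#   'VGPR32_N' -> 'gpr32'   (leading V dropped)
#   'MASK_R'   -> 'kreg'    (MASK renamed to kreg)
# The result is used to pick enc_modrm_reg_<size> / enc_vvvv_reg_<size> helpers.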
def count_operands(ii): # skip imm8
x = 0
for op in _gen_opnds(ii):
if op_imm8(op):
continue
x += 1
return x
def create_vex_simd_reg(env,ii):
"""Handle 2/3/4 xymm or gprs regs and optional imm8. This is coded to
allow different type and size for each operand. Different
x/ymm show up on converts. Also handles 2-imm8 SSE4a instr. """
global enc_fn_prefix, arg_request
global arg_reg0, var_reg0
global arg_reg1, var_reg1
global arg_reg2, var_reg2
global arg_reg3, var_reg3
nopnds = count_operands(ii) # not imm8
opnd_sig = make_opnd_signature(env,ii)
fname = "{}_{}_{}".format(enc_fn_prefix,
ii.iclass.lower(),
opnd_sig)
fo = make_function_object(env,ii,fname)
fo.add_comment("created by create_vex_simd_reg opnd_sig={} nopnds={}".format(opnd_sig,nopnds))
fo.add_arg(arg_request,'req')
opnd_types = get_opnd_types(env,ii)
fo.add_arg(arg_reg0,opnd_types[0])
if nopnds >= 2:
fo.add_arg(arg_reg1, opnd_types[1])
if nopnds >= 3:
fo.add_arg(arg_reg2, opnd_types[2])
if nopnds >= 4:
fo.add_arg(arg_reg3, opnd_types[3])
cond_add_imm_args(ii,fo)
set_vex_pp(ii,fo)
fo.add_code_eol('set_map(r,{})'.format(ii.map))
if ii.vl == '256': # ZERO INIT OPTIMIZATION
fo.add_code_eol('set_vexl(r,1)')
fo.add_code_eol('set_mod(r,3)')
vars = [var_reg0, var_reg1, var_reg2, var_reg3]
var_r, var_b, var_n, var_se = None, None, None, None
for i,op in enumerate(_gen_opnds(ii)):
if op.lookupfn_name:
if op.lookupfn_name.endswith('_R'):
var_r,sz_r = vars[i], get_type_size(op)
elif op.lookupfn_name.endswith('_B'):
var_b,sz_b = vars[i], get_type_size(op)
elif op.lookupfn_name.endswith('_N'):
var_n,sz_n = vars[i], get_type_size(op)
elif op.lookupfn_name.endswith('_SE'):
var_se,sz_se = vars[i],get_type_size(op)
else:
die("SHOULD NOT REACH HERE")
if ii.rexw_prefix == '1':
fo.add_code_eol('set_rexw(r)')
if var_n:
fo.add_code_eol('enc_vvvv_reg_{}(r,{})'.format(sz_n, var_n))
else:
fo.add_code_eol('set_vvvv(r,0xF)',"must be 1111")
if var_r:
fo.add_code_eol('enc_modrm_reg_{}(r,{})'.format(sz_r, var_r))
elif ii.reg_required != 'unspecified':
if ii.reg_required: # ZERO INIT OPTIMIZATION
fo.add_code_eol('set_reg(r,{})'.format(ii.reg_required))
if var_b:
fo.add_code_eol('enc_modrm_rm_{}(r,{})'.format(sz_b, var_b))
elif ii.rm_required != 'unspecified':
if ii.rm_required: # ZERO INIT OPTIMIZATION
fo.add_code_eol('set_rm(r,{})'.format(ii.rm_required))
if var_se:
fo.add_code_eol('enc_imm8_reg_{}(r,{})'.format(sz_se, var_se))
emit_vex_prefix(env, ii,fo,register_only=True)
emit_opcode(ii,fo)
emit_modrm(fo)
if ii.has_imm8:
cond_emit_imm8(ii,fo)
elif var_se:
fo.add_code_eol('emit_se_imm8_reg(r)')
add_enc_func(ii,fo)
def find_mempos(ii):
for i,op in enumerate(_gen_opnds(ii)):
if op_mem(op):
return i
die("NOT REACHED")
def create_vex_regs_mem(env,ii):
"""0, 1, 2 or 3 xmm/ymm/gpr32/gpr64/kreg and 1 memory operand. allows imm8 optionally"""
global enc_fn_prefix, arg_request
global arg_reg0, var_reg0
global arg_reg1, var_reg1
global arg_reg2, var_reg2
global arg_reg3, var_reg3
global arg_imm8
nopnds = count_operands(ii) # skips imm8
op = first_opnd(ii)
width = op.oc2
opsig = make_opnd_signature(env,ii)
vlname = 'ymm' if ii.vl == '256' else 'xmm'
immw=0
if ii.has_imm8:
immw=8
dispsz_list = get_dispsz_list(env)
opnd_types_org = get_opnd_types(env,ii)
arg_regs = [ arg_reg0, arg_reg1, arg_reg2, arg_reg3 ]
var_regs = [ var_reg0, var_reg1, var_reg2, var_reg3 ]
ispace = itertools.product(get_index_vals(ii), dispsz_list)
for use_index, dispsz in ispace:
memaddrsig = get_memsig(env.asz, use_index, dispsz)
fname = "{}_{}_{}_{}".format(enc_fn_prefix,
ii.iclass.lower(),
opsig,
memaddrsig)
fo = make_function_object(env,ii,fname, asz=env.asz)
fo.add_comment("created by create_vex_regs_mem")
fo.add_arg(arg_request,'req')
opnd_types = copy.copy(opnd_types_org)
regn = 0
for i,optype in enumerate(opnd_types_org):
if optype in ['xmm','ymm','zmm', 'gpr32', 'gpr64', 'kreg']:
fo.add_arg(arg_regs[regn], opnd_types.pop(0))
regn += 1
elif optype in ['mem']:
add_memop_args(env, ii, fo, use_index, dispsz, immw=0)
opnd_types.pop(0)
            elif optype == 'int8':
fo.add_arg(arg_imm8,'int8')
opnd_types.pop(0) # imm8 is last so we technically can skip this pop
else:
die("UNHANDLED ARG {} in {}".format(optype, ii.iclass))
set_vex_pp(ii,fo)
fo.add_code_eol('set_map(r,{})'.format(ii.map))
if ii.vl == '256': # Not setting VL=128 since that is ZERO OPTIMIZATION
fo.add_code_eol('set_vexl(r,1)')
# FIXME REFACTOR function-ize this
var_r, var_b, var_n, var_se = None,None,None,None
sz_r, sz_b, sz_n, sz_se = None,None,None,None
for i,op in enumerate(_gen_opnds_nomem(ii)): # use no mem version to skip memop if a store-type op
if op.lookupfn_name:
if op.lookupfn_name.endswith('_R'):
var_r,sz_r = var_regs[i],get_type_size(op)
elif op.lookupfn_name.endswith('_B'):
var_b,sz_b = var_regs[i],get_type_size(op)
elif op.lookupfn_name.endswith('_SE'):
var_se,sz_se = var_regs[i],get_type_size(op)
elif op.lookupfn_name.endswith('_N'):
var_n,sz_n = var_regs[i],get_type_size(op)
else:
die("SHOULD NOT REACH HERE")
if ii.rexw_prefix == '1':
fo.add_code_eol('set_rexw(r)')
        if var_n is None:
fo.add_code_eol('set_vvvv(r,0xF)',"must be 1111")
else:
fo.add_code_eol('enc_vvvv_reg_{}(r,{})'.format(sz_n, var_n))
if var_r:
fo.add_code_eol('enc_modrm_reg_{}(r,{})'.format(sz_r, var_r))
elif ii.reg_required != 'unspecified':
if ii.reg_required: # ZERO INIT OPTIMIZATION
fo.add_code_eol('set_reg(r,{})'.format(ii.reg_required))
if var_b:
fo.add_code_eol('enc_modrm_rm_{}(r,{})'.format(sz_b, var_b))
elif ii.rm_required != 'unspecified':
if ii.rm_required: # ZERO INIT OPTIMIZATION
fo.add_code_eol('set_rm(r,{})'.format(ii.rm_required))
if var_se:
if immw:
immw=0
fo.add_code_eol('enc_imm8_reg_{}_and_imm(r,{},{})'.format(sz_se, var_se, var_imm8))
else:
fo.add_code_eol('enc_imm8_reg_{}(r,{})'.format(sz_se, var_se))
encode_mem_operand(env, ii, fo, use_index, dispsz)
emit_vex_prefix(env, ii,fo,register_only=False)
finish_memop(env, ii, fo, dispsz, immw, space='vex')
if var_se:
fo.add_code_eol('emit_se_imm8_reg(r)')
add_enc_func(ii,fo)
def create_vex_one_mask_reg_and_one_gpr(env,ii):
# FIXME: REFACTOR NOTE: could combine with create_vex_all_mask_reg
# if we handle 3 reg args and optional imm8.
global arg_reg0, arg_reg1, var_reg0, var_reg1
opsig = make_opnd_signature(env,ii)
opnd_types = get_opnd_types(env,ii)
arg_regs = [ arg_reg0, arg_reg1 ]
var_regs = [ var_reg0, var_reg1 ]
fname = "{}_{}_{}".format(enc_fn_prefix,
ii.iclass.lower(),
opsig)
fo = make_function_object(env,ii,fname)
fo.add_comment("created by create_vex_one_mask_reg_and_one_gpr")
fo.add_arg(arg_request,'req')
for i,op in enumerate(opnd_types):
fo.add_arg(arg_regs[i], opnd_types[i])
set_vex_pp(ii,fo)
fo.add_code_eol('set_map(r,{})'.format(ii.map))
if ii.vl == '256': # ZERO INIT OPTIMIZATION
fo.add_code_eol('set_vexl(r,1)')
var_r, var_b, var_n = None,None,None
for i,op in enumerate(_gen_opnds(ii)):
if op.lookupfn_name:
if op.lookupfn_name.endswith('_R'):
var_r,sz_r = var_regs[i],get_type_size(op)
elif op.lookupfn_name.endswith('_B'):
var_b,sz_b = var_regs[i],get_type_size(op)
elif op.lookupfn_name.endswith('_N'):
var_n,sz_n = var_regs[i],get_type_size(op)
else:
die("SHOULD NOT REACH HERE")
fo.add_code_eol('set_mod(r,3)')
if ii.rexw_prefix == '1':
fo.add_code_eol('set_rexw(r)')
if var_n:
fo.add_code_eol('enc_vvvv_reg_{}(r,{})'.format(sz_n, var_n))
else:
fo.add_code_eol('set_vvvv(r,0xF)',"must be 1111")
if var_r:
fo.add_code_eol('enc_modrm_reg_{}(r,{})'.format(sz_r, var_r))
elif ii.reg_required != 'unspecified':
if ii.reg_required: # ZERO INIT OPTIMIZATION
fo.add_code_eol('set_reg(r,{})'.format(ii.reg_required))
if var_b:
fo.add_code_eol('enc_modrm_rm_{}(r,{})'.format(sz_b, var_b))
elif ii.rm_required != 'unspecified':
if ii.rm_required: # ZERO INIT OPTIMIZATION
fo.add_code_eol('set_rm(r,{})'.format(ii.rm_required))
# FIXME: if kreg in MODRM.RM, we know we don't need to check rex.b
# before picking c4/c5. MINOR PERF OPTIMIZATION
emit_vex_prefix(env, ii,fo,register_only=True)
emit_opcode(ii,fo)
emit_modrm(fo)
add_enc_func(ii,fo)
def create_vex_all_mask_reg(env,ii):
'''Allows optional imm8'''
global enc_fn_prefix, arg_request
global arg_kreg0, var_kreg0
global arg_kreg1, var_kreg1
global arg_kreg2, var_kreg2
opsig = make_opnd_signature(env,ii)
fname = "{}_{}_{}".format(enc_fn_prefix,
ii.iclass.lower(),
opsig)
fo = make_function_object(env,ii,fname)
fo.add_comment("created by create_vex_all_mask_reg")
fo.add_arg(arg_request,'req')
fo.add_arg(arg_kreg0,'kreg')
if 'k_k' in opsig:
fo.add_arg(arg_kreg1,'kreg')
if 'k_k_k' in opsig:
fo.add_arg(arg_kreg2,'kreg')
if ii.has_imm8:
add_arg_immv(fo,8)
set_vex_pp(ii,fo)
fo.add_code_eol('set_map(r,{})'.format(ii.map))
if ii.vl == '256': # Not setting VL=128 since that is ZERO OPTIMIZATION
fo.add_code_eol('set_vexl(r,1)')
vars = [var_kreg0, var_kreg1, var_kreg2]
var_r,var_b,var_n=None,None,None
for i,op in enumerate(_gen_opnds(ii)):
if op.lookupfn_name:
if op.lookupfn_name.endswith('_R'):
var_r = vars[i]
elif op.lookupfn_name.endswith('_B'):
var_b = vars[i]
elif op.lookupfn_name.endswith('_N'):
var_n = vars[i]
else:
die("SHOULD NOT REACH HERE")
if ii.rexw_prefix == '1':
fo.add_code_eol('set_rexw(r)')
if var_n:
fo.add_code_eol('enc_vex_vvvv_kreg(r,{})'.format(var_n))
else:
fo.add_code_eol('set_vvvv(r,0xF)',"must be 1111")
if var_r:
fo.add_code_eol('enc_modrm_reg_kreg(r,{})'.format(var_r))
elif ii.reg_required != 'unspecified':
if ii.reg_required: # ZERO INIT OPTIMIZATION
fo.add_code_eol('set_reg(r,{})'.format(ii.reg_required))
if var_b:
fo.add_code_eol('enc_modrm_rm_kreg(r,{})'.format(var_b))
elif ii.rm_required != 'unspecified':
if ii.rm_required: # ZERO INIT OPTIMIZATION
fo.add_code_eol('set_rm(r,{})'.format(ii.rm_required))
fo.add_code_eol('set_mod(r,3)')
emit_vex_prefix(env, ii,fo,register_only=True)
emit_opcode(ii,fo)
emit_modrm(fo)
if ii.has_imm8:
cond_emit_imm8(ii,fo)
add_enc_func(ii,fo)
def vex_amx_mem(ii):
if 'AMX' in ii.isa_set:
for op in _gen_opnds(ii):
if op_mem(op):
return True
return False
def vex_amx_reg(ii):
if 'AMX' in ii.isa_set:
for op in _gen_opnds(ii):
if op_mem(op):
return False
return True
return False
def create_vex_amx_reg(env,ii): # FIXME: XXX
global enc_fn_prefix, arg_request
global arg_reg0, var_reg0
global arg_reg1, var_reg1
global arg_reg2, var_reg2
global arg_reg3, var_reg3
nopnds = count_operands(ii) # not imm8
opnd_sig = make_opnd_signature(env,ii)
fname = "{}_{}_{}".format(enc_fn_prefix,
ii.iclass.lower(),
opnd_sig)
fo = make_function_object(env,ii,fname)
fo.add_comment("created by create_vex_amx_reg opnd_sig={} nopnds={}".format(opnd_sig,nopnds))
fo.add_arg(arg_request,'req')
opnd_types = get_opnd_types(env,ii)
if nopnds >= 1:
fo.add_arg(arg_reg0,opnd_types[0])
if nopnds >= 2:
fo.add_arg(arg_reg1, opnd_types[1])
if nopnds >= 3:
fo.add_arg(arg_reg2, opnd_types[2])
if nopnds >= 4:
fo.add_arg(arg_reg3, opnd_types[3])
cond_add_imm_args(ii,fo)
set_vex_pp(ii,fo)
fo.add_code_eol('set_map(r,{})'.format(ii.map))
if ii.vl == '256': # ZERO INIT OPTIMIZATION
fo.add_code_eol('set_vexl(r,1)')
fo.add_code_eol('set_mod(r,3)')
vars = [var_reg0, var_reg1, var_reg2, var_reg3]
var_r, var_b, var_n, var_se = None, None, None, None
for i,op in enumerate(_gen_opnds(ii)):
if op.lookupfn_name:
if op.lookupfn_name.endswith('_R'):
var_r,sz_r = vars[i], get_type_size(op)
elif op.lookupfn_name.endswith('_B'):
var_b,sz_b = vars[i], get_type_size(op)
elif op.lookupfn_name.endswith('_N'):
var_n,sz_n = vars[i], get_type_size(op)
else:
die("SHOULD NOT REACH HERE")
if ii.rexw_prefix == '1':
fo.add_code_eol('set_rexw(r)')
if var_n:
fo.add_code_eol('enc_vvvv_reg_{}(r,{})'.format(sz_n, var_n))
else:
fo.add_code_eol('set_vvvv(r,0xF)',"must be 1111")
if var_r:
fo.add_code_eol('enc_modrm_reg_{}(r,{})'.format(sz_r, var_r))
elif ii.reg_required != 'unspecified':
if ii.reg_required: # ZERO INIT OPTIMIZATION
fo.add_code_eol('set_reg(r,{})'.format(ii.reg_required))
if var_b:
fo.add_code_eol('enc_modrm_rm_{}(r,{})'.format(sz_b, var_b))
elif ii.rm_required != 'unspecified':
if ii.rm_required: # ZERO INIT OPTIMIZATION
fo.add_code_eol('set_rm(r,{})'.format(ii.rm_required))
emit_vex_prefix(env, ii,fo,register_only=True)
emit_opcode(ii,fo)
emit_modrm(fo)
if ii.has_imm8:
cond_emit_imm8(ii,fo)
elif var_se:
fo.add_code_eol('emit_se_imm8_reg(r)')
add_enc_func(ii,fo)
def create_vex_amx_mem(env,ii): # FIXME: XXX
global enc_fn_prefix, arg_request
global arg_reg0, var_reg0
global arg_reg1, var_reg1
global arg_reg2, var_reg2
global arg_reg3, var_reg3
global arg_imm8
nopnds = count_operands(ii) # skips imm8
op = first_opnd(ii)
width = op.oc2
opsig = make_opnd_signature(env,ii)
immw=0
if ii.has_imm8:
immw=8
dispsz_list = get_dispsz_list(env)
opnd_types_org = get_opnd_types(env,ii)
arg_regs = [ arg_reg0, arg_reg1, arg_reg2, arg_reg3 ]
var_regs = [ var_reg0, var_reg1, var_reg2, var_reg3 ]
ispace = itertools.product(get_index_vals(ii), dispsz_list)
for use_index, dispsz in ispace:
memaddrsig = get_memsig(env.asz, use_index, dispsz)
fname = "{}_{}_{}_{}".format(enc_fn_prefix,
ii.iclass.lower(),
opsig,
memaddrsig)
fo = make_function_object(env,ii,fname, asz=env.asz)
fo.add_comment("created by create_vex_amx_mem")
fo.add_arg(arg_request,'req')
opnd_types = copy.copy(opnd_types_org)
regn = 0
for i,optype in enumerate(opnd_types_org):
if optype in ['tmm','xmm','ymm','zmm', 'gpr32', 'gpr64', 'kreg']:
fo.add_arg(arg_regs[regn], opnd_types.pop(0))
regn += 1
elif optype in ['mem']:
add_memop_args(env, ii, fo, use_index, dispsz, immw=0)
opnd_types.pop(0)
            elif optype == 'int8':
fo.add_arg(arg_imm8,'int8')
opnd_types.pop(0) # imm8 is last so we technically can skip this pop
else:
die("UNHANDLED ARG {} in {}".format(optype, ii.iclass))
set_vex_pp(ii,fo)
fo.add_code_eol('set_map(r,{})'.format(ii.map))
if ii.vl == '256': # Not setting VL=128 since that is ZERO OPTIMIZATION
fo.add_code_eol('set_vexl(r,1)')
# FIXME REFACTOR function-ize this
var_r, var_b, var_n, var_se = None,None,None,None
sz_r, sz_b, sz_n, sz_se = None,None,None,None
for i,op in enumerate(_gen_opnds_nomem(ii)): # use no mem version to skip memop if a store-type op
if op.lookupfn_name:
if op.lookupfn_name.endswith('_R'):
var_r,sz_r = var_regs[i],get_type_size(op)
elif op.lookupfn_name.endswith('_B'):
var_b,sz_b = var_regs[i],get_type_size(op)
elif op.lookupfn_name.endswith('_N'):
var_n,sz_n = var_regs[i],get_type_size(op)
else:
die("SHOULD NOT REACH HERE")
if ii.rexw_prefix == '1':
fo.add_code_eol('set_rexw(r)')
        if var_n is None:
fo.add_code_eol('set_vvvv(r,0xF)',"must be 1111")
else:
fo.add_code_eol('enc_vvvv_reg_{}(r,{})'.format(sz_n, var_n))
if var_r:
fo.add_code_eol('enc_modrm_reg_{}(r,{})'.format(sz_r, var_r))
elif ii.reg_required != 'unspecified':
if ii.reg_required: # ZERO INIT OPTIMIZATION
fo.add_code_eol('set_reg(r,{})'.format(ii.reg_required))
if var_b:
fo.add_code_eol('enc_modrm_rm_{}(r,{})'.format(sz_b, var_b))
elif ii.rm_required != 'unspecified':
if ii.rm_required: # ZERO INIT OPTIMIZATION
fo.add_code_eol('set_rm(r,{})'.format(ii.rm_required))
encode_mem_operand(env, ii, fo, use_index, dispsz)
emit_vex_prefix(env, ii,fo,register_only=False)
finish_memop(env, ii, fo, dispsz, immw, space='vex')
add_enc_func(ii,fo)
def _enc_vex(env,ii):
if several_xymm_gpr_imm8(ii):
create_vex_simd_reg(env,ii)
elif several_xymm_gpr_mem_imm8(ii): # very generic
create_vex_regs_mem(env,ii)
elif vex_all_mask_reg(ii): # allows imm8
create_vex_all_mask_reg(env,ii)
elif vex_one_mask_reg_and_one_gpr(ii):
create_vex_one_mask_reg_and_one_gpr(env,ii)
elif vex_vzero(ii):
create_vex_vzero(env,ii)
elif vex_amx_reg(ii):
create_vex_amx_reg(env,ii)
elif vex_amx_mem(ii):
create_vex_amx_mem(env,ii)
def vex_vzero(ii):
return ii.iclass.startswith('VZERO')
def create_vex_vzero(env,ii):
fname = "{}_{}".format(enc_fn_prefix,
ii.iclass.lower())
fo = make_function_object(env,ii,fname)
fo.add_comment("created by create_vex_vzero")
fo.add_arg(arg_request,'req')
set_vex_pp(ii,fo)
fo.add_code_eol('set_map(r,{})'.format(ii.map))
if ii.vl == '256': # ZERO INIT OPTIMIZATION
fo.add_code_eol('set_vexl(r,1)')
if ii.rexw_prefix == '1': # could skip this because we know...
fo.add_code_eol('set_rexw(r)')
fo.add_code_eol('set_vvvv(r,0xF)',"must be 1111")
emit_vex_prefix(env, ii,fo,register_only=True) # could force C5 since we know...
emit_opcode(ii,fo) # no modrm on vzero* ... only exception in VEX space.
add_enc_func(ii,fo)
def vex_all_mask_reg(ii): # allow imm8
i,k = 0,0
for op in _gen_opnds(ii):
if op_mask_reg(op):
k += 1
elif op_imm8(op):
i += 1
else:
return False
return k>=2 and i<=1
def vex_one_mask_reg_and_one_gpr(ii):
g,k = 0,0
for op in _gen_opnds(ii):
if op_mask_reg(op):
k += 1
elif op_gpr32(op) or op_gpr64(op):
g += 1
else:
return False
return k == 1 and g == 1
def evex_xyzmm_and_gpr(ii):
i,d,q,x,y,z=0,0,0,0,0,0
for op in _gen_opnds(ii):
if op_xmm(op):
x += 1
elif op_ymm(op):
y += 1
elif op_zmm(op):
z +=1
elif op_imm8(op):
i += 1
elif op_gpr32(op):
d += 1
elif op_gpr64(op):
q += 1
else:
return False
simd = x + y + z
gprs = d + q
return gprs == 1 and simd > 0 and simd < 3 and i <= 1
def evex_2or3xyzmm(ii): # allows for mixing widths of registers
x,y,z=0,0,0
for op in _gen_opnds(ii):
if op_xmm(op):
x = x + 1
elif op_ymm(op):
y = y + 1
elif op_zmm(op):
z = z + 1
elif op_imm8(op):
continue
else:
return False
sum = x + y + z
return sum == 2 or sum == 3
def evex_regs_mem(ii): #allow imm8 and kreg, gpr
d,q, k,i,x, y,z,m = 0,0, 0,0,0, 0,0,0
for op in _gen_opnds(ii):
if op_mask_reg(op):
k += 1
elif op_xmm(op):
x += 1
elif op_ymm(op):
y += 1
elif op_zmm(op):
z += 1
elif op_imm8(op):
i += 1
elif op_mem(op):
m += 1
elif op_gpr32(op) or op_vgpr32(op):
d += 1
elif op_gpr64(op) or op_vgpr64(op):
q += 1
else:
return False
simd = x+y+z
gpr = d+q
return m==1 and (gpr+simd)<3 and i<=1 and k <= 1
def create_evex_xyzmm_and_gpr(env,ii):
    '''1, 2, or 3 xyzmm regs and 1 gpr32/64 and optional imm8'''
global enc_fn_prefix, arg_request
global arg_reg0, var_reg0
global arg_reg1, var_reg1
global arg_reg2, var_reg2
global arg_kmask, var_kmask
global arg_zeroing, var_zeroing
global arg_rcsae, var_rcsae
global arg_imm8, var_imm8
global vl2names
sae,rounding,imm8,masking_allowed=False,False,False,False
if ii.sae_form:
sae = True
elif ii.rounding_form:
rounding = True
if ii.has_imm8:
imm8 = True
if ii.write_masking:
masking_allowed = True
vl = vl2names[ii.vl]
mask_variant_name = { False:'', True: '_msk' }
opnd_sig = make_opnd_signature(env,ii)
mask_versions = [False]
if masking_allowed:
mask_versions.append(True)
reg_type_names = []
for op in _gen_opnds(ii):
if op_xmm(op):
reg_type_names.append('xmm')
elif op_ymm(op):
reg_type_names.append('ymm')
elif op_zmm(op):
reg_type_names.append('zmm')
elif op_gpr32(op):
reg_type_names.append('gpr32')
elif op_gpr64(op):
reg_type_names.append('gpr64')
nregs = len(reg_type_names)
opnd_types_org = get_opnd_types(env,ii)
for masking in mask_versions:
fname = "{}_{}_{}{}".format(enc_fn_prefix,
ii.iclass.lower(),
opnd_sig,
mask_variant_name[masking])
fo = make_function_object(env,ii,fname)
fo.add_comment("created by create_evex_xyzmm_and_gpr")
fo.add_arg(arg_request,'req')
opnd_types = copy.copy(opnd_types_org)
fo.add_arg(arg_reg0,opnd_types.pop(0))
if masking:
fo.add_arg(arg_kmask,'kreg')
if not ii.write_masking_merging_only:
fo.add_arg(arg_zeroing,'zeroing')
fo.add_arg(arg_reg1,opnd_types.pop(0))
if nregs == 3:
fo.add_arg(arg_reg2, opnd_types.pop(0))
if imm8:
fo.add_arg(arg_imm8,'int8')
if rounding:
fo.add_arg(arg_rcsae,'rcsae')
set_vex_pp(ii,fo)
fo.add_code_eol('set_mod(r,3)')
fo.add_code_eol('set_map(r,{})'.format(ii.map))
set_evexll_vl(ii,fo,vl)
if ii.rexw_prefix == '1':
fo.add_code_eol('set_rexw(r)')
if rounding:
fo.add_code_eol('set_evexb(r,1)', 'set rc+sae')
fo.add_code_eol('set_evexll(r,{})'.format(var_rcsae))
elif sae:
fo.add_code_eol('set_evexb(r,1)', 'set sae')
# ZERO INIT OPTIMIZATION for EVEX.LL/RC = 0
if masking:
if not ii.write_masking_merging_only:
fo.add_code_eol('set_evexz(r,{})'.format(var_zeroing))
fo.add_code_eol('enc_evex_kmask(r,{})'.format(var_kmask))
# ENCODE REGISTERS
vars = [var_reg0, var_reg1, var_reg2]
var_r, var_b, var_n = None, None, None
for i,op in enumerate(_gen_opnds(ii)):
if op.lookupfn_name:
if op.lookupfn_name.endswith('_R3') or op.lookupfn_name.endswith('_R'):
var_r, ri = vars[i], i
elif op.lookupfn_name.endswith('_B3') or op.lookupfn_name.endswith('_B'):
var_b, bi = vars[i], i
elif op.lookupfn_name.endswith('_N3') or op.lookupfn_name.endswith('_N'):
var_n, ni = vars[i], i
else:
die("SHOULD NOT REACH HERE")
if var_n:
fo.add_code_eol('enc_evex_vvvv_reg_{}(r,{})'.format(reg_type_names[ni], var_n))
else:
fo.add_code_eol('set_vvvv(r,0xF)',"must be 1111")
fo.add_code_eol('set_evexvv(r,1)',"must be 1")
if var_r:
fo.add_code_eol('enc_evex_modrm_reg_{}(r,{})'.format(reg_type_names[ri], var_r))
elif ii.reg_required != 'unspecified':
if ii.reg_required: # ZERO INIT OPTIMIZATION
fo.add_code_eol('set_reg(r,{})'.format(ii.reg_required))
if var_b:
fo.add_code_eol('enc_evex_modrm_rm_{}(r,{})'.format(reg_type_names[bi], var_b))
elif ii.rm_required != 'unspecified':
if ii.rm_required: # ZERO INIT OPTIMIZATION
fo.add_code_eol('set_rm(r,{})'.format(ii.rm_required))
fo.add_code_eol('emit_evex(r)')
emit_opcode(ii,fo)
emit_modrm(fo)
if imm8:
fo.add_code_eol('emit(r,{})'.format(var_imm8))
add_enc_func(ii,fo)
def create_evex_regs_mem(env, ii):
"""Handles 0,1,2 simd/gpr regs and one memop (including vsib) Allows imm8 also."""
global enc_fn_prefix, arg_request
global arg_reg0, var_reg0
global arg_reg1, var_reg1
global arg_kmask, var_kmask
global arg_zeroing, var_zeroing
global arg_imm8, var_imm8
var_regs = [var_reg0, var_reg1, var_reg2]
arg_regs = [ arg_reg0, arg_reg1, arg_reg2 ]
imm8=False
if ii.has_imm8:
imm8 = True
vl = vl2names[ii.vl]
mask_variant_name = { False:'', True: '_msk' }
mask_versions = [False]
if ii.write_masking_notk0:
mask_versions = [True]
elif ii.write_masking:
mask_versions = [False, True]
else:
mask_versions = [False]
dispsz_list = get_dispsz_list(env)
if ii.broadcast_allowed:
bcast_vals = ['nobroadcast','broadcast']
else:
bcast_vals = ['nobroadcast']
bcast_variant_name = {'nobroadcast':'', 'broadcast':'_bcast' }
opnd_types_org = get_opnd_types(env,ii)
# flatten a 4-deep nested loop using itertools.product()
ispace = itertools.product(bcast_vals, get_index_vals(ii), dispsz_list, mask_versions)
for broadcast, use_index, dispsz, masking in ispace:
broadcast_bool = True if broadcast == 'broadcast' else False
opnd_sig = make_opnd_signature(env,ii, broadcasting=broadcast_bool)
memaddrsig = get_memsig(env.asz, use_index, dispsz)
opnd_types = copy.copy(opnd_types_org)
fname = "{}_{}_{}{}_{}{}".format(enc_fn_prefix,
ii.iclass.lower(),
opnd_sig,
mask_variant_name[masking],
memaddrsig,
bcast_variant_name[broadcast])
fo = make_function_object(env,ii,fname, asz=env.asz)
fo.add_comment("created by create_evex_regs_mem")
fo.add_arg(arg_request,'req')
# ==== ARGS =====
def _add_mask_arg(ii,fo):
global arg_kmask, arg_zeroing
if ii.write_masking_notk0:
kreg_comment = 'kreg!0'
else:
kreg_comment = 'kreg'
fo.add_arg(arg_kmask,kreg_comment)
if ii.write_masking_merging_only == False:
fo.add_arg(arg_zeroing,'zeroing')
gather_prefetch = is_gather_prefetch(ii)
regn = 0
for i,optype in enumerate(opnd_types_org):
if i == 0 and masking and gather_prefetch:
_add_mask_arg(ii,fo)
if optype in ['xmm','ymm','zmm','kreg','gpr32','gpr64']:
fo.add_arg(arg_regs[regn], opnd_types.pop(0))
regn += 1
elif optype in ['mem']:
add_memop_args(env, ii, fo, use_index, dispsz)
opnd_types.pop(0)
elif optype in 'int8':
fo.add_arg(arg_imm8,'int8')
else:
die("UNHANDLED ARG {} in {}".format(optype, ii.iclass))
# add masking after 0th argument except for gather prefetch
if i == 0 and masking and not gather_prefetch:
_add_mask_arg(ii,fo)
# ===== ENCODING ======
if dispsz in [16,32]: # the largest displacements 16 for 16b addressing, 32 for 32/64b addressing
add_evex_displacement_var(fo)
set_vex_pp(ii,fo)
fo.add_code_eol('set_map(r,{})'.format(ii.map))
set_evexll_vl(ii,fo,vl)
if ii.rexw_prefix == '1':
fo.add_code_eol('set_rexw(r)')
if masking:
if not ii.write_masking_merging_only:
fo.add_code_eol('set_evexz(r,{})'.format(var_zeroing))
fo.add_code_eol('enc_evex_kmask(r,{})'.format(var_kmask))
if broadcast == 'broadcast': # ZERO INIT OPTIMIZATION
fo.add_code_eol('set_evexb(r,1)')
# ENCODE REGISTERS
var_r, var_b, var_n = None, None, None
sz_r, sz_b, sz_n = None, None, None
for i,op in enumerate(_gen_opnds_nomem(ii)):
if op.lookupfn_name:
if op.lookupfn_name.endswith('_R3') or op.lookupfn_name.endswith('_R'):
var_r,sz_r = var_regs[i], get_type_size(op)
elif op.lookupfn_name.endswith('_B3') or op.lookupfn_name.endswith('_B'):
var_b,sz_b = var_regs[i], get_type_size(op)
elif op.lookupfn_name.endswith('_N3') or op.lookupfn_name.endswith('_N'):
var_n,sz_n = var_regs[i], get_type_size(op)
else:
die("SHOULD NOT REACH HERE")
if var_n:
fo.add_code_eol('enc_evex_vvvv_reg_{}(r,{})'.format(sz_n, var_n))
else:
fo.add_code_eol('set_vvvv(r,0xF)',"must be 1111")
fo.add_code_eol('set_evexvv(r,1)',"must be 1")
if var_r:
fo.add_code_eol('enc_evex_modrm_reg_{}(r,{})'.format(sz_r, var_r))
else:
# some instructions use _N3 as dest (like rotates)
#fo.add_code_eol('set_rexr(r,1)')
#fo.add_code_eol('set_evexrr(r,1)')
if ii.reg_required != 'unspecified':
if ii.reg_required: # ZERO INIT OPTIMIZATION
fo.add_code_eol('set_reg(r,{})'.format(ii.reg_required))
if var_b:
die("SHOULD NOT REACH HERE")
mod = get_modval(dispsz)
if mod: # ZERO-INIT OPTIMIZATION
if mod == 2:
broadcasting = True if broadcast == 'broadcast' else False
chose_evex_scaled_disp(fo, ii, dispsz, broadcasting)
else:
fo.add_code_eol('set_mod(r,{})'.format(mod))
encode_mem_operand(env, ii, fo, use_index, dispsz)
immw=8 if imm8 else 0
finish_memop(env, ii, fo, dispsz, immw, rexw_forced=False, space='evex')
add_enc_func(ii,fo)
def evex_mask_dest_reg_only(ii): # optional imm8
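    '''Return True for register-only forms with exactly one mask register, at least one xmm/ymm/zmm register and an optional imm8.'''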
i,m,xyz=0,0,0
for op in _gen_opnds(ii):
if op_mask_reg(op):
m += 1
elif op_xmm(op) or op_ymm(op) or op_zmm(op):
xyz += 1
elif op_imm8(op):
i += 1
else:
return False
return m==1 and xyz > 0 and i <= 1
def evex_mask_dest_mem(ii): # optional imm8
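    '''Return True for forms with exactly one mask register, at least one xmm/ymm/zmm register, exactly one memory operand and an optional imm8.'''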
i,msk,xyz,mem=0,0,0,0
for op in _gen_opnds(ii):
if op_mask_reg(op):
msk += 1
elif op_xmm(op) or op_ymm(op) or op_zmm(op):
xyz += 1
elif op_mem(op):
mem += 1
elif op_imm8(op):
i += 1
else:
return False
return msk==1 and xyz > 0 and i <= 1 and mem==1
def create_evex_evex_mask_dest_reg_only(env, ii): # allows optional imm8
global enc_fn_prefix, arg_request
global arg_reg0, var_reg0
global arg_reg1, var_reg1
global arg_kmask, var_kmask # write mask
global arg_kreg0, var_kreg0 # normal operand
global arg_zeroing, var_zeroing
global arg_imm8, var_imm8, arg_rcsae, var_rcsae
imm8 = True if ii.has_imm8 else False
vl = vl2names[ii.vl]
mask_variant_name = { False:'', True: '_msk' }
opnd_sig = make_opnd_signature(env,ii)
mask_versions = [False]
if ii.write_masking_notk0:
mask_versions = [True]
elif ii.write_masking:
mask_versions = [False, True]
else:
mask_versions = [False]
opnd_types_org = get_opnd_types(env,ii)
arg_regs = [ arg_reg0, arg_reg1 ]
for masking in mask_versions:
opnd_types = copy.copy(opnd_types_org)
fname = "{}_{}_{}{}".format(enc_fn_prefix,
ii.iclass.lower(),
opnd_sig,
mask_variant_name[masking])
fo = make_function_object(env,ii,fname, asz=env.asz)
fo.add_comment("created by create_evex_evex_mask_dest_reg_only")
fo.add_arg(arg_request,'req')
# ==== ARGS =====
regn = 0
for i,optype in enumerate(opnd_types_org):
if optype in [ 'kreg', 'kreg!0' ]:
fo.add_arg(arg_kreg0, optype)
opnd_types.pop(0)
elif optype in ['xmm','ymm','zmm']:
fo.add_arg(arg_regs[regn], opnd_types.pop(0))
regn += 1
elif optype in ['mem']:
die("NOT REACHED")
elif optype in 'int8':
fo.add_arg(arg_imm8,'int8')
else:
die("UNHANDLED ARG {} in {}".format(optype, ii.iclass))
# add masking after 0th argument.
if i == 0 and masking:
if ii.write_masking_notk0:
kreg_comment = 'kreg!0'
else:
kreg_comment = 'kreg'
fo.add_arg(arg_kmask,kreg_comment)
if ii.write_masking_merging_only == False:
fo.add_arg(arg_zeroing,'zeroing')
if ii.rounding_form:
fo.add_arg(arg_rcsae,'rcsae')
# ===== ENCODING ======
set_vex_pp(ii,fo)
fo.add_code_eol('set_map(r,{})'.format(ii.map))
set_evexll_vl(ii,fo,vl)
if ii.rexw_prefix == '1':
fo.add_code_eol('set_rexw(r)')
if masking:
if not ii.write_masking_merging_only:
fo.add_code_eol('set_evexz(r,{})'.format(var_zeroing))
fo.add_code_eol('enc_evex_kmask(r,{})'.format(var_kmask))
if ii.rounding_form:
fo.add_code_eol('set_evexb(r,1)', 'set rc+sae')
fo.add_code_eol('set_evexll(r,{})'.format(var_rcsae))
elif ii.sae_form:
fo.add_code_eol('set_evexb(r,1)', 'set sae')
# ZERO INIT OPTIMIZATION for EVEX.LL/RC = 0
# ENCODE REGISTERS
vars = [var_reg0, var_reg1, var_reg2]
kvars = [var_kreg0, var_kreg1, var_kreg2]
i, var_r, var_b, var_n = 0, None, None, None
j, kvar_r, kvar_b, kvar_n = 0, None, None, None
for op in _gen_opnds_nomem(ii):
if op.lookupfn_name:
if op.lookupfn_name.endswith('_R3'):
var_r = vars[i]
i += 1
elif op.lookupfn_name.endswith('_B3'):
var_b = vars[i]
i += 1
elif op.lookupfn_name.endswith('_N3'):
var_n = vars[i]
i += 1
elif op_luf(op,'MASK_R'):
kvar_r = kvars[j]
j += 1
elif op_luf(op,'MASK_B'):
kvar_b = kvars[j]
j += 1
elif op_luf(op,'MASK_N'):
kvar_n = kvars[j]
j += 1
else:
die("SHOULD NOT REACH HERE")
if var_n:
fo.add_code_eol('enc_evex_vvvv_reg_{}(r,{})'.format(vl, var_n))
elif kvar_n:
fo.add_code_eol('enc_evex_vvvv_kreg(r,{})'.format(kvar_n))
else:
fo.add_code_eol('set_vvvv(r,0xF)',"must be 1111")
fo.add_code_eol('set_evexvv(r,1)',"must be 1")
if var_r:
fo.add_code_eol('enc_evex_modrm_reg_{}(r,{})'.format(vl, var_r))
elif kvar_r:
fo.add_code_eol('enc_evex_modrm_reg_kreg(r,{})'.format(kvar_r))
else:
# some instructions use _N3 as dest (like rotates)
#fo.add_code_eol('set_rexr(r,1)')
#fo.add_code_eol('set_evexrr(r,1)')
if ii.reg_required != 'unspecified':
if ii.reg_required: # ZERO INIT OPTIMIZATION
fo.add_code_eol('set_reg(r,{})'.format(ii.reg_required))
if var_b:
fo.add_code_eol('enc_evex_modrm_rm_{}(r,{})'.format(vl, var_b))
elif kvar_b:
fo.add_code_eol('enc_evex_modrm_rm_kreg(r,{})'.format(kvar_b))
fo.add_code_eol('set_mod(r,3)')
fo.add_code_eol('emit_evex(r)')
emit_opcode(ii,fo)
emit_modrm(fo)
cond_emit_imm8(ii,fo)
add_enc_func(ii,fo)
def create_evex_evex_mask_dest_mem(env, ii): # allows optional imm8
global enc_fn_prefix, arg_request
global arg_reg0, var_reg0
global arg_reg1, var_reg1
global arg_kmask, var_kmask # write mask
global arg_kreg0, var_kreg0 # normal operand
global arg_zeroing, var_zeroing
global arg_imm8, var_imm8, arg_rcsae, var_rcsae
imm8 = True if ii.has_imm8 else False
vl = vl2names[ii.vl]
mask_variant_name = { False:'', True: '_msk' }
mask_versions = [False]
if ii.write_masking_notk0:
mask_versions = [True]
elif ii.write_masking:
mask_versions = [False, True]
else:
mask_versions = [False]
dispsz_list = get_dispsz_list(env)
if ii.broadcast_allowed:
bcast_vals = ['nobroadcast','broadcast']
else:
bcast_vals = ['nobroadcast']
bcast_variant_name = {'nobroadcast':'', 'broadcast':'_bcast' }
opnd_types_org = get_opnd_types(env,ii)
arg_regs = [ arg_reg0, arg_reg1 ]
# flatten a 4-deep nested loop using itertools.product()
ispace = itertools.product(bcast_vals, get_index_vals(ii), dispsz_list, mask_versions)
for broadcast, use_index, dispsz, masking in ispace:
broadcast_bool = True if broadcast == 'broadcast' else False
opnd_sig = make_opnd_signature(env,ii,broadcasting=broadcast_bool)
memaddrsig = get_memsig(env.asz, use_index, dispsz)
opnd_types = copy.copy(opnd_types_org)
fname = "{}_{}_{}{}_{}{}".format(enc_fn_prefix,
ii.iclass.lower(),
opnd_sig,
mask_variant_name[masking],
memaddrsig,
bcast_variant_name[broadcast])
fo = make_function_object(env,ii,fname, asz=env.asz)
fo.add_comment("created by create_evex_evex_mask_dest_mem")
fo.add_arg(arg_request,'req')
# ==== ARGS =====
def _add_mask_arg(ii,fo):
global arg_kmask, arg_zeroing
if ii.write_masking_notk0:
kreg_comment = 'kreg!0'
else:
kreg_comment = 'kreg'
fo.add_arg(arg_kmask,kreg_comment)
if ii.write_masking_merging_only == False:
fo.add_arg(arg_zeroing,'zeroing')
regn = 0
for i,optype in enumerate(opnd_types_org):
if optype in [ 'kreg' ]:
fo.add_arg(arg_kreg0, optype)
opnd_types.pop(0)
elif optype in ['xmm','ymm','zmm']:
fo.add_arg(arg_regs[regn], opnd_types.pop(0))
regn += 1
elif optype in ['mem']:
add_memop_args(env, ii, fo, use_index, dispsz)
opnd_types.pop(0)
elif optype in 'int8':
fo.add_arg(arg_imm8,'int8')
else:
die("UNHANDLED ARG {} in {}".format(optype, ii.iclass))
# add masking after 0th argument
if i == 0 and masking:
_add_mask_arg(ii,fo)
if ii.rounding_form:
fo.add_arg(arg_rcsae,'rcsae')
# ===== ENCODING ======
if dispsz in [16,32]: # the largest displacements 16 for 16b addressing, 32 for 32/64b addressing
add_evex_displacement_var(fo)
set_vex_pp(ii,fo)
fo.add_code_eol('set_map(r,{})'.format(ii.map))
set_evexll_vl(ii,fo,vl)
if ii.rexw_prefix == '1':
fo.add_code_eol('set_rexw(r)')
if masking:
if not ii.write_masking_merging_only:
fo.add_code_eol('set_evexz(r,{})'.format(var_zeroing))
fo.add_code_eol('enc_evex_kmask(r,{})'.format(var_kmask))
if broadcast == 'broadcast': # ZERO INIT OPTIMIZATION
fo.add_code_eol('set_evexb(r,1)')
if ii.rounding_form:
fo.add_code_eol('set_evexb(r,1)', 'set rc+sae')
fo.add_code_eol('set_evexll(r,{})'.format(var_rcsae))
elif ii.sae_form:
fo.add_code_eol('set_evexb(r,1)', 'set sae')
# ZERO INIT OPTIMIZATION for EVEX.LL/RC = 0
# ENCODE REGISTERS
vars = [var_reg0, var_reg1, var_reg2]
kvars = [var_kreg0, var_kreg1, var_kreg2]
i, var_r, var_b, var_n = 0, None, None, None
j, kvar_r, kvar_b, kvar_n = 0, None, None, None
for op in _gen_opnds_nomem(ii):
if op.lookupfn_name:
if op.lookupfn_name.endswith('_R3'):
var_r = vars[i]
i += 1
elif op.lookupfn_name.endswith('_B3'):
var_b = vars[i]
i += 1
elif op.lookupfn_name.endswith('_N3'):
var_n = vars[i]
i += 1
elif op_luf(op,'MASK_R'):
kvar_r = kvars[j]
j += 1
elif op_luf(op,'MASK_B'):
kvar_b = kvars[j]
j += 1
elif op_luf(op,'MASK_N'):
kvar_n = kvars[j]
j += 1
else:
die("SHOULD NOT REACH HERE")
if var_n:
fo.add_code_eol('enc_evex_vvvv_reg_{}(r,{})'.format(vl, var_n))
elif kvar_n:
fo.add_code_eol('enc_evex_vvvv_kreg(r,{})'.format(kvar_n))
else:
fo.add_code_eol('set_vvvv(r,0xF)',"must be 1111")
fo.add_code_eol('set_evexvv(r,1)',"must be 1")
if var_r:
fo.add_code_eol('enc_evex_modrm_reg_{}(r,{})'.format(vl, var_r))
elif kvar_r:
fo.add_code_eol('enc_evex_modrm_reg_kreg(r,{})'.format(kvar_r))
else:
# some instructions use _N3 as dest (like rotates)
#fo.add_code_eol('set_rexr(r,1)')
#fo.add_code_eol('set_evexrr(r,1)')
if ii.reg_required != 'unspecified':
if ii.reg_required: # ZERO INIT OPTIMIZATION
fo.add_code_eol('set_reg(r,{})'.format(ii.reg_required))
if var_b or kvar_b:
die("SHOULD NOT REACH HERE")
#if var_b:
# fo.add_code_eol('enc_evex_modrm_rm_{}(r,{})'.format(vl, var_b))
#elif kvar_b:
# fo.add_code_eol('enc_evex_modrm_rm_kreg(r,{})'.format(kvar_b))
mod = get_modval(dispsz)
if mod: # ZERO-INIT OPTIMIZATION
if mod == 2:
broadcasting = True if broadcast == 'broadcast' else False
chose_evex_scaled_disp(fo, ii, dispsz, broadcasting)
else:
fo.add_code_eol('set_mod(r,{})'.format(mod))
encode_mem_operand(env, ii, fo, use_index, dispsz)
immw=8 if imm8 else 0
finish_memop(env, ii, fo, dispsz, immw, rexw_forced=False, space='evex')
add_enc_func(ii,fo)
def _enc_evex(env,ii):
# handles rounding, norounding, imm8, no-imm8, masking/nomasking
if evex_2or3xyzmm(ii):
create_evex_xyzmm_and_gpr(env,ii)
elif evex_xyzmm_and_gpr(ii):
create_evex_xyzmm_and_gpr(env,ii)
elif evex_regs_mem(ii): # opt imm8, very broad coverage including kreg(dest) ops
create_evex_regs_mem(env, ii)
elif evex_mask_dest_reg_only(ii):
create_evex_evex_mask_dest_reg_only(env, ii)
elif evex_mask_dest_mem(ii):
create_evex_evex_mask_dest_mem(env, ii) # FIXME: no longer used
def _enc_xop(env,ii):
    pass # FIXME: could support XOP instr -- not planned since AMD is deprecating them.
def prep_instruction(ii):
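    '''Initialize per-instruction encoder bookkeeping: the generated-function list, the skip flag, and the EVEX write-masking/rounding/SAE flags derived from the operands and pattern.'''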
setattr(ii,'encoder_functions',[])
setattr(ii,'encoder_skipped',False)
ii.write_masking = False
ii.write_masking_notk0 = False
ii.write_masking_merging_only = False # if true, no zeroing allowed
ii.rounding_form = False
ii.sae_form = False
if ii.space == 'evex':
for op in ii.parsed_operands:
if op.lookupfn_name == 'MASK1':
ii.write_masking = True
elif op.lookupfn_name == 'MASKNOT0':
ii.write_masking = True
ii.write_masking_notk0 = True
if ii.write_masking:
if 'ZEROING=0' in ii.pattern:
ii.write_masking_merging_only = True
if 'AVX512_ROUND()' in ii.pattern:
ii.rounding_form = True
if 'SAE()' in ii.pattern:
ii.sae_form = True
def xed_mode_removal(env,ii):
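    '''Return True if this form should be skipped: it is only selected when a feature knob is off (CLDEMOTE=0, LZCNT=0, etc.) or it is the UD0 length variant not matching env.short_ud0.'''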
if 'CLDEMOTE=0' in ii.pattern:
return True
if 'LZCNT=0' in ii.pattern:
return True
if 'TZCNT=0' in ii.pattern:
return True
if 'WBNOINVD=0' in ii.pattern:
return True
if 'P4=0' in ii.pattern:
return True
if 'MODEP5=1' in ii.pattern:
return True
if 'CET=0' in ii.pattern:
return True
if env.short_ud0:
if 'MODE_SHORT_UD0=0' in ii.pattern: # long UD0
return True # skip
else: # long ud0
if 'MODE_SHORT_UD0=1' in ii.pattern: # short UD0
return True # skip
return False
def create_enc_fn(env, ii):
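    '''Skip forms that are not encodable for the current mode/address size, then dispatch to the generator for the instruction's encoding space (legacy, vex, evex, xop).'''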
if env.asz == 16:
if special_index_cases(ii):
ii.encoder_skipped = True
return
if xed_mode_removal(env,ii):
ii.encoder_skipped = True
return
elif env.mode == 64:
if ii.mode_restriction == 'not64' or ii.mode_restriction in [0,1]:
# we don't need an encoder function for this form in 64b mode
ii.encoder_skipped = True
return
if ii.easz == 'a16':
# 16b addressing not accessible from 64b mode
ii.encoder_skipped = True
return
elif env.mode == 32:
if ii.mode_restriction in [0,2]:
# we don't need an encoder function for this form in 32b mode
ii.encoder_skipped = True
return
if ii.easz == 'a64':
            # 64b addressing not accessible from 32b mode
ii.encoder_skipped = True
return
if ii.space == 'legacy' and (ii.eosz == 'o64' or ii.rexw_prefix == '1'):
# legacy ops with REX.W=1 or EOSZ=3 are 64b mode only
ii.encoder_skipped = True
return
elif env.mode == 16:
if ii.mode_restriction in [1,2]:
# we don't need an encoder function for this form in 16b mode
ii.encoder_skipped = True
return
if ii.easz == 'a64':
# 64b addressing not accessible from 16b mode
ii.encoder_skipped = True
return
if ii.space == 'legacy' and (ii.eosz == 'o64' or ii.rexw_prefix == '1'):
# legacy ops with REX.W=1 or EOSZ=3 are 64b mode only
ii.encoder_skipped = True
return
if ii.space == 'legacy':
_enc_legacy(env,ii)
elif ii.space == 'vex':
_enc_vex(env,ii)
elif ii.space == 'evex':
_enc_evex(env,ii)
elif ii.space == 'xop':
_enc_xop(env,ii)
else:
die("Unhandled encoding space: {}".format(ii.space))
def spew(ii):
"""Print information about the instruction. Purely decorative"""
s = [ii.iclass.lower()]
if ii.iform:
s.append(ii.iform)
else:
s.append("NOIFORM")
s.append(ii.space)
s.append(ii.isa_set)
s.append(hex(ii.opcode_base10))
s.append(str(ii.map))
#dbg('XA: {}'.format(" ".join(s)))
# dump_fields(ii)
modes = ['m16','m32','m64']
if ii.mode_restriction == 'unspecified':
mode = 'mall'
elif ii.mode_restriction == 'not64':
mode = 'mnot64'
else:
mode = modes[ii.mode_restriction]
s.append(mode)
s.append(ii.easz)
s.append(ii.eosz)
if ii.space == 'evex':
if ii.avx512_tuple:
mwc = ii.memop_width_code if hasattr(ii,'memop_width_code') else 'MWC???'
mw = ii.memop_width if hasattr(ii,'memop_width') else 'MW???'
s.append("TUP:{}-{}-{}-{}".format(ii.avx512_tuple,ii.element_size,mwc,mw))
else:
s.append("no-tuple")
if ii.write_masking:
s.append('masking')
if ii.write_masking_merging_only:
s.append('nz')
if ii.write_masking_notk0:
s.append('!k0')
else:
s.append('nomasking')
if ii.space == 'evex':
if ii.rounding_form:
s.append('rounding')
elif ii.sae_form:
s.append('sae')
else:
s.append('noround')
for op in _gen_opnds(ii):
s.append(op.name)
if op.oc2:
s[-1] = s[-1] + '-' + op.oc2
#if op.xtype:
# s[-1] = s[-1] + '-X:' + op.xtype
if op.lookupfn_name:
s.append('({})'.format(op.lookupfn_name))
elif op.bits and op.bits != '1':
s.append('[{}]'.format(op.bits))
if op.name == 'MEM0':
if ii.avx512_vsib:
s[-1] = s[-1] + '-uvsib-{}'.format(ii.avx512_vsib)
elif ii.avx_vsib:
s[-1] = s[-1] + '-vsib-{}'.format(ii.avx_vsib)
if ii.encoder_functions:
dbg("//DONE {}".format(" ".join(s)))
elif ii.encoder_skipped:
dbg("//SKIP {}".format(" ".join(s)))
else:
dbg("//TODO {}".format(" ".join(s)))
def gather_stats(db):
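    '''Tally handled, skipped and unhandled forms across the db and dbg() a summary, including a per-encoding-space count of the forms still to do.'''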
global numbered_functions
unhandled = 0
forms = len(db)
generated_fns = 0
skipped_fns = 0
skipped_mpx = 0
handled = 0
not_done = { 'evex':0, 'vex':0, 'legacy':0, 'xop':0 }
for ii in db:
if ii.encoder_skipped:
skipped_fns += 1
elif ii.isa_set in ['MPX']:
skipped_mpx += 1
else:
gen_fn = len(ii.encoder_functions)
if gen_fn == 0:
unhandled = unhandled + 1
not_done[ii.space] += 1
else:
handled += 1
generated_fns += gen_fn
skipped = skipped_mpx + skipped_fns
    tot_focus = handled + unhandled + skipped # denominator for the percentages below
dbg("// Forms: {:4d}".format(forms))
dbg("// Handled: {:4d} ({:6.2f}%)".format(handled, 100.0*handled/tot_focus ))
dbg("// Irrelevant: {:4d} ({:6.2f}%)".format(skipped, 100.0*skipped/tot_focus ))
dbg("// Not handled: {:4d} ({:6.2f}%)".format(unhandled, 100.0*unhandled/tot_focus))
dbg("// Numbered functions: {:5d}".format(numbered_functions))
dbg("// Generated Encoding functions: {:5d}".format(generated_fns))
dbg("// Skipped Encoding functions: {:5d}".format(skipped_fns))
dbg("// Skipped MPX instr: {:5d}".format(skipped_mpx))
for space in not_done.keys():
dbg("// not-done {:8s}: {:5d}".format(space, not_done[space]))
# object used for the env we pass to the generator
class enc_env_t(object):
def __init__(self, mode, asz, width_info_dict, test_checked_interface=False, short_ud0=False):
self.mode = mode
self.asz = asz
self.function_names = {}
self.test_checked_interface = test_checked_interface
self.tests_per_form = 1
self.short_ud0 = short_ud0
# dictionary by oc2 of the various memop bit widths.
self.width_info_dict = width_info_dict
def __str__(self):
s = []
s.append("mode {}".format(self.mode))
s.append("asz {}".format(self.asz))
return ", ".join(s)
def mem_bits(self, width_name, osz=0):
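        '''Return the memory operand width in bits for the given oc2 width name, using the osz-specific column when osz is nonzero and the 32b column otherwise.'''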
wi = self.width_info_dict[width_name]
indx = osz if osz else 32
return wi.widths[indx]
def dump_output_file_names(fn, fe_list):
ofn = os.path.join(fn)
o = open(ofn,"w")
for fe in fe_list:
o.write(fe.full_file_name + "\n")
o.close()
def emit_encode_functions(args,
env,
xeddb,
function_type_name='encode',
fn_list_attr='encoder_functions',
config_prefix='',
srcdir='src',
extra_headers=None):
msge("Writing encoder '{}' functions to .c and .h files".format(function_type_name))
# group the instructions by encoding space to allow for
# better link-time garbage collection.
func_lists = collections.defaultdict(list)
for ii in xeddb.recs:
func_lists[ii.space].extend( getattr(ii, fn_list_attr) )
func_list = []
for space in func_lists.keys():
func_list.extend(func_lists[space])
config_descriptor = 'enc2-m{}-a{}'.format(env.mode, env.asz)
fn_prefix = 'xed-{}{}'.format(config_prefix,config_descriptor)
gen_src_dir = os.path.join(args.gendir, config_descriptor, srcdir)
gen_hdr_dir = os.path.join(args.gendir, config_descriptor, 'hdr', 'xed')
mbuild.cmkdir(gen_src_dir)
mbuild.cmkdir(gen_hdr_dir)
file_emitters = codegen.emit_function_list(func_list,
fn_prefix,
args.xeddir,
gen_src_dir,
gen_hdr_dir,
other_headers = extra_headers,
max_lines_per_file=15000,
is_private_header=False,
extra_public_headers=['xed/xed-interface.h'])
return file_emitters
def work():
arg_parser = argparse.ArgumentParser(description="Create XED encoder2")
arg_parser.add_argument('-short-ud0',
help='Encode 2-byte UD0 (default is long UD0 as implemented on modern Intel Core processors. Intel Atom processors implement short 2-byte UD0)',
dest='short_ud0',
action='store_true',
default=False)
arg_parser.add_argument('-m64',
help='64b mode (default)',
dest='modes', action='append_const', const=64)
arg_parser.add_argument('-m32',
help='32b mode',
dest='modes', action='append_const', const=32)
arg_parser.add_argument('-m16' ,
help='16b mode',
dest='modes', action='append_const', const=16)
arg_parser.add_argument('-a64',
help='64b addressing (default)',
dest='asz_list', action='append_const', const=64)
arg_parser.add_argument('-a32',
help='32b addressing',
dest='asz_list', action='append_const', const=32)
arg_parser.add_argument('-a16' ,
help='16b addressing',
dest='asz_list', action='append_const', const=16)
arg_parser.add_argument('-all',
action="store_true",
default=False,
help='all modes and addressing')
arg_parser.add_argument('-chk',
action="store_true",
default=False,
help='Test checked interface')
arg_parser.add_argument('--gendir',
help='output directory, default: "obj"',
default='obj')
arg_parser.add_argument('--xeddir',
help='XED source directory, default: "."',
default='.')
arg_parser.add_argument('--output-file-list',
dest='output_file_list',
help='Name of output file containing list of output files created. ' +
'Default: GENDIR/enc2-list-of-files.txt')
args = arg_parser.parse_args()
args.prefix = os.path.join(args.gendir,'dgen')
if args.output_file_list == None:
args.output_file_list = os.path.join(args.gendir, 'enc2-list-of-files.txt')
def _mkstr(lst):
s = [str(x) for x in lst]
return ":".join(s)
dbg_fn = os.path.join(args.gendir,'enc2out-m{}-a{}.txt'.format(_mkstr(args.modes),
_mkstr(args.asz_list)))
msge("Writing {}".format(dbg_fn))
set_dbg_output(open(dbg_fn,"w"))
gen_setup.make_paths(args)
msge('Reading XED db...')
xeddb = read_xed_db.xed_reader_t(args.state_bits_filename,
args.instructions_filename,
args.widths_filename,
args.element_types_filename,
args.cpuid_filename,
args.map_descriptions)
width_info_dict = xeddb.get_width_info_dict()
for k in width_info_dict.keys():
print("{} -> {}".format(k,width_info_dict[k]))
# all modes and address sizes, filtered appropriately later
if args.all:
args.modes = [16,32,64]
args.asz_list = [16,32,64]
# if you just specify a mode, we supply the full set of address sizes
if args.modes == [64]:
if not args.asz_list:
args.asz_list = [32,64]
elif args.modes == [32]:
if not args.asz_list:
args.asz_list = [16,32]
elif args.modes == [16]:
if not args.asz_list:
args.asz_list = [16,32]
# default 64b mode, 64b address size
if not args.modes:
args.modes = [ 64 ]
if not args.asz_list:
args.asz_list = [ 64 ]
for ii in xeddb.recs:
prep_instruction(ii)
def prune_asz_list_for_mode(mode,alist):
'''make sure we only use addressing modes appropriate for our mode'''
for asz in alist:
if mode == 64:
if asz in [32,64]:
yield asz
elif asz != 64:
yield asz
output_file_emitters = []
#extra_headers = ['xed/xed-encode-direct.h']
for mode in args.modes:
for asz in prune_asz_list_for_mode(mode,args.asz_list):
env = enc_env_t(mode, asz, width_info_dict,
short_ud0=args.short_ud0)
enc2test.set_test_gen_counters(env)
env.tests_per_form = 1
env.test_checked_interface = args.chk
msge("Generating encoder functions for {}".format(env))
for ii in xeddb.recs:
# create encoder function. sets ii.encoder_functions
create_enc_fn(env, ii)
spew(ii)
# create test(s) sets ii.enc_test_functions
enc2test.create_test_fn_main(env, ii)
# create arg checkers. sets ii.enc_arg_check_functions
enc2argcheck.create_arg_check_fn_main(env, ii)
fel = emit_encode_functions(args,
env,
xeddb,
function_type_name='encode',
fn_list_attr='encoder_functions',
config_prefix='',
srcdir='src')
output_file_emitters.extend(fel)
fel = emit_encode_functions(args,
env,
xeddb,
function_type_name='encoder-check',
fn_list_attr='enc_arg_check_functions',
config_prefix='chk-',
srcdir='src-chk',
extra_headers = [ 'xed/xed-enc2-m{}-a{}.h'.format(env.mode, env.asz) ])
output_file_emitters.extend(fel)
msge("Writing encoder 'test' functions to .c and .h files")
func_list = []
iclasses = []
for ii in xeddb.recs:
func_list.extend(ii.enc_test_functions)
# this is for the validation test to check the iclass after decode
n = len(ii.enc_test_functions)
if n:
iclasses.extend(n*[ii.iclass])
config_descriptor = 'enc2-m{}-a{}'.format(mode,asz)
fn_prefix = 'xed-test-{}'.format(config_descriptor)
test_fn_hdr='{}.h'.format(fn_prefix)
enc2_fn_hdr='xed/xed-{}.h'.format(config_descriptor)
enc2_chk_fn_hdr='xed/xed-chk-{}.h'.format(config_descriptor)
gen_src_dir = os.path.join(args.gendir, config_descriptor, 'test', 'src')
gen_hdr_dir = os.path.join(args.gendir, config_descriptor, 'test', 'hdr')
mbuild.cmkdir(gen_src_dir)
mbuild.cmkdir(gen_hdr_dir)
file_emitters = codegen.emit_function_list(func_list,
fn_prefix,
args.xeddir,
gen_src_dir,
gen_hdr_dir,
other_headers = [enc2_fn_hdr, enc2_chk_fn_hdr],
max_lines_per_file=15000)
output_file_emitters.extend(file_emitters)
# emit a C file initializing two arrays: one array with
            # test function names, and another of the function names
# as strings so I can find them when I need to debug them.
fe = codegen.xed_file_emitter_t(args.xeddir,
gen_src_dir,
'testtable-m{}-a{}.c'.format(mode,asz))
fe.add_header(test_fn_hdr)
fe.start()
array_name = 'test_functions_m{}_a{}'.format(mode,asz)
fe.add_code_eol('typedef xed_uint32_t (*test_func_t)(xed_uint8_t* output_buffer)')
fe.add_code('test_func_t {}[] = {{'.format(array_name))
for fn in func_list:
fe.add_code('{},'.format(fn.get_function_name()))
fe.add_code('0')
fe.add_code('};')
fe.add_code('char const* {}_str[] = {{'.format(array_name))
for fn in func_list:
fe.add_code('"{}",'.format(fn.get_function_name()))
fe.add_code('0')
fe.add_code('};')
fe.add_code('const xed_iclass_enum_t {}_iclass[] = {{'.format(array_name))
for iclass in iclasses:
fe.add_code('XED_ICLASS_{},'.format(iclass))
fe.add_code('XED_ICLASS_INVALID')
fe.add_code('};')
fe.close()
output_file_emitters.append(fe)
gather_stats(xeddb.recs)
dump_numbered_function_creators()
dump_output_file_names( args.output_file_list,
output_file_emitters )
return 0
if __name__ == "__main__":
r = work()
sys.exit(r)
| apache-2.0 | 2,318,196,300,557,789,000 | 33.572113 | 173 | 0.524746 | false |
errordeveloper/fe-devel | Native/Clients/Python/__init__.py | 1 | 63982 | #
# Copyright 2010-2012 Fabric Engine Inc. All rights reserved.
#
import os
import sys
import json
import ctypes
import collections
import atexit
import Queue
import signal
# FIXME Windows
if os.name == 'posix':
_fabric = ctypes.CDLL( os.path.dirname( __file__ ) + '/libFabricPython.so' )
else:
_fabric = ctypes.CDLL( os.path.dirname( __file__ ) + '/FabricPython.dll' )
# FIXME Windows
_caughtSIGINT = False
def _handleSIGINT( signum, frame ):
global _caughtSIGINT
_caughtSIGINT = True
signal.signal( signal.SIGINT, _handleSIGINT )
# catch uncaught exceptions so that we don't wait on threads
_uncaughtException = False
_oldExceptHook = sys.excepthook
def _excepthook( type, value, traceback):
global _uncaughtException
_uncaughtException = True
_oldExceptHook( type, value, traceback )
sys.excepthook = _excepthook
# prevent exit until all our threads complete
_clients = []
def _waitForClose():
# FIXME this will run in a tight loop while waiting
while not _uncaughtException and not _caughtSIGINT and len( _clients ) > 0:
for c in _clients:
c.running()
atexit.register( _waitForClose )
# declare explicit prototypes for all the external library calls
_fabric.identify.argtypes = []
_fabric.createClient.argtypes = [
ctypes.c_void_p
]
_fabric.jsonExec.argtypes = [
ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_size_t,
ctypes.c_void_p
]
_fabric.runScheduledCallbacks.argtypes = [
ctypes.c_void_p
]
_fabric.freeClient.argtypes = [
ctypes.c_void_p
]
_fabric.freeString.argtypes = [
ctypes.c_void_p,
ctypes.c_char_p
]
_NOTIFYCALLBACK = ctypes.CFUNCTYPE( None, ctypes.c_char_p )
_fabric.setJSONNotifyCallback.argtypes = [
ctypes.c_void_p,
_NOTIFYCALLBACK
]
# print app and version information
_fabric.identify()
def createClient():
return _INTERFACE( _fabric )
# used in unit tests
def stringify( obj ):
return json.dumps( _normalizeForUnitTests( _typeToDict( obj ) ) )
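# Illustrative usage sketch (kept as a comment so importing this module has no
# side effects). It assumes the package is importable as 'fabric'; the node
# name, member name and the 'Scalar' member type below are hypothetical
# examples, not anything this module defines:
#
#   import fabric
#   client = fabric.createClient()
#   node = client.DG.createNode( 'myNode' )
#   node.addMember( 'value', 'Scalar' )
#   node.setData( 'value', 0, 3.14 )
#   print fabric.stringify( node.getData( 'value' ) )
#   client.close()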
# global for tracking GC ids for core objects
_gcId = 0
def _getNextGCId():
global _gcId
_gcId = _gcId + 1
return _gcId
# for unit tests only, make floats use same precision across different
# versions of python which have different repr() implementations and
# change dicts to sorted lists so ordering doesn't change
def _normalizeForUnitTests( obj ):
if type( obj ) is list:
objlist = []
for elem in obj:
objlist.append( _normalizeForUnitTests( elem ) )
return objlist
elif type( obj ) is dict:
objdictlist = []
for member in obj:
elemobj = {}
elemobj[ member ] = _normalizeForUnitTests( obj[ member ] )
objdictlist.append( elemobj )
objdictlist.sort()
return objdictlist
elif type( obj ) is float:
return format( obj, '.3f' )
else:
return obj
# take a python class and convert its members down to a hierarchy of
# dictionaries, ignoring methods
def _typeToDict( obj ):
if type( obj ) is list:
objlist = []
for elem in obj:
objlist.append( _typeToDict( elem ) )
return objlist
elif type( obj ) is dict:
objdict = {}
for member in obj:
objdict[ member ] = _typeToDict( obj[ member ] )
return objdict
elif not hasattr( obj, '__dict__' ):
return obj
else:
objdict = {}
for member in vars( obj ):
attr = getattr( obj, member )
objdict[ member ] = _typeToDict( attr )
return objdict
# this is the interface object that gets returned to the user
class _INTERFACE( object ):
def __init__( self, fabric ):
self.__client = _CLIENT( fabric )
self.KLC = self.__client.klc
self.MR = self.__client.mr
self.RT = self.__client.rt
self.RegisteredTypesManager = self.RT
self.DG = self.__client.dg
self.DependencyGraph = self.DG
self.EX = self.__client.ex
self.IO = self.__client.io
self.build = self.__client.build
def flush( self ):
self.__client.executeQueuedCommands()
def close( self ):
self.__client.close()
def running( self ):
return self.__client.running()
def waitForClose( self ):
return self.__client.waitForClose()
def getMemoryUsage( self ):
# dictionary hack to simulate Python 3.x nonlocal
memoryUsage = { '_': None }
def __getMemoryUsage( result ):
memoryUsage[ '_' ] = result
self.__client.queueCommand( [], 'getMemoryUsage', None, None, __getMemoryUsage )
self.flush()
return memoryUsage[ '_' ]
class _CLIENT( object ):
def __init__( self, fabric ):
self.__fabric = fabric
self.__fabricClient = self.__createClient()
self.__queuedCommands = []
self.__queuedUnwinds = []
self.__queuedCallbacks = []
self.gc = _GC( self )
self.klc = _KLC( self )
self.mr = _MR( self )
self.rt = _RT( self )
self.dg = _DG( self )
self.ex = _EX( self )
self.io = _IO( self )
self.build = _BUILD( self )
self.__closed = False
self.__state = {}
self.__notifications = Queue.Queue()
# declare all class variables needed in the notifyCallback above
# here as the closure remembers the current class members immediately
self.__registerNotifyCallback()
self.__processAllNotifications()
_clients.append( self )
def waitForClose( self ):
while not _uncaughtException and not _caughtSIGINT and not self.__closed:
self.__processOneNotification()
def running( self ):
self.__processAllNotifications()
return not self.__closed
def __processAllNotifications( self ):
while not self.__notifications.empty():
self.__processOneNotification()
def __processOneNotification( self, timeout = None ):
n = None
try:
n = self.__notifications.get( True, timeout )
except Queue.Empty:
return
arg = None
if 'arg' in n:
arg = n[ 'arg' ]
self._route( n[ 'src' ], n[ 'cmd' ], arg )
    self.__notifications.task_done()
def __runScheduledCallbacks( self ):
self.__fabric.runScheduledCallbacks( self.__fabricClient )
def __createClient( self ):
result = ctypes.c_void_p()
self.__fabric.createClient( ctypes.pointer( result ) )
return result
def __jsonExec( self, data, length ):
result = ctypes.c_char_p()
if self.__closed:
raise Exception( 'Fabric client has already been closed' )
self.__fabric.jsonExec(
self.__fabricClient,
data,
length,
ctypes.pointer( result )
)
return result
def close( self ):
_clients.remove( self )
self.__closed = True
self.__fabric.freeClient( self.__fabricClient )
# these must be explicitly set to None due to circular referencing
# preventing garbage collection if not
self.gc = None
self.klc = None
self.mr = None
self.rt = None
self.dg = None
self.ex = None
self.io = None
self.build = None
self.__CFUNCTYPE_notifyCallback = None
def getLicenses( self ):
    return self.__state[ 'licenses' ]
  def getContextID( self ):
    return self.__state[ 'contextID' ]
def queueCommand( self, dst, cmd, arg = None, unwind = None, callback = None ):
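    '''Queue a single command for the core. dst is the routing path; unwind, if given, is
    called to roll back this command's optimistic local state when an earlier command in
    the same batch throws; callback, if given, receives the command's result.'''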
command = { 'dst': dst, 'cmd': cmd }
if ( arg is not None ):
command[ 'arg' ] = arg
self.__queuedCommands.append( command )
self.__queuedUnwinds.append( unwind )
self.__queuedCallbacks.append( callback )
def executeQueuedCommands( self ):
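    '''Flush all queued commands to the core as one JSON batch via jsonExec, process any
    notifications it produced, invoke per-command callbacks with their results, and on a
    core exception run the unwind handlers of the commands queued after the failing one.'''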
commands = self.__queuedCommands
self.__queuedCommands = []
unwinds = self.__queuedUnwinds
self.__queuedUnwinds = []
callbacks = self.__queuedCallbacks
self.__queuedCallbacks = []
if len( commands ) < 1:
return
jsonEncodedCommands = json.dumps( commands )
jsonEncodedResults = self.__jsonExec( jsonEncodedCommands, len( jsonEncodedCommands ) )
try:
results = json.loads( jsonEncodedResults.value )
except Exception:
raise Exception( 'unable to parse JSON results: ' + jsonEncodedResults )
self.__fabric.freeString( self.__fabricClient, jsonEncodedResults )
self.__processAllNotifications()
for i in range(len(results)):
result = results[i]
callback = callbacks[i]
if ( 'exception' in result ):
for j in range( len( unwinds ) - 1, i, -1 ):
unwind = unwinds[ j ]
if ( unwind is not None ):
unwind()
self.__processAllNotifications()
raise Exception( 'Fabric core exception: ' + result[ 'exception' ] )
elif ( callback is not None ):
callback( result[ 'result' ] )
def _handleStateNotification( self, newState ):
self.__state = {}
self._patch( newState )
if 'build' in newState:
self.build._handleStateNotification( newState[ 'build' ] )
self.dg._handleStateNotification( newState[ 'DG' ] )
self.rt._handleStateNotification( newState[ 'RT' ] )
self.ex._handleStateNotification( newState[ 'EX' ] )
def _patch( self, diff ):
if 'licenses' in diff:
self.__state[ 'licenses' ] = diff[ 'licenses' ]
if 'contextID' in diff:
self.__state[ 'contextID' ] = diff[ 'contextID' ]
def _handle( self, cmd, arg ):
try:
if cmd == 'state':
self._handleStateNotification( arg )
else:
raise Exception( 'unknown command' )
except Exception as e:
raise Exception( 'command "' + cmd + '": ' + str( e ) )
def _route( self, src, cmd, arg ):
if len(src) == 0:
self._handle( cmd, arg )
else:
src = collections.deque( src )
firstSrc = src.popleft()
if firstSrc == 'RT':
self.rt._route( src, cmd, arg )
elif firstSrc == 'DG':
self.dg._route( src, cmd, arg )
elif firstSrc == 'EX':
self.ex._route( src, cmd, arg )
elif firstSrc == 'GC':
self.gc._route( src, cmd, arg )
elif firstSrc == 'ClientWrap':
if cmd == 'runScheduledCallbacks':
self.__runScheduledCallbacks()
else:
raise Exception( 'bad ClientWrap cmd: "' + cmd + '"' )
else:
raise Exception( 'unroutable src: ' + firstSrc )
def __notifyCallback( self, jsonEncodedNotifications ):
try:
notifications = json.loads( jsonEncodedNotifications )
except Exception:
raise Exception( 'unable to parse JSON notifications' )
for i in range( 0, len( notifications ) ):
self.__notifications.put( notifications[i] )
def __getNotifyCallback( self ):
# use a closure here so that 'self' is maintained without us
# explicitly passing it
def notifyCallback( jsonEncodedNotifications ):
self.__notifyCallback( jsonEncodedNotifications )
# this is important, we have to maintain a reference to the CFUNCTYPE
# ptr and not just return it, otherwise it will be garbage collected
# and callbacks will fail
self.__CFUNCTYPE_notifyCallback = _NOTIFYCALLBACK ( notifyCallback )
return self.__CFUNCTYPE_notifyCallback
def __registerNotifyCallback( self ):
self.__fabric.setJSONNotifyCallback( self.__fabricClient, self.__getNotifyCallback() )
class _GCOBJECT( object ):
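  '''Base class for core objects tracked by the client-side GC namespace: each instance
  gets a unique "GC_<n>" id, registers itself with the client's gc, and routes
  asynchronous results back through serial-numbered callbacks.'''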
def __init__( self, nsobj ):
self.__id = "GC_" + str( _getNextGCId() )
self.__nextCallbackID = 0
self.__callbacks = {}
self._nsobj = nsobj
nsobj._getClient().gc.addObject( self )
def dispose( self ):
self._gcObjQueueCommand( 'dispose' )
    self._nsobj._getClient().gc.disposeObject( self )
self.__id = None
def _gcObjQueueCommand( self, cmd, arg = None, unwind = None, callback = None ):
if self.__id is None:
raise Exception( "GC object has already been disposed" )
self._nsobj._objQueueCommand( [ self.__id ], cmd, arg, unwind, callback )
def _synchronousGetOnly( self, cmd ):
# dictionary hack to simulate Python 3.x nonlocal
data = { '_': None }
def __callback( result ):
data[ '_' ] = result
self._gcObjQueueCommand( cmd, None, None, __callback )
self._nsobj._executeQueuedCommands()
return data[ '_' ]
def _registerCallback( self, callback ):
self.__nextCallbackID = self.__nextCallbackID + 1
callbackID = self.__nextCallbackID
self.__callbacks[ callbackID ] = callback
return callbackID
def _route( self, src, cmd, arg ):
callback = self.__callbacks[ arg[ 'serial' ] ]
del self.__callbacks[ arg[ 'serial' ] ]
callback( arg[ 'result' ] )
def getID( self ):
return self.__id
def setID( self, id ):
self.__id = id
def unwind( self ):
self.setID( None )
class _NAMESPACE( object ):
def __init__( self, client, name ):
self.__client = client
self.__name = name
def _getClient( self ):
return self.__client
def _getName( self ):
    return self.__name
def _objQueueCommand( self, dst, cmd, arg = None, unwind = None, callback = None ):
if dst is not None:
dst = [ self.__name ] + dst
else:
dst = [ self.__name ]
self.__client.queueCommand( dst, cmd, arg, unwind, callback )
def _queueCommand( self, cmd, arg = None, unwind = None, callback = None ):
self._objQueueCommand( None, cmd, arg, unwind, callback )
def _executeQueuedCommands( self ):
self.__client.executeQueuedCommands()
class _DG( _NAMESPACE ):
def __init__( self, client ):
super( _DG, self ).__init__( client, 'DG' )
self._namedObjects = {}
def createBinding( self ):
return self._BINDING()
def _createBindingList( self, dst ):
return self._BINDINGLIST( self, dst )
def __createNamedObject( self, name, cmd, objType ):
if name in self._namedObjects:
raise Exception( 'a NamedObject named "' + name + '" already exists' )
obj = objType( self, name )
self._namedObjects[ name ] = obj
def __unwind():
obj._confirmDestroy()
self._queueCommand( cmd, name, __unwind )
return obj
def createOperator( self, name ):
return self.__createNamedObject( name, 'createOperator', self._OPERATOR )
def createNode( self, name ):
return self.__createNamedObject( name, 'createNode', self._NODE )
def createResourceLoadNode( self, name ):
return self.__createNamedObject( name, 'createResourceLoadNode', self._RESOURCELOADNODE )
def createEvent( self, name ):
return self.__createNamedObject( name, 'createEvent', self._EVENT )
def createEventHandler( self, name ):
return self.__createNamedObject( name, 'createEventHandler', self._EVENTHANDLER )
def getAllNamedObjects( self ):
result ={}
for namedObjectName in self._namedObjects:
result[ namedObjectName ] = self._namedObjects[ namedObjectName ]
return result
def __getOrCreateNamedObject( self, name, type ):
if name not in self._namedObjects:
if type == 'Operator':
self.createOperator( name )
elif type == 'Node':
self.createNode( name )
elif type == 'Event':
self.createEvent( name )
elif type == 'EventHandler':
self.createEventHandler( name )
else:
raise Exception( 'unhandled type "' + type + '"' )
return self._namedObjects[ name ]
def _handleStateNotification( self, state ):
self._namedObjects = {}
for namedObjectName in state:
namedObjectState = state[ namedObjectName ]
self.__getOrCreateNamedObject( namedObjectName, namedObjectState[ 'type' ] )
for namedObjectName in state:
self._namedObjects[ namedObjectName ]._patch( state[ namedObjectName ] )
def _handle( self, cmd, arg ):
# FIXME no logging callback implemented yet
if cmd == 'log':
pass
#if ( self.__logCallback ):
# self.__logCallback( arg )
else:
raise Exception( 'command "' + cmd + '": unrecognized' )
def _route( self, src, cmd, arg ):
if len( src ) == 0:
self._handle( cmd, arg )
else:
src = collections.deque( src )
namedObjectName = src.popleft()
namedObjectType = None
if type( arg ) is dict and 'type' in arg:
namedObjectType = arg[ 'type' ]
self.__getOrCreateNamedObject( namedObjectName, namedObjectType )._route( src, cmd, arg )
class _NAMEDOBJECT( object ):
def __init__( self, dg, name ):
self.__name = name
self.__errors = []
self.__destroyed = None
self._dg = dg
def _nObjQueueCommand( self, cmd, arg = None, unwind = None, callback = None ):
if not self.isValid():
raise Exception( 'NamedObject "' + self.__name + '" has been destroyed' )
self._dg._objQueueCommand( [ self.__name ], cmd, arg, unwind, callback )
def _patch( self, diff ):
if 'errors' in diff:
self.__errors = diff[ 'errors' ]
def _confirmDestroy( self ):
del self._dg._namedObjects[ self.__name ]
self.__destroyed = True
def _setAsDestroyed( self ):
self.__destroyed = True
def _unsetDestroyed( self ):
    self._dg._namedObjects[ self.__name ] = self
self.__destroyed = None
def _handle( self, cmd, arg ):
if cmd == 'delta':
self._patch( arg )
elif cmd == 'destroy':
self._confirmDestroy()
else:
raise Exception( 'command "' + cmd + '" not recognized' )
def _route( self, src, cmd, arg ):
if len( src ) == 0:
self._handle( cmd, arg )
else:
raise Exception( 'unroutable' )
def getName( self ):
return self.__name
def getErrors( self ):
self._dg._executeQueuedCommands()
return self.__errors
def isValid( self ):
return self.__destroyed is None
class _BINDINGLIST( object ):
def __init__( self, dg, dst ):
self.__bindings = []
self._dg = dg
self.__dst = dst
def _patch( self, state ):
self.__bindings = []
for i in range( 0, len( state ) ):
binding = {
'operator': self._dg._namedObjects[ state[ i ][ 'operator' ] ],
'parameterLayout': state[ i ][ 'parameterLayout' ]
}
self.__bindings.append( binding )
def _handle( self, cmd, arg ):
if cmd == 'delta':
self._patch( arg )
else:
raise Exception( 'command "' + cmd + '": unrecognized' )
def _route( self, src, cmd, arg ):
if len( src ) == 0:
self._handle( cmd, arg )
else:
raise Exception( 'unroutable' )
def _handleStateNotification( self, state ):
self._patch( state )
def empty( self ):
if self.__bindings is None:
self._dg._executeQueuedCommands()
return len( self.__bindings ) == 0
def getLength( self ):
if self.__bindings is None:
self._dg._executeQueuedCommands()
return len( self.__bindings )
def getOperator( self, index ):
if self.__bindings is None:
self._dg._executeQueuedCommands()
return self.__bindings[ index ]['operator']
def append( self, binding ):
operatorName = None
try:
operatorName = binding.getOperator().getName()
except Exception:
raise Exception('operator: not an operator')
oldBindings = self.__bindings
self.__bindings = None
def __unwind():
self.__bindings = oldBindings
args = {
'operatorName': operatorName,
'parameterLayout': binding.getParameterLayout()
}
self._dg._objQueueCommand( self.__dst, 'append', args, __unwind )
def insert( self, binding, beforeIndex ):
operatorName = None
try:
operatorName = binding.getOperator().getName()
except Exception:
raise Exception('operator: not an operator')
if type( beforeIndex ) is not int:
raise Exception( 'beforeIndex: must be an integer' )
oldBindings = self.__bindings
self.__bindings = None
def __unwind():
self.__bindings = oldBindings
args = {
'beforeIndex': beforeIndex,
'operatorName': operatorName,
'parameterLayout': binding.getParameterLayout()
}
self._dg._objQueueCommand( self.__dst, 'insert', args, __unwind )
def remove( self, index ):
oldBindings = self.__bindings
self.__bindings = None
def __unwind():
self.__bindings = oldBindings
args = {
'index': index,
}
self._dg._objQueueCommand( self.__dst, 'remove', args, __unwind )
class _BINDING( object ):
def __init__( self ):
self.__operator = None
self.__parameterLayout = None
def getOperator( self ):
return self.__operator
def setOperator( self, operator ):
self.__operator = operator
def getParameterLayout( self ):
return self.__parameterLayout
def setParameterLayout( self, parameterLayout ):
self.__parameterLayout = parameterLayout
class _OPERATOR( _NAMEDOBJECT ):
def __init__( self, dg, name ):
super( _DG._OPERATOR, self ).__init__( dg, name )
self.__diagnostics = []
self.__filename = None
self.__sourceCode = None
self.__entryFunctionName = None
self.__mainThreadOnly = None
def _patch( self, diff ):
super( _DG._OPERATOR, self )._patch( diff )
if 'filename' in diff:
self.__filename = diff[ 'filename' ]
if 'sourceCode' in diff:
self.__sourceCode = diff[ 'sourceCode' ]
if 'entryPoint' in diff:
self.__entryFunctionName = diff[ 'entryPoint' ]
if 'diagnostics' in diff:
self.__diagnostics = diff[ 'diagnostics' ]
if 'mainThreadOnly' in diff:
self.__mainThreadOnly = diff[ 'mainThreadOnly' ]
def getMainThreadOnly( self ):
if self.__mainThreadOnly is None:
self._dg._executeQueuedCommands()
return self.__mainThreadOnly
def setMainThreadOnly( self, mainThreadOnly ):
oldMainThreadOnly = self.__mainThreadOnly
self.__mainThreadOnly = mainThreadOnly
def __unwind():
self.__mainThreadOnly = oldMainThreadOnly
self._nObjQueueCommand( 'setMainThreadOnly', mainThreadOnly, __unwind )
def getFilename( self ):
if self.__filename is None:
self._dg._executeQueuedCommands()
return self.__filename
def getSourceCode( self ):
if self.__sourceCode is None:
self._dg._executeQueuedCommands()
return self.__sourceCode
def setSourceCode( self, filename, sourceCode = None ):
# this is legacy usage, sourceCode only
if sourceCode is None:
sourceCode = filename
filename = "(unknown)"
oldFilename = self.__filename
self.__filename = filename
oldSourceCode = self.__sourceCode
self.__sourceCode = sourceCode
oldDiagnostics = self.__diagnostics
self.__diagnostics = []
def __unwind():
self.__filename = oldFilename
self.__sourceCode = oldSourceCode
self.__diagnostics = oldDiagnostics
args = {
'filename': filename,
'sourceCode': sourceCode
}
self._nObjQueueCommand( 'setSourceCode', args, __unwind )
def getEntryPoint( self ):
if self.__entryFunctionName is None:
self._dg._executeQueuedCommands()
return self.__entryFunctionName
def getEntryFunctionName( self ):
print "Warning: getEntryFunctionName() is deprecated and will be removed in a future version; use getEntryPoint() instead"
return self.getEntryPoint()
def setEntryPoint( self, entryPoint ):
oldEntryFunctionName = self.__entryFunctionName
self.__entryFunctionName = entryPoint
def __unwind():
self.__entryFunctionName = oldEntryFunctionName
self._nObjQueueCommand( 'setEntryPoint', entryPoint, __unwind )
self.__diagnostics = []
def setEntryFunctionName( self, entryPoint ):
print "Warning: setEntryFunctionName() is deprecated and will be removed in a future version; use setEntryPoint() instead"
self.setEntryPoint( entryPoint )
def getDiagnostics( self ):
if len( self.__diagnostics ) == 0:
self._dg._executeQueuedCommands()
return self.__diagnostics
class _CONTAINER( _NAMEDOBJECT ):
def __init__( self, dg, name ):
super( _DG._CONTAINER, self ).__init__( dg, name )
self.__rt = dg._getClient().rt
self.__members = None
self.__size = None
self.__sizeNeedRefresh = True
def _patch( self, diff ):
super( _DG._CONTAINER, self )._patch( diff )
if 'members' in diff:
self.__members = diff[ 'members' ]
if 'size' in diff:
self.__size = diff[ 'size' ]
def _handle( self, cmd, arg ):
if cmd == 'dataChange':
memberName = arg[ 'memberName' ]
sliceIndex = arg[ 'sliceIndex' ]
# FIXME invalidate cache here, see pzion comment in node.js
else:
super( _DG._CONTAINER, self )._handle( cmd, arg )
def destroy( self ):
self._setAsDestroyed()
def __unwind():
self._unsetDestroyed()
# Don't call self._nObjQueueCommand as it checks isValid()
self._dg._objQueueCommand( [ self.getName() ], 'destroy', None, __unwind )
def getCount( self ):
if self.__sizeNeedRefresh:
self.__sizeNeedRefresh = None
self._dg._executeQueuedCommands()
return self.__size
def size( self ):
return self.getCount()
def setCount( self, count ):
self._nObjQueueCommand( 'resize', count )
self.__sizeNeedRefresh = True
def resize( self, count ):
self.setCount( count )
def getMembers( self ):
if self.__members is None:
self._dg._executeQueuedCommands()
return self.__members
def addMember( self, memberName, memberType, defaultValue = None ):
if self.__members is None:
self.__members = {}
if memberName in self.__members:
raise Exception( 'there is already a member named "' + memberName + '"' )
arg = { 'name': memberName, 'type': memberType }
if defaultValue is not None:
arg[ 'defaultValue' ] = _typeToDict( defaultValue )
self.__members[ memberName ] = arg
def __unwind():
if memberName in self.__members:
del self.__members[ memberName ]
self._nObjQueueCommand( 'addMember', arg, __unwind )
def removeMember( self, memberName ):
if self.__members is None or memberName not in self.__members:
raise Exception( 'there is no member named "' + memberName + '"' )
oldMember = self.__members[ memberName ]
del self.__members[ memberName ]
def __unwind():
self.__members[ memberName ] = oldMember
self._nObjQueueCommand( 'removeMember', memberName, __unwind )
def getData( self, memberName, sliceIndex = None ):
if sliceIndex is None:
sliceIndex = 0
# dictionary hack to simulate Python 3.x nonlocal
data = { '_': None }
def __callback( result ):
data[ '_' ] = self.__rt._assignPrototypes( result,
self.__members[ memberName ][ 'type' ] )
args = { 'memberName': memberName, 'sliceIndex': sliceIndex }
self._nObjQueueCommand( 'getData', args, None, __callback )
self._dg._executeQueuedCommands()
return data[ '_' ]
def getDataJSON( self, memberName, sliceIndex = None ):
if sliceIndex is None:
sliceIndex = 0
# dictionary hack to simulate Python 3.x nonlocal
data = { '_': None }
def __callback( result ):
data[ '_' ] = result
args = { 'memberName': memberName, 'sliceIndex': sliceIndex }
self._nObjQueueCommand( 'getDataJSON', args, None, __callback )
self._dg._executeQueuedCommands()
return data[ '_' ]
def getDataSize( self, memberName, sliceIndex ):
# dictionary hack to simulate Python 3.x nonlocal
data = { '_': None }
def __callback( result ):
data[ '_' ] = result
args = { 'memberName': memberName, 'sliceIndex': sliceIndex }
self._nObjQueueCommand( 'getDataSize', args, None, __callback )
self._dg._executeQueuedCommands()
return data[ '_' ]
def getDataElement( self, memberName, sliceIndex, elementIndex ):
# dictionary hack to simulate Python 3.x nonlocal
data = { '_': None }
def __callback( result ):
data[ '_' ] = self.__rt._assignPrototypes(
result,
# remove the braces since we are getting a single element
self.__members[ memberName ][ 'type' ][0:-2]
)
args = {
'memberName': memberName,
'sliceIndex': sliceIndex,
'elementIndex': elementIndex
}
self._nObjQueueCommand( 'getDataElement', args, None, __callback )
self._dg._executeQueuedCommands()
return data[ '_' ]
def setData( self, memberName, sliceIndex, data = None ):
if data is None:
data = sliceIndex
sliceIndex = 0
args = {
'memberName': memberName,
'sliceIndex': sliceIndex,
'data': _typeToDict( data )
}
self._nObjQueueCommand( 'setData', args )
def getBulkData( self ):
# dictionary hack to simulate Python 3.x nonlocal
data = { '_': None }
def __callback( result ):
for memberName in result:
member = result[ memberName ]
for i in range( 0, len( member ) ):
# FIXME this is incorrect, ignoring return value
self.__rt._assignPrototypes(
member[ i ],
self.__members[ memberName ][ 'type' ]
)
data[ '_' ] = result
self._nObjQueueCommand( 'getBulkData', None, None, __callback )
self._dg._executeQueuedCommands()
return data[ '_' ]
def setBulkData( self, data ):
self._nObjQueueCommand( 'setBulkData', _typeToDict( data ) )
def getSliceBulkData( self, index ):
if type( index ) is not int:
raise Exception( 'index: must be an integer' )
return self.getSlicesBulkData( [ index ] )[ 0 ]
def getSlicesBulkData( self, indices ):
# dictionary hack to simulate Python 3.x nonlocal
data = { '_': None }
def __callback( result ):
obj = []
for i in range( 0, len( result ) ):
sliceobj = {}
obj.append( sliceobj )
for memberName in result[ i ]:
sliceobj[ memberName ] = self.__rt._assignPrototypes(
result[ i ][ memberName ],
self.__members[ memberName ][ 'type' ]
)
data[ '_' ] = obj
self._nObjQueueCommand( 'getSlicesBulkData', indices, None, __callback )
self._dg._executeQueuedCommands()
return data[ '_' ]
def getMemberBulkData( self, member ):
if type( member ) is not str:
raise Exception( 'member: must be a string' )
return self.getMembersBulkData( [ member ] )[ member ]
def getMembersBulkData( self, members ):
# dictionary hack to simulate Python 3.x nonlocal
data = { '_': None }
def __callback( result ):
obj = {}
for member in result:
memberobj = []
obj[ member ] = memberobj
memberData = result[ member ]
for i in range( 0, len( memberData ) ):
memberobj.append( self.__rt._assignPrototypes(
memberData[ i ],
self.__members[ member ][ 'type' ]
)
)
data[ '_' ] = obj
self._nObjQueueCommand( 'getMembersBulkData', members, None, __callback )
self._dg._executeQueuedCommands()
return data[ '_' ]
def setSlicesBulkData( self, data ):
self._nObjQueueCommand( 'setSlicesBulkData', data )
def setSliceBulkData( self, sliceIndex, data ):
args = [ { 'sliceIndex': sliceIndex, 'data': data } ]
self._nObjQueueCommand( 'setSlicesBulkData', args )
def getBulkDataJSON( self ):
# dictionary hack to simulate Python 3.x nonlocal
data = { '_': None }
def __callback( result ):
data[ '_' ] = result
self._nObjQueueCommand( 'getBulkDataJSON', None, None, __callback )
self._dg._executeQueuedCommands()
return data[ '_' ]
def setBulkDataJSON( self, data ):
self._nObjQueueCommand( 'setBulkDataJSON', data )
def putResourceToFile( self, fileHandle, memberName ):
args = {
'memberName': memberName,
'file': fileHandle
}
self._nObjQueueCommand( 'putResourceToFile', args )
self._dg._executeQueuedCommands()
class _NODE( _CONTAINER ):
def __init__( self, dg, name ):
super( _DG._NODE, self ).__init__( dg, name )
self.__dependencies = {}
self.__evaluateAsyncFinishedSerial = 0
self.__evaluateAsyncFinishedCallbacks = {}
self.bindings = self._dg._createBindingList( [ name, 'bindings' ] )
def _patch( self, diff ):
super( _DG._NODE, self )._patch( diff )
if 'dependencies' in diff:
self.__dependencies = {}
for dependencyName in diff[ 'dependencies' ]:
dependencyNodeName = diff[ 'dependencies' ][ dependencyName ]
self.__dependencies[ dependencyName ] = self._dg._namedObjects[ dependencyNodeName ]
if 'bindings' in diff:
self.bindings._patch( diff[ 'bindings' ] )
def _route( self, src, cmd, arg ):
if len( src ) == 1 and src[ 0 ] == 'bindings':
src = collections.deque( src )
src.popleft()
self.bindings._route( src, cmd, arg )
elif cmd == 'evaluateAsyncFinished':
callback = self.__evaluateAsyncFinishedCallbacks[ arg ]
del self.__evaluateAsyncFinishedCallbacks[ arg ]
callback()
else:
super( _DG._NODE, self )._route( src, cmd, arg )
def getType( self ):
return 'Node'
def __checkDependencyName( self, dependencyName ):
try:
if type( dependencyName ) != str:
raise Exception( 'must be a string' )
elif dependencyName == '':
raise Exception( 'must not be empty' )
elif dependencyName == 'self':
raise Exception( 'must not be "self"' )
except Exception as e:
raise Exception( 'dependencyName: ' + str( e ) )
def setDependency( self, dependencyNode, dependencyName ):
self.__checkDependencyName( dependencyName )
oldDependency = None
if dependencyName in self.__dependencies:
oldDependency = self.__dependencies[ dependencyName ]
self.__dependencies[ dependencyName ] = dependencyNode
args = { 'name': dependencyName, 'node': dependencyNode.getName() }
def __unwind():
if ( oldDependency is not None ):
self.__dependencies[ dependencyName ] = oldDependency
else:
del self.__dependencies[ dependencyName ]
self._nObjQueueCommand( 'setDependency', args, __unwind )
def getDependencies( self ):
return self.__dependencies
def getDependency( self, name ):
if name not in self.__dependencies:
raise Exception( 'no dependency named "' + name + '"' )
return self.__dependencies[ name ]
def removeDependency( self, dependencyName ):
self.__checkDependencyName( dependencyName )
oldDependency = None
if dependencyName in self.__dependencies:
oldDependency = self.__dependencies[ dependencyName ]
del self.__dependencies[ dependencyName ]
def __unwind():
if ( oldDependency is not None ):
self.__dependencies[ dependencyName ] = oldDependency
else:
del self.__dependencies[ dependencyName ]
self._nObjQueueCommand( 'removeDependency', dependencyName, __unwind )
def evaluate( self ):
self._nObjQueueCommand( 'evaluate' )
self._dg._executeQueuedCommands()
def evaluateAsync( self, callback ):
serial = self.__evaluateAsyncFinishedSerial
self.__evaluateAsyncFinishedSerial = self.__evaluateAsyncFinishedSerial + 1
self.__evaluateAsyncFinishedCallbacks[ serial ] = callback
self._nObjQueueCommand( 'evaluateAsync', serial )
self._dg._executeQueuedCommands()
class _RESOURCELOADNODE( _NODE ):
def __init__( self, dg, name ):
super( _DG._RESOURCELOADNODE, self ).__init__( dg, name )
self.__onloadSuccessCallbacks = []
self.__onloadProgressCallbacks = []
self.__onloadFailureCallbacks = []
def _handle( self, cmd, arg ):
if cmd == 'resourceLoadSuccess':
for i in range( 0, len( self.__onloadSuccessCallbacks ) ):
self.__onloadSuccessCallbacks[ i ]( self )
elif cmd == 'resourceLoadProgress':
for i in range( 0, len( self.__onloadProgressCallbacks ) ):
self.__onloadProgressCallbacks[ i ]( self, arg )
elif cmd == 'resourceLoadFailure':
for i in range( 0, len( self.__onloadFailureCallbacks ) ):
self.__onloadFailureCallbacks[ i ]( self )
else:
super( _DG._RESOURCELOADNODE, self )._handle( cmd, arg )
def addOnLoadSuccessCallback( self, callback ):
self.__onloadSuccessCallbacks.append( callback )
def addOnLoadProgressCallback( self, callback ):
self.__onloadProgressCallbacks.append( callback )
def addOnLoadFailureCallback( self, callback ):
self.__onloadFailureCallbacks.append( callback )
class _EVENT( _CONTAINER ):
def __init__( self, dg, name ):
super( _DG._EVENT, self ).__init__( dg, name )
self.__eventHandlers = None
self.__typeName = None
self.__rt = dg._getClient().rt
def _patch( self, diff ):
super( _DG._EVENT, self )._patch( diff )
self.__eventHandlers = None
if 'eventHandlers' in diff:
self.__eventHandlers = []
for name in diff[ 'eventHandlers' ]:
self.__eventHandlers.append( self._dg._namedObjects[ name ] )
def getType( self ):
return 'Event'
def appendEventHandler( self, eventHandler ):
self.__eventHandlers = None
self._nObjQueueCommand( 'appendEventHandler', eventHandler.getName() )
def getEventHandlers( self ):
if self.__eventHandlers is None:
self._dg._executeQueuedCommands()
return self.__eventHandlers
def fire( self ):
self._nObjQueueCommand( 'fire' )
self._dg._executeQueuedCommands()
def setSelectType( self, tn ):
self._nObjQueueCommand( 'setSelectType', tn )
self._dg._executeQueuedCommands()
self.__typeName = tn
def select( self ):
data = []
def __callback( results ):
for i in range( 0, len( results ) ):
result = results[ i ]
data.append( {
'node': self._dg._namedObjects[ result[ 'node' ] ],
'value': self.__rt._assignPrototypes( result[ 'data' ], self.__typeName )
})
self._nObjQueueCommand( 'select', self.__typeName, None, __callback )
self._dg._executeQueuedCommands()
return data
class _EVENTHANDLER( _CONTAINER ):
def __init__( self, dg, name ):
super( _DG._EVENTHANDLER, self ).__init__( dg, name )
self.__scopes = {}
self.__bindingName = None
self.__childEventHandlers = None
self.preDescendBindings = self._dg._createBindingList( [ name, 'preDescendBindings' ] )
self.postDescendBindings = self._dg._createBindingList( [ name, 'postDescendBindings' ] )
def _patch( self, diff ):
super( _DG._EVENTHANDLER, self )._patch( diff )
if 'bindingName' in diff:
self.__bindingName = diff[ 'bindingName' ]
if 'childEventHandlers' in diff:
self.__childEventHandlers = []
for name in diff[ 'childEventHandlers' ]:
self.__childEventHandlers.append( self._dg._namedObjects[ name ] )
if 'scopes' in diff:
self.__scopes = {}
for name in diff[ 'scopes' ]:
nodeName = diff[ 'scopes' ][ name ]
self.__scopes[ name ] = self._dg._namedObjects[ nodeName ]
if 'preDescendBindings' in diff:
self.preDescendBindings._patch( diff[ 'preDescendBindings' ] )
if 'postDescendBindings' in diff:
self.postDescendBindings._patch( diff[ 'postDescendBindings' ] )
def _route( self, src, cmd, arg ):
if len( src ) == 1 and src[ 0 ] == 'preDescendBindings':
src = collections.deque( src )
src.popleft()
self.preDescendBindings._route( src, cmd, arg )
elif len( src ) == 1 and src[ 0 ] == 'postDescendBindings':
src = collections.deque( src )
src.popleft()
self.postDescendBindings._route( src, cmd, arg )
else:
super( _DG._EVENTHANDLER, self )._route( src, cmd, arg )
def getType( self ):
return 'EventHandler'
def getScopeName( self ):
return self.__bindingName
def setScopeName( self, bindingName ):
oldBindingName = self.__bindingName
def __unwind():
self.__bindingName = oldBindingName
self._nObjQueueCommand( 'setScopeName', bindingName, __unwind )
def appendChildEventHandler( self, childEventHandler ):
oldChildEventHandlers = self.__childEventHandlers
self.__childEventHandlers = None
def __unwind():
self.__childEventHandlers = oldChildEventHandlers
self._nObjQueueCommand( 'appendChildEventHandler', childEventHandler.getName(), __unwind )
def removeChildEventHandler( self, childEventHandler ):
oldChildEventHandlers = self.__childEventHandlers
self.__childEventHandlers = None
def __unwind():
self.__childEventHandlers = oldChildEventHandlers
self._nObjQueueCommand( 'removeChildEventHandler', childEventHandler.getName(), __unwind )
def getChildEventHandlers( self ):
if self.__childEventHandlers is None:
self._dg._executeQueuedCommands()
return self.__childEventHandlers
def __checkScopeName( self, name ):
try:
if type( name ) != str:
raise Exception( 'must be a string' )
elif name == '':
raise Exception( 'must not be empty' )
except Exception as e:
raise Exception( 'name: ' + str( e ) )
def setScope( self, name, node ):
self.__checkScopeName( name )
oldNode = None
if name in self.__scopes:
oldNode = self.__scopes[ name ]
self.__scopes[ name ] = node
def __unwind():
if oldNode is not None:
self.__scopes[ name ] = oldNode
else:
del self.__scopes[ name ]
args = { 'name': name, 'node': node.getName() }
self._nObjQueueCommand( 'setScope', args, __unwind )
def removeScope( self, name ):
self.__checkScopeName( name )
oldNode = None
if name in self.__scopes:
oldNode = self.__scopes[ name ]
del self.__scopes[ name ]
def __unwind():
if oldNode is not None:
self.__scopes[ name ] = oldNode
self._nObjQueueCommand( 'removeScope', name, __unwind )
def getScopes( self ):
return self.__scopes
def setSelector( self, targetName, binding ):
operatorName = None
try:
operatorName = binding.getOperator().getName()
except Exception:
raise Exception( 'operator: not an operator' )
args = {
'targetName': targetName,
'operator': operatorName,
'parameterLayout': binding.getParameterLayout()
}
self._nObjQueueCommand( 'setSelector', args )
class _MR( _NAMESPACE ):
def __init__( self, client ):
super( _MR, self ).__init__( client, 'MR' )
def createConstArray( self, elementType, data = None ):
valueArray = self._ARRAYPRODUCER( self )
arg = { 'id': valueArray.getID() }
if type( elementType ) is str:
arg[ 'elementType' ] = elementType
arg[ 'data' ] = data
elif type ( elementType ) is dict:
inputArg = elementType
arg[ 'elementType' ] = inputArg[ 'elementType' ]
if 'data' in inputArg:
arg[ 'data' ] = inputArg[ 'data' ]
if 'jsonData' in inputArg:
arg[ 'jsonData' ] = inputArg[ 'jsonData' ]
else:
raise Exception( "createConstArray: first argument must be str or dict" )
self._queueCommand( 'createConstArray', arg, valueArray.unwind )
return valueArray
def createConstValue( self, valueType, data ):
value = self._VALUEPRODUCER( self )
arg = {
'id': value.getID(),
'valueType': valueType,
'data': data
}
self._queueCommand( 'createConstValue', arg, value.unwind )
return value
def createValueCache( self, input ):
return self.__createMRCommand( self._VALUEPRODUCER( self ), 'createValueCache', input, None, None )
def createValueGenerator( self, operator ):
return self.__createMRCommand( self._VALUEPRODUCER( self ), 'createValueGenerator', None, operator, None )
def createValueMap( self, input, operator, shared = None ):
return self.__createMRCommand( self._VALUEPRODUCER( self ), 'createValueMap', input, operator, shared )
def createValueTransform( self, input, operator, shared = None ):
return self.__createMRCommand( self._VALUEPRODUCER( self ), 'createValueTransform', input, operator, shared )
def createArrayCache( self, input ):
return self.__createMRCommand( self._ARRAYPRODUCER( self ), 'createArrayCache', input, None, None )
def createArrayGenerator( self, count, operator, shared = None ):
obj = self._ARRAYPRODUCER( self )
arg = {
'id': obj.getID(),
'countID': count.getID(),
'operatorID': operator.getID()
}
if ( shared is not None ):
arg[ 'sharedID' ] = shared.getID()
self._queueCommand( 'createArrayGenerator', arg, obj.unwind )
return obj
def createArrayMap( self, input, operator, shared = None ):
return self.__createMRCommand( self._ARRAYPRODUCER( self ), 'createArrayMap', input, operator, shared )
def createArrayTransform( self, input, operator, shared = None ):
return self.__createMRCommand( self._ARRAYPRODUCER( self ), 'createArrayTransform', input, operator, shared )
def createReduce( self, inputArrayProducer, reduceOperator, sharedValueProducer = None ):
reduce = self._VALUEPRODUCER( self )
arg = {
'id': reduce.getID(),
'inputID': inputArrayProducer.getID(),
'operatorID': reduceOperator.getID()
}
if ( sharedValueProducer is not None ):
arg[ 'sharedID' ] = sharedValueProducer.getID()
self._queueCommand( 'createReduce', arg, reduce.unwind )
return reduce
def __createMRCommand( self, obj, cmd, input, operator, shared ):
arg = {
'id': obj.getID()
}
if ( input is not None ):
arg[ 'inputID' ] = input.getID()
if ( operator is not None ):
arg[ 'operatorID' ] = operator.getID()
if ( shared is not None ):
arg[ 'sharedID' ] = shared.getID()
self._queueCommand( cmd, arg, obj.unwind )
return obj
class _PRODUCER( _GCOBJECT ):
def __init__( self, mr ):
super( _MR._PRODUCER, self ).__init__( mr )
def toJSON( self ):
# dictionary hack to simulate Python 3.x nonlocal
json = { '_': None }
def __toJSON( result ):
json[ '_' ] = result
self._gcObjQueueCommand( 'toJSON', None, None, __toJSON )
return json[ '_' ]
class _ARRAYPRODUCER( _PRODUCER ):
def __init__( self, mr ):
super( _MR._ARRAYPRODUCER, self ).__init__( mr )
def getCount( self ):
# dictionary hack to simulate Python 3.x nonlocal
count = { '_': None }
def __getCount( result ):
count[ '_' ] = result
self._gcObjQueueCommand( 'getCount', None, None, __getCount )
self._nsobj._executeQueuedCommands()
return count[ '_' ]
def produce( self, index = None, count = None ):
arg = { }
if ( index is not None ):
if ( count is not None ):
arg[ 'count' ] = count
arg[ 'index' ] = index
# dictionary hack to simulate Python 3.x nonlocal
result = { '_': None }
def __produce( data ):
result[ '_' ] = data
self._gcObjQueueCommand( 'produce', arg, None, __produce )
self._nsobj._executeQueuedCommands()
return result[ '_' ]
def flush( self ):
self._gcObjQueueCommand( 'flush' )
def produceAsync( self, arg1, arg2 = None, arg3 = None ):
arg = { }
callback = None
if arg3 is None and arg2 is None:
callback = arg1
elif arg3 is None:
arg[ 'index' ] = arg1
callback = arg2
else:
arg[ 'index' ] = arg1
arg[ 'count' ] = arg2
callback = arg3
arg[ 'serial' ] = self._registerCallback( callback )
self._gcObjQueueCommand( 'produceAsync', arg )
self._nsobj._executeQueuedCommands()
class _VALUEPRODUCER( _PRODUCER ):
def __init__( self, client ):
super( _MR._VALUEPRODUCER, self ).__init__( client )
def produce( self ):
# dictionary hack to simulate Python 3.x nonlocal
result = { '_': None }
def __produce( data ):
result[ '_' ] = data
self._gcObjQueueCommand( 'produce', None, None, __produce )
self._nsobj._executeQueuedCommands()
return result[ '_' ]
def produceAsync( self, callback ):
self._gcObjQueueCommand( 'produceAsync', self._registerCallback( callback ) )
self._nsobj._executeQueuedCommands()
def flush( self ):
self._gcObjQueueCommand( 'flush' )
class _KLC( _NAMESPACE ):
def __init__( self, client ):
super( _KLC, self ).__init__( client, 'KLC' )
def createCompilation( self, sourceName = None, sourceCode = None ):
obj = self._COMPILATION( self )
arg = { 'id': obj.getID() }
if sourceName is not None:
arg[ 'sourceName' ] = sourceName
if sourceCode is not None:
arg[ 'sourceCode' ] = sourceCode
self._queueCommand( 'createCompilation', arg, obj.unwind )
return obj
def _createExecutable( self ):
return self._EXECUTABLE( self )
def createExecutable( self, sourceName, sourceCode ):
obj = self._createExecutable()
arg = {
'id': obj.getID(),
'sourceName': sourceName,
'sourceCode': sourceCode
}
self._queueCommand( 'createExecutable', arg, obj.unwind )
return obj
def _createOperatorOnly( self ):
operator = self._OPERATOR( self )
return operator
def _createOperator( self, operatorName, cmd, sourceName = None, sourceCode = None ):
operator = self._createOperatorOnly()
arg = {
'id': operator.getID(),
'operatorName': operatorName,
'sourceName': sourceName,
'sourceCode': sourceCode
}
self._queueCommand( cmd, arg, operator.unwind )
return operator
def createReduceOperator( self, sourceName, sourceCode, operatorName ):
return self._createOperator( operatorName, 'createReduceOperator', sourceName, sourceCode )
def createValueGeneratorOperator( self, sourceName, sourceCode, operatorName ):
return self._createOperator( operatorName, 'createValueGeneratorOperator', sourceName, sourceCode )
def createValueMapOperator( self, sourceName, sourceCode, operatorName ):
return self._createOperator( operatorName, 'createValueMapOperator', sourceName, sourceCode )
def createValueTransformOperator( self, sourceName, sourceCode, operatorName ):
return self._createOperator( operatorName, 'createValueTransformOperator', sourceName, sourceCode )
def createArrayGeneratorOperator( self, sourceName, sourceCode, operatorName ):
return self._createOperator( operatorName, 'createArrayGeneratorOperator', sourceName, sourceCode )
def createArrayMapOperator( self, sourceName, sourceCode, operatorName ):
return self._createOperator( operatorName, 'createArrayMapOperator', sourceName, sourceCode )
def createArrayTransformOperator( self, sourceName, sourceCode, operatorName ):
return self._createOperator( operatorName, 'createArrayTransformOperator', sourceName, sourceCode )
class _OPERATOR( _GCOBJECT ):
def __init__( self, klc ):
super( _KLC._OPERATOR, self ).__init__( klc )
def toJSON( self ):
return self._synchronousGetOnly( 'toJSON' )
def getDiagnostics( self ):
return self._synchronousGetOnly( 'getDiagnostics' )
class _EXECUTABLE( _GCOBJECT ):
def __init__( self, klc ):
super( _KLC._EXECUTABLE, self ).__init__( klc )
def __resolveOperator( self, operatorName, cmd ):
operator = self._nsobj._createOperatorOnly()
arg = {
'id': operator.getID(),
'operatorName': operatorName
}
self._gcObjQueueCommand( cmd, arg, operator.unwind )
return operator
def getAST( self ):
return self._synchronousGetOnly( 'getAST' )
def getDiagnostics( self ):
return self._synchronousGetOnly( 'getDiagnostics' )
def resolveReduceOperator( self, operatorName ):
return self.__resolveOperator( operatorName, 'resolveReduceOperator' )
def resolveValueGeneratorOperator( self, operatorName ):
return self.__resolveOperator( operatorName, 'resolveValueGeneratorOperator' )
def resolveValueMapOperator( self, operatorName ):
return self.__resolveOperator( operatorName, 'resolveValueMapOperator' )
def resolveValueTransformOperator( self, operatorName ):
return self.__resolveOperator( operatorName, 'resolveValueTransformOperator' )
def resolveArrayGeneratorOperator( self, operatorName ):
return self.__resolveOperator( operatorName, 'resolveArrayGeneratorOperator' )
def resolveArrayMapOperator( self, operatorName ):
return self.__resolveOperator( operatorName, 'resolveArrayMapOperator' )
def resolveArrayTransformOperator( self, operatorName ):
return self.__resolveOperator( operatorName, 'resolveArrayTransformOperator' )
class _COMPILATION( _GCOBJECT ):
def __init__( self, klc ):
super( _KLC._COMPILATION, self ).__init__( klc )
self.__sourceCodes = {}
def addSource( self, sourceName, sourceCode ):
oldSourceCode = None
if sourceName in self.__sourceCodes:
oldSourceCode = self.__sourceCodes[ sourceName ]
self.__sourceCodes[ sourceName ] = sourceCode
def __unwind():
if oldSourceCode is not None:
self.__sourceCodes[ sourceName ] = oldSourceCode
else:
del self.__sourceCodes[ sourceName ]
args = { 'sourceName': sourceName, 'sourceCode': sourceCode }
self._gcObjQueueCommand( 'addSource', args, __unwind )
def removeSource( self, sourceName ):
oldSourceCode = None
if sourceName in self.__sourceCodes:
oldSourceCode = self.__sourceCodes[ sourceName ]
del self.__sourceCodes[ sourceName ]
def __unwind():
if oldSourceCode is not None:
self.__sourceCodes[ sourceName ] = oldSourceCode
args = { 'sourceName': sourceName }
self._gcObjQueueCommand( 'removeSource', args, __unwind )
def getSources( self ):
# dictionary hack to simulate Python 3.x nonlocal
data = { '_': None }
def __callback( result ):
data[ '_' ] = result
self._gcObjQueueCommand( 'getSources', None, None, __callback )
self._nsobj._executeQueuedCommands()
return data[ '_' ]
def run( self ):
executable = self._nsobj._createExecutable()
args = { 'id': executable.getID() }
self._gcObjQueueCommand( 'run', args, executable.unwind )
return executable
class _RT( _NAMESPACE ):
def __init__( self, client ):
super( _RT, self ).__init__( client, 'RT' )
self.__prototypes = {}
self.__registeredTypes = {}
def _assignPrototypes( self, data, typeName ):
if typeName[-2:] == '[]':
obj = []
typeName = typeName[0:-2]
for i in range( 0, len( data ) ):
obj.append( self._assignPrototypes( data[ i ], typeName ) )
return obj
elif typeName in self.__prototypes:
obj = self.__prototypes[ typeName ]()
if 'members' in self.__registeredTypes[ typeName ]:
members = self.__registeredTypes[ typeName ][ 'members' ]
for i in range( 0, len( members ) ):
member = members[ i ]
setattr( obj, member[ 'name' ],
self._assignPrototypes( data[ member[ 'name' ] ], member[ 'type' ] )
)
return obj
else:
return data
def getRegisteredTypes( self ):
self._executeQueuedCommands()
return self.__registeredTypes
def registerType( self, name, desc ):
if type( desc ) is not dict:
raise Exception( 'RT.registerType: second parameter: must be an object' )
if 'members' not in desc:
raise Exception( 'RT.registerType: second parameter: missing members element' )
if type( desc[ 'members' ] ) is not list:
raise Exception( 'RT.registerType: second parameter: invalid members element' )
members = []
for i in range( 0, len( desc[ 'members' ] ) ):
member = desc[ 'members' ][ i ]
memberName, memberType = member.popitem()
if len( member ) > 0:
raise Exception( 'improperly formatted member' )
member = {
'name': memberName,
'type': memberType
}
members.append( member )
constructor = None
if 'constructor' in desc:
constructor = desc[ 'constructor' ]
else:
class _Empty:
pass
constructor = _Empty
defaultValue = constructor()
self.__prototypes[ name ] = constructor
arg = {
'name': name,
'members': members,
'defaultValue': _typeToDict( defaultValue )
}
if ( 'klBindings' in desc ):
arg[ 'klBindings' ] = desc[ 'klBindings' ]
def __unwind():
del self.__prototypes[ name ]
self._queueCommand( 'registerType', arg, __unwind )
def _patch( self, diff ):
if 'registeredTypes' in diff:
self.__registeredTypes = {}
for typeName in diff[ 'registeredTypes' ]:
self.__registeredTypes[ typeName ] = diff[ 'registeredTypes' ][ typeName ]
def _handleStateNotification( self, state ):
self.__prototypes = {}
self._patch( state )
def _handle( self, cmd, arg ):
if cmd == 'delta':
self._patch( arg )
else:
raise Exception( 'command "' + cmd + '": unrecognized' )
def _route( self, src, cmd, arg ):
if len( src ) == 0:
self._handle( cmd, arg )
elif len( src ) == 1:
typeName = src[ 0 ]
try:
if cmd == 'delta':
self.__registeredTypes[ typeName ] = arg
self.__registeredTypes[ typeName ][ 'defaultValue' ] = self._assignPrototypes(
self.__registeredTypes[ typeName ][ 'defaultValue' ],
typeName
)
else:
raise Exception( 'unrecognized' )
except Exception as e:
raise Exception( '"' + cmd + '": ' + str( e ) )
else:
raise Exception( '"' + str( src ) + '": unroutable ' )
class _GC( _NAMESPACE ):
def __init__( self, client ):
super( _GC, self ).__init__( client, 'GC' )
self.__objects = {}
def addObject( self, obj ):
self.__objects[ obj.getID() ] = obj
def disposeObject( self, obj ):
del self.__objects[ obj.getID() ]
def _route( self, src, cmd, arg ):
src = collections.deque( src )
id = src.popleft()
obj = self.__objects[ id ]
obj._route( src, cmd, arg )
class _EX( _NAMESPACE ):
def __init__( self, client ):
super( _EX, self ).__init__( client, 'EX' )
self.__loadedExts = {}
def _patch( self, diff ):
for name in diff:
if diff[ name ]:
self.__loadedExts[ name ] = diff[ name ]
elif name in self.__loadedExts:
del self.__loadedExts[ name ]
def _handleStateNotification( self, state ):
self.__loadedExts = {}
self._patch( state )
def _handle( self, cmd, arg ):
if cmd == 'delta':
self._patch( arg )
else:
raise Exception( 'command "' + cmd + '": unrecognized' )
def _route( self, src, cmd, arg ):
if len( src ) > 0:
self._handle( cmd, arg )
else:
raise Exception( 'unroutable' )
def getLoadedExts( self ):
return self.__loadedExts
class _IO( _NAMESPACE ):
def __init__( self, client ):
super( _IO, self ).__init__( client, 'IO' )
self.forOpen = 'openMode'
self.forOpenWithWriteAccess = 'openWithWriteAccessMode'
self.forSave = 'saveMode'
def __queryUserFile( self, funcname, mode, uiTitle, extension, defaultFileName ):
if mode != self.forOpen and mode != self.forOpenWithWriteAccess and mode != self.forSave:
raise Exception( 'Invalid mode: "' + mode + '": can be IO.forOpen, IO.forOpenWithWriteAccess or IO.forSave' )
# dictionary hack to simulate Python 3.x nonlocal
data = { '_': None }
def __callback( result ):
data[ '_' ] = result
args = {
'existingFile': mode == self.forOpenWithWriteAccess or mode == self.forOpen,
'writeAccess': mode == self.forOpenWithWriteAccess or mode == self.forSave,
'uiOptions': {
'title': uiTitle,
'extension': extension,
'defaultFileName': defaultFileName
}
}
self._queueCommand( funcname, args, None, __callback )
self._executeQueuedCommands()
return data[ '_' ]
def queryUserFileAndFolderHandle( self, mode, uiTitle, extension, defaultFileName ):
return self.__queryUserFile( 'queryUserFileAndFolder', mode, uiTitle, extension, defaultFileName )
def queryUserFileHandle( self, mode, uiTitle, extension, defaultFileName ):
return self.__queryUserFile( 'queryUserFile', mode, uiTitle, extension, defaultFileName )
def getTextFileContent( self, handle ):
# dictionary hack to simulate Python 3.x nonlocal
data = { '_': None }
def __callback( result ):
data[ '_' ] = result
self._queueCommand( 'getTextFileContent', handle, None, __callback )
self._executeQueuedCommands()
return data[ '_' ]
def putTextFileContent( self, handle, content, append = None ):
args = {
'content': content,
'file': handle,
'append': False if append is None else append
}
self._queueCommand( 'putTextFileContent', args )
self._executeQueuedCommands()
def buildFileHandleFromRelativePath( self, handle ):
# dictionary hack to simulate Python 3.x nonlocal
data = { '_': None }
def __callback( result ):
data[ '_' ] = result
self._queueCommand( 'createFileHandleFromRelativePath', handle, None, __callback )
self._executeQueuedCommands()
return data[ '_' ]
def buildFolderHandleFromRelativePath( self, handle ):
# dictionary hack to simulate Python 3.x nonlocal
data = { '_': None }
def __callback( result ):
data[ '_' ] = result
self._queueCommand( 'createFolderHandleFromRelativePath', handle, None, __callback )
self._executeQueuedCommands()
return data[ '_' ]
def getFileHandleInfo( self, handle ):
# dictionary hack to simulate Python 3.x nonlocal
data = { '_': None }
def __callback( result ):
data[ '_' ] = result
self._queueCommand( 'getFileInfo', handle, None, __callback )
self._executeQueuedCommands()
return data[ '_' ]
class _BUILD( _NAMESPACE ):
def __init__( self, client ):
super( _BUILD, self ).__init__( client, 'build' )
self.__build = {}
def _patch( self, diff ):
for name in diff:
self.__build[ name ] = diff[ name ]
def _handleStateNotification( self, state ):
self._patch( state )
def _handle( self, cmd, arg ):
if cmd == 'delta':
self._patch( arg )
else:
raise Exception( 'command "' + cmd + '": unrecognized' )
def _route( self, src, cmd, arg ):
if len( src ) == 0:
self._handle( cmd, arg )
else:
raise Exception( 'unroutable' )
def isExpired( self ):
return self.__build[ 'isExpired' ]
def getName( self ):
return self.__build[ 'name' ]
def getPureVersion( self ):
return self.__build[ 'pureVersion' ]
def getFullVersion( self ):
return self.__build[ 'fullVersion' ]
def getDesc( self ):
return self.__build[ 'desc' ]
def getCopyright( self ):
return self.__build[ 'copyright' ]
def getURL( self ):
return self.__build[ 'url' ]
def getOS( self ):
return self.__build[ 'os' ]
def getArch( self ):
return self.__build[ 'arch' ]
| agpl-3.0 | 9,016,573,328,412,113,000 | 30.847685 | 128 | 0.61597 | false |
dankilman/clash | clash/tests/commands/test_user_commands.py | 1 | 4763 | ########
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############
import os
import sys
import json
from clash import tests
class TestUserCommands(tests.BaseTest):
def test_basic_command(self):
self._test_command(verbose=False, command=['command1'])
def test_verbose_basic_command(self):
self._test_command(verbose=True, command=['command1'])
def test_nested_command(self):
self._test_command(verbose=False, command=['nested', 'command2'])
def _test_command(self, verbose, command):
config_path = 'end_to_end.yaml'
output_path = self.workdir / 'output.json'
self.dispatch(config_path, 'env', 'create')
self.dispatch(config_path, 'init')
args = [config_path] + command + ['arg1_value']
output = self.dispatch(*args, a='arg3_value', output_path=output_path,
verbose=verbose).stdout
self.assertEqual(json.loads(output_path.text()), {
'param1': 'arg1_value',
'param2': 'arg2_default',
'param3': 'arg3_value'
})
self.assertIn('from workflow1', output)
assertion = self.assertIn if verbose else self.assertNotIn
assertion("Starting 'workflow1'", output)
def test_task_config_default(self):
config_path = 'task_config_default.yaml'
counts = (0, 1, 1)
self._test_task_config(config_path, counts)
def test_task_config_global(self):
config_path = 'task_config_global.yaml'
counts = (4, 4, 4)
self._test_task_config(config_path, counts)
def test_task_config_command(self):
config_path = 'task_config_command.yaml'
counts = (3, 3, 3)
self._test_task_config(config_path, counts)
def _test_task_config(self, config_path, counts):
output_path = self.workdir / 'output.json'
self.dispatch(config_path, 'env', 'create')
self.dispatch(config_path, 'init')
self.dispatch(config_path, 'command1', output_path)
self.assertEqual(json.loads(output_path.text()), {
'retries': counts[0],
'retry_interval': counts[1],
'thread_pool_size': counts[2]
})
def test_update_python_path(self):
config_path = 'pythonpath.yaml'
storage_dir_functions = self.workdir / 'storage_dir_functions'
storage_dir_functions.mkdir_p()
(storage_dir_functions / '__init__.py').touch()
script_path = storage_dir_functions / 'functions.py'
script_path.write_text('def func2(**_): return 2')
self.dispatch(config_path, 'env', 'create')
self.dispatch(config_path, 'init')
output = self.dispatch(config_path, 'command1').stdout
self.assertIn('all good', output)
self.assertIn('param1: 1, param2: 2', output)
def test_env_path(self):
config_path = 'envpath.yaml'
self.dispatch(config_path, 'env', 'create')
self.dispatch(config_path, 'init')
output = self.dispatch(config_path, 'command1').stdout
self.assertIn('all good', output)
self.assertIn(os.path.dirname(sys.executable), output)
def test_event_cls(self):
config_path = 'event_cls.yaml'
self.dispatch(config_path, 'env', 'create')
self.dispatch(config_path, 'init')
output1 = self.dispatch(config_path, 'command1').stdout
output2 = self.dispatch(config_path, 'command2').stdout
output3 = self.dispatch(config_path, 'command3',
verbose=True).stdout
self.assertIn('EVENT1', output1)
self.assertIn('EVENT2', output2)
self.assertIn('EVENT3 env: .local, verbose: True, '
'workflow: workflow4', output3)
def test_functions(self):
config_path = 'functions.yaml'
self.dispatch(config_path, 'env', 'create')
self.dispatch(config_path, 'init')
for command in [['command1'], ['nested', 'command2']]:
args = ['val1', '--arg2', 'val2']
command += args
output = self.dispatch(config_path, *command).stdout.strip()
self.assertEqual('val1 val2 functions', output)
| apache-2.0 | 8,647,986,952,816,754,000 | 38.691667 | 78 | 0.619358 | false |
whd/python_moztelemetry | tests/test_store.py | 1 | 3881 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at http://mozilla.org/MPL/2.0/.
import tempfile
import boto3
import pytest
from moztelemetry.store import S3Store, InMemoryStore
@pytest.mark.parametrize('store_class', [S3Store, InMemoryStore])
def test_empty_store(dummy_bucket, store_class):
store = store_class(dummy_bucket.name)
keys = store.list_keys('/')
assert list(keys) == []
folders = store.list_folders()
assert list(folders) == []
assert store.is_prefix_empty('/')
@pytest.mark.parametrize('store_class', [S3Store, InMemoryStore])
def test_upload_key(dummy_bucket, store_class):
store = store_class(dummy_bucket.name)
key = 'my-key'
value = 'my-value'
with tempfile.TemporaryFile() as f:
f.write(value)
f.seek(0)
store.upload_file(f, '', key)
assert store.get_key(key).read() == value
@pytest.mark.parametrize('store_class', [S3Store, InMemoryStore])
def test_list_keys(dummy_bucket, store_class):
store = store_class(dummy_bucket.name)
keys = ('dir1/key1', 'dir2/value2')
values = ('value1', 'value2')
for index, key in enumerate(keys):
with tempfile.TemporaryFile() as f:
f.write(values[index])
f.seek(0)
store.upload_file(f, '', key)
for index, item in enumerate(sorted(store.list_keys('dir1'))):
assert item['key'] == 'dir1/key1'
@pytest.mark.parametrize('store_class', [S3Store, InMemoryStore])
def test_list_folders(dummy_bucket, store_class):
store = store_class(dummy_bucket.name)
keys = ('dir1/subdir1/key1', 'dir2/another-dir/key2')
values = ('value1', 'value2')
for index, key in enumerate(keys):
with tempfile.TemporaryFile() as f:
f.write(values[index])
f.seek(0)
store.upload_file(f, '', key)
expected = ('dir1/subdir1/',)
for index, item in enumerate(sorted(store.list_folders(prefix='dir1/'))):
assert item == expected[index]
@pytest.mark.parametrize('store_class', [S3Store, InMemoryStore])
def test_get_key(dummy_bucket, store_class):
store = store_class(dummy_bucket.name)
key = 'key1'
value = 'value1'
with tempfile.TemporaryFile() as f:
f.write(value)
f.seek(0)
store.upload_file(f, '', key)
assert store.get_key(key).read() == value
@pytest.mark.parametrize('store_class', [S3Store, InMemoryStore])
def test_get_non_existing_key(dummy_bucket, store_class):
store = store_class(dummy_bucket.name)
with pytest.raises(Exception) as exc_info:
store.get_key('random-key')
assert str(exc_info.value) == 'Error retrieving key "random-key" from S3'
@pytest.mark.parametrize('store_class', [S3Store, InMemoryStore])
def test_delete_key(dummy_bucket, store_class):
store = store_class(dummy_bucket.name)
key = 'key1'
value = 'value1'
with tempfile.TemporaryFile() as f:
f.write(value)
f.seek(0)
store.upload_file(f, '', key)
store.delete_key(key)
bucket = boto3.resource('s3').Bucket(store.bucket_name)
assert len(list(bucket.objects.all())) == 0
@pytest.mark.parametrize('store_class', [S3Store, InMemoryStore])
def test_delete_non_existing_key(dummy_bucket, store_class):
store = store_class(dummy_bucket.name)
store.delete_key('random-key-2')
@pytest.mark.parametrize('store_class', [S3Store, InMemoryStore])
def test_is_prefix_empty(dummy_bucket, store_class):
store = store_class(dummy_bucket.name)
key = 'dir1/key1'
value = 'value1'
with tempfile.TemporaryFile() as f:
f.write(value)
f.seek(0)
store.upload_file(f, '', key)
assert not store.is_prefix_empty('dir1/')
assert store.is_prefix_empty('random-dir/')
| mpl-2.0 | -8,470,257,807,440,689,000 | 29.085271 | 77 | 0.65344 | false |
arista-eosplus/pyeapi | pyeapi/client.py | 1 | 35204 | #
# Copyright (c) 2014, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""Python Client for eAPI
This module provides the client for eAPI. It provides the primary functions
for building applications that work with Arista EOS eAPI-enabled nodes. The
first function is to provide a client for sending and receiving eAPI
request and response objects on a per node basis. The second function
provides a library for building API enabled data models for configuring
EOS nodes.
This library allows for creating connections to EOS eAPI enabled nodes using
the connect or connect_to function. Both functions will return an instance
of a Node object that can be used to send and receive eAPI commands. The
Node object can autoload API modules for a structured object oriented
approach to configuring the EOS node with native Python objects.
Example:
>>> import pyeapi
>>> conn = pyeapi.connect(host='10.1.1.1', transport='http')
>>> conn.execute(['show version'])
{u'jsonrpc': u'2.0', u'result': [{u'memTotal': 2028008, u'version':
u'4.14.5F', u'internalVersion': u'4.14.5F-2209869.4145F', u'serialNumber':
u'', u'systemMacAddress': u'00:0c:29:f5:d2:7d', u'bootupTimestamp':
1421765066.11, u'memFree': 213212, u'modelName': u'vEOS', u'architecture':
u'i386', u'internalBuildId': u'f590eed4-1e66-43c6-8943-cee0390fbafe',
u'hardwareRevision': u''}], u'id': u'4312565648'}
>>> node = pyeapi.connect_to('veos01')
>>> node.enable('show version')
{u'jsonrpc': u'2.0', u'result': [{u'memTotal': 2028008, u'version':
u'4.14.5F', u'internalVersion': u'4.14.5F-2209869.4145F', u'serialNumber':
u'', u'systemMacAddress': u'00:0c:29:f5:d2:7d', u'bootupTimestamp':
1421765066.11, u'memFree': 213212, u'modelName': u'vEOS', u'architecture':
u'i386', u'internalBuildId': u'f590eed4-1e66-43c6-8943-cee0390fbafe',
u'hardwareRevision': u''}], u'id': u'4312565648'}
Additionally the node object can automatically load API modules to work
with the resources in the configuration. The API autoloader supports
automatic loading of modules in pyeapi.api as well as provides the ability
to build custom API modules to be loaded from a different namespace.
Example:
>>> import pyeapi
>>> node = pyeapi.connect_to('veos01')
>>> node.api('vlans').get(1)
{'state': 'active', 'name': 'default', 'vlan_id': 1, 'trunk_groups': []}
The API autoloader loads API modules by their filename.
The following objects are provide in this module for creating clients to
interface with eAPI.
Node -- Creates an instance of a node object that represents a single EOS
device. Each EOS device to be managed should have a Node instance
Config -- A subclass of ConfigParser.SafeConfigParser that handles the
configuration file. The configuration file is an INI style file that
contains the settings for nodes used by the connect_to function.
"""
from uuid import uuid4
import os
import re
try:
# Try Python 3.x import first
# Note: SafeConfigParser is deprecated and replaced by ConfigParser
from configparser import ConfigParser as SafeConfigParser
from configparser import Error as SafeConfigParserError
except ImportError:
# Use Python 2.7 import as a fallback
from ConfigParser import SafeConfigParser
from ConfigParser import Error as SafeConfigParserError
from pyeapi.utils import load_module, make_iterable, debug
from pyeapi.eapilib import HttpEapiConnection, HttpsEapiConnection
from pyeapi.eapilib import HttpsEapiCertConnection
from pyeapi.eapilib import SocketEapiConnection, HttpLocalEapiConnection
from pyeapi.eapilib import CommandError
CONFIG_SEARCH_PATH = ['~/.eapi.conf', '/mnt/flash/eapi.conf']
TRANSPORTS = {
'socket': SocketEapiConnection,
'http_local': HttpLocalEapiConnection,
'http': HttpEapiConnection,
'https': HttpsEapiConnection,
'https_certs': HttpsEapiCertConnection
}
DEFAULT_TRANSPORT = 'https'
class Config(SafeConfigParser):
"""Configuration instance for managing the eapi.conf file.
This class provides an instance for handling the configuration file. It
should not normally need to be instantiated directly. A single config object is
instantiated by the module for working with the config.
Attributes:
filename (str): The full path to the loaded filename
Args:
filename(str): The full path to the filename to be loaded when the
object is instantiated.
"""
def __init__(self, filename=None):
SafeConfigParser.__init__(self)
self.filename = filename
self.tags = dict()
self.autoload()
@property
def connections(self):
"""
Returns all of the loaded connection names as a list
"""
conn = lambda x: str(x).replace('connection:', '')
return [conn(name) for name in self.sections()]
def autoload(self):
""" Loads the eapi.conf file
This method will use the module variable CONFIG_SEARCH_PATH to
attempt to locate a valid eapi.conf file if a filename is not already
configured. This method will load the first eapi.conf file it
finds and then return.
The CONFIG_SEARCH_PATH can be overridden using an environment variable
by setting EAPI_CONF.
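Example (illustrative only; the connection name, host and credentials below
are placeholder values, not part of this library): a minimal eapi.conf found
on the search path might look like
.. code-block:: ini
[connection:veos01]
host: 192.0.2.1
username: admin
password: admin
transport: https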
"""
path = list(CONFIG_SEARCH_PATH)
if 'EAPI_CONF' in os.environ:
path = os.environ['EAPI_CONF']
elif self.filename:
path = self.filename
path = make_iterable(path)
for filename in path:
filename = os.path.expanduser(filename)
if os.path.exists(filename):
self.filename = filename
return self.read(filename)
self._add_default_connection()
def read(self, filename):
"""Reads the file specified by filename
This method will load the eapi.conf file specified by filename into
the instance object. It will also add the default connection localhost
if it was not defined in the eapi.conf file
Args:
filename (str): The full path to the file to load
"""
try:
SafeConfigParser.read(self, filename)
except SafeConfigParserError as exc:
# Ignore file and syslog a message on SafeConfigParser errors
msg = ("%s: parsing error in eapi conf file: %s" %
(type(exc).__name__, filename))
debug(msg)
self._add_default_connection()
for name in self.sections():
if name.startswith('connection:') and \
'host' not in dict(self.items(name)):
self.set(name, 'host', name.split(':')[1])
self.generate_tags()
def _add_default_connection(self):
"""Checks the loaded config and adds the localhost profile if needed
This method will load the connection:localhost profile into the client
configuration if it is not already present.
"""
if not self.get_connection('localhost'):
self.add_connection('localhost', transport='socket')
def generate_tags(self):
""" Generates the tags collection, mapping each tag to its list of hosts
"""
self.tags = dict()
for section in self.sections():
if self.has_option(section, 'tags'):
tags = self.get(section, 'tags')
for tag in [str(t).strip() for t in tags.split(',')]:
if tag not in self.tags:
self.tags[tag] = list()
self.tags[tag].append(section.split(':')[1])
def load(self, filename):
"""Loads the file specified by filename
This method works in conjunction with the autoload method to load the
file specified by filename.
Args:
filename (str): The full path to the file to be loaded
"""
self.filename = filename
self.reload()
def reload(self):
"""Reloads the configuration
This method will reload the configuration instance using the last
known filename. Note this method will initially clear the
configuration and reload all entries.
"""
for section in self.sections():
self.remove_section(section)
self.autoload()
def get_connection(self, name):
"""Returns the properties for a connection name
This method will return the settings for the configuration specified
by name. Note that the name argument should only be the name.
For instance, given the following eapi.conf file
.. code-block:: ini
[connection:veos01]
transport: http
The name to use to retrieve the configuration would be veos01
>>> pyeapi.client.config.get_connection('veos01')
Args:
name (str): The name of the connection to return
Returns:
A Python dictionary object of key/value pairs that represent
the node configuration. If the name provided in the argument
is not found, then None is returned.
"""
name = 'connection:{}'.format(name)
if not self.has_section(name):
return None
return dict(self.items(name))
def add_connection(self, name, **kwargs):
"""Adds a connection to the configuration
This method will add a connection to the configuration. The connection
added is only available for the lifetime of the object and is not
persisted.
Note:
If a call is made to load() or reload(), any connections added
with this method must be re-added to the config instance
Args:
name (str): The name of the connection to add to the config. The
name provided will automatically be prepended with the string
connection:
**kwargs (dict): The set of properties used to provide the node
configuration
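Example (illustrative; the connection name, host and credentials are
placeholder values chosen for this sketch):
>>> pyeapi.client.config.add_connection('veos01', transport='https',
...     host='192.0.2.1', username='admin', password='admin')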
"""
name = 'connection:{}'.format(name)
self.add_section(name)
for key, value in list(kwargs.items()):
self.set(name, key, value)
self.generate_tags()
# TODO: This is a global variable (in the module) - to review the impact on
# having a shared state for the config file.
config = Config()
def load_config(filename):
"""Function method that loads a conf file
This function will load the file specified by filename into the config
instance. It's a convenience function that calls load on the config
instance
Args:
filename (str): The full path to the filename to load
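Example (illustrative; the path below is a placeholder):
>>> pyeapi.client.load_config('/path/to/eapi.conf')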
"""
return config.load(filename)
def config_for(name):
""" Function to get settings for named config
This function will return the settings for a specific connection as
specified by name. It's a convenience function that calls get_connection
on the global config instance
Args:
name (str): The name of the connection to return. The connection
name is specified as the string right of the : in the INI file
Returns:
A Python dictionary object of key/value pairs that represent the
nodes configuration settings from the config instance
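Example (illustrative; assumes a [connection:veos01] section has already
been loaded into the config instance):
>>> pyeapi.client.config_for('veos01')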
"""
return config.get_connection(name)
def hosts_for_tag(tag):
""" Returns the hosts associated with the specified tag
This function will return the hosts associated with the tag specified
in the argument. It will return an array of connection names.
Args:
tag (str): The name of the tag to retrieve the list of hosts for
Returns:
list: A Python list object that includes the list of hosts associated
with the specified tag.
None: If the specified tag does not exist, then None is returned.
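Example (illustrative sketch; assumes two connections in eapi.conf carry
the option ``tags: north``, so both names and the tag are placeholders):
>>> pyeapi.client.hosts_for_tag('north')
['veos01', 'veos02']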
"""
return config.tags.get(tag)
def make_connection(transport, **kwargs):
""" Creates a connection instance based on the transport
This function creates the EapiConnection object based on the desired
transport. It looks up the transport class in the TRANSPORTS global
dictionary.
Args:
transport (string): The transport to use to create the instance.
**kwargs: Arbitrary keyword arguments.
Returns:
An instance of a connection object based on the transport
Raises:
TypeError: A TypeError is raised if the transport keyword is not
found in the list (keys) of available transports.
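Example (illustrative; the host and credentials are placeholder values):
>>> conn = make_connection('https', host='192.0.2.1', username='admin',
...     password='admin', port=None, timeout=60)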
"""
if transport not in TRANSPORTS:
raise TypeError('invalid transport specified')
klass = TRANSPORTS[transport]
return klass(**kwargs)
def connect(transport=None, host='localhost', username='admin',
password='', port=None, key_file=None, cert_file=None,
ca_file=None, timeout=60, return_node=False, **kwargs):
""" Creates a connection using the supplied settings
This function will create a connection to an Arista EOS node using
the arguments. All arguments are optional with default values.
Args:
transport (str): Specifies the type of connection transport to use.
Valid values for the connection are socket, http_local, http, and
https. The default value is specified in DEFAULT_TRANSPORT
host (str): The IP address or DNS host name of the connection device.
The default value is 'localhost'
username (str): The username to pass to the device to authenticate
the eAPI connection. The default value is 'admin'
password (str): The password to pass to the device to authenticate
the eAPI connection. The default value is ''
port (int): The TCP port of the endpoint for the eAPI connection. If
this keyword is not specified, the default value is automatically
determined by the transport type. (http=80, https=443)
key_file (str): Path to private key file for ssl validation
cert_file (str): Path to PEM formatted cert file for ssl validation
ca_file (str): Path to CA PEM formatted cert file for ssl validation
timeout (int): timeout
return_node (bool): Returns a Node object if True, otherwise
returns an EapiConnection object.
Returns:
An instance of an EapiConnection object for the specified transport.
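Example (illustrative; the host and credentials are placeholder values):
>>> conn = connect(transport='https', host='192.0.2.1', username='admin',
...     password='admin')
>>> node = connect(host='192.0.2.1', return_node=True)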
"""
transport = transport or DEFAULT_TRANSPORT
connection = make_connection(transport, host=host, username=username,
password=password, key_file=key_file,
cert_file=cert_file, ca_file=ca_file,
port=port, timeout=timeout)
if return_node:
return Node(connection, transport=transport, host=host,
username=username, password=password, key_file=key_file,
cert_file=cert_file, ca_file=ca_file, port=port, **kwargs)
return connection
class Node(object):
"""Represents a single device for sending and receiving eAPI messages
The Node object provides an instance for communicating with Arista EOS
devices. The Node object provides easy to use methods for sending both
enable and config commands to the device using a specific transport. This
object forms the base for communicating with devices.
Attributes:
connection (EapiConnection): The connection property represents the
underlying transport used by the Node object to communicate
with the device using eAPI.
running_config (str): The running-config from the device. This
property is lazily loaded and refreshed over the life cycle of
the instance.
startup_config (str): The startup-config from the device. This
property is lazily loaded and refreshed over the life cycle of
the instance.
autorefresh (bool): If True, the running-config and startup-config are
refreshed on config events. If False, then the config properties
must be manually refreshed.
settings (dict): Provides access to the settings used to create the
Node instance.
Args:
connection (EapiConnection): An instance of EapiConnection used as the
transport for sending and receiving eAPI requests and responses.
**kwargs: An arbitrary list of keyword arguments
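Example (illustrative; the host value is a placeholder):
>>> node = pyeapi.connect(host='192.0.2.1', return_node=True)
>>> node.autorefresh
True
>>> config_text = node.running_config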
"""
def __init__(self, connection, **kwargs):
self._connection = connection
self._running_config = None
self._startup_config = None
self._version = None
self._version_number = None
self._model = None
self._session_name = None
self._enablepwd = kwargs.get('enablepwd')
self.autorefresh = kwargs.get('autorefresh', True)
self.settings = kwargs
def __str__(self):
return 'Node(connection=%s)' % str(self._connection)
def __repr__(self):
return 'Node(connection=%s)' % repr(self._connection)
@property
def connection(self):
return self._connection
@property
def running_config(self):
if self._running_config is not None:
return self._running_config
self._running_config = self.get_config(params='all',
as_string=True)
return self._running_config
@property
def startup_config(self):
if self._startup_config is not None:
return self._startup_config
self._startup_config = self.get_config('startup-config',
as_string=True)
return self._startup_config
@property
def version(self):
if self._version:
return self._version
self._get_version_properties()
return self._version
@property
def version_number(self):
if self._version_number:
return self._version_number
self._get_version_properties()
return self._version_number
@property
def model(self):
if self._model:
return self._model
self._get_version_properties()
return self._model
def _get_version_properties(self):
"""Parses version and model information out of 'show version' output
and uses the output to populate class properties.
"""
# Parse out version info
output = self.enable('show version')
self._version = str(output[0]['result']['version'])
match = re.match(r'[\d.\d]+', str(output[0]['result']['version']))
if match:
self._version_number = str(match.group(0))
else:
self._version_number = str(output[0]['result']['version'])
# Parse out model number
match = re.search(r'\d\d\d\d', str(output[0]['result']['modelName']))
if match:
self._model = str(match.group(0))
else:
self._model = str(output[0]['result']['modelName'])
def enable_authentication(self, password):
"""Configures the enable mode authentication password
EOS supports an additional password authentication mechanism for
sessions that want to switch to executive (or enable) mode. This
method will configure the password, if required, for entering
executive mode
Args:
password (str): The password string in clear text used to
authenticate to exec mode
"""
self._enablepwd = str(password).strip()
def config(self, commands, **kwargs):
"""Configures the node with the specified commands
This method is used to send configuration commands to the node. It
will take either a string or a list and prepend the necessary commands
to put the session into config mode.
Args:
commands (str, list): The commands to send to the node in config
mode. If the commands argument is a string it will be cast to
a list.
The list of commands will also be prepended with the
necessary commands to put the session in config mode.
**kwargs: Additional keyword arguments for expanded eAPI
functionality. Only supported eAPI params are used in building
the request
Returns:
The config method will return a list of dictionaries with the
output from each command. The function will strip the
response from any commands it prepends.
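Example (illustrative commands; the interface and description are
placeholder values):
>>> node.config(['interface Ethernet1', 'description engineering lab'])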
"""
if self._session_name: # If in a config session
return self._configure_session(commands, **kwargs)
return self._configure_terminal(commands, **kwargs)
def _configure_terminal(self, commands, **kwargs):
"""Configures the node with the specified commands with leading "configure terminal"
"""
commands = make_iterable(commands)
commands = list(commands)
# push the configure command onto the command stack
commands.insert(0, 'configure terminal')
response = self.run_commands(commands, **kwargs)
if self.autorefresh:
self.refresh()
# pop the configure command output off the stack
response.pop(0)
return response
def _configure_session(self, commands, **kwargs):
"""Configures the node with the specified commands with leading "configure session <session name>"
"""
if not self._session_name:
raise CommandError('Not currently in a session')
commands = make_iterable(commands)
commands = list(commands)
# push the configure command onto the command stack
commands.insert(0, 'configure session %s' % self._session_name)
response = self.run_commands(commands, **kwargs)
# pop the configure command output off the stack
response.pop(0)
return response
def section(self, regex, config='running_config'):
"""Returns a section of the config
Args:
regex (str): A valid regular expression used to select sections
of configuration to return
config (str): The configuration to return. Valid values for config
are "running_config" or "startup_config". The default value
is "running_config"
Returns:
The configuration section as a string object.
"""
if config in ['running_config', 'startup_config']:
config = getattr(self, config)
match = re.search(regex, config, re.M)
if not match:
raise TypeError('config section not found')
block_start, line_end = match.regs[0]
match = re.search(r'^[^\s]', config[line_end:], re.M)
if not match:
raise TypeError('could not find end block')
_, block_end = match.regs[0]
block_end = line_end + block_end
return config[block_start:block_end]
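    # Example (hypothetical usage, assuming an already-connected Node instance
    # named ``node`` and an interface block present in the configuration):
    #
    #     block = node.section(r'^interface Ethernet1$')
    #
    # Pass config='startup_config' to search the startup configuration instead
    # of the default running-config.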
def enable(self, commands, encoding='json', strict=False,
send_enable=True, **kwargs):
"""Sends the array of commands to the node in enable mode
This method will send the commands to the node and evaluate
the results. If a command fails due to an encoding error,
        then the command set will be re-issued individually with text
encoding.
Args:
commands (list): The list of commands to send to the node
encoding (str): The requested encoding of the command output.
Valid values for encoding are JSON or text
strict (bool): If False, this method will attempt to run a
command with text encoding if JSON encoding fails
send_enable (bool): If True the enable command will be
prepended to the command list automatically.
**kwargs: Additional keyword arguments for expanded eAPI
functionality. Only supported eAPI params are used in building
the request
Returns:
A dict object that includes the response for each command along
with the encoding
Raises:
TypeError:
This method does not support sending configure
commands and will raise a TypeError if configuration commands
are found in the list of commands provided
This method will also raise a TypeError if the specified
encoding is not one of 'json' or 'text'
CommandError: This method will raise a CommandError if any one
of the commands fails.
"""
commands = make_iterable(commands)
if 'configure' in commands:
raise TypeError('config mode commands not supported')
results = list()
# IMPORTANT: There are two keys (response, result) that both
# return the same value. 'response' was originally placed
# there in error and both are now present to avoid breaking
# existing scripts. 'response' will be removed in a future release.
if strict:
responses = self.run_commands(commands, encoding, send_enable,
**kwargs)
for index, response in enumerate(responses):
results.append(dict(command=commands[index],
result=response,
response=response,
encoding=encoding))
else:
for command in commands:
try:
resp = self.run_commands(command, encoding, send_enable,
**kwargs)
results.append(dict(command=command,
result=resp[0],
encoding=encoding))
except CommandError as exc:
if exc.error_code == 1003:
resp = self.run_commands(command, 'text', send_enable,
**kwargs)
results.append(dict(command=command,
result=resp[0],
encoding='text'))
else:
raise
return results
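    # Example (hypothetical usage, assuming a connected Node instance ``node``):
    #
    #     out = node.enable(['show version'])
    #     version = out[0]['result']['version']
    #
    # Each entry in the returned list carries the original command, the decoded
    # result and the encoding that was ultimately used for that command.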
def run_commands(self, commands, encoding='json', send_enable=True,
**kwargs):
"""Sends the commands over the transport to the device
This method sends the commands to the device using the nodes
transport. This is a lower layer function that shouldn't normally
need to be used, preferring instead to use config() or enable().
Args:
commands (list): The ordered list of commands to send to the
device using the transport
encoding (str): The encoding method to use for the request and
                expected response.
send_enable (bool): If True the enable command will be
prepended to the command list automatically.
**kwargs: Additional keyword arguments for expanded eAPI
functionality. Only supported eAPI params are used in building
the request
Returns:
This method will return the raw response from the connection
which is a Python dictionary object.
"""
commands = make_iterable(commands)
        # Some commands are multiline commands (banner and SSL commands, for
        # example). The two lines below support those by letting callers pass
        # commands such as:
        #   banner login MULTILINE: This is my banner.\nAnd I even support
        #   multiple lines.
        # This makes it possible to read a configuration from a file, split it
        # into lines and pass it to pyeapi as-is, without special handling for
        # multiline commands.
commands = [{'cmd': c.split('MULTILINE:')[0],
'input': '%s\n' % (c.split('MULTILINE:')[1].strip())}
if 'MULTILINE:' in c else c for c in commands]
if send_enable:
if self._enablepwd:
commands.insert(0, {'cmd': 'enable', 'input': self._enablepwd})
else:
commands.insert(0, 'enable')
response = self._connection.execute(commands, encoding, **kwargs)
# pop enable command from the response only if we sent enable
if send_enable:
response['result'].pop(0)
return response['result']
def api(self, name, namespace='pyeapi.api'):
"""Loads the specified api module
This method is the API autoload mechanism that will load the API
module specified by the name argument. The API module will be loaded
and look first for an initialize() function and secondly for an
instance() function. In both cases, the node object is passed to
the module.
Args:
name (str): The name of the module to load. The name should be
the name of the python file to import
namespace (str): The namespace to use to load the module. The
default value is 'pyeapi.api'
Returns:
The API module loaded with the node instance.
"""
module = load_module('{}.{}'.format(namespace, name))
if hasattr(module, 'initialize'):
module.initialize(self)
if hasattr(module, 'instance'):
return module.instance(self)
return module
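    # Example (hypothetical usage; the module names are illustrative and must
    # exist under the given namespace for the load to succeed):
    #
    #     vlans = node.api('vlans')      # loads pyeapi.api.vlans
    #     users = node.api('users')      # loads pyeapi.api.users
    #
    # The return value is whatever the module's instance() function produces,
    # or the module itself if no instance() function is defined.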
def get_config(self, config='running-config', params=None,
as_string=False):
""" Retreives the config from the node
This method will retrieve the config from the node as either a string
or a list object. The config to retrieve can be specified as either
the startup-config or the running-config.
Args:
config (str): Specifies to return either the nodes startup-config
or running-config. The default value is the running-config
params (str): A string of keywords to append to the command for
retrieving the config.
            as_string (bool): Flag that determines the response. If True, then
the configuration is returned as a raw string. If False, then
the configuration is returned as a list. The default value is
False
Returns:
This method will return either a string or a list depending on the
states of the as_string keyword argument.
Raises:
TypeError: If the specified config is not one of either
'running-config' or 'startup-config'
"""
if config not in ['startup-config', 'running-config']:
raise TypeError('invalid config name specified')
command = 'show %s' % config
if params:
command += ' %s' % params
result = self.run_commands(command, 'text')
if as_string:
return str(result[0]['output']).strip()
return str(result[0]['output']).split('\n')
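    # Example (hypothetical usage, assuming a connected Node instance ``node``):
    #
    #     lines = node.get_config('running-config')                 # list of lines
    #     text = node.get_config('startup-config', as_string=True)  # single string
    #     intf = node.get_config('running-config', params='section Ethernet1')
    #
    # Any config name other than 'running-config' or 'startup-config' raises
    # TypeError.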
def refresh(self):
"""Refreshes the instance config properties
This method will refresh the public running_config and startup_config
        properties. Since the properties are lazily loaded, this method will
        clear the current internal instance variables. On the next call the
instance variables will be repopulated with the current config
"""
self._running_config = None
self._startup_config = None
def configure_session(self):
"""Enter a config session
"""
self._session_name = self._session_name or uuid4()
def diff(self):
"""Returns session-config diffs in text encoding
Note: "show session-config diffs" doesn't support json encoding
"""
response = self._configure_session(['show session-config diffs'], encoding='text')
return response[0]['output']
def commit(self):
"""Commits the current config session
"""
return self._configure_and_exit_session(['commit'])
def abort(self):
"""Aborts the current config session
"""
return self._configure_session(['abort'])
def _configure_and_exit_session(self, commands, **kwargs):
response = self._configure_session(commands, **kwargs)
if self.autorefresh:
self.refresh()
# Exit the current config session
self._session_name = None
return response
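    # Example of a config-session round trip (hypothetical usage; the session
    # name is generated automatically the first time configure_session() runs):
    #
    #     node.configure_session()
    #     node.config(['interface Ethernet1', 'description uplink'])
    #     print(node.diff())   # text diff of the pending session
    #     node.commit()        # or node.abort() to discard the changes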
def connect_to(name):
"""Creates a node instance based on an entry from the config
This function will retrieve the settings for the specified connection
from the config and return a Node instance. The configuration must
be loaded prior to calling this function.
Args:
name (str): The name of the connection to load from the config. The
name argument should be the connection name (everything right of
the colon from the INI file)
Returns:
This function will return an instance of Node with the settings
from the config instance.
Raises:
AttributeError: raised if the specified configuration name is not
found in the loaded configuration
"""
kwargs = config_for(name)
if not kwargs:
raise AttributeError('connection profile not found in config')
node = connect(return_node=True, **kwargs)
return node
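# Example (hypothetical usage; 'veos01' stands for a connection profile that is
# assumed to exist in the configuration already loaded by this module):
#
#     node = connect_to('veos01')
#     print(node.version, node.model)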
| bsd-3-clause | -6,460,163,297,546,971,000 | 37.348584 | 106 | 0.631491 | false |
HybridF5/jacket | jacket/conf/netconf.py | 1 | 1108 |
import socket
from oslo_config import cfg
from oslo_utils import netutils
CONF = cfg.CONF
netconf_opts = [
cfg.StrOpt('my_ip',
default=netutils.get_my_ipv4(),
help='IP address of this host'),
cfg.StrOpt('my_block_storage_ip',
default='$my_ip',
help='Block storage IP address of this host'),
cfg.StrOpt('host',
default=socket.gethostname(),
help='Name of this node. This can be an opaque identifier. '
'It is not necessarily a hostname, FQDN, or IP address. '
'However, the node name must be valid within '
'an AMQP key, and if using ZeroMQ, a valid '
'hostname, FQDN, or IP address'),
cfg.BoolOpt('use_ipv6',
default=False,
help='Use IPv6'),
]
def register_opts(conf):
conf.register_opts(netconf_opts)
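# Minimal usage sketch (illustrative; the import path and option values shown
# here are assumptions, not taken from this module):
#
#     from jacket.conf import netconf
#     netconf.register_opts(CONF)
#     print(CONF.host, CONF.my_ip, CONF.use_ipv6)
#
# list_opts() below is the usual hook that lets oslo.config's sample-config
# generator discover these options under the DEFAULT group.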
# TODO(pumaranikar): We can consider moving these options to quota group
# and renaming them all to drop the quota bit.
def list_opts():
    return {'DEFAULT': netconf_opts}
| apache-2.0 | -5,560,293,642,524,442,000 | 28.972973 | 77 | 0.579422 | false
whitingjp/whitgl | scripts/process_model.py | 1 | 2957 |
#!/usr/bin/python
import struct
import argparse
import sys
import os.path
def process_mtl(filename):
materials = []
file = open(filename)
for line in file:
tokens = line.split()
if(len(tokens) == 0):
continue
ident = tokens.pop(0)
if(ident == 'newmtl'):
m = {}
m['name'] = tokens[0]
m['color'] = (1,1,1)
materials.append(m)
if(ident == 'Kd'):
materials[-1]['color'] = (float(tokens[0]),float(tokens[1]),float(tokens[2]))
return materials
def process_obj(filename):
file = open(filename)
vertices = []
normals = []
faces = []
materials = []
default_material = {}
default_material['name'] = 'default'
default_material['color'] = (1,0.1,1)
materials.append(default_material)
current_material = 0
for line in file:
tokens = line.split()
if(len(tokens) == 0):
continue
ident = tokens.pop(0)
if(ident == 'v'):
vertex = (float(tokens[0]),float(tokens[1]),float(tokens[2]));
vertices.append(vertex)
if(ident == 'vn'):
normal = (float(tokens[0]),float(tokens[1]),float(tokens[2]));
normals.append(normal)
if(ident == 'f'):
face = {}
face['vertices'] = (int(tokens[0].split('/')[0]),int(tokens[1].split('/')[0]),int(tokens[2].split('/')[0]))
face['normals'] = (int(tokens[0].split('/')[2]),int(tokens[1].split('/')[2]),int(tokens[2].split('/')[2]))
face['material'] = current_material
faces.append(face)
if len(tokens) == 4:
face['vertices'] = (int(tokens[2].split('/')[0]),int(tokens[3].split('/')[0]),int(tokens[0].split('/')[0]))
face['normals'] = (int(tokens[2].split('/')[2]),int(tokens[3].split('/')[2]),int(tokens[0].split('/')[2]))
face['material'] = current_material
faces.append(face)
if(ident == 'mtllib'):
path = os.path.join(os.path.dirname(filename), tokens[0])
materials += process_mtl(path)
if(ident == 'usemtl'):
current_material = 0
for i in range(len(materials)):
if materials[i]['name'] == tokens[0]:
current_material = i
return vertices, normals, faces, materials
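# Illustrative example of the structures built above: a face line such as
# "f 1//1 2//1 3//1" becomes
#
#     {'vertices': (1, 2, 3), 'normals': (1, 1, 1), 'material': 0}
#
# with indices still 1-based as in the .obj file; main() subtracts 1 when it
# looks up the actual vertex and normal data for the binary output.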
def main():
parser = argparse.ArgumentParser(description='Convert a wavefront obj file to use in slicer.')
parser.add_argument('src', help='obj file name')
parser.add_argument('dst', help='wmd file name')
args = parser.parse_args()
print("Converting %s to %s" % (args.src, args.dst))
vertices, normals, faces, materials = process_obj(args.src)
vertices_size = len(faces)*3*3*4;
colours_size = vertices_size * 2;
size = vertices_size + colours_size
print ("Vertices %d size %d" % (len(faces)*3, size))
out = open(args.dst, 'wb')
out.write(struct.pack('i', size))
for face in faces:
m = materials[face['material']]
for i in range(3):
vertex = vertices[face['vertices'][i]-1]
normal = normals[face['normals'][i]-1]
for f in vertex:
out.write(struct.pack('f', f))
for c in m['color']:
out.write(struct.pack('f', c))
for n in normal:
out.write(struct.pack('f', n))
if __name__ == "__main__":
main()
| mit | -4,945,124,057,508,311,000 | 28.868687 | 111 | 0.619547 | false |
bittwiddler1/dNES | libdnes/specs/perfect6502/create_opcode_info.py | 1 | 2777 |
# Copyright (c) 2015 Saul D Beniquez
# used to parse the output of perfect6502's measure test program and generate
# a table cycle timings per opcode
import re #regular expressions
import sys # stdio
timing_table = [int(-1)] * 256
addressmode_table = [int(-1)] * 256
lines = list()
def address_mode_value(x):
return {
'imm' : 0x10,
'acc': 0xa0,
'zp' : 0xb0,
'zpx': 0xb1,
'zpy': 0xb2,
'rel': 0xc0,
'abs': 0xd0,
'absx': 0xd1,
'absy': 0xd2,
'idx' : 0xf0,
'izy': 0xf1,
'izx': 0xf2
}.get(x, 0)
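# Illustrative example: address_mode_value('abs') returns 0xd0 (208), while any
# addressing-mode string not listed above falls through to the default of 0:
#
#     print(address_mode_value('abs'))    # 208
#     print(address_mode_value('impl'))   # 0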
def print_byte_table(table, width = 16, base16 = False):
for j in range(0, 16/width):
sys.stdout.write(' //')
for i in range(0, width ):
val = int(i)
val = (int(j) * width) + val
fmt = ' %01x '
if not base16:
fmt = " %1x "
sys.stdout.write((fmt % val).upper())
sys.stdout.write('\n')
sys.stdout.write(' ')
width = width -1
for index in range(0x00, len(table)):
if table[index] > 0xFF:
continue
output = str()
fmt = str()
fmt = " 0x%02x"
if not base16:
fmt = "%d" #if table[index] > -1 else "-%d"
output = (fmt % table[index]);
sys.stdout.write(output);
if (index == 0xFF):
sys.stdout.write(" ]; // F \n");
else :
if ((index & width) == width):
sys.stdout.write((", // %01x\n " % (index >> 4)).upper())
else:
sys.stdout.write(", ")
sys.stdout.write("\n");
# Load file
with open("measure.log") as f:
lines = (l.strip() for l in f.readlines())
# Parse file
for line in lines:
if line:
print line
regex = \
re.compile(r"^\$([A-Za-z0-9][A-Za-z0-9]):\s*(CRASH$|(bytes:\s*(\d|\D)\s*cycles:\s*(\d*)\s*\S*\s*\S*\s*(\w+)$))");
match = regex.match(line)
opcode_str = match.group(1)
cycles_str = match.group(5) if (opcode_str != "00") else "7"
addressmode_str = match.group(6)
#print("[0x{0}] = {1}".format(opcode_str, cycles_str))
opcode = int(opcode_str,16)
cycles = int(cycles_str, 16) if (cycles_str != None) else 0;
addressmode = address_mode_value(addressmode_str)
timing_table[opcode] = cycles;
addressmode_table[opcode] = addressmode;
# Output several c-style array definition
sys.stdout.write("ubyte cycleCountTable[256] = [\n");
print_byte_table(timing_table)
sys.stdout.write("ubyte addressModeTable[256] = [\n");
print_byte_table(addressmode_table, 8, True)
| gpl-3.0 | -3,885,315,377,486,115,300 | 26.77 | 129 | 0.5009 | false |
Kopei/manualscore | config.py | 1 | 3100 |
# -*- coding: utf-8 -*-
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY') or 'greed is good who is your daddy?'
SSL_DISABLE = False
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
SQLALCHEMY_RECORD_QUERIES = True
MAIL_SERVER = 'smtp.126.com'
MAIL_PORT = 25
MAIL_USE_TLS = True
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
MAIL_SUBJECT_PREFIX = u'[手策]'
MAIL_SENDER = u'手策管理员 <[email protected]>'
ADMIN = os.environ.get('ADMIN')
POSTS_PER_PAGE = 20
FOLLOWERS_PER_PAGE = 50
COMMENTS_PER_PAGE = 30
SLOW_DB_QUERY_TIME=0.5
UPLOAD_FOLDER = '/path/to/upload'
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'jpg', 'png', 'gif', 'jpeg'])
ALLOW_MAX_FILE = 16 * 1024 * 1024
WHOOSH_BASE = os.path.join(basedir, 'search.db')
MAX_SEARCH_RESULTS = 50
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
DEBUG = True
SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestingConfig(Config):
TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-test.sqlite')
WTF_CSRF_ENABLED = False
class ProductionConfig(Config):
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
@classmethod
def init_app(cls, app):
Config.init_app(app)
# email errors to the administrators
import logging
from logging.handlers import SMTPHandler
credentials = None
secure = None
if getattr(cls, 'MAIL_USERNAME', None) is not None:
credentials = (cls.MAIL_USERNAME, cls.MAIL_PASSWORD)
if getattr(cls, 'MAIL_USE_TLS', None):
secure = ()
mail_handler = SMTPHandler(
mailhost=(cls.MAIL_SERVER, cls.MAIL_PORT),
fromaddr=cls.MAIL_SENDER,
toaddrs=[cls.ADMIN],
subject=cls.MAIL_SUBJECT_PREFIX + ' Application Error',
credentials=credentials,
secure=secure)
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
class HerokuConfig(ProductionConfig):
SSL_DISABLE = bool(os.environ.get('SSL_DISABLE'))
@classmethod
def init_app(cls, app):
ProductionConfig.init_app(app)
# handle proxy server headers
from werkzeug.contrib.fixers import ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app)
# log to stderr
import logging
from logging import StreamHandler
file_handler = StreamHandler()
file_handler.setLevel(logging.WARNING)
app.logger.addHandler(file_handler)
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
'heroku': HerokuConfig,
'default': DevelopmentConfig
}
| mit | -5,622,696,804,971,524,000 | 29.554455 | 83 | 0.63059 | false |
sYnfo/samba | python/samba/subunit/__init__.py | 1 | 2711 |
# Subunit handling
# Copyright (C) Jelmer Vernooij <[email protected]> 2014
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Subunit test protocol."""
from __future__ import absolute_import
import samba
samba.ensure_third_party_module("iso8601", "pyiso8601")
import iso8601
import unittest
PROGRESS_SET = 0
PROGRESS_CUR = 1
PROGRESS_PUSH = 2
PROGRESS_POP = 3
def RemoteError(description=""):
return (Exception, Exception(description), None)
class RemotedTestCase(unittest.TestCase):
"""A class to represent test cases run in child processes.
Instances of this class are used to provide the Python test API a TestCase
that can be printed to the screen, introspected for metadata and so on.
However, as they are a simply a memoisation of a test that was actually
run in the past by a separate process, they cannot perform any interactive
actions.
"""
def __eq__ (self, other):
try:
return self.__description == other.__description
except AttributeError:
return False
def __init__(self, description):
"""Create a psuedo test case with description description."""
self.__description = description
def error(self, label):
raise NotImplementedError("%s on RemotedTestCases is not permitted." %
label)
def setUp(self):
self.error("setUp")
def tearDown(self):
self.error("tearDown")
def shortDescription(self):
return self.__description
def id(self):
return "%s" % (self.__description,)
def __str__(self):
return "%s (%s)" % (self.__description, self._strclass())
def __repr__(self):
return "<%s description='%s'>" % \
(self._strclass(), self.__description)
def run(self, result=None):
if result is None: result = self.defaultTestResult()
result.startTest(self)
result.addError(self, RemoteError("Cannot run RemotedTestCases.\n"))
result.stopTest(self)
def _strclass(self):
cls = self.__class__
return "%s.%s" % (cls.__module__, cls.__name__)
| gpl-3.0 | 143,757,858,295,772,800 | 29.122222 | 78 | 0.666913 | false |
libo/Enigma2 | lib/python/Plugins/SystemPlugins/Videomode/VideoWizard.py | 1 | 7752 |
from Screens.Wizard import WizardSummary
from Screens.WizardLanguage import WizardLanguage
from Screens.Rc import Rc
from VideoHardware import video_hw
from Components.Pixmap import Pixmap, MovingPixmap, MultiPixmap
from Components.config import config, ConfigBoolean, configfile
from Tools.Directories import resolveFilename, SCOPE_PLUGINS
from Tools.HardwareInfo import HardwareInfo
config.misc.showtestcard = ConfigBoolean(default = False)
class VideoWizardSummary(WizardSummary):
skin = (
"""<screen name="VideoWizardSummary" position="0,0" size="132,64" id="1">
<widget name="text" position="6,4" size="120,40" font="Regular;12" transparent="1" />
<widget source="parent.list" render="Label" position="6,40" size="120,21" font="Regular;14">
<convert type="StringListSelection" />
</widget>
<!--widget name="pic" pixmap="%s" position="6,22" zPosition="10" size="64,64" transparent="1" alphatest="on"/-->
</screen>""",
"""<screen name="VideoWizardSummary" position="0,0" size="96,64" id="2">
<widget name="text" position="0,4" size="96,40" font="Regular;12" transparent="1" />
<widget source="parent.list" render="Label" position="0,40" size="96,21" font="Regular;14">
<convert type="StringListSelection" />
</widget>
<!--widget name="pic" pixmap="%s" position="0,22" zPosition="10" size="64,64" transparent="1" alphatest="on"/-->
</screen>""")
#% (resolveFilename(SCOPE_PLUGINS, "SystemPlugins/Videomode/lcd_Scart.png"))
def __init__(self, session, parent):
WizardSummary.__init__(self, session, parent)
#self["pic"] = Pixmap()
def setLCDPicCallback(self):
self.parent.setLCDTextCallback(self.setText)
def setLCDPic(self, file):
self["pic"].instance.setPixmapFromFile(file)
class VideoWizard(WizardLanguage, Rc):
skin = """
<screen position="0,0" size="720,576" title="Welcome..." flags="wfNoBorder" >
<widget name="text" position="153,50" size="340,270" font="Regular;23" />
<widget source="list" render="Listbox" position="200,300" size="290,200" scrollbarMode="showOnDemand" >
<convert type="StringList" />
</widget>
<widget name="config" position="50,300" zPosition="1" size="440,200" transparent="1" scrollbarMode="showOnDemand" />
<widget name="wizard" pixmap="skin_default/wizard.png" position="40,50" zPosition="10" size="110,174" transparent="1" alphatest="on"/>
<ePixmap pixmap="skin_default/buttons/button_red.png" position="40,225" zPosition="0" size="15,16" transparent="1" alphatest="on" />
<widget name="languagetext" position="55,225" size="95,30" font="Regular;18" />
<widget name="portpic" pixmap="%s" position="50,300" zPosition="10" size="150,150" transparent="1" alphatest="on"/>
<widget name="rc" pixmaps="skin_default/rc.png,skin_default/rcold.png" position="500,50" zPosition="10" size="154,500" transparent="1" alphatest="on"/>
<widget name="arrowdown" pixmap="skin_default/arrowdown.png" position="0,0" zPosition="11" size="37,70" transparent="1" alphatest="on"/>
<widget name="arrowdown2" pixmap="skin_default/arrowdown.png" position="0,0" zPosition="11" size="37,70" transparent="1" alphatest="on"/>
<widget name="arrowup" pixmap="skin_default/arrowup.png" position="-100,-100" zPosition="11" size="37,70" transparent="1" alphatest="on"/>
<widget name="arrowup2" pixmap="skin_default/arrowup.png" position="-100,-100" zPosition="11" size="37,70" transparent="1" alphatest="on"/>
</screen>""" % (resolveFilename(SCOPE_PLUGINS, "SystemPlugins/Videomode/Scart.png"))
def __init__(self, session):
# FIXME anyone knows how to use relative paths from the plugin's directory?
self.xmlfile = resolveFilename(SCOPE_PLUGINS, "SystemPlugins/Videomode/videowizard.xml")
self.hw = video_hw
WizardLanguage.__init__(self, session, showSteps = False, showStepSlider = False)
Rc.__init__(self)
self["wizard"] = Pixmap()
self["portpic"] = Pixmap()
self.port = None
self.mode = None
self.rate = None
def createSummary(self):
print "++++++++++++***++**** VideoWizard-createSummary"
from Screens.Wizard import WizardSummary
return VideoWizardSummary
def markDone(self):
config.misc.videowizardenabled.value = 0
config.misc.videowizardenabled.save()
configfile.save()
def listInputChannels(self):
hw_type = HardwareInfo().get_device_name()
has_hdmi = HardwareInfo().has_hdmi()
list = []
for port in self.hw.getPortList():
if self.hw.isPortUsed(port):
descr = port
if descr == 'DVI' and has_hdmi:
descr = 'HDMI'
if port != "DVI-PC":
list.append((descr,port))
list.sort(key = lambda x: x[0])
print "listInputChannels:", list
return list
def inputSelectionMade(self, index):
print "inputSelectionMade:", index
self.port = index
self.inputSelect(index)
def inputSelectionMoved(self):
hw_type = HardwareInfo().get_device_name()
has_hdmi = HardwareInfo().has_hdmi()
print "input selection moved:", self.selection
self.inputSelect(self.selection)
if self["portpic"].instance is not None:
picname = self.selection
if picname == 'DVI' and has_hdmi:
picname = "HDMI"
self["portpic"].instance.setPixmapFromFile(resolveFilename(SCOPE_PLUGINS, "SystemPlugins/Videomode/" + picname + ".png"))
def inputSelect(self, port):
print "inputSelect:", port
modeList = self.hw.getModeList(self.selection)
print "modeList:", modeList
self.port = port
if (len(modeList) > 0):
ratesList = self.listRates(modeList[0][0])
self.hw.setMode(port = port, mode = modeList[0][0], rate = ratesList[0][0])
def listModes(self):
list = []
print "modes for port", self.port
for mode in self.hw.getModeList(self.port):
#if mode[0] != "PC":
list.append((mode[0], mode[0]))
print "modeslist:", list
return list
def modeSelectionMade(self, index):
print "modeSelectionMade:", index
self.mode = index
self.modeSelect(index)
def modeSelectionMoved(self):
print "mode selection moved:", self.selection
self.modeSelect(self.selection)
def modeSelect(self, mode):
ratesList = self.listRates(mode)
print "ratesList:", ratesList
if self.port == "DVI" and mode in ("720p", "1080i"):
self.rate = "multi"
self.hw.setMode(port = self.port, mode = mode, rate = "multi")
else:
self.hw.setMode(port = self.port, mode = mode, rate = ratesList[0][0])
def listRates(self, querymode = None):
if querymode is None:
querymode = self.mode
list = []
print "modes for port", self.port, "and mode", querymode
for mode in self.hw.getModeList(self.port):
print mode
if mode[0] == querymode:
for rate in mode[1]:
if self.port == "DVI-PC":
print "rate:", rate
if rate == "640x480":
list.insert(0, (rate, rate))
continue
list.append((rate, rate))
return list
def rateSelectionMade(self, index):
print "rateSelectionMade:", index
self.rate = index
self.rateSelect(index)
def rateSelectionMoved(self):
print "rate selection moved:", self.selection
self.rateSelect(self.selection)
def rateSelect(self, rate):
self.hw.setMode(port = self.port, mode = self.mode, rate = rate)
def showTestCard(self, selection = None):
if selection is None:
selection = self.selection
print "set config.misc.showtestcard to", {'yes': True, 'no': False}[selection]
if selection == "yes":
config.misc.showtestcard.value = True
else:
config.misc.showtestcard.value = False
def keyNumberGlobal(self, number):
if number in (1,2,3):
if number == 1:
self.hw.saveMode("DVI", "720p", "multi")
elif number == 2:
self.hw.saveMode("DVI", "1080i", "multi")
elif number == 3:
self.hw.saveMode("Scart", "Multi", "multi")
self.hw.setConfiguredMode()
self.close()
WizardLanguage.keyNumberGlobal(self, number)
| gpl-2.0 | 4,749,442,493,845,231,000 | 37.187192 | 154 | 0.689886 | false |
arthurdejong/python-stdnum | update/cn_loc.py | 1 | 3075 |
#!/usr/bin/env python3
# update/cn_loc.py - script to fetch data from the CN Open Data community
#
# Copyright (C) 2014-2015 Jiangge Zhang
# Copyright (C) 2015-2019 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""This script downloads birth place codes from the CN Open Data community on
Github."""
from __future__ import print_function, unicode_literals
import sys
from collections import OrderedDict
from datetime import datetime
import requests
data_url = 'https://github.com/cn/GB2260'
data_revisions = [
'GB2260-2002',
'GB2260-2003',
'GB2260-200306',
'GB2260-2004',
'GB2260-200403',
'GB2260-200409',
'GB2260-2005',
'GB2260-200506',
'GB2260-2006',
'GB2260-2007',
'GB2260-2008',
'GB2260-2009',
'GB2260-2010',
'GB2260-2011',
'GB2260-2012',
'GB2260-2013',
'GB2260-2014',
]
def fetch_data():
"""Return the data from tab-separated revisions as one code/name dict."""
data_collection = OrderedDict()
for revision in data_revisions:
response = requests.get('%s/raw/release/%s.txt' % (data_url, revision))
response.raise_for_status()
if response.ok:
print('%s is fetched' % revision, file=sys.stderr)
else:
print('%s is missing' % revision, file=sys.stderr)
continue
for line in response.text.strip().split('\n'):
code, name = line.split('\t')
data_collection[code.strip()] = name.strip()
return data_collection
def group_data(data_collection):
"""Filter the data and return codes with names."""
for code, name in sorted(data_collection.items()):
if code.endswith('00'):
continue # county only
province_code = code[:2] + '0000'
prefecture_code = code[:4] + '00'
province_name = data_collection[province_code]
prefecture_name = data_collection[prefecture_code]
yield code, name, prefecture_name, province_name
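# Illustrative example of the grouping above: for a county code such as
# '110101', the province code is '110000' (code[:2] + '0000') and the
# prefecture code is '110100' (code[:4] + '00'); codes ending in '00' are
# skipped because they denote provinces or prefectures rather than counties.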
if __name__ == '__main__':
"""Output a data file in the right format."""
print("# generated from National Bureau of Statistics of the People's")
print('# Republic of China, downloaded from %s' % data_url)
print('# %s' % datetime.utcnow())
data_collection = fetch_data()
for data in group_data(data_collection):
print('%s county="%s" prefecture="%s" province="%s"' % data)
| lgpl-2.1 | 5,329,826,409,305,947,000 | 32.064516 | 79 | 0.663415 | false |
spdx/tools-python | spdx/config.py | 1 | 2638 |
# Copyright (c) 2014 Ahmed H. Ismail
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import codecs
import json
import os
from spdx.version import Version
_base_dir = os.path.dirname(__file__)
_licenses = os.path.join(_base_dir, "licenses.json")
_exceptions = os.path.join(_base_dir, "exceptions.json")
def _load_list(file_name, object_type="licenses", id_attribute="licenseId"):
"""
Return a list version tuple and a mapping of licenses
name->id and id->name loaded from a JSON file
from https://github.com/spdx/license-list-data
"""
licenses_map = {}
with codecs.open(file_name, "rb", encoding="utf-8") as lics:
licenses = json.load(lics)
version = tuple(licenses["licenseListVersion"].split("."))
for lic in licenses[object_type]:
if lic.get("isDeprecatedLicenseId"):
continue
name = lic["name"]
identifier = lic[id_attribute]
licenses_map[name] = identifier
licenses_map[identifier] = name
return version, licenses_map
def load_license_list(file_name):
"""
Return the licenses list version tuple and a mapping of licenses
name->id and id->name loaded from a JSON file
from https://github.com/spdx/license-list-data
"""
return _load_list(file_name, object_type="licenses", id_attribute="licenseId")
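# The returned mapping is bidirectional (name -> id and id -> name).
# Illustrative example; the exact strings depend on the bundled licenses.json:
#
#     version, licenses = load_license_list(_licenses)
#     licenses['MIT']           # -> 'MIT License'
#     licenses['MIT License']   # -> 'MIT'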
def load_exception_list(file_name):
"""
Return the exceptions list version tuple and a mapping of exceptions
name->id and id->name loaded from a JSON file
from https://github.com/spdx/license-list-data
"""
return _load_list(
file_name, object_type="exceptions", id_attribute="licenseExceptionId"
)
(_lmajor, _lminor), LICENSE_MAP = load_license_list(_licenses)
LICENSE_LIST_VERSION = Version(major=_lmajor, minor=_lminor)
(_emajor, _eminor), EXCEPTION_MAP = load_exception_list(_exceptions)
EXCEPTION_LIST_VERSION = Version(major=_emajor, minor=_eminor)
assert LICENSE_LIST_VERSION == EXCEPTION_LIST_VERSION
del _emajor, _eminor, EXCEPTION_LIST_VERSION
| apache-2.0 | 7,593,922,663,134,030,000 | 34.173333 | 82 | 0.705838 | false |
davidcox/glumpy | glumpy/atb/handlers_glut.py | 1 | 2701 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (C) 2009-2010 Nicolas P. Rougier
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
import string
import glumpy
from raw import *
def _make_glumpy_map():
ret = {}
for c in string.letters:
ret[getattr(glumpy.key, c.upper())] = ord(c)
for c in string.digits:
ret[getattr(glumpy.key, "_"+c)] = ord(c)
ret.update({
glumpy.key.SPACE: ord(' '),
glumpy.key.BACKSPACE: ord('\b'),
glumpy.key.RETURN: ord('\r'),
glumpy.key.PERIOD: ord('.'),
glumpy.key.MINUS: ord('-'),
})
return ret
_glumpy_key_map = _make_glumpy_map()
_glumpy_button_map = {
glumpy.mouse.LEFT: TW_MOUSE_LEFT,
glumpy.mouse.MIDDLE: TW_MOUSE_MIDDLE,
glumpy.mouse.RIGHT: TW_MOUSE_RIGHT,
}
def map_key(key):
return _glumpy_key_map[key]
def map_button(button):
return _glumpy_button_map[button]
def map_modifiers(modifiers):
ret = TW_KMOD_NONE
if modifiers & glumpy.key.MOD_SHIFT:
ret |= TW_KMOD_SHIFT
if modifiers & glumpy.key.MOD_CTRL:
ret |= TW_KMOD_CTRL
if modifiers & glumpy.key.MOD_ALT:
ret |= TW_KMOD_ALT
return ret
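# Illustrative example: a key event with both Shift and Ctrl held down maps to
# the combined AntTweakBar modifier mask:
#
#     mods = map_modifiers(glumpy.key.MOD_SHIFT | glumpy.key.MOD_CTRL)
#     # mods == TW_KMOD_SHIFT | TW_KMOD_CTRL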
class Handlers(object):
def __init__(self, window):
self.window = window
def on_resize(self, width, height):
TwWindowSize(width, height)
def on_key_press(self, symbol, modifiers):
try:
TwKeyPressed(map_key(symbol), map_modifiers(modifiers))
self.window.draw()
return True
except:
pass
return False
def on_mouse_press(self, x, y, button):
if not button in _glumpy_button_map.keys():
return False
if TwMouseButton(TW_MOUSE_PRESSED, map_button(button)):
self.window.draw()
return True
def on_mouse_release(self, x, y, button):
if not button in _glumpy_button_map.keys():
return False
if TwMouseButton(TW_MOUSE_RELEASED, map_button(button)):
self.window.draw()
return True
def on_mouse_drag(self, x, y, dx, dy, buttons):
if TwMouseMotion(x, self.window.height-y):
self.window.draw()
return True
def on_mouse_motion(self, x, y, dx, dy):
if TwMouseMotion(x, self.window.height-y):
self.window.draw()
return True
def on_draw(self):
TwDraw()
| bsd-3-clause | -7,359,036,954,441,203,000 | 27.734043 | 78 | 0.547945 | false |
bradmontgomery/word2html | setup.py | 1 | 1173 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Setup script for python-bitbucket."""
from setuptools import setup
from word2html import VERSION
SHORT_DESC = (
"A quick and dirty script to convert a Word "
"(docx) document to html."
)
setup(
name='word2html',
version=VERSION,
description=SHORT_DESC,
long_description=open('README.md').read(),
keywords='word to html',
author='Brad Montgomery',
author_email='[email protected]',
url='https://github.com/bradmontgomery/word2html',
license='MIT',
packages=['word2html'],
include_package_data=True,
package_data={'': ['README.md', 'LICENSE.txt']},
zip_safe=False,
install_requires=[
'pypandoc',
'pytidylib',
],
entry_points={
'console_scripts': [
'word2html = word2html.main:main',
],
},
# Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Topic :: Text Processing',
'Topic :: Utilities',
],
)
| mit | 993,002,640,202,158,300 | 25.066667 | 77 | 0.606138 | false |
ssdi-drive/nuxeo-drive | nuxeo-drive-client/nxdrive/wui/modal.py | 1 | 2265 |
# coding: utf-8
from PyQt4.QtCore import Qt, pyqtSlot
from nxdrive.wui.dialog import WebDialog, WebDriveApi
class WebModalApi(WebDriveApi):
@pyqtSlot(str)
def result(self, button_id):
self.dialog.set_result(button_id)
def _json_default(self, obj):
if isinstance(obj, WebModalButton):
return self._export_button(obj)
else:
return super(WebModalApi, self)._json_default(obj)
def _export_button(self, obj):
result = dict()
result["uid"] = obj.uid
result["label"] = obj._label
result["style"] = obj._style
return result
@pyqtSlot(result=str)
def get_message(self):
return self.dialog.get_message()
@pyqtSlot(result=str)
def get_buttons(self):
res = []
for button in self.dialog.get_buttons().itervalues():
res.append(button)
return self._json(res)
class WebModalButton(object):
# for style see bootstrap
def __init__(self, uid, label, style="default"):
self.uid = uid
self._label = label
self._style = style
class WebModal(WebDialog):
def __init__(self, application, message, page="modal.html", title="Nuxeo Drive", api=None, buttons=None):
if api is None:
api = WebModalApi(application)
super(WebModal, self).__init__(application, page=page, title=title, api=api)
self.setSizeGripEnabled(False)
self.setWindowFlags(Qt.WindowStaysOnTopHint)
self._buttons = dict()
self._message = message
self._result = ""
if buttons is not None:
for button in buttons:
self._buttons[button.uid] = button
def get_message(self):
return self._message
def get_buttons(self):
return self._buttons
def set_result(self, res):
self._result = res
self.accept()
def get_result(self):
return self._result
def remove_button(self, uid):
if uid in self._buttons:
del self._buttons[uid]
def add_button(self, uid, label, style="default"):
self._buttons[uid] = WebModalButton(uid, label, style)
def exec_(self):
super(WebModal, self).exec_()
return self.get_result()
| lgpl-2.1 | -4,551,620,661,134,086,700 | 26.621951 | 109 | 0.600883 | false |
micahflee/securedrop | securedrop/tests/conftest.py | 1 | 2229 |
# -*- coding: utf-8 -*-
import os
import shutil
import signal
import subprocess
import psutil
import pytest
os.environ['SECUREDROP_ENV'] = 'test' # noqa
import config
# TODO: the PID file for the redis worker is hard-coded below.
# Ideally this constant would be provided by a test harness.
# It has been intentionally omitted from `config.py.example`
# in order to isolate the test vars from prod vars.
TEST_WORKER_PIDFILE = '/tmp/securedrop_test_worker.pid'
def pytest_addoption(parser):
parser.addoption("--page-layout", action="store_true",
default=False, help="run page layout tests")
def pytest_collection_modifyitems(config, items):
if config.getoption("--page-layout"):
return
skip_page_layout = pytest.mark.skip(
reason="need --page-layout option to run page layout tests"
)
for item in items:
if "pagelayout" in item.keywords:
item.add_marker(skip_page_layout)
@pytest.fixture(scope='session')
def setUptearDown():
_start_test_rqworker(config)
yield
_stop_test_rqworker()
_cleanup_test_securedrop_dataroot(config)
def _start_test_rqworker(config):
if not psutil.pid_exists(_get_pid_from_file(TEST_WORKER_PIDFILE)):
tmp_logfile = open('/tmp/test_rqworker.log', 'w')
subprocess.Popen(['rqworker', 'test',
'-P', config.SECUREDROP_ROOT,
'--pid', TEST_WORKER_PIDFILE],
stdout=tmp_logfile,
stderr=subprocess.STDOUT)
def _stop_test_rqworker():
rqworker_pid = _get_pid_from_file(TEST_WORKER_PIDFILE)
if rqworker_pid:
os.kill(rqworker_pid, signal.SIGTERM)
try:
os.remove(TEST_WORKER_PIDFILE)
except OSError:
pass
def _get_pid_from_file(pid_file_name):
try:
return int(open(pid_file_name).read())
except IOError:
return None
def _cleanup_test_securedrop_dataroot(config):
# Keyboard interrupts or dropping to pdb after a test failure sometimes
# result in the temporary test SecureDrop data root not being deleted.
try:
shutil.rmtree(config.SECUREDROP_DATA_ROOT)
except OSError:
pass
| agpl-3.0 | 1,630,277,785,121,707,500 | 27.948052 | 75 | 0.645581 | false |
TemoaProject/temoa | temoa_model/pformat_results.py | 1 | 29209 |
"""
Tools for Energy Model Optimization and Analysis (Temoa):
An open source framework for energy systems optimization modeling
Copyright (C) 2015, NC State University
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
A complete copy of the GNU General Public License v2 (GPLv2) is available
in LICENSE.txt. Users uncompressing this from an archive may not have
received this license file. If not, see <http://www.gnu.org/licenses/>.
"""
# ---------------------------------------------------------------------------
# This module processes model output data, which can be sent to three possible
# locations: the shell, a user-specified database, or an Excel file. Users can
# configure the available outputs.
# ---------------------------------------------------------------------------
__all__ = ('pformat_results', 'stringify_data')
from collections import defaultdict
from sys import stderr as SE, stdout as SO
from shutil import rmtree
import sqlite3
import os
import re
import subprocess
import sys
import pandas as pd
from temoa_config import TemoaConfig
# Need line below to import DB_to_Excel.py from data_processing
sys.path.append(os.path.join(os.getcwd(), 'data_processing'))
from DB_to_Excel import make_excel
# Ensure compatibility with Python 2.7 and 3
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
from pyomo.core import value
def stringify_data ( data, ostream=SO, format='plain' ):
# data is a list of tuples of ('var_name[index]', value)
# data must be a list, as this function replaces each row,
# format is currently unused, but will be utilized to implement things like
# csv
# This padding code is what makes the display of the output values
# line up on the decimal point.
for i, (v, val) in enumerate( data ):
ipart, fpart = repr(f"{val:.6f}").split('.')
data[i] = (ipart, fpart, v)
cell_lengths = ( map(len, l[:-1] ) for l in data )
max_lengths = map(max, zip(*cell_lengths)) # max length of each column
fmt = u' {{:>{:d}}}.{{:<{:d}}} {{}}\n'.format( *max_lengths )
for row in data:
ostream.write( fmt.format(*row) )
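# Minimal usage sketch (hypothetical values):
#
#     rows = [('V_Capacity[R1,coal,2020]', 12.5), ('V_Capacity[R1,wind,2020]', 3.25)]
#     stringify_data(rows, SO)
#
# Each row is written with its integer and fractional parts padded so that the
# decimal points line up in the output stream.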
def pformat_results ( pyomo_instance, pyomo_result, options ):
from pyomo.core import Objective, Var, Constraint
output = StringIO()
m = pyomo_instance # lazy typist
result = pyomo_result
soln = result['Solution']
solv = result['Solver'] # currently unused, but may want it later
prob = result['Problem'] # currently unused, but may want it later
optimal_solutions = (
'feasible', 'globallyOptimal', 'locallyOptimal', 'optimal'
)
if str(soln.Status) not in optimal_solutions:
output.write( 'No solution found.' )
return output
objs = list(m.component_data_objects( Objective ))
if len( objs ) > 1:
msg = '\nWarning: More than one objective. Using first objective.\n'
SE.write( msg )
Cons = soln.Constraint
def collect_result_data( cgroup, clist, epsilon):
# cgroup = "Component group"; i.e., Vars or Cons
# clist = "Component list"; i.e., where to store the data
# epsilon = absolute value below which to ignore a result
results = defaultdict(list)
for name, data in cgroup.items():
if 'Value' not in data.keys() or (abs( data['Value'] ) < epsilon ) : continue
# name looks like "Something[some,index]"
group, index = name[:-1].split('[')
results[ group ].append( (name.replace("'", ''), data['Value']) )
clist.extend( t for i in sorted( results ) for t in sorted(results[i]))
supp_outputs_df = pd.DataFrame.from_dict(cgroup, orient='index')
supp_outputs_df = supp_outputs_df.loc[(supp_outputs_df != 0).any(axis=1)]
if 'Dual' in supp_outputs_df.columns:
duals = supp_outputs_df['Dual'].copy()
duals = -duals
duals = duals[duals>epsilon]
duals.index.name = 'constraint_name'
duals = duals.to_frame()
duals.loc[:,'scenario'] = options.scenario
return duals
#Create a dictionary in which to store "solved" variable values
svars = defaultdict( lambda: defaultdict( float ))
con_info = list()
epsilon = 1e-9 # threshold for "so small it's zero"
emission_keys = { (r, i, t, v, o) : set() for r, e, i, t, v, o in m.EmissionActivity }
for r, e, i, t, v, o in m.EmissionActivity:
emission_keys[(r, i, t, v, o)].add(e)
P_0 = min( m.time_optimize )
P_e = m.time_future.last()
GDR = value( m.GlobalDiscountRate )
MLL = m.ModelLoanLife
MPL = m.ModelProcessLife
LLN = m.LifetimeLoanProcess
x = 1 + GDR # convenience variable, nothing more
if hasattr(options, 'file_location') and os.path.join('temoa_model', 'config_sample_myopic') in options.file_location:
original_dbpath = options.output
con = sqlite3.connect(original_dbpath)
cur = con.cursor()
time_periods = cur.execute("SELECT t_periods FROM time_periods WHERE flag='f'").fetchall()
P_0 = time_periods[0][0]
P_e = time_periods[-1][0]
# We need to know if a myopic run is the last run or not.
P_e_time_optimize = time_periods[-2][0]
P_e_current = int(options.file_location.split("_")[-1])
con.commit()
con.close()
# Extract optimal decision variable values related to commodity flow:
for r, p, s, d, t, v in m.V_StorageLevel:
val = value( m.V_StorageLevel[r, p, s, d, t, v] )
if abs(val) < epsilon: continue
svars['V_StorageLevel'][r, p, s, d, t, v] = val
# vflow_in is defined only for storage techs
for r, p, s, d, i, t, v, o in m.V_FlowIn:
val_in = value( m.V_FlowIn[r, p, s, d, i, t, v, o] )
if abs(val_in) < epsilon: continue
svars['V_FlowIn'][r, p, s, d, i, t, v, o] = val_in
for r, p, s, d, i, t, v, o in m.V_FlowOut:
val_out = value( m.V_FlowOut[r, p, s, d, i, t, v, o] )
if abs(val_out) < epsilon: continue
svars['V_FlowOut'][r, p, s, d, i, t, v, o] = val_out
if t not in m.tech_storage:
val_in = value( m.V_FlowOut[r, p, s, d, i, t, v, o] ) / value(m.Efficiency[r, i, t, v, o])
svars['V_FlowIn'][r, p, s, d, i, t, v, o] = val_in
if (r, i, t, v, o) not in emission_keys: continue
emissions = emission_keys[r, i, t, v, o]
for e in emissions:
evalue = val_out * m.EmissionActivity[r, e, i, t, v, o]
svars[ 'V_EmissionActivityByPeriodAndProcess' ][r, p, e, t, v] += evalue
for r, p, i, t, v, o in m.V_FlowOutAnnual:
for s in m.time_season:
for d in m.time_of_day:
val_out = value( m.V_FlowOutAnnual[r, p, i, t, v, o] ) * value( m.SegFrac[s , d ])
if abs(val_out) < epsilon: continue
svars['V_FlowOut'][r, p, s, d, i, t, v, o] = val_out
svars['V_FlowIn'][r, p, s, d, i, t, v, o] = val_out / value(m.Efficiency[r, i, t, v, o])
if (r, i, t, v, o) not in emission_keys: continue
emissions = emission_keys[r, i, t, v, o]
for e in emissions:
evalue = val_out * m.EmissionActivity[r, e, i, t, v, o]
svars[ 'V_EmissionActivityByPeriodAndProcess' ][r, p, e, t, v] += evalue
for r, p, s, d, i, t, v, o in m.V_Curtailment:
val = value( m.V_Curtailment[r, p, s, d, i, t, v, o] )
if abs(val) < epsilon: continue
svars['V_Curtailment'][r, p, s, d, i, t, v, o] = val
svars['V_FlowIn'][r, p, s, d, i, t, v, o] = (val + value( m.V_FlowOut[r, p, s, d, i, t, v, o] )) / value(m.Efficiency[r, i, t, v, o])
if (r, i, t, v, o) not in emission_keys: continue
emissions = emission_keys[r, i, t, v, o]
for e in emissions:
evalue = val * m.EmissionActivity[r, e, i, t, v, o]
svars[ 'V_EmissionActivityByPeriodAndProcess' ][r, p, e, t, v] += evalue
for r, p, i, t, v, o in m.V_FlexAnnual:
for s in m.time_season:
for d in m.time_of_day:
val_out = value( m.V_FlexAnnual[r, p, i, t, v, o] ) * value( m.SegFrac[s , d ])
if abs(val_out) < epsilon: continue
svars['V_Curtailment'][r, p, s, d, i, t, v, o] = val_out
svars['V_FlowOut'][r, p, s, d, i, t, v, o] -= val_out
for r, p, s, d, i, t, v, o in m.V_Flex:
val_out = value( m.V_Flex[r, p, s, d, i, t, v, o] )
if abs(val_out) < epsilon: continue
svars['V_Curtailment'][r, p, s, d, i, t, v, o] = val_out
svars['V_FlowOut'][r, p, s, d, i, t, v, o] -= val_out
# Extract optimal decision variable values related to capacity:
if hasattr(options, 'file_location') and os.path.join('temoa_model', 'config_sample_myopic') not in options.file_location:
for r, t, v in m.V_Capacity:
val = value( m.V_Capacity[r, t, v] )
if abs(val) < epsilon: continue
svars['V_Capacity'][r, t, v] = val
else:
for r, t, v in m.V_Capacity:
if v in m.time_optimize:
val = value( m.V_Capacity[r, t, v] )
if abs(val) < epsilon: continue
svars['V_Capacity'][r, t, v] = val
for r, p, t in m.V_CapacityAvailableByPeriodAndTech:
val = value( m.V_CapacityAvailableByPeriodAndTech[r, p, t] )
if abs(val) < epsilon: continue
svars['V_CapacityAvailableByPeriodAndTech'][r, p, t] = val
# Calculate model costs:
if hasattr(options, 'file_location') and os.path.join('temoa_model', 'config_sample_myopic') not in options.file_location:
# This is a generic workaround. Not sure how else to automatically discover
# the objective name
obj_name, obj_value = objs[0].getname(True), value( objs[0] )
svars[ 'Objective' ]["('"+obj_name+"')"] = obj_value
for r, t, v in m.CostInvest.sparse_iterkeys(): # Returns only non-zero values
icost = value( m.V_Capacity[r, t, v] )
if abs(icost) < epsilon: continue
icost *= value( m.CostInvest[r, t, v] )*(
(
1 - x**( -min( value(m.LifetimeProcess[r, t, v]), P_e - v ) )
)/(
1 - x**( -value( m.LifetimeProcess[r, t, v] ) )
)
)
svars[ 'Costs' ][ 'V_UndiscountedInvestmentByProcess', r, t, v] += icost
icost *= value( m.LoanAnnualize[r, t, v] )
icost *= (
value( LLN[r, t, v] ) if not GDR else
(x **(P_0 - v + 1) * (1 - x **(-value( LLN[r, t, v] ))) / GDR)
)
svars[ 'Costs' ][ 'V_DiscountedInvestmentByProcess', r, t, v] += icost
for r, p, t, v in m.CostFixed.sparse_iterkeys():
fcost = value( m.V_Capacity[r, t, v] )
if abs(fcost) < epsilon: continue
fcost *= value( m.CostFixed[r, p, t, v] )
svars[ 'Costs' ][ 'V_UndiscountedFixedCostsByProcess', r, t, v] += fcost * value( MPL[r, p, t, v] )
fcost *= (
value( MPL[r, p, t, v] ) if not GDR else
(x **(P_0 - p + 1) * (1 - x **(-value( MPL[r, p, t, v] ))) / GDR)
)
svars[ 'Costs' ][ 'V_DiscountedFixedCostsByProcess', r, t, v] += fcost
for r, p, t, v in m.CostVariable.sparse_iterkeys():
if t not in m.tech_annual:
vcost = sum(
value (m.V_FlowOut[r, p, S_s, S_d, S_i, t, v, S_o])
for S_i in m.processInputs[r, p, t, v]
for S_o in m.ProcessOutputsByInput[r, p, t, v, S_i]
for S_s in m.time_season
for S_d in m.time_of_day
)
else:
vcost = sum(
value (m.V_FlowOutAnnual[r, p, S_i, t, v, S_o])
for S_i in m.processInputs[r, p, t, v]
for S_o in m.ProcessOutputsByInput[r, p, t, v, S_i]
)
if abs(vcost) < epsilon: continue
vcost *= value( m.CostVariable[r, p, t, v] )
svars[ 'Costs' ][ 'V_UndiscountedVariableCostsByProcess', r, t, v] += vcost * value( MPL[r, p, t, v] )
vcost *= (
value( MPL[r, p, t, v] ) if not GDR else
(x **(P_0 - p + 1) * (1 - x **(-value( MPL[r, p, t, v] ))) / GDR)
)
svars[ 'Costs' ][ 'V_DiscountedVariableCostsByProcess', r, t, v] += vcost
#update the costs of exchange technologies.
#Assumption 1: If Ri-Rj appears in the cost tables but Rj-Ri does not,
#then the total costs are distributed between the regions
#Ri and Rj proportional to their use of the exchange technology connecting the
#regions.
#Assumption 2: If both the directional entries appear in the cost tables,
#Assumption 1 is no longer applied and the costs are calculated as they
#are entered in the cost tables.
# assumption 3: Unlike other output tables in which Ri-Rj and Rj-Ri entries
# are allowed in the region column, for the Output_Costs table the region
#to the right of the hyphen sign gets the costs.
for i in m.RegionalExchangeCapacityConstraint_rrtv.iterkeys():
reg_dir1 = i[0]+"-"+i[1]
reg_dir2 = i[1]+"-"+i[0]
tech = i[2]
vintage = i[3]
key = (reg_dir1, tech, vintage)
try:
act_dir1 = value (sum(m.V_FlowOut[reg_dir1, p, s, d, S_i, tech, vintage, S_o]
for p in m.time_optimize if (p < vintage + value(m.LifetimeProcess[reg_dir1, tech, vintage])) and (p >= vintage)
for s in m.time_season
for d in m.time_of_day
for S_i in m.processInputs[reg_dir1, p, tech, vintage]
for S_o in m.ProcessOutputsByInput[reg_dir1, p, tech, vintage, S_i]
))
act_dir2 = value (sum(m.V_FlowOut[reg_dir2, p, s, d, S_i, tech, vintage, S_o]
for p in m.time_optimize if (p < vintage + value(m.LifetimeProcess[reg_dir1, tech, vintage])) and (p >= vintage)
for s in m.time_season
for d in m.time_of_day
for S_i in m.processInputs[reg_dir2, p, tech, vintage]
for S_o in m.ProcessOutputsByInput[reg_dir2, p, tech, vintage, S_i]
))
except:
act_dir1 = value (sum(m.V_FlowOutAnnual[reg_dir1, p, S_i, tech, vintage, S_o]
for p in m.time_optimize if (p < vintage + value(m.LifetimeProcess[reg_dir1, tech, vintage])) and (p >= vintage)
for S_i in m.processInputs[reg_dir1, p, tech, vintage]
for S_o in m.ProcessOutputsByInput[reg_dir1, p, tech, vintage, S_i]
))
act_dir2 = value (sum(m.V_FlowOutAnnual[reg_dir2, p, S_i, tech, vintage, S_o]
for p in m.time_optimize if (p < vintage + value(m.LifetimeProcess[reg_dir1, tech, vintage])) and (p >= vintage)
for S_i in m.processInputs[reg_dir2, p, tech, vintage]
for S_o in m.ProcessOutputsByInput[reg_dir2, p, tech, vintage, S_i]
))
for item in list(svars[ 'Costs' ]):
if item[2] == tech:
opposite_dir = item[1][item[1].find("-")+1:]+"-"+item[1][:item[1].find("-")]
if (item[0],opposite_dir,item[2],item[3]) in svars[ 'Costs' ].keys():
continue #if both directional entries are already in svars[ 'Costs' ], they're left intact.
if item[1] == reg_dir1:
svars[ 'Costs' ][(item[0],reg_dir2,item[2],item[3])] = svars[ 'Costs' ][item] * act_dir2 / (act_dir1 + act_dir2)
svars[ 'Costs' ][item] = svars[ 'Costs' ][item] * act_dir1 / (act_dir1 + act_dir2)
#Remove Ri-Rj entries from being populated in the Outputs_Costs. Ri-Rj means a cost
#for region Rj
for item in list(svars[ 'Costs' ]):
if item[2] in m.tech_exchange:
svars[ 'Costs' ][(item[0],item[1][item[1].find("-")+1:],item[2],item[3])] = svars[ 'Costs' ][item]
del svars[ 'Costs' ][item]
duals = collect_result_data( Cons, con_info, epsilon=1e-9 )
msg = ( 'Model name: %s\n'
'Objective function value (%s): %s\n'
'Non-zero variable values:\n'
)
if hasattr(options, 'file_location') and os.path.join('temoa_model', 'config_sample_myopic') not in options.file_location:
output.write( msg % (m.name, obj_name, obj_value) )
def make_var_list ( variables ):
var_list = []
for vgroup, values in sorted( variables.items() ):
for vindex, val in sorted( values.items() ):
if isinstance( vindex, tuple ):
vindex = ','.join( str(i) for i in vindex )
var_list.append(( '{}[{}]'.format(vgroup, vindex), val ))
return var_list
if svars:
stringify_data( make_var_list(svars), output )
else:
output.write( '\nAll variables have a zero (0) value.\n' )
if len( con_info ) > 0:
output.write( '\nBinding constraint values:\n' )
stringify_data( con_info, output )
del con_info
else:
# Since not all Coopr solvers give constraint results, must check
msg = '\nSelected Coopr solver plugin does not give constraint data.\n'
output.write( msg )
output.write( '\n\nIf you use these results for a published article, '
"please run Temoa with the '--how_to_cite' command line argument for "
'citation information.\n')
# -----------------------------------------------------------------
# Write outputs stored in dictionary to the user-specified database
# -----------------------------------------------------------------
# Table dictionary below maps variable names to database table names
tables = { "V_FlowIn" : "Output_VFlow_In", \
"V_FlowOut" : "Output_VFlow_Out", \
"V_Curtailment" : "Output_Curtailment", \
"V_Capacity" : "Output_V_Capacity", \
"V_CapacityAvailableByPeriodAndTech" : "Output_CapacityByPeriodAndTech", \
"V_EmissionActivityByPeriodAndProcess" : "Output_Emissions", \
"Objective" : "Output_Objective", \
"Costs" : "Output_Costs"
}
db_tables = ['time_periods', 'time_season', 'time_of_day', 'technologies', 'commodities',\
'LifetimeTech', 'LifetimeProcess', 'Efficiency', 'EmissionActivity', 'ExistingCapacity']
if isinstance(options, TemoaConfig):
if not options.output:
if options.saveTEXTFILE or options.keepPyomoLP:
for inpu in options.dot_dat:
print(inpu)
file_ty = re.search(r"\b([\w-]+)\.(\w+)\b", inpu)
new_dir = options.path_to_data+os.sep+file_ty.group(1)+'_'+options.scenario+'_model'
if os.path.exists( new_dir ):
rmtree( new_dir )
os.mkdir(new_dir)
print("No Output File specified.")
return output
if not os.path.exists(options.output) :
print("Please put the "+options.output+" file in the right Directory")
return output
con = sqlite3.connect(options.output)
cur = con.cursor() # A database cursor enables traversal over DB records
		con.text_factory = str # Return text columns as Python str (UTF-8) rather than bytes
### Copy tables from Input File to DB file.
# IF output file is empty database.
cur.execute("SELECT * FROM technologies")
		db_has_data = False # stays False when the db file has no rows
		for elem in cur:
			db_has_data = True # at least one row exists, so the db file is not empty
			break
		if db_has_data: #This file could be schema with populated results from previous run. Or it could be a normal db file.
cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='input_file';")
does_input_file_table_exist = False
for i in cur: # This means that the 'input_file' table exists in db.
does_input_file_table_exist = True
if does_input_file_table_exist: #This block distinguishes normal database from schema.
#This is schema file.
cur.execute("SELECT file FROM input_file WHERE id is '1';")
for i in cur:
tagged_file = i[0]
tagged_file = re.sub('["]', "", tagged_file)
if tagged_file == options.dot_dat[0]:
#If Input_file name matches, add output and check tech/comm
dat_to_db(options.dot_dat[0], con)
else:
#If not a match, delete output tables and update input_file. Call dat_to_db
for i in db_tables:
cur.execute("DELETE FROM "+i+";")
cur.execute("VACUUM;")
for i in tables.keys():
cur.execute("DELETE FROM "+tables[i]+";")
cur.execute("VACUUM;")
for i in options.dot_dat:
cur.execute("DELETE FROM input_file WHERE id=1;")
cur.execute("INSERT INTO input_file VALUES(1, '"+i+"');")
break
dat_to_db(i, con)
else: #empty schema db file
cur.execute("CREATE TABLE IF NOT EXISTS input_file ( id integer PRIMARY KEY, file varchar(30));")
for i in tables.keys():
cur.execute("DELETE FROM "+tables[i]+";")
cur.execute("VACUUM;")
for i in options.dot_dat:
cur.execute("DELETE FROM input_file WHERE id=1;")
cur.execute("INSERT INTO input_file(id, file) VALUES(?, ?);", (1, '"'+i+'"'))
break
dat_to_db(i, con)
for table in svars.keys() :
if table in tables :
cur.execute("SELECT DISTINCT scenario FROM '"+tables[table]+"'")
for val in cur :
# If scenario exists, delete unless it's a myopic run (for myopic, the scenario results are deleted
# before the run in temoa_config.py)
if hasattr(options, 'file_location') and options.scenario == val[0] and os.path.join('temoa_model', 'config_sample_myopic') not in options.file_location:
cur.execute("DELETE FROM "+tables[table]+" \
WHERE scenario is '"+options.scenario+"'")
if table == 'Objective' : # Only table without sector info
for key in svars[table].keys():
key_str = str(key) # only 1 row to write
key_str = key_str[1:-1] # Remove parentheses
cur.execute("INSERT INTO "+tables[table]+" \
VALUES('"+options.scenario+"',"+key_str+", \
"+str(svars[table][key])+");")
else : # First add 'NULL' for sector then update
for key in svars[table].keys() : # Need to loop over keys (rows)
key_str = str(key)
key_str = key_str[1:-1] # Remove parentheses
if table != 'Costs':
cur.execute("INSERT INTO "+tables[table]+ \
" VALUES('"+str(key[0])+"', '"+options.scenario+"','NULL', \
"+key_str[key_str.find(',')+1:]+","+str(svars[table][key])+");")
else:
key_str = str((key[0],key[2],key[3]))
key_str = key_str[1:-1] # Remove parentheses
cur.execute("INSERT INTO "+tables[table]+ \
" VALUES('"+str(key[1])+"', '"+options.scenario+"','NULL', \
"+key_str+","+str(svars[table][key])+");")
cur.execute("UPDATE "+tables[table]+" SET sector = \
(SELECT technologies.sector FROM technologies \
WHERE "+tables[table]+".tech = technologies.tech);")
#WRITE DUALS RESULTS
overwrite_keys = [str(tuple(x)) for x in duals.reset_index()[['constraint_name','scenario']].to_records(index=False)]
#delete records that will be overwritten by new duals dataframe
cur.execute("DELETE FROM Output_Duals WHERE (constraint_name, scenario) IN (VALUES " + ','.join(overwrite_keys) + ")")
#write new records from new duals dataframe
duals.to_sql('Output_Duals',con, if_exists='append')
con.commit()
con.close()
if options.saveEXCEL or options.saveTEXTFILE or options.keepPyomoLP:
for inpu in options.dot_dat:
file_ty = re.search(r"\b([\w-]+)\.(\w+)\b", inpu)
new_dir = options.path_to_data+os.sep+file_ty.group(1)+'_'+options.scenario+'_model'
if os.path.exists( new_dir ):
rmtree( new_dir )
os.mkdir(new_dir)
if options.saveEXCEL:
file_type = re.search(r"([\w-]+)\.(\w+)\b", options.output)
file_n = file_type.group(1)
temp_scenario = set()
temp_scenario.add(options.scenario)
#make_excel function imported near the top
make_excel(options.output, new_dir+os.sep+options.scenario, temp_scenario)
#os.system("python data_processing"+os.sep+"DB_to_Excel.py -i \
# ""+options.output+" \
# " -o data_files"+os.sep+options.scenario+" -s "+options.scenario)
return output
def dat_to_db(input_file, output_schema, run_partial=False):
def traverse_dat(dat_filename, search_tablename):
result_string = ""
table_found_flag = False
with open(dat_filename) as f:
for line in f:
line = re.sub("[#].*$", " ", line)
if table_found_flag:
result_string += line
if re.search(";\s*$", line):
break
if re.search(""+search_tablename+"\s*[:][=]", line):
result_string += line
table_found_flag = True
if re.search(";\s*$", line):
break
return result_string
#####Code Starts here
tables_single_value = [ 'time_exist', 'time_future', 'time_season', 'time_of_day', \
'tech_baseload', 'tech_resource', 'tech_production', 'tech_storage', \
'commodity_physical', 'commodity_demand', 'commodity_emissions']
partial_run_tech = ['tech_baseload', 'tech_resource', 'tech_production', 'tech_storage']
partial_run_comm = ['commodity_physical', 'commodity_demand', 'commodity_emissions']
tables_multiple_value = ['ExistingCapacity', 'Efficiency', 'LifetimeTech', \
'LifetimeProcess', 'EmissionActivity']
parsed_data = {}
#if db_or_dat_flag: #This is an input db file
# import pdb; pdb.set_trace()
# output_schema.execute("ATTACH DATABASE ? AS db2;", "'"+input_file+"'")
# for i in db_tables:
# output_schema.execute("INSERT INTO "+i+" SELECT * FROM db2."+i+";")
if run_partial:
comm_set = set()
tech_set = set()
for i in partial_run_comm:
raw_string = traverse_dat(input_file, i)
raw_string = re.sub("\s+", " ", raw_string)
raw_string = re.sub("^.*[:][=]", "", raw_string)
raw_string = re.sub(";\s*$", "", raw_string)
raw_string = re.sub("^\s+|\s+$", "", raw_string)
parsed_data[i] = re.split(" ", raw_string)
for datas in parsed_data[i]:
if datas == '':
continue
comm_set.add(datas)
for i in partial_run_tech:
raw_string = traverse_dat(input_file, i)
raw_string = re.sub("\s+", " ", raw_string)
raw_string = re.sub("^.*[:][=]", "", raw_string)
raw_string = re.sub(";\s*$", "", raw_string)
raw_string = re.sub("^\s+|\s+$", "", raw_string)
parsed_data[i] = re.split(" ", raw_string)
for datas in parsed_data[i]:
if datas == '':
continue
tech_set.add(datas)
return comm_set, tech_set
#This is an input dat file
for i in tables_single_value:
raw_string = traverse_dat(input_file, i)
raw_string = re.sub("\s+", " ", raw_string)
raw_string = re.sub("^.*[:][=]", "", raw_string)
raw_string = re.sub(";\s*$", "", raw_string)
raw_string = re.sub("^\s+|\s+$", "", raw_string)
parsed_data[i] = re.split(" ", raw_string)
for i in tables_multiple_value:
raw_string = traverse_dat(input_file, i)
raw_string = re.sub("\n", ",", raw_string)
raw_string = re.sub("\s+", " ", raw_string)
raw_string = re.sub("^.*[:][=]\s*,", "", raw_string)
raw_string = re.sub(",?;\s*,?$", "", raw_string)
raw_string = re.sub("^\s+|\s+$", "", raw_string)
raw_string = re.sub("\s?,\s?", ",", raw_string)
raw_string = re.sub(",+", ",", raw_string)
parsed_data[i] = re.split(",", raw_string)
#Fill time_periods
for i in parsed_data['time_exist']:
		if i == '':
continue
output_schema.execute("INSERT OR REPLACE INTO time_periods VALUES("+i+", 'e');")
for i in parsed_data['time_future']:
		if i == '':
continue
output_schema.execute("INSERT OR REPLACE INTO time_periods VALUES("+i+", 'f');")
#Fill time_season
for i in parsed_data['time_season']:
		if i == '':
continue
output_schema.execute("INSERT OR REPLACE INTO time_season VALUES('"+i+"');")
#Fill time_of_day
for i in parsed_data['time_of_day']:
		if i == '':
continue
output_schema.execute("INSERT OR REPLACE INTO time_of_day VALUES('"+i+"');")
#Fill technologies
for i in parsed_data['tech_baseload']:
		if i == '':
continue
output_schema.execute("INSERT OR REPLACE INTO technologies VALUES('"+i+"', 'pb', '', '');")
for i in parsed_data['tech_storage']:
		if i == '':
continue
output_schema.execute("INSERT OR REPLACE INTO technologies VALUES('"+i+"', 'ph', '', '');")
for i in parsed_data['tech_production']:
		if i == '':
continue
if i in parsed_data['tech_storage']:
continue
if i in parsed_data['tech_baseload']:
continue
output_schema.execute("INSERT OR REPLACE INTO technologies VALUES('"+i+"', 'p', '', '');")
for i in parsed_data['tech_resource']:
		if i == '':
continue
output_schema.execute("INSERT OR REPLACE INTO technologies VALUES('"+i+"', 'r', '', '');")
#Fill commodities
for i in parsed_data['commodity_demand']:
		if i == '':
continue
output_schema.execute("INSERT OR REPLACE INTO commodities VALUES('"+i+"', 'd', '');")
for i in parsed_data['commodity_physical']:
		if i == '':
continue
output_schema.execute("INSERT OR REPLACE INTO commodities VALUES('"+i+"', 'p', '');")
for i in parsed_data['commodity_emissions']:
		if i == '':
continue
output_schema.execute("INSERT OR REPLACE INTO commodities VALUES('"+i+"', 'e', '');")
#Fill ExistingCapacity
for i in parsed_data['ExistingCapacity']:
		if i == '':
continue
row_data = re.split(" ", i)
row_data.append('')
row_data.append('')
output_schema.execute("INSERT OR REPLACE INTO ExistingCapacity VALUES(?, ?, ?, ?, ?);", row_data)
#Fill Efficiency
for i in parsed_data['Efficiency']:
		if i == '':
continue
row_data = re.split(" ", i)
row_data.append('')
output_schema.execute("INSERT OR REPLACE INTO Efficiency VALUES(?, ?, ?, ?, ?, ?);", row_data)
#Fill LifetimeTech
for i in parsed_data['LifetimeTech']:
		if i == '':
continue
row_data = re.split(" ", i)
row_data.append('')
output_schema.execute("INSERT OR REPLACE INTO LifetimeTech VALUES(?, ?, ?);", row_data)
#Fill LifetimeProcess
for i in parsed_data['LifetimeProcess']:
		if i == '':
continue
row_data = re.split(" ", i)
row_data.append('')
output_schema.execute("INSERT OR REPLACE INTO LifetimeProcess VALUES(?, ?, ?, ?);", row_data)
#Fill EmissionActivity
for i in parsed_data['EmissionActivity']:
		if i == '':
continue
row_data = re.split(" ", i)
row_data.append('')
		if len(row_data) == 7:
row_data.append('')
output_schema.execute("INSERT OR REPLACE INTO EmissionActivity VALUES(?, ?, ?, ?, ?, ?, ?, ?);", row_data)
| gpl-2.0 | -683,635,195,755,906,400 | 37.082138 | 158 | 0.623746 | false |
kvvzr/Melete | melete/login_util.py | 1 | 1999 | import os, inspect, config
from functools import wraps
from flask import (
request,
g, session, url_for, redirect
)
from flask_oauthlib.client import OAuth
from melete.models import *
app = inspect.getmodule(inspect.stack()[1][0]).app
oauth = OAuth(app)
def login_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if g.user is None:
return redirect(url_for('login'))
return f(*args, **kwargs)
return decorated_function
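# Illustrative usage of the decorator above (a sketch, not part of the original module;
# the '/settings' route and its body are hypothetical):
#
#   @app.route('/settings')
#   @login_required
#   def settings():
#       return 'only reachable when g.user is set'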
twitter = oauth.remote_app(
'twitter',
consumer_key=os.environ.get('TWITTER_API_KEY') if 'TWITTER_API_KEY' in os.environ else config.twitter['consumer_key'],
consumer_secret=os.environ.get('TWITTER_API_SECRET') if 'TWITTER_API_SECRET' in os.environ else config.twitter['consumer_secret'],
base_url='https://api.twitter.com/1.1/',
request_token_url='https://api.twitter.com/oauth/request_token',
access_token_url='https://api.twitter.com/oauth/access_token',
authorize_url='https://api.twitter.com/oauth/authenticate',
)
@twitter.tokengetter
def get_twitter_token():
if 'twitter_oauth' in session:
res = session['twitter_oauth']
return res['oauth_token'], res['oauth_token_secret']
@app.before_request
def before_request():
g.user = None
if 'twitter_oauth' in session:
g.user = session['twitter_oauth']
@app.route('/login/twitter')
def login_twitter():
callback_url = url_for('oauthorized', next=request.args.get('next'))
return twitter.authorize(callback=callback_url or request.referrer or None)
@app.route('/oauthorized')
def oauthorized():
res = twitter.authorized_response()
if res is None:
return redirect(url_for('index'))
session['twitter_oauth'] = res
user = Users.query.filter_by(twitter_id=res['user_id']).first()
if user is None:
return redirect(url_for('sign_up'))
session['user_id'] = user.id
session['user_name'] = user.name
return redirect(url_for('index'))
| mit | 1,977,923,067,768,644,400 | 30.234375 | 134 | 0.671336 | false |
UdK-VPT/Open_eQuarter | mole/extensions/eval_enev/oeq_UE_Window.py | 1 | 1187 | # -*- coding: utf-8 -*-
import os,math
from qgis.core import NULL
from mole import oeq_global
from mole.project import config
from mole.extensions import OeQExtension
from mole.stat_corr import rb_contemporary_window_uvalue_by_building_age_lookup
from mole.stat_corr import nrb_contemporary_window_uvalue_by_building_age_lookup
def calculation(self=None, parameters={}, feature=None):
from math import floor, ceil
from PyQt4.QtCore import QVariant
return {'WN_UE': {'type': QVariant.Double, 'value': 1.3}}
extension = OeQExtension(
extension_id=__name__,
category='Evaluation',
subcategory='U-Values EnEV',
extension_name='Window Quality (U_Value, EnEV)',
layer_name= 'U Window EnEV',
extension_filepath=os.path.join(__file__),
colortable = os.path.join(os.path.splitext(__file__)[0] + '.qml'),
field_id='WN_UE',
source_type='none',
par_in=[],
sourcelayer_name=config.data_layer_name,
targetlayer_name=config.data_layer_name,
active=True,
show_results=['WN_UE'],
description=u"Calculate the EnEV U-Value of the Building's windows",
evaluation_method=calculation)
extension.registerExtension(default=True)
| gpl-2.0 | -469,534,712,401,246,140 | 32.914286 | 80 | 0.709351 | false |
yaybu/touchdown | touchdown/aws/vpc/nat_gateway.py | 1 | 1925 | # Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from touchdown.core import argument, serializers
from touchdown.core.plan import Plan
from touchdown.core.resource import Resource
from ..common import SimpleApply, SimpleDescribe, SimpleDestroy
from .elastic_ip import ElasticIp
from .subnet import Subnet
class NatGateway(Resource):
resource_name = "nat_gateway"
name = argument.Callable(lambda r: r.subnet.name)
elastic_ip = argument.Resource(
ElasticIp, field="AllocationId", serializer=serializers.Property("AllocationId")
)
subnet = argument.Resource(
Subnet, field="SubnetId", serializer=serializers.Identifier()
)
class Describe(SimpleDescribe, Plan):
resource = NatGateway
service_name = "ec2"
api_version = "2015-10-01"
describe_action = "describe_nat_gateways"
describe_envelope = "NatGateways"
key = "NatGatewayId"
signature = ()
def get_describe_filters(self):
subnet = self.runner.get_plan(self.resource.subnet)
if not subnet.resource_id:
return None
return {"Filters": [{"Name": "subnet-id", "Values": [subnet.resource_id]}]}
class Apply(SimpleApply, Describe):
create_action = "create_nat_gateway"
waiter = "nat_gateway_available"
class Destroy(SimpleDestroy, Describe):
destroy_action = "delete_nat_gateway"
waiter = "nat_gateway_deleted"
| apache-2.0 | 8,066,024,475,042,157,000 | 28.166667 | 88 | 0.717922 | false |
Neill3d/MoPlugs | PythonScripts/Startup/CompositionPropertyView.py | 1 | 19176 | # Copyright 2016-2017 Sergey Solokhin (Neill3d)
#
# Github repo - https://github.com/Neill3d/MoPlugs
# Licensed under BSD 3-clause
# https://github.com/Neill3d/MoPlugs/blob/master/LICENSE
#
# Script description:
# Creating property views for the Composition Toolkit components
#
# Topic: Composition Toolkit
#
from pyfbsdk import *
gClassStr = ''
gCompositionModelsPath = 'Browsing/Templates/Shading Elements/Composition/Models'
lMgr = FBPropertyViewManager()
def AddGlobalPropertyView(propName, hierarchy, isFolder=False):
    # Register a global (per-class) property view for gClassStr; the isFolder
    # argument only documents intent at the call sites and is not used here.
    lMgr.AddPropertyView(gClassStr, propName, hierarchy)
def AddPropertyToViewList(pOwner, pPropertyName, pViewList, pHierarchy, pSetOpen=False):
lProperty = pOwner.PropertyList.Find(pPropertyName)
if lProperty != None:
lView = pViewList.AddPropertyView(lProperty, pHierarchy)
if pSetOpen:
lView.SetOpen(pSetOpen,True)
return lView
return None
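# Example call (illustrative only; the nested 'Quality.Advanced' group is hypothetical):
#   AddPropertyToViewList(lModel, 'Density', lViewList, 'Quality.Advanced', pSetOpen=True)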
#################################
def PropertyViewForShadowZone():
viewName = 'Shadow Zone View'
lModel = FBCreateObject( gCompositionModelsPath, 'Model Shadow Zone', 'test zone')
# Create local(by object) property view called 'PythonCreatedView'
lViewList = lMgr.FindPropertyList(lModel, FBPropertyViewType.kFBViewByObjectType, viewName)
if lViewList != None:
lMgr.RemovePropertyList(lModel, FBPropertyViewType.kFBViewByObjectType, viewName)
lViewList = lMgr.CreatePropertyList(lModel, FBPropertyViewType.kFBViewByObjectType, viewName)
AddPropertyToViewList(lModel, 'Enabled', lViewList, '')
# Add 'Show' property under 'Visibility Options' node
AddPropertyToViewList(lModel, 'Show', lViewList, 'Visibility Options')
# Add 'Visibility' property under 'Visibility Options' node
AddPropertyToViewList(lModel, 'Visibility', lViewList, 'Visibility Options')
# Here we add 'Visibility Inheritance' under 'Visibility' which is under 'Visibility Options' node
AddPropertyToViewList(lModel, 'Visibility Inheritance', lViewList, 'Visibility Options.Visibility')
# Same this as above, adding properties under 'Transformation Options'
AddPropertyToViewList(lModel, 'Translation (Lcl)', lViewList, 'Transformation Options')
AddPropertyToViewList(lModel, 'Rotation (Lcl)', lViewList, 'Transformation Options')
AddPropertyToViewList(lModel, 'Scaling (Lcl)', lViewList, 'Transformation Options')
AddPropertyToViewList(lModel, 'Quaternion Rotation', lViewList, 'Transformation Options')
AddPropertyToViewList(lModel, 'Resolution', lViewList, 'Quality')
AddPropertyToViewList(lModel, 'Bias', lViewList, 'Quality')
AddPropertyToViewList(lModel, 'Enable Offset', lViewList, 'Quality')
AddPropertyToViewList(lModel, 'Offset Factor', lViewList, 'Quality')
AddPropertyToViewList(lModel, 'Offset Units', lViewList, 'Quality')
AddPropertyToViewList(lModel, 'Zone Blend Mode', lViewList, '')
AddPropertyToViewList(lModel, 'Master Light', lViewList, '')
AddPropertyToViewList(lModel, 'Shadow Color', lViewList, '')
AddPropertyToViewList(lModel, 'Density', lViewList, '')
AddPropertyToViewList(lModel, 'Feather', lViewList, '')
AddPropertyToViewList(lModel, 'Near Plane', lViewList, '')
AddPropertyToViewList(lModel, 'Far Plane', lViewList, '')
AddPropertyToViewList(lModel, 'Include Object List', lViewList, 'Models Filter')
AddPropertyToViewList(lModel, 'Exclude Object List', lViewList, 'Models Filter')
AddPropertyToViewList(lModel, 'Use GPU Cache', lViewList, 'Models Filter')
AddPropertyToViewList(lModel, 'Render Transparency', lViewList, 'Models Filter')
AddPropertyToViewList(lModel, 'Auto Volume Culling', lViewList, 'Models Filter')
AddPropertyToViewList(lModel, 'Draw Node Count', lViewList, 'Stats')
AddPropertyToViewList(lModel, 'Draw GeomCache Count', lViewList, 'Stats')
AddPropertyToViewList(lModel, 'RealTime Update', lViewList, '')
AddPropertyToViewList(lModel, 'Update', lViewList, '')
AddPropertyToViewList(lModel, 'Update On TimeSlider Change', lViewList, 'Update Options')
AddPropertyToViewList(lModel, 'Update When Selected', lViewList, 'Update Options')
AddPropertyToViewList(lModel, 'Update Skip Frames', lViewList, 'Update Options')
# Select model to see our new property view in Properties tool
#lModel.Selected = True
# In this case we don't have to refresh, but if you update already existing View, you should do it.
lMgr.RefreshPropertyViews()
if lModel != None:
lModel.FBDelete()
def PropertyViewForFogVolume():
viewName = 'Volume Fog View'
lModel = FBCreateObject( gCompositionModelsPath, 'Model Fog Volume', 'test volume')
# Create local(by object) property view called 'PythonCreatedView'
lViewList = lMgr.FindPropertyList(lModel, FBPropertyViewType.kFBViewByObjectType, viewName)
if lViewList != None:
lMgr.RemovePropertyList(lModel, FBPropertyViewType.kFBViewByObjectType, viewName)
lViewList = lMgr.CreatePropertyList(lModel, FBPropertyViewType.kFBViewByObjectType, viewName)
AddPropertyToViewList(lModel, 'Enabled', lViewList, '')
# Add 'Show' property under 'Visibility Options' node
AddPropertyToViewList(lModel, 'Show', lViewList, 'Visibility Options')
# Add 'Visibility' property under 'Visibility Options' node
AddPropertyToViewList(lModel, 'Visibility', lViewList, 'Visibility Options')
# Here we add 'Visibility Inheritance' under 'Visibility' which is under 'Visibility Options' node
AddPropertyToViewList(lModel, 'Visibility Inheritance', lViewList, 'Visibility Options.Visibility')
# Same this as above, adding properties under 'Transformation Options'
AddPropertyToViewList(lModel, 'Translation (Lcl)', lViewList, 'Transformation Options')
AddPropertyToViewList(lModel, 'Rotation (Lcl)', lViewList, 'Transformation Options')
AddPropertyToViewList(lModel, 'Scaling (Lcl)', lViewList, 'Transformation Options')
AddPropertyToViewList(lModel, 'Quaternion Rotation', lViewList, 'Transformation Options')
AddPropertyToViewList(lModel, 'Volume Blend Mode', lViewList, '')
AddPropertyToViewList(lModel, 'Color RGB', lViewList, '')
AddPropertyToViewList(lModel, 'Volume Density', lViewList, '')
AddPropertyToViewList(lModel, 'Volume Feather', lViewList, '')
# Select model to see our new property view in Properties tool
#lModel.Selected = True
# In this case we don't have to refresh, but if you update already existing View, you should do it.
lMgr.RefreshPropertyViews()
if lModel != None:
lModel.FBDelete()
def PropertyViewForFogTarget():
viewName = 'Volume Target View'
lModel = FBCreateObject( gCompositionModelsPath, 'Model Fog Target', 'test target')
# Create local(by object) property view called 'PythonCreatedView'
lViewList = lMgr.FindPropertyList(lModel, FBPropertyViewType.kFBViewByObjectType, viewName)
if lViewList != None:
lMgr.RemovePropertyList(lModel, FBPropertyViewType.kFBViewByObjectType, viewName)
lViewList = lMgr.CreatePropertyList(lModel, FBPropertyViewType.kFBViewByObjectType, viewName)
AddPropertyToViewList(lModel, 'Enabled', lViewList, '')
# Add 'Show' property under 'Visibility Options' node
AddPropertyToViewList(lModel, 'Show', lViewList, 'Visibility Options')
# Add 'Visibility' property under 'Visibility Options' node
AddPropertyToViewList(lModel, 'Visibility', lViewList, 'Visibility Options')
# Here we add 'Visibility Inheritance' under 'Visibility' which is under 'Visibility Options' node
AddPropertyToViewList(lModel, 'Visibility Inheritance', lViewList, 'Visibility Options.Visibility')
# Same this as above, adding properties under 'Transformation Options'
AddPropertyToViewList(lModel, 'Translation (Lcl)', lViewList, 'Transformation Options')
AddPropertyToViewList(lModel, 'Rotation (Lcl)', lViewList, 'Transformation Options')
AddPropertyToViewList(lModel, 'Scaling (Lcl)', lViewList, 'Transformation Options')
AddPropertyToViewList(lModel, 'Quaternion Rotation', lViewList, 'Transformation Options')
AddPropertyToViewList(lModel, 'Target Blend Mode', lViewList, '')
AddPropertyToViewList(lModel, 'Color RGB', lViewList, '')
AddPropertyToViewList(lModel, 'Target Density', lViewList, '')
AddPropertyToViewList(lModel, 'Target Feather', lViewList, '')
AddPropertyToViewList(lModel, 'Target Near Plane', lViewList, '')
AddPropertyToViewList(lModel, 'Target Far Plane', lViewList, '')
# Select model to see our new property view in Properties tool
#lModel.Selected = True
# In this case we don't have to refresh, but if you update already existing View, you should do it.
lMgr.RefreshPropertyViews()
if lModel != None:
lModel.FBDelete()
def PropertyViewForDecal():
viewName = 'Model Decal View'
lModel = FBCreateObject( gCompositionModelsPath, 'Model Decal', 'test decal')
# Create local(by object) property view called 'PythonCreatedView'
lViewList = lMgr.FindPropertyList(lModel, FBPropertyViewType.kFBViewByObjectType, viewName)
if lViewList != None:
lMgr.RemovePropertyList(lModel, FBPropertyViewType.kFBViewByObjectType, viewName)
lViewList = lMgr.CreatePropertyList(lModel, FBPropertyViewType.kFBViewByObjectType, viewName)
AddPropertyToViewList(lModel, 'Enabled', lViewList, '')
AddPropertyToViewList(lModel, 'Opacity', lViewList, '')
# Add 'Show' property under 'Visibility Options' node
AddPropertyToViewList(lModel, 'Show', lViewList, 'Visibility Options')
# Add 'Visibility' property under 'Visibility Options' node
AddPropertyToViewList(lModel, 'Visibility', lViewList, 'Visibility Options')
# Here we add 'Visibility Inheritance' under 'Visibility' which is under 'Visibility Options' node
AddPropertyToViewList(lModel, 'Visibility Inheritance', lViewList, 'Visibility Options.Visibility')
# Same this as above, adding properties under 'Transformation Options'
AddPropertyToViewList(lModel, 'Translation (Lcl)', lViewList, 'Transformation Options')
AddPropertyToViewList(lModel, 'Rotation (Lcl)', lViewList, 'Transformation Options')
AddPropertyToViewList(lModel, 'Scaling (Lcl)', lViewList, 'Transformation Options')
AddPropertyToViewList(lModel, 'Quaternion Rotation', lViewList, 'Transformation Options')
AddPropertyToViewList(lModel, 'Resolution Width', lViewList, 'Texture Info')
AddPropertyToViewList(lModel, 'Resolution Height', lViewList, 'Texture Info')
AddPropertyToViewList(lModel, 'Aspect Ratio', lViewList, 'Texture Info')
AddPropertyToViewList(lModel, 'Uses Story Track', lViewList, 'Texture Info')
AddPropertyToViewList(lModel, 'Video Clip Path', lViewList, 'Texture Info')
AddPropertyToViewList(lModel, 'Near Plane Distance', lViewList, 'Projection Options')
AddPropertyToViewList(lModel, 'Far Plane Distance', lViewList, 'Projection Options')
AddPropertyToViewList(lModel, 'Projection Type', lViewList, 'Projection Options')
AddPropertyToViewList(lModel, 'Field Of View', lViewList, 'Projection Options')
AddPropertyToViewList(lModel, 'Ortho Scale', lViewList, 'Projection Options')
AddPropertyToViewList(lModel, 'Contrast', lViewList, 'Color Correction')
AddPropertyToViewList(lModel, 'Saturation', lViewList, 'Color Correction')
AddPropertyToViewList(lModel, 'Brightness', lViewList, 'Color Correction')
AddPropertyToViewList(lModel, 'Gamma', lViewList, 'Color Correction')
AddPropertyToViewList(lModel, 'Inverse', lViewList, 'Color Correction')
AddPropertyToViewList(lModel, 'Hue', lViewList, 'Color Correction')
AddPropertyToViewList(lModel, 'Hue Saturation', lViewList, 'Color Correction')
AddPropertyToViewList(lModel, 'Lightness', lViewList, 'Color Correction')
AddPropertyToViewList(lModel, 'Grab Values', lViewList, 'Camera Controls')
AddPropertyToViewList(lModel, 'Apply To Current', lViewList, 'Camera Controls')
AddPropertyToViewList(lModel, 'Apply To Perspective', lViewList, 'Camera Controls')
AddPropertyToViewList(lModel, 'Blend Mode', lViewList, '')
AddPropertyToViewList(lModel, 'Texture', lViewList, '')
AddPropertyToViewList(lModel, 'Refresh', lViewList, '')
# Select model to see our new property view in Properties tool
#lModel.Selected = True
# In this case we don't have to refresh, but if you update already existing View, you should do it.
lMgr.RefreshPropertyViews()
if lModel != None:
lModel.FBDelete()
def GlobalPropertyViewForComposition():
global gClassStr
gClassStr = 'ObjectComposition'
AddGlobalPropertyView("", "Statistics", True);
AddGlobalPropertyView( 'Stats Width', "Statistics" )
AddGlobalPropertyView( 'Stats Height', "Statistics" )
AddGlobalPropertyView( 'Stats Compute Shaders', "Statistics" )
AddGlobalPropertyView( 'Stats Dispatch Groups', "Statistics" )
AddGlobalPropertyView( 'Stats Textures Count', "Statistics" )
AddGlobalPropertyView( 'Stats Textures Memory', "Statistics" )
AddGlobalPropertyView("", "Background", True);
AddGlobalPropertyView("Background Width", "Background");
AddGlobalPropertyView("Background Height", "Background");
AddGlobalPropertyView("Background Color", "Background");
AddGlobalPropertyView("Background Alpha", "Background");
AddGlobalPropertyView("Use Background Gradient", "Background");
AddGlobalPropertyView("Background Upper Color", "Background");
AddGlobalPropertyView("Background Upper Alpha", "Background");
AddGlobalPropertyView("Background Lower Color", "Background");
AddGlobalPropertyView("Background Lower Alpha", "Background");
#AddGlobalPropertyView("Use Background Texture", "Background");
AddGlobalPropertyView("Background Texture", "Background");
AddGlobalPropertyView("Back Texture Width", "Background");
AddGlobalPropertyView("Back Texture Height", "Background");
AddGlobalPropertyView("", "Size Control", True);
AddGlobalPropertyView("Override Size", "Size Control");
AddGlobalPropertyView("User Width", "Size Control");
AddGlobalPropertyView("User Height", "Size Control");
AddGlobalPropertyView("ReSize Factor", "Size Control");
AddGlobalPropertyView("", "Batch processing", True);
AddGlobalPropertyView("Use For Batch Processing", "Batch processing");
AddGlobalPropertyView("Batch input", "Batch processing");
AddGlobalPropertyView("Batch output", "Batch processing");
AddGlobalPropertyView("Input", "")
AddGlobalPropertyView("Out Width", "")
AddGlobalPropertyView("Out Height", "")
def GlobalPropertyViewForFilter(classstr):
global gClassStr
gClassStr = classstr
AddGlobalPropertyView("Active", "")
AddGlobalPropertyView("Reload Shader", "")
AddGlobalPropertyView("Opacity", "")
AddGlobalPropertyView("", "Masking", True)
AddGlobalPropertyView("Use Composite Mask", "Masking")
AddGlobalPropertyView("Select Composite Mask", "Masking")
AddGlobalPropertyView("Invert Composite Mask", "Masking")
AddGlobalPropertyView("Custom Mask", "Masking")
AddGlobalPropertyView("", "Info", True)
AddGlobalPropertyView("Out Width", "Info")
AddGlobalPropertyView("Out Height", "Info")
def GlobalForFogFilter(classstr):
global gClassStr
gClassStr = classstr
AddGlobalPropertyView("Use Model Properties", "")
AddGlobalPropertyView("Color RGB", "")
AddGlobalPropertyView("Mode", "")
AddGlobalPropertyView("Density", "")
AddGlobalPropertyView("Alpha Texture", "")
AddGlobalPropertyView("Fog Near", "")
AddGlobalPropertyView("Fog Far", "")
AddGlobalPropertyView("Fog Feather", "")
AddGlobalPropertyView("Target Type", "")
AddGlobalPropertyView("Target Object", "")
AddGlobalPropertyView("Create Target", "Target Object")
AddGlobalPropertyView("Target Near Object", "")
AddGlobalPropertyView("Target Far Object", "")
AddGlobalPropertyView("Create Near/Far Targets", "Target Near Object")
AddGlobalPropertyView("Volume Object", "")
AddGlobalPropertyView("Create Volume", "Volume Object")
def GlobalPropertyViewForFilters():
GlobalPropertyViewForFilter( 'ObjectFilter3dDecal' )
GlobalPropertyViewForFilter( 'ObjectFilter3dDOF' )
GlobalPropertyViewForFilter( 'ObjectFilter3dFog' )
GlobalForFogFilter('ObjectFilter3dFog' )
GlobalPropertyViewForFilter( 'ObjectFilterLUT' )
GlobalPropertyViewForFilter( 'ObjectFilterBlur' )
GlobalPropertyViewForFilter( 'ObjectFilterChangeColor' )
GlobalPropertyViewForFilter( 'ObjectFilterColorCorrection' )
GlobalPropertyViewForFilter( 'ObjectFilterCrossStitching' )
GlobalPropertyViewForFilter( 'ObjectFilterCrosshatch' )
GlobalPropertyViewForFilter( 'ObjectFilterFilmGrain' )
GlobalPropertyViewForFilter( 'ObjectFilterHalfTone' )
GlobalPropertyViewForFilter( 'ObjectFilterToonLines' )
GlobalPropertyViewForFilter( 'ObjectFilterPosterization' )
def GlobalPropertyViewForLayerCommon(classstr):
global gClassStr
gClassStr = classstr
AddGlobalPropertyView( "Active", "" )
AddGlobalPropertyView( "Input", "")
AddGlobalPropertyView( "Reload Shader", "" )
AddGlobalPropertyView( "Opacity", "" )
AddGlobalPropertyView( "", "Transformation", True )
AddGlobalPropertyView( "Translation", "Transformation" )
AddGlobalPropertyView( "Rotation", "Transformation" )
AddGlobalPropertyView( "Uniform Scaling", "Transformation" )
AddGlobalPropertyView( "Scaling", "Transformation" )
AddGlobalPropertyView( "Pivot Offset", "Transformation" )
AddGlobalPropertyView( "Transform Init", "Transformation" )
AddGlobalPropertyView( "Fit Image Size", "Transformation" )
AddGlobalPropertyView( "", "Masking", True )
AddGlobalPropertyView( "Use Composite Mask", "Masking" )
AddGlobalPropertyView( "Select Composite Mask", "Masking" )
AddGlobalPropertyView( "Invert Composite Mask", "Masking" )
AddGlobalPropertyView( "Custom Mask", "Masking" )
AddGlobalPropertyView("", "Info", True)
AddGlobalPropertyView("Out Width", "Info")
AddGlobalPropertyView("Out Height", "Info")
def GlobalPropertyViewForLayers():
GlobalPropertyViewForLayerCommon( 'ObjectCompositionRender' )
GlobalPropertyViewForLayerCommon( 'ObjectCompositionColor' )
GlobalPropertyViewForLayerCommon( 'ObjectCompositionShadow' )
####################################################################
#### MAIN ####
PropertyViewForShadowZone()
PropertyViewForFogVolume()
PropertyViewForFogTarget()
PropertyViewForDecal()
##
GlobalPropertyViewForComposition()
## filters
GlobalPropertyViewForFilters()
## layers
GlobalPropertyViewForLayers() | bsd-3-clause | 6,401,903,315,452,361,000 | 46.117936 | 103 | 0.728567 | false |
matthewwardrop/python-qubricks | tests/test_system.py | 1 | 1997 | import sys
sys.path.insert(0,'..')
import numpy as np
import math
import sympy
import unittest
from qubricks import QuantumSystem
class TwoLevel(QuantumSystem):
def init(self, **kwargs):
pass
def init_parameters(self):
self.p << {'c_hbar': 1.0}
self.p.B = 1
self.p.J = 1
def init_bases(self):
pass
def init_hamiltonian(self):
return self.Operator( {'J': np.array([[0,1],[1,0]]),'B':np.array([[1,0],[0,-1]])})
def init_states(self):
'''
Add the named/important states to be used by this quantum system.
'''
self.add_state("up",[1,0])
self.add_state("down",[0,1])
self.add_state("+",np.array([1,1])/math.sqrt(2))
self.add_state("-",np.array([1,-1])/math.sqrt(2))
def init_measurements(self):
'''
Add the measurements to be used by this quantum system instance.
'''
pass
@property
def default_derivative_ops(self):
return ['evolution']
def init_derivative_ops(self):
'''
Setup the derivative operators to be implemented on top of the
basic quantum evolution operator.
'''
pass
def ideal_integration(self,time,state,params={}):
t,J,B,c_hbar = sympy.var('t,J,B,c_hbar')
ps = {'B':self.p.B,'J':self.p.J,'t':time,'c_hbar':self.p.c_hbar}
ps.update(params)
op = sympy.exp( (-sympy.I/c_hbar*t*sympy.Matrix([[B,J],[J,-B]])).evalf(subs=ps) )
return np.array( op ).astype(complex).dot(self.state(state))
class TestTwoLevel(unittest.TestCase):
def setUp(self):
self.system = TwoLevel()
def test_evolution(self):
for time in [1,5,10,20]:
np.testing.assert_array_almost_equal(self.system.integrate([time], ['up'])['state'][0,0], self.system.ideal_integration(time, 'up'), 5)
np.testing.assert_array_almost_equal(self.system.integrate([time], ['up'], params={'B':0})['state'][0,0], self.system.ideal_integration(time, 'up', params={'B':0}), 5)
np.testing.assert_array_almost_equal(self.system.integrate([time], ['up'], params={'J':0})['state'][0,0], self.system.ideal_integration(time, 'up', params={'J':0}), 5)
| mit | -6,001,710,570,103,783,000 | 25.626667 | 170 | 0.654982 | false |
happy5214/manitae | manitae/units/base/SingleTileUnit.py | 1 | 1165 | # Copyright (C) 2012 Alexander Jones
#
# This file is part of Manitae.
#
# Manitae is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Manitae is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Manitae. If not, see <http://www.gnu.org/licenses/>.
from PyQt4.QtCore import pyqtProperty
from manitae.core.basetypes.Unit import Unit
from manitae.maps.MapTile import MapTile
class SingleTileUnit(Unit):
tile_type = MapTile
@pyqtProperty(MapTile)
def tile(self):
return self._tile
@tile.setter
def tile(self, new_tile):
self._tile = new_tile
self._tile.color = self._tile.tile_type.color
self._tile.__class__ = self.tile_type
| gpl-3.0 | -263,002,470,076,723,040 | 31.361111 | 72 | 0.699571 | false |
csengstock/geonames_tagger | gnt_request.py | 1 | 1998 | # geonames tagger
#
# Copyright (c) 2015 Christian Sengstock, All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library.
import sys
import json
import urllib
import urllib2
def send_post(host, port, params, path=""):
url = "http://%s:%d/%s" % (host, port, path)
data = urllib.urlencode(params)
req = urllib2.Request(url, data)
response = urllib2.urlopen(req)
return response.read()
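# Example call (a sketch mirroring the __main__ block below; host and port are the defaults):
#   resp = send_post("localhost", 55555, {"text": "some text to tag"})
#   data = json.loads(resp)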
def usage():
print """Usage: gnt_request TEXT_FILE [host [port]]
TEXT_FILE: text to parse. Use '-' to read from stdin.
HOST: hostname of server (defaults to 'localhost')
PORT: port of server (defaults to 55555)
"""
sys.exit(1)
if __name__ == "__main__":
host = "localhost"
port = 55555
if len(sys.argv) < 2:
usage()
fn = sys.argv[1]
fp = None
if fn == "-":
print "reading from stdin. Use Ctrl-D (linux) or Ctrl-O > Ctrl-D (osx) for EOF"
fp = sys.stdin
else:
fp = open(fn, "r")
if len(sys.argv) == 3:
host = sys.argv[2]
if len(sys.argv) == 4:
port = int(sys.argv[3])
if len(sys.argv) > 4:
usage()
txt = fp.read()
resp = send_post(host, port, {"text": txt})
resp = json.loads(resp)
if resp["success"] == 0:
print resp
else:
print "response:", resp["n_matches"], "matches"
for rec in resp["matches"]:
print "---"
print rec
| lgpl-3.0 | 1,893,786,140,158,874,600 | 29.272727 | 87 | 0.624625 | false |
SouthForkResearch/CHaMP_Metrics | tools/topoauxmetrics/methods/sidechannel.py | 1 | 2433 | from lib.exception import MissingException
from lib.sitkaAPI import latestMetricInstances
from lib.metrics import CHaMPMetric
import numpy as np
class SidechannelMetrics(CHaMPMetric):
TEMPLATE = {
'VisitMetrics': {
'Area': None,
'AreaPercent': None,
}
}
def calc(self, apiData):
"""
Calculate side channel metrics
:param apiData: dictionary of API data. Key is API call name. Value is API data
:return: metrics dictionary
"""
self.log.info("Running SideChannelMetrics")
if 'ChannelSegments' not in apiData:
raise MissingException("ChannelSegments missing from apiData")
# Retrieve the channel segment measurements
channelSegmentVals = [val['value'] for val in apiData['ChannelSegments']['values']]
if 'ChannelUnitMetrics' not in apiData:
raise MissingException('Missing channel metric instances')
# Retrieve the channel unit metrics
channelInstances = latestMetricInstances(apiData['ChannelUnitMetrics'])
if channelInstances is None:
raise MissingException('Missing channel unit metric instances')
# calculate metrics
self.metrics = self._calc(channelSegmentVals, channelInstances)
@staticmethod
def _calc(channelSegmentVals, channelInstances):
"""
Calculate side channel metrics
        :param channelSegmentVals: list of channel segment measurement values
        :param channelInstances: dictionary of channel unit topo metrics
:return: metrics dictionary
"""
# Total area of all channel units
totalArea = np.sum([val['AreaTotal'] for val in channelInstances])
dResults = {}
# Filter channel segments to just small side channels with both length and width
sscWithMeasurements = [val for val in channelSegmentVals
if val['SegmentType'] == 'Small Side Channel' and val['SideChannelLengthM'] and val['SideChannelWidthM']]
# Sum the length and widths of filtered small side channels
dResults['Area'] = np.sum([val['SideChannelLengthM'] * val['SideChannelWidthM'] for val in sscWithMeasurements])
if dResults['Area'] == 0.0 and totalArea == 0.0:
dResults['AreaPercent'] = 0.0
else:
dResults['AreaPercent'] = 100 * dResults['Area'] / totalArea
visitMetrics = {"VisitMetrics": dResults}
return visitMetrics
| gpl-3.0 | -8,742,559,488,343,693,000 | 34.779412 | 136 | 0.65187 | false |
chromium/chromium | tools/binary_size/libsupersize/integration_test.py | 5 | 24468 | #!/usr/bin/env python3
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import contextlib
import copy
import glob
import io
import itertools
import os
import unittest
import re
import shutil
import subprocess
import sys
import tempfile
import zipfile
import archive
import data_quality
import describe
import diff
import file_format
import models
import test_util
_SCRIPT_DIR = os.path.dirname(__file__)
_TEST_DATA_DIR = os.path.join(_SCRIPT_DIR, 'testdata')
_TEST_SDK_DIR = os.path.join(_TEST_DATA_DIR, 'mock_sdk')
_TEST_SOURCE_DIR = os.path.join(_TEST_DATA_DIR, 'mock_source_directory')
_TEST_OUTPUT_DIR = os.path.join(_TEST_SOURCE_DIR, 'out', 'Release')
_TEST_TOOL_PREFIX = os.path.join(
os.path.abspath(_TEST_DATA_DIR), 'mock_toolchain', '')
_TEST_APK_ROOT_DIR = os.path.join(_TEST_DATA_DIR, 'mock_apk')
_TEST_MAP_PATH = os.path.join(_TEST_DATA_DIR, 'test.map')
_TEST_PAK_INFO_PATH = os.path.join(
_TEST_OUTPUT_DIR, 'size-info/test.apk.pak.info')
_TEST_ELF_FILE_BEGIN = os.path.join(_TEST_OUTPUT_DIR, 'elf.begin')
_TEST_APK_LOCALE_PAK_PATH = os.path.join(_TEST_APK_ROOT_DIR, 'assets/en-US.pak')
_TEST_APK_PAK_PATH = os.path.join(_TEST_APK_ROOT_DIR, 'assets/resources.pak')
_TEST_ON_DEMAND_MANIFEST_PATH = os.path.join(_TEST_DATA_DIR,
'AndroidManifest_OnDemand.xml')
_TEST_ALWAYS_INSTALLED_MANIFEST_PATH = os.path.join(
_TEST_DATA_DIR, 'AndroidManifest_AlwaysInstalled.xml')
# The following files are dynamically created.
_TEST_ELF_PATH = os.path.join(_TEST_OUTPUT_DIR, 'elf')
_TEST_APK_PATH = os.path.join(_TEST_OUTPUT_DIR, 'test.apk')
_TEST_NOT_ON_DEMAND_SPLIT_APK_PATH = os.path.join(_TEST_OUTPUT_DIR,
'not_on_demand.apk')
_TEST_ON_DEMAND_SPLIT_APK_PATH = os.path.join(_TEST_OUTPUT_DIR, 'on_demand.apk')
_TEST_MINIMAL_APKS_PATH = os.path.join(_TEST_OUTPUT_DIR, 'Bundle.minimal.apks')
_TEST_SSARGS_PATH = os.path.join(_TEST_OUTPUT_DIR, 'test.ssargs')
# Generated file paths relative to apk
_TEST_APK_SO_PATH = 'test.so'
_TEST_APK_SMALL_SO_PATH = 'smalltest.so'
_TEST_APK_DEX_PATH = 'test.dex'
_TEST_APK_OTHER_FILE_PATH = 'assets/icudtl.dat'
_TEST_APK_RES_FILE_PATH = 'res/drawable-v13/test.xml'
def _CompareWithGolden(name=None):
def real_decorator(func):
basename = name
if not basename:
basename = func.__name__.replace('test_', '')
golden_path = os.path.join(_TEST_DATA_DIR, basename + '.golden')
def inner(self):
actual_lines = func(self)
actual_lines = (re.sub(r'(elf_mtime=).*', r'\1{redacted}', l)
for l in actual_lines)
actual_lines = (re.sub(r'(Loaded from ).*', r'\1{redacted}', l)
for l in actual_lines)
test_util.Golden.CheckOrUpdate(golden_path, actual_lines)
return inner
return real_decorator
@contextlib.contextmanager
def _AddMocksToPath():
prev_path = os.environ['PATH']
os.environ['PATH'] = _TEST_TOOL_PREFIX[:-1] + os.path.pathsep + prev_path
os.environ['APK_ANALYZER'] = os.path.join(_TEST_SDK_DIR, 'tools', 'bin',
'apkanalyzer')
os.environ['AAPT2'] = os.path.join(_TEST_SDK_DIR, 'tools', 'bin', 'aapt2')
try:
yield
finally:
os.environ['PATH'] = prev_path
del os.environ['APK_ANALYZER']
del os.environ['AAPT2']
def _RunApp(name, args, debug_measures=False):
argv = [os.path.join(_SCRIPT_DIR, 'main.py'), name]
argv.extend(args)
with _AddMocksToPath():
env = None
if debug_measures:
env = os.environ.copy()
env['SUPERSIZE_DISABLE_ASYNC'] = '1'
env['SUPERSIZE_MEASURE_GZIP'] = '1'
return subprocess.check_output(argv, env=env).decode('utf-8').splitlines()
def _AllMetadata(size_info):
return [c.metadata for c in size_info.containers]
class IntegrationTest(unittest.TestCase):
  maxDiff = None  # Don't truncate diffs in errors.
cached_size_info = {}
@staticmethod
def _CreateBlankData(power_of_two):
data = '\0'
for _ in range(power_of_two):
data = data + data
return data
@staticmethod
def _SafeRemoveFiles(file_names):
for file_name in file_names:
if os.path.exists(file_name):
os.remove(file_name)
@classmethod
def setUpClass(cls):
shutil.copy(_TEST_ELF_FILE_BEGIN, _TEST_ELF_PATH)
# Exactly 128MB of data (2^27), extra bytes will be accounted in overhead.
with open(_TEST_ELF_PATH, 'a') as elf_file:
elf_file.write(IntegrationTest._CreateBlankData(27))
with zipfile.ZipFile(_TEST_APK_PATH, 'w') as apk_file:
apk_file.write(_TEST_ELF_PATH, _TEST_APK_SO_PATH)
# Exactly 4MB of data (2^22), with some zipalign overhead.
info = zipfile.ZipInfo(_TEST_APK_SMALL_SO_PATH)
info.extra = b'\x00' * 16
apk_file.writestr(info, IntegrationTest._CreateBlankData(22))
# Exactly 1MB of data (2^20).
apk_file.writestr(
_TEST_APK_OTHER_FILE_PATH, IntegrationTest._CreateBlankData(20))
# Exactly 1KB of data (2^10).
apk_file.writestr(
_TEST_APK_RES_FILE_PATH, IntegrationTest._CreateBlankData(10))
locale_pak_rel_path = os.path.relpath(
_TEST_APK_LOCALE_PAK_PATH, _TEST_APK_ROOT_DIR)
apk_file.write(_TEST_APK_LOCALE_PAK_PATH, locale_pak_rel_path)
pak_rel_path = os.path.relpath(_TEST_APK_PAK_PATH, _TEST_APK_ROOT_DIR)
apk_file.write(_TEST_APK_PAK_PATH, pak_rel_path)
# Exactly 8MB of data (2^23).
apk_file.writestr(
_TEST_APK_DEX_PATH, IntegrationTest._CreateBlankData(23))
with zipfile.ZipFile(_TEST_NOT_ON_DEMAND_SPLIT_APK_PATH, 'w') as z:
z.write(_TEST_ALWAYS_INSTALLED_MANIFEST_PATH, 'AndroidManifest.xml')
with zipfile.ZipFile(_TEST_ON_DEMAND_SPLIT_APK_PATH, 'w') as z:
z.write(_TEST_ON_DEMAND_MANIFEST_PATH, 'AndroidManifest.xml')
with zipfile.ZipFile(_TEST_MINIMAL_APKS_PATH, 'w') as apk_file:
apk_file.writestr('toc.pb', 'x' * 80)
apk_file.write(_TEST_APK_PATH, 'splits/base-master.apk')
apk_file.writestr('splits/base-en.apk', 'x' * 10)
apk_file.write(_TEST_NOT_ON_DEMAND_SPLIT_APK_PATH,
'splits/not_on_demand-master.apk')
apk_file.write(_TEST_ON_DEMAND_SPLIT_APK_PATH,
'splits/on_demand-master.apk')
apk_file.writestr('splits/vr-en.apk', 'x' * 40)
@classmethod
def tearDownClass(cls):
IntegrationTest._SafeRemoveFiles([
_TEST_ELF_PATH,
_TEST_APK_PATH,
_TEST_NOT_ON_DEMAND_SPLIT_APK_PATH,
_TEST_ON_DEMAND_SPLIT_APK_PATH,
_TEST_MINIMAL_APKS_PATH,
])
def _CreateTestArgs(self):
parser = argparse.ArgumentParser()
archive.AddArguments(parser)
ret = parser.parse_args(['foo'])
return ret
def _CloneSizeInfo(self,
use_output_directory=True,
use_elf=False,
use_apk=False,
use_minimal_apks=False,
use_pak=False,
use_aux_elf=False):
assert not use_elf or use_output_directory
assert not (use_apk and use_pak)
cache_key = (use_output_directory, use_elf, use_apk, use_minimal_apks,
use_pak, use_aux_elf)
if cache_key not in IntegrationTest.cached_size_info:
knobs = archive.SectionSizeKnobs()
# Override for testing. Lower the bar for compacting symbols, to allow
# smaller test cases to be created.
knobs.max_same_name_alias_count = 3
args = self._CreateTestArgs()
args.elf_file = _TEST_ELF_PATH if use_elf or use_aux_elf else None
args.map_file = _TEST_MAP_PATH
args.output_directory = _TEST_OUTPUT_DIR if use_output_directory else None
args.source_directory = _TEST_SOURCE_DIR
args.tool_prefix = _TEST_TOOL_PREFIX
apk_so_path = None
size_info_prefix = None
extracted_minimal_apk_path = None
container_name = ''
if use_apk:
args.apk_file = _TEST_APK_PATH
elif use_minimal_apks:
args.minimal_apks_file = _TEST_MINIMAL_APKS_PATH
extracted_minimal_apk_path = _TEST_APK_PATH
container_name = 'Bundle.minimal.apks'
if use_apk or use_minimal_apks:
apk_so_path = _TEST_APK_SO_PATH
if args.output_directory:
if use_apk:
orig_path = _TEST_APK_PATH
else:
orig_path = _TEST_MINIMAL_APKS_PATH.replace('.minimal.apks', '.aab')
size_info_prefix = os.path.join(args.output_directory, 'size-info',
os.path.basename(orig_path))
pak_files = None
pak_info_file = None
if use_pak:
pak_files = [_TEST_APK_LOCALE_PAK_PATH, _TEST_APK_PAK_PATH]
pak_info_file = _TEST_PAK_INFO_PATH
linker_name = 'gold'
# For simplicity, using |args| for both params. This is okay since
# |args.ssargs_file| is unassigned.
opts = archive.ContainerArchiveOptions(args, args)
with _AddMocksToPath():
build_config = {}
metadata = archive.CreateMetadata(args, linker_name, build_config)
container_list = []
raw_symbols_list = []
container, raw_symbols = archive.CreateContainerAndSymbols(
knobs=knobs,
opts=opts,
container_name='{}/base.apk'.format(container_name)
if container_name else '',
metadata=metadata,
map_path=args.map_file,
tool_prefix=args.tool_prefix,
output_directory=args.output_directory,
source_directory=args.source_directory,
elf_path=args.elf_file,
apk_path=args.apk_file or extracted_minimal_apk_path,
apk_so_path=apk_so_path,
pak_files=pak_files,
pak_info_file=pak_info_file,
linker_name=linker_name,
size_info_prefix=size_info_prefix)
container_list.append(container)
raw_symbols_list.append(raw_symbols)
if use_minimal_apks:
opts.analyze_native = False
args.split_name = 'not_on_demand'
args.apk_file = _TEST_NOT_ON_DEMAND_SPLIT_APK_PATH
args.elf_file = None
args.map_file = None
metadata = archive.CreateMetadata(args, None, build_config)
container, raw_symbols = archive.CreateContainerAndSymbols(
knobs=knobs,
opts=opts,
container_name='{}/not_on_demand.apk'.format(container_name),
metadata=metadata,
tool_prefix=args.tool_prefix,
output_directory=args.output_directory,
source_directory=args.source_directory,
apk_path=_TEST_NOT_ON_DEMAND_SPLIT_APK_PATH,
size_info_prefix=size_info_prefix)
container_list.append(container)
raw_symbols_list.append(raw_symbols)
args.split_name = 'on_demand'
args.apk_file = _TEST_ON_DEMAND_SPLIT_APK_PATH
metadata = archive.CreateMetadata(args, None, build_config)
container, raw_symbols = archive.CreateContainerAndSymbols(
knobs=knobs,
opts=opts,
container_name='{}/on_demand.apk?'.format(container_name),
metadata=metadata,
tool_prefix=args.tool_prefix,
output_directory=args.output_directory,
source_directory=args.source_directory,
apk_path=_TEST_ON_DEMAND_SPLIT_APK_PATH,
size_info_prefix=size_info_prefix)
container_list.append(container)
raw_symbols_list.append(raw_symbols)
IntegrationTest.cached_size_info[cache_key] = archive.CreateSizeInfo(
build_config, container_list, raw_symbols_list)
return copy.deepcopy(IntegrationTest.cached_size_info[cache_key])
def _DoArchive(self,
archive_path,
use_output_directory=True,
use_elf=False,
use_apk=False,
use_ssargs=False,
use_minimal_apks=False,
use_pak=False,
use_aux_elf=None,
debug_measures=False,
include_padding=False):
args = [
archive_path,
'--source-directory',
_TEST_SOURCE_DIR,
# --map-file ignored for use_ssargs.
'--map-file',
_TEST_MAP_PATH,
]
if use_output_directory:
# Let autodetection find output_directory when --elf-file is used.
if not use_elf:
args += ['--output-directory', _TEST_OUTPUT_DIR]
else:
args += ['--no-output-directory']
if use_ssargs:
args += ['-f', _TEST_SSARGS_PATH]
elif use_apk:
args += ['-f', _TEST_APK_PATH]
elif use_minimal_apks:
args += ['-f', _TEST_MINIMAL_APKS_PATH]
elif use_elf:
args += ['-f', _TEST_ELF_PATH]
if use_pak:
args += ['--pak-file', _TEST_APK_LOCALE_PAK_PATH,
'--pak-file', _TEST_APK_PAK_PATH,
'--pak-info-file', _TEST_PAK_INFO_PATH]
if use_aux_elf:
args += ['--aux-elf-file', _TEST_ELF_PATH]
if include_padding:
args += ['--include-padding']
_RunApp('archive', args, debug_measures=debug_measures)
def _DoArchiveTest(self,
use_output_directory=True,
use_elf=False,
use_apk=False,
use_minimal_apks=False,
use_pak=False,
use_aux_elf=False,
debug_measures=False,
include_padding=False):
with tempfile.NamedTemporaryFile(suffix='.size') as temp_file:
self._DoArchive(temp_file.name,
use_output_directory=use_output_directory,
use_elf=use_elf,
use_apk=use_apk,
use_minimal_apks=use_minimal_apks,
use_pak=use_pak,
use_aux_elf=use_aux_elf,
debug_measures=debug_measures,
include_padding=include_padding)
size_info = archive.LoadAndPostProcessSizeInfo(temp_file.name)
# Check that saving & loading is the same as directly parsing.
expected_size_info = self._CloneSizeInfo(
use_output_directory=use_output_directory,
use_elf=use_elf,
use_apk=use_apk,
use_minimal_apks=use_minimal_apks,
use_pak=use_pak,
use_aux_elf=use_aux_elf)
self.assertEqual(_AllMetadata(expected_size_info), _AllMetadata(size_info))
# Don't cluster.
expected_size_info.symbols = expected_size_info.raw_symbols
size_info.symbols = size_info.raw_symbols
expected = list(describe.GenerateLines(expected_size_info, verbose=True))
actual = list(describe.GenerateLines(size_info, verbose=True))
self.assertEqual(expected, actual)
sym_strs = (repr(sym) for sym in size_info.symbols)
stats = data_quality.DescribeSizeInfoCoverage(size_info)
if len(size_info.containers) == 1:
      # If there's only one container, merge its metadata into build_config.
merged_data_desc = describe.DescribeDict(size_info.metadata_legacy)
else:
merged_data_desc = describe.DescribeDict(size_info.build_config)
for m in _AllMetadata(size_info):
merged_data_desc.extend(describe.DescribeDict(m))
return itertools.chain(merged_data_desc, stats, sym_strs)
@_CompareWithGolden()
def test_Archive(self):
return self._DoArchiveTest(use_output_directory=False, use_elf=False)
@_CompareWithGolden()
def test_Archive_OutputDirectory(self):
return self._DoArchiveTest()
@_CompareWithGolden()
def test_Archive_Elf(self):
return self._DoArchiveTest(use_elf=True)
@_CompareWithGolden()
def test_Archive_Apk(self):
return self._DoArchiveTest(use_apk=True, use_aux_elf=True)
@_CompareWithGolden()
def test_Archive_MinimalApks(self):
return self._DoArchiveTest(use_minimal_apks=True, use_aux_elf=True)
@_CompareWithGolden()
def test_Archive_Pak_Files(self):
return self._DoArchiveTest(use_pak=True, use_aux_elf=True)
@_CompareWithGolden(name='Archive_Elf')
def test_Archive_Elf_DebugMeasures(self):
return self._DoArchiveTest(use_elf=True, debug_measures=True)
@_CompareWithGolden(name='Archive_Apk')
def test_ArchiveSparse(self):
return self._DoArchiveTest(use_apk=True,
use_aux_elf=True,
include_padding=True)
def test_SaveDeltaSizeInfo(self):
# Check that saving & loading is the same as directly parsing.
orig_info1 = self._CloneSizeInfo(use_apk=True, use_aux_elf=True)
orig_info2 = self._CloneSizeInfo(use_elf=True)
orig_delta = diff.Diff(orig_info1, orig_info2)
with tempfile.NamedTemporaryFile(suffix='.sizediff') as sizediff_file:
file_format.SaveDeltaSizeInfo(orig_delta, sizediff_file.name)
new_info1, new_info2 = archive.LoadAndPostProcessDeltaSizeInfo(
sizediff_file.name)
new_delta = diff.Diff(new_info1, new_info2)
# File format discards unchanged symbols.
orig_delta.raw_symbols = orig_delta.raw_symbols.WhereDiffStatusIs(
models.DIFF_STATUS_UNCHANGED).Inverted()
self.assertEqual(
'\n'.join(describe.GenerateLines(orig_delta, verbose=True)),
'\n'.join(describe.GenerateLines(new_delta, verbose=True)))
@_CompareWithGolden()
def test_Console(self):
with tempfile.NamedTemporaryFile(suffix='.size') as size_file, \
tempfile.NamedTemporaryFile(suffix='.txt') as output_file:
file_format.SaveSizeInfo(self._CloneSizeInfo(use_elf=True),
size_file.name)
query = [
'ShowExamples()',
'ExpandRegex("_foo_")',
'canned_queries.CategorizeGenerated()',
'canned_queries.CategorizeByChromeComponent()',
'canned_queries.LargeFiles()',
'canned_queries.TemplatesByName()',
'canned_queries.StaticInitializers()',
'canned_queries.PakByPath()',
'Print(ReadStringLiterals(elf_path={}))'.format(repr(_TEST_ELF_PATH)),
'Print(size_info, to_file=%r)' % output_file.name,
]
ret = _RunApp('console', [size_file.name, '--query', '; '.join(query)])
with open(output_file.name) as f:
ret.extend(l.rstrip() for l in f)
return ret
@_CompareWithGolden()
def test_Csv(self):
with tempfile.NamedTemporaryFile(suffix='.size') as size_file, \
tempfile.NamedTemporaryFile(suffix='.txt') as output_file:
file_format.SaveSizeInfo(self._CloneSizeInfo(use_elf=True),
size_file.name)
query = [
'Csv(size_info, to_file=%r)' % output_file.name,
]
ret = _RunApp('console', [size_file.name, '--query', '; '.join(query)])
with open(output_file.name) as f:
ret.extend(l.rstrip() for l in f)
return ret
@_CompareWithGolden()
def test_Diff_NullDiff(self):
with tempfile.NamedTemporaryFile(suffix='.size') as temp_file:
file_format.SaveSizeInfo(self._CloneSizeInfo(use_elf=True),
temp_file.name)
return _RunApp('diff', [temp_file.name, temp_file.name])
# Runs archive 3 times, and asserts the contents are the same each time.
def test_Idempotent(self):
prev_contents = None
for _ in range(3):
with tempfile.NamedTemporaryFile(suffix='.size') as temp_file:
self._DoArchive(temp_file.name)
contents = temp_file.read()
self.assertTrue(prev_contents is None or contents == prev_contents)
prev_contents = contents
@_CompareWithGolden()
def test_Diff_Basic(self):
size_info1 = self._CloneSizeInfo(use_pak=True)
size_info2 = self._CloneSizeInfo(use_pak=True)
size_info2.build_config['git_revision'] = 'xyz789'
container1 = size_info1.containers[0]
container2 = size_info2.containers[0]
container1.metadata = {"foo": 1, "bar": [1, 2, 3], "baz": "yes"}
container2.metadata = {"foo": 1, "bar": [1, 3], "baz": "yes"}
size_info1.raw_symbols -= size_info1.raw_symbols.WhereNameMatches(
r'pLinuxKernelCmpxchg|pLinuxKernelMemoryBarrier')
size_info2.raw_symbols -= size_info2.raw_symbols.WhereNameMatches(
r'IDS_AW_WEBPAGE_PARENTAL_|IDS_WEB_FONT_FAMILY|IDS_WEB_FONT_SIZE')
changed_sym = size_info1.raw_symbols.WhereNameMatches('Patcher::Name_')[0]
changed_sym.size -= 10
padding_sym = size_info2.raw_symbols.WhereNameMatches('symbol gap 0')[0]
padding_sym.padding += 20
padding_sym.size += 20
# Test pak symbols changing .grd files. They should not show as changed.
pak_sym = size_info2.raw_symbols.WhereNameMatches(
r'IDR_PDF_COMPOSITOR_MANIFEST')[0]
pak_sym.full_name = pak_sym.full_name.replace('.grd', '2.grd')
# Serialize & de-serialize so that name normalization runs again for the pak
# symbol.
bytesio = io.BytesIO()
file_format.SaveSizeInfo(size_info2, 'path', file_obj=bytesio)
bytesio.seek(0)
size_info2 = archive.LoadAndPostProcessSizeInfo('path', file_obj=bytesio)
d = diff.Diff(size_info1, size_info2)
d.raw_symbols = d.raw_symbols.Sorted()
self.assertEqual((1, 2, 3), d.raw_symbols.CountsByDiffStatus()[1:])
changed_sym = d.raw_symbols.WhereNameMatches('Patcher::Name_')[0]
padding_sym = d.raw_symbols.WhereNameMatches('symbol gap 0')[0]
bss_sym = d.raw_symbols.WhereInSection(models.SECTION_BSS)[0]
# Padding-only deltas should sort after all non-padding changes.
padding_idx = d.raw_symbols.index(padding_sym)
changed_idx = d.raw_symbols.index(changed_sym)
bss_idx = d.raw_symbols.index(bss_sym)
self.assertLess(changed_idx, padding_idx)
# And before bss.
self.assertLess(padding_idx, bss_idx)
return describe.GenerateLines(d, verbose=True)
@_CompareWithGolden()
def test_FullDescription(self):
size_info = self._CloneSizeInfo(use_elf=True)
# Show both clustered and non-clustered so that they can be compared.
size_info.symbols = size_info.raw_symbols
return itertools.chain(
describe.GenerateLines(size_info, verbose=True),
describe.GenerateLines(size_info.symbols._Clustered(), recursive=True,
verbose=True),
)
@_CompareWithGolden()
def test_SymbolGroupMethods(self):
all_syms = self._CloneSizeInfo(use_elf=True).symbols
global_syms = all_syms.WhereNameMatches('GLOBAL')
# Tests Filter(), Inverted(), and __sub__().
non_global_syms = global_syms.Inverted()
self.assertEqual(non_global_syms, (all_syms - global_syms))
# Tests Sorted() and __add__().
self.assertEqual(all_syms.Sorted(),
(global_syms + non_global_syms).Sorted())
# Tests GroupedByName() and __len__().
return itertools.chain(
['GroupedByName()'],
describe.GenerateLines(all_syms.GroupedByName()),
['GroupedByName(depth=1)'],
describe.GenerateLines(all_syms.GroupedByName(depth=1)),
['GroupedByName(depth=-1)'],
describe.GenerateLines(all_syms.GroupedByName(depth=-1)),
['GroupedByName(depth=1, min_count=2)'],
describe.GenerateLines(all_syms.GroupedByName(depth=1, min_count=2)),
)
@_CompareWithGolden()
def test_ArchiveContainers(self):
with tempfile.NamedTemporaryFile(suffix='.size') as temp_file:
self._DoArchive(temp_file.name,
use_output_directory=True,
use_ssargs=True)
size_info = archive.LoadAndPostProcessSizeInfo(temp_file.name)
# Don't cluster.
size_info.symbols = size_info.raw_symbols
sym_strs = (repr(sym) for sym in size_info.symbols)
build_config = describe.DescribeDict(size_info.build_config)
metadata = itertools.chain.from_iterable(
itertools.chain([c.name], describe.DescribeDict(c.metadata))
for c in size_info.containers)
return itertools.chain(
['BuildConfig:'],
build_config,
['Metadata:'],
metadata,
['Symbols:'],
sym_strs,
)
def main():
argv = sys.argv
if len(argv) > 1 and argv[1] == '--update':
argv.pop(0)
test_util.Golden.EnableUpdate()
for f in glob.glob(os.path.join(_TEST_DATA_DIR, '*.golden')):
os.unlink(f)
unittest.main(argv=argv, verbosity=2)
if __name__ == '__main__':
main()
| bsd-3-clause | -8,642,651,884,959,645,000 | 38.211538 | 80 | 0.62972 | false |
praekelt/vumi-go | go/api/go_api/utils.py | 1 | 1071 | """Utilities for Go API."""
from txjsonrpc.jsonrpc import BaseSubhandler
from txjsonrpc.jsonrpclib import Fault
class GoApiError(Fault):
"""Raise this to report an error from within an action handler."""
def __init__(self, msg, fault_code=400):
super(GoApiError, self).__init__(fault_code, msg)
class GoApiSubHandler(BaseSubhandler, object):
"""Base class for Go API JSON-RPC sub-handlers."""
def __init__(self, user_account_key, vumi_api):
super(GoApiSubHandler, self).__init__()
# We could get either bytes or unicode here. Decode if necessary.
if not isinstance(user_account_key, unicode):
user_account_key = user_account_key.decode('utf8')
self.user_account_key = user_account_key
self.vumi_api = vumi_api
def get_user_api(self, campaign_key):
"""Return a user_api for a particular campaign."""
if campaign_key != self.user_account_key:
raise GoApiError("Unknown campaign key.", fault_code=404)
return self.vumi_api.get_user_api(campaign_key)
| bsd-3-clause | 6,002,199,370,488,510,000 | 35.931034 | 73 | 0.661998 | false |
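As a usage illustration for the base class above, here is a minimal sketch of a concrete sub-handler. It is not part of vumi-go: the class name and JSON-RPC method are invented, and the jsonrpc_ method-name prefix is assumed from txjsonrpc's handler convention.

from go.api.go_api.utils import GoApiSubHandler, GoApiError

class CampaignCheckHandler(GoApiSubHandler):
    """Hypothetical sub-handler exposing a single JSON-RPC method."""

    def jsonrpc_check_campaign(self, campaign_key):
        # get_user_api() raises GoApiError (fault code 404) when the key
        # does not match the account this handler was created for.
        user_api = self.get_user_api(campaign_key)
        return {"campaign_key": campaign_key, "valid": user_api is not None}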
Crazepony/crazepony-gitbook | wiki/changelink.py | 1 | 4252 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Filename: deleteline.py
import os
import sys
reload(sys)
#sys.setdefaultencoding('utf8')
def ChangeLineInFile(infile,isOverwrite):
isOverwrite = isOverwrite.upper()
_dir = os.path.dirname(infile)
oldbasename = os.path.basename(infile)
newbasename = oldbasename + '-new'
extname = os.path.splitext(infile)[1]
if extname != ".md":
return
outfile = _dir+'/' + newbasename + extname
infp = open(infile, "rb")
outfp = open(outfile, "wb")
lines = infp.readlines()
title = None
for line in lines:
#print type(line)
if (line.find("# ") > -1):
line2 = line.replace("# ", "## ")
outfp.writelines(line2)
elif (line.find("## ") > -1):
line2 = line.replace("## ", "### ")
outfp.writelines(line2)
elif (line.find(" > -1):
line2 = line.replace("
outfp.writelines(line2)
else:
#print line
outfp.writelines(line)
infp.close()
outfp.close()
if isOverwrite == 'Y':
#print 'remove',infile
os.remove(infile)
os.rename(outfile, infile)
outfile = infile
#print 'read %s'%infile, 'and save as %s'%outfile
print 'read %s and save as %s'%(infile, outfile)
def DelLineInFile(infile,isOverwrite):
isOverwrite = isOverwrite.upper()
_dir = os.path.dirname(infile)
oldbasename = os.path.basename(infile)
newbasename = oldbasename + '-new'
extname = os.path.splitext(infile)[1]
if extname != ".md":
return
outfile = _dir+'/' + newbasename + extname
infp = open(infile, "rb")
outfp = open(outfile, "wb")
lines = infp.readlines()
title = None
for line in lines:
#print type(line)
if (line.find("---") > -1):
pass
elif (line.find("layout:") > -1):
pass
elif line.find("title:") > -1:
title = line.replace("title:","")
print "title"+title
elif line.find("{{ page.title }}") > -1:
if title != None:
line2 = line.replace("{{ page.title }}", title)
outfp.writelines(line2)
else:
#print line
outfp.writelines(line)
infp.close()
outfp.close()
if isOverwrite == 'Y':
#print 'remove',infile
os.remove(infile)
os.rename(outfile, infile)
outfile = infile
#print 'read %s'%infile, 'and save as %s'%outfile
print 'read %s and save as %s'%(infile, outfile)
def ChangeLineInFolders():
string = u'请输入目标文件夹路径====>'
inpath = raw_input(string.encode('utf8'))
string = u'您输入是:' + inpath
print string
string = u'是否覆盖源文件(Y/N)'
isOverwrite = raw_input(string.encode('utf8'))
isOverwrite = isOverwrite.upper()
string = u'您的选择是:' + isOverwrite
print string
for (path,dirs,files) in os.walk(inpath):
for f in files:
infile = os.path.join(path, f)
#print infile
ChangeLineInFile(infile,isOverwrite)
if __name__ == "__main__":
string = u'1 修改指定目录下所有文.md件(包括子目录)'
print string
string = u'2 修改指定md文件 '
print string
string = u'请输入数字编号====>'
index = int(raw_input(string.encode('utf8')))
if index == 1:
ChangeLineInFolders()
elif index ==2:
string = u'请输入目标文件路径====>'
infile = raw_input(string.encode('utf8'))
string = u'您输入是:' + infile
print string
string = u'是否覆盖源文件(Y/N)'
isOverwrite = raw_input(string.encode('utf8'))
string = u'您的选择是:' + isOverwrite.upper()
print string
DelLineInFile(infile, isOverwrite)
else:
string = u'编号输入错误,程序退出'
print string
sys.exit()
raw_input("press Enter to exit")
sys.exit()
| apache-2.0 | -3,856,531,406,230,458,000 | 27.41958 | 74 | 0.534449 | false |
plone/plone.server | src/plone.server/plone/server/commands/cli.py | 1 | 2320 | from plone.server import app_settings
from plone.server import logger
from plone.server.commands import Command
from plone.server.testing import PloneRequester
from plone.server.testing import TESTING_SETTINGS
from pprint import pformat
import asyncio
import logging
import threading
import time
def format_headers(headers):
return '\n'.join(['\t{}: {}'.format(n, v)
for n, v in headers.items()])
class CliCommand(Command):
description = 'Plone server CLI utility'
def get_parser(self):
parser = super(CliCommand, self).get_parser()
parser.add_argument('-m', '--method', nargs='?',
default='get', help='HTTP method')
parser.add_argument('-p', '--path', nargs='?',
default='/', help='Path to endpoint')
parser.add_argument('-b', '--body', default=u'',
help='Request body')
return parser
def run(self, arguments, settings, app):
app_settings['root_user']['password'] = TESTING_SETTINGS['root_user']['password']
loop = app.loop
handler = app.make_handler(keep_alive_on=False)
loop.run_until_complete(loop.create_server(
handler,
'127.0.0.1',
5777))
def loop_in_thread(loop):
asyncio.set_event_loop(loop)
loop.run_forever()
t = threading.Thread(target=loop_in_thread, args=(loop,))
t.start()
req_body = arguments.body or ''
requester = PloneRequester('http://localhost:5777')
resp = requester(
arguments.method, arguments.path,
data=req_body or None)
print('''
Path: {path}
Method: {method}
Status code: {code}
Request Headers:
{request_headers}
Response Headers:
{response_headers}
Request body:
{request_body}
Response body:
{body}
'''.format(
path=arguments.path,
method=arguments.method,
code=resp.status_code,
request_headers=format_headers(resp.request.headers),
response_headers=format_headers(resp.headers),
body=pformat(resp.json()),
request_body=req_body
))
loop.call_soon_threadsafe(loop.stop)
while(loop.is_running()):
time.sleep(1)
| bsd-2-clause | 1,884,047,753,443,460,400 | 26.951807 | 89 | 0.590948 | false |
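The command above starts a throw-away server on 127.0.0.1:5777 in a background thread and then issues a single request against it through PloneRequester. Below is a stripped-down sketch of that request flow, using only the calls that appear above; the path and method are arbitrary examples, and a server must already be listening.

from plone.server.testing import PloneRequester

requester = PloneRequester('http://localhost:5777')
resp = requester('get', '/', data=None)   # same call shape as in run() above
print(resp.status_code)                   # HTTP status code
print(resp.json())                        # response body, decoded as JSON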
tp199911/PyTables | tables/attributeset.py | 1 | 25523 | # -*- coding: utf-8 -*-
########################################################################
#
# License: BSD
# Created: May 26, 2003
# Author: Francesc Alted - [email protected]
#
# $Id$
#
########################################################################
"""Here is defined the AttributeSet class."""
import re
import sys
import warnings
import cPickle
import numpy
from tables import hdf5extension
from tables.utils import SizeType
from tables.registry import class_name_dict
from tables.exceptions import ClosedNodeError, PerformanceWarning
from tables.path import check_attribute_name
from tables.undoredo import attr_to_shadow
from tables.filters import Filters
from tables._past import previous_api
# System attributes
SYS_ATTRS = ["CLASS", "VERSION", "TITLE", "NROWS", "EXTDIM",
"ENCODING", "PYTABLES_FORMAT_VERSION",
"FLAVOR", "FILTERS", "AUTO_INDEX",
"DIRTY", "NODE_TYPE", "NODE_TYPE_VERSION",
"PSEUDOATOM"]
# Prefixes of other system attributes
SYS_ATTRS_PREFIXES = ["FIELD_"]
# RO_ATTRS has been disabled so that users can modify these attributes
# if they want to. Users are still not allowed to remove or rename
# system attributes. Francesc Alted 2004-12-19
# Read-only attributes:
# RO_ATTRS = ["CLASS", "FLAVOR", "VERSION", "NROWS", "EXTDIM",
# "PYTABLES_FORMAT_VERSION", "FILTERS",
# "NODE_TYPE", "NODE_TYPE_VERSION"]
# RO_ATTRS = []
# The next attributes are not meant to be copied during a Node copy process
SYS_ATTRS_NOTTOBECOPIED = ["CLASS", "VERSION", "TITLE", "NROWS", "EXTDIM",
"PYTABLES_FORMAT_VERSION", "FILTERS", "ENCODING"]
# Attributes forced to be copied during node copies
FORCE_COPY_CLASS = ['CLASS', 'VERSION']
# Regular expression for column default values.
_field_fill_re = re.compile('^FIELD_[0-9]+_FILL$')
# Regular expression for fixing old pickled filters.
_old_filters_re = re.compile(br'\(([ic])tables\.Leaf\n')
# Fixed version of the previous string.
_new_filters_sub = br'(\1tables.filters\n'
def issysattrname(name):
"Check if a name is a system attribute or not"
if (name in SYS_ATTRS or
numpy.prod([name.startswith(prefix)
for prefix in SYS_ATTRS_PREFIXES])):
return True
else:
return False
class AttributeSet(hdf5extension.AttributeSet, object):
"""Container for the HDF5 attributes of a Node.
This class provides methods to create new HDF5 node attributes,
and to get, rename or delete existing ones.
Like in Group instances (see :ref:`GroupClassDescr`), AttributeSet
instances make use of the *natural naming* convention, i.e. you can
access the attributes on disk as if they were normal Python
attributes of the AttributeSet instance.
This offers the user a very convenient way to access HDF5 node
attributes. However, for this reason and in order not to pollute the
object namespace, one can not assign *normal* attributes to
AttributeSet instances, and their members use names which start by
special prefixes as happens with Group objects.
.. rubric:: Notes on native and pickled attributes
The values of most basic types are saved as HDF5 native data in the
HDF5 file. This includes Python bool, int, float, complex and str
(but not long nor unicode) values, as well as their NumPy scalar
versions and homogeneous or *structured* NumPy arrays of them. When
read, these values are always loaded as NumPy scalar or array
objects, as needed.
For that reason, attributes in native HDF5 files will be always
mapped into NumPy objects. Specifically, a multidimensional
attribute will be mapped into a multidimensional ndarray and a
scalar will be mapped into a NumPy scalar object (for example, a
scalar H5T_NATIVE_LLONG will be read and returned as a numpy.int64
scalar).
However, other kinds of values are serialized using pickle, so you
will only be able to retrieve them correctly using a Python-aware
HDF5 library. Thus, if you want to save Python scalar values and
make sure you are able to read them with generic HDF5 tools, you
should make use of *scalar or homogeneous/structured array NumPy
objects* (for example, numpy.int64(1) or numpy.array([1, 2, 3],
dtype='int16')).
One more piece of advice: because of the various potential difficulties in
restoring a Python object stored in an attribute, you may end up
getting a pickle string where a Python object is expected. If this
is the case, you may wish to run pickle.loads() on that string to
get an idea of where things went wrong, as shown in this example::
>>> import os, tempfile
>>> import tables
>>>
>>> class MyClass(object):
... foo = 'bar'
...
>>> myObject = MyClass() # save object of custom class in HDF5 attr
>>> h5fname = tempfile.mktemp(suffix='.h5')
>>> h5f = tables.open_file(h5fname, 'w')
>>> h5f.root._v_attrs.obj = myObject # store the object
>>> print(h5f.root._v_attrs.obj.foo) # retrieve it
bar
>>> h5f.close()
>>>
>>> del MyClass, myObject # delete class of object and reopen file
>>> h5f = tables.open_file(h5fname, 'r')
>>> print(repr(h5f.root._v_attrs.obj))
'ccopy_reg\\n_reconstructor...
>>> import pickle # let's unpickle that to see what went wrong
>>> pickle.loads(h5f.root._v_attrs.obj)
Traceback (most recent call last):
...
AttributeError: 'module' object has no attribute 'MyClass'
>>> # So the problem was not in the stored object,
... # but in the *environment* where it was restored.
... h5f.close()
>>> os.remove(h5fname)
.. rubric:: Notes on AttributeSet methods
Note that this class overrides the __getattr__(), __setattr__() and
__delattr__() special methods. This allows you to read, assign or
delete attributes on disk by just using the next constructs::
leaf.attrs.myattr = 'str attr' # set a string (native support)
leaf.attrs.myattr2 = 3 # set an integer (native support)
leaf.attrs.myattr3 = [3, (1, 2)] # a generic object (Pickled)
attrib = leaf.attrs.myattr # get the attribute ``myattr``
del leaf.attrs.myattr # delete the attribute ``myattr``
In addition, the dictionary-like __getitem__(), __setitem__() and
__delitem__() methods are available, so you may write things like
this::
for name in node._v_attrs._f_list():
print("name: %s, value: %s" % (name, node._v_attrs[name]))
Use whatever idiom you prefer to access the attributes.
If an attribute is set on a target node that already has a large
number of attributes, a PerformanceWarning will be issued.
.. rubric:: AttributeSet attributes
.. attribute:: _v_attrnames
A list with all attribute names.
.. attribute:: _v_attrnamessys
A list with system attribute names.
.. attribute:: _v_attrnamesuser
A list with user attribute names.
.. attribute:: _v_unimplemented
A list of attribute names with unimplemented native HDF5 types.
"""
def _g_getnode(self):
return self._v__nodefile._get_node(self._v__nodepath)
_v_node = property(_g_getnode, None, None,
"The :class:`Node` instance this attribute set is "
"associated with.")
def __init__(self, node):
"""Create the basic structures to keep the attribute information.
Reads all the HDF5 attributes (if any) on disk for the node "node".
Parameters
----------
node
The parent node
"""
# Refuse to create an instance of an already closed node
if not node._v_isopen:
raise ClosedNodeError("the node for attribute set is closed")
dict_ = self.__dict__
self._g_new(node)
dict_["_v__nodefile"] = node._v_file
dict_["_v__nodepath"] = node._v_pathname
dict_["_v_attrnames"] = self._g_list_attr(node)
# The list of unimplemented attribute names
dict_["_v_unimplemented"] = []
# Get the file version format. This is an optimization
# in order to avoid accessing it too much.
try:
format_version = node._v_file.format_version
except AttributeError:
parsed_version = None
else:
if format_version == 'unknown':
parsed_version = None
else:
parsed_version = tuple(map(int, format_version.split('.')))
dict_["_v__format_version"] = parsed_version
# Split the attribute list in system and user lists
dict_["_v_attrnamessys"] = []
dict_["_v_attrnamesuser"] = []
for attr in self._v_attrnames:
# put the attributes on the local dictionary to allow
# tab-completion
self.__getattr__(attr)
if issysattrname(attr):
self._v_attrnamessys.append(attr)
else:
self._v_attrnamesuser.append(attr)
# Sort the attributes
self._v_attrnames.sort()
self._v_attrnamessys.sort()
self._v_attrnamesuser.sort()
def _g_update_node_location(self, node):
"""Updates the location information about the associated `node`."""
dict_ = self.__dict__
dict_['_v__nodefile'] = node._v_file
dict_['_v__nodepath'] = node._v_pathname
# hdf5extension operations:
self._g_new(node)
_g_updateNodeLocation = previous_api(_g_update_node_location)
def _f_list(self, attrset='user'):
"""Get a list of attribute names.
The attrset string selects the attribute set to be used. A
'user' value returns only user attributes (this is the default).
A 'sys' value returns only system attributes. Finally, 'all'
returns both system and user attributes.
"""
if attrset == "user":
return self._v_attrnamesuser[:]
elif attrset == "sys":
return self._v_attrnamessys[:]
elif attrset == "all":
return self._v_attrnames[:]
def __getattr__(self, name):
"""Get the attribute named "name"."""
# If attribute does not exist, raise AttributeError
if not name in self._v_attrnames:
raise AttributeError("Attribute '%s' does not exist in node: "
"'%s'" % (name, self._v__nodepath))
# Read the attribute from disk. This is an optimization to read
# quickly system attributes that are _string_ values, but it
# takes care of other types as well as for example NROWS for
# Tables and EXTDIM for EArrays
format_version = self._v__format_version
value = self._g_getattr(self._v_node, name)
# Check whether the value is pickled
# Pickled values always seems to end with a "."
maybe_pickled = (
isinstance(value, numpy.generic) and # NumPy scalar?
value.dtype.type == numpy.bytes_ and # string type?
value.itemsize > 0 and value.endswith(b'.'))
if (maybe_pickled and value in [b"0", b"0."]):
# Workaround for a bug in many versions of Python (starting
# somewhere after Python 2.6.1). See ticket #253.
retval = value
elif (maybe_pickled and _field_fill_re.match(name)
and format_version == (1, 5)):
# This format was used during the first 1.2 releases, just
# for string defaults.
try:
retval = cPickle.loads(value)
retval = numpy.array(retval)
except ImportError:
retval = None # signal error avoiding exception
elif maybe_pickled and name == 'FILTERS' and format_version < (2, 0):
# This is a big hack, but we don't have other way to recognize
# pickled filters of PyTables 1.x files.
value = _old_filters_re.sub(_new_filters_sub, value, 1)
retval = cPickle.loads(value) # pass unpickling errors through
elif maybe_pickled:
try:
retval = cPickle.loads(value)
# except cPickle.UnpicklingError:
# It seems that pickle may raise other errors than UnpicklingError
# Perhaps it would be better just an "except:" clause?
# except (cPickle.UnpicklingError, ImportError):
# Definitely (see SF bug #1254636)
except:
# ivb (2005-09-07): It is too hard to tell
# whether the unpickling failed
# because of the string not being a pickle one at all,
# because of a malformed pickle string,
# or because of some other problem in object reconstruction,
# thus making inconvenient even the issuing of a warning here.
# The documentation contains a note on this issue,
# explaining how the user can tell where the problem was.
retval = value
# Additional check for allowing a workaround for #307
if isinstance(retval, unicode) and retval == u'':
retval = numpy.array(retval)[()]
elif name == 'FILTERS' and format_version >= (2, 0):
retval = Filters._unpack(value)
elif name == 'TITLE' and not isinstance(value, str):
if sys.version_info[0] < 3:
# unicode is OK for TITLE
retval = value
else:
retval = value.decode('utf-8')
elif (issysattrname(name) and isinstance(value, (bytes, unicode)) and
not isinstance(value, str) and not _field_fill_re.match(name)):
# system attributes should always be str
if sys.version_info[0] < 3:
retval = value.encode()
else:
# python 3, bytes and not "FIELD_[0-9]+_FILL"
retval = value.decode('utf-8')
else:
retval = value
# Put this value in local directory
self.__dict__[name] = retval
return retval
def _g__setattr(self, name, value):
"""Set a PyTables attribute.
Sets a (maybe new) PyTables attribute with the specified `name`
and `value`. If the attribute already exists, it is simply
replaced.
It does not log the change.
"""
# Save this attribute to disk
# (overwriting an existing one if needed)
stvalue = value
if issysattrname(name):
if name in ["EXTDIM", "AUTO_INDEX", "DIRTY", "NODE_TYPE_VERSION"]:
stvalue = numpy.array(value, dtype=numpy.int32)
value = stvalue[()]
elif name == "NROWS":
stvalue = numpy.array(value, dtype=SizeType)
value = stvalue[()]
elif name == "FILTERS" and self._v__format_version >= (2, 0):
stvalue = value._pack()
# value will remain as a Filters instance here
# Convert value from a Python scalar into a NumPy scalar
# (only in case it has not been converted yet)
# Fixes ticket #59
if (stvalue is value and
type(value) in (bool, bytes, int, float, complex, unicode,
numpy.unicode_)):
# Additional check for allowing a workaround for #307
if isinstance(value, unicode) and len(value) == 0:
stvalue = numpy.array(u'')
else:
stvalue = numpy.array(value)
value = stvalue[()]
self._g_setattr(self._v_node, name, stvalue)
# New attribute or value. Introduce it into the local
# directory
self.__dict__[name] = value
# Finally, add this attribute to the list if not present
attrnames = self._v_attrnames
if not name in attrnames:
attrnames.append(name)
attrnames.sort()
if issysattrname(name):
attrnamessys = self._v_attrnamessys
attrnamessys.append(name)
attrnamessys.sort()
else:
attrnamesuser = self._v_attrnamesuser
attrnamesuser.append(name)
attrnamesuser.sort()
def __setattr__(self, name, value):
"""Set a PyTables attribute.
Sets a (maybe new) PyTables attribute with the specified `name`
and `value`. If the attribute already exists, it is simply
replaced.
A ``ValueError`` is raised when the name starts with a reserved
prefix or contains a ``/``. A `NaturalNameWarning` is issued if
the name is not a valid Python identifier. A
`PerformanceWarning` is issued when the recommended maximum
number of attributes in a node is going to be exceeded.
"""
nodefile = self._v__nodefile
attrnames = self._v_attrnames
# Check for name validity
check_attribute_name(name)
nodefile._check_writable()
# Check if there are too many attributes.
max_node_attrs = nodefile.params['MAX_NODE_ATTRS']
if len(attrnames) >= max_node_attrs:
warnings.warn("""\
node ``%s`` is exceeding the recommended maximum number of attributes (%d);\
be ready to see PyTables asking for *lots* of memory and possibly slow I/O"""
% (self._v__nodepath, max_node_attrs),
PerformanceWarning)
undo_enabled = nodefile.is_undo_enabled()
# Log old attribute removal (if any).
if undo_enabled and (name in attrnames):
self._g_del_and_log(name)
# Set the attribute.
self._g__setattr(name, value)
# Log new attribute addition.
if undo_enabled:
self._g_log_add(name)
def _g_log_add(self, name):
self._v__nodefile._log('ADDATTR', self._v__nodepath, name)
_g_logAdd = previous_api(_g_log_add)
def _g_del_and_log(self, name):
nodefile = self._v__nodefile
node_pathname = self._v__nodepath
# Log *before* moving to use the right shadow name.
nodefile._log('DELATTR', node_pathname, name)
attr_to_shadow(nodefile, node_pathname, name)
_g_delAndLog = previous_api(_g_del_and_log)
def _g__delattr(self, name):
"""Delete a PyTables attribute.
Deletes the specified existing PyTables attribute.
It does not log the change.
"""
# Delete the attribute from disk
self._g_remove(self._v_node, name)
# Delete the attribute from local lists
self._v_attrnames.remove(name)
if name in self._v_attrnamessys:
self._v_attrnamessys.remove(name)
else:
self._v_attrnamesuser.remove(name)
# Delete the attribute from the local directory
# closes (#1049285)
del self.__dict__[name]
def __delattr__(self, name):
"""Delete a PyTables attribute.
Deletes the specified existing PyTables attribute from the
attribute set. If a nonexistent or system attribute is
specified, an ``AttributeError`` is raised.
"""
nodefile = self._v__nodefile
# Check if attribute exists
if name not in self._v_attrnames:
raise AttributeError(
"Attribute ('%s') does not exist in node '%s'"
% (name, self._v__nodepath))
nodefile._check_writable()
# Remove the PyTables attribute or move it to shadow.
if nodefile.is_undo_enabled():
self._g_del_and_log(name)
else:
self._g__delattr(name)
def __getitem__(self, name):
"""The dictionary like interface for __getattr__()."""
try:
return self.__getattr__(name)
except AttributeError:
# Capture the AttributeError and re-raise it as a KeyError
raise KeyError(
"Attribute ('%s') does not exist in node '%s'"
% (name, self._v__nodepath))
def __setitem__(self, name, value):
"""The dictionary like interface for __setattr__()."""
self.__setattr__(name, value)
def __delitem__(self, name):
"""The dictionary like interface for __delattr__()."""
try:
self.__delattr__(name)
except AttributeError:
# Capture the AttributeError and re-raise it as a KeyError
raise KeyError(
"Attribute ('%s') does not exist in node '%s'"
% (name, self._v__nodepath))
def __contains__(self, name):
"""Is there an attribute with that name?
A true value is returned if the attribute set has an attribute
with the given name, false otherwise.
"""
return name in self._v_attrnames
def _f_rename(self, oldattrname, newattrname):
"""Rename an attribute from oldattrname to newattrname."""
if oldattrname == newattrname:
# Do nothing
return
# First, fetch the value of the oldattrname
attrvalue = getattr(self, oldattrname)
# Now, create the new attribute
setattr(self, newattrname, attrvalue)
# Finally, remove the old attribute
delattr(self, oldattrname)
def _g_copy(self, newset, set_attr=None, copyclass=False):
"""Copy set attributes.
Copies all user and allowed system PyTables attributes to the
given attribute set, replacing the existing ones.
You can specify a *bound* method of the destination set that
will be used to set its attributes. Else, its `_g__setattr`
method will be used.
Changes are logged depending on the chosen setting method. The
default setting method does not log anything.
.. versionchanged:: 3.0
The *newSet* parameter has been renamed into *newset*.
.. versionchanged:: 3.0
The *copyClass* parameter has been renamed into *copyclass*.
"""
copysysattrs = newset._v__nodefile.params['PYTABLES_SYS_ATTRS']
if set_attr is None:
set_attr = newset._g__setattr
for attrname in self._v_attrnamesuser:
# Do not copy the unimplemented attributes.
if attrname not in self._v_unimplemented:
set_attr(attrname, getattr(self, attrname))
# Copy the system attributes that we are allowed to.
if copysysattrs:
for attrname in self._v_attrnamessys:
if ((attrname not in SYS_ATTRS_NOTTOBECOPIED) and
# Do not copy the FIELD_ attributes in tables as this can
# be really *slow* (don't know exactly the reason).
# See #304.
not attrname.startswith("FIELD_")):
set_attr(attrname, getattr(self, attrname))
# Copy CLASS and VERSION attributes if requested
if copyclass:
for attrname in FORCE_COPY_CLASS:
if attrname in self._v_attrnamessys:
set_attr(attrname, getattr(self, attrname))
def _f_copy(self, where):
"""Copy attributes to the where node.
Copies all user and certain system attributes to the given where
node (a Node instance - see :ref:`NodeClassDescr`), replacing
the existing ones.
"""
# AttributeSet must be defined in order to define a Node.
# However, we need to know Node here.
# Using class_name_dict avoids a circular import.
if not isinstance(where, class_name_dict['Node']):
raise TypeError("destination object is not a node: %r" % (where,))
self._g_copy(where._v_attrs, where._v_attrs.__setattr__)
def _g_close(self):
# Nothing will be done here, as the existing instance is completely
# operative now.
pass
def __str__(self):
"""The string representation for this object."""
# The pathname
pathname = self._v__nodepath
# Get this class name
classname = self.__class__.__name__
# The attribute names
attrnumber = len([n for n in self._v_attrnames])
return "%s._v_attrs (%s), %s attributes" % \
(pathname, classname, attrnumber)
def __repr__(self):
"""A detailed string representation for this object."""
# print additional info only if there are attributes to show
attrnames = [n for n in self._v_attrnames]
if len(attrnames):
rep = ['%s := %r' % (attr, getattr(self, attr))
for attr in attrnames]
attrlist = '[%s]' % (',\n '.join(rep))
return "%s:\n %s" % (str(self), attrlist)
else:
return str(self)
class NotLoggedAttributeSet(AttributeSet):
def _g_log_add(self, name):
pass
_g_logAdd = previous_api(_g_log_add)
def _g_del_and_log(self, name):
self._g__delattr(name)
_g_delAndLog = previous_api(_g_del_and_log)
## Local Variables:
## mode: python
## py-indent-offset: 4
## tab-width: 4
## fill-column: 72
## End:
| bsd-3-clause | 4,261,707,770,689,709,000 | 35.989855 | 78 | 0.592368 | false |
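A short usage sketch for the attribute-set API defined above. It only exercises behaviour shown in this file (natural naming, dictionary access, _f_list and _f_rename); the HDF5 file name is arbitrary, and the example is illustrative rather than part of PyTables itself.

# Illustrative example, not part of PyTables.
import tables

with tables.open_file('attrs-demo.h5', 'w') as h5f:
    arr = h5f.create_array('/', 'a', [1, 2, 3])
    attrs = arr._v_attrs                 # an AttributeSet instance
    attrs.units = 'counts'               # native value, stored as a NumPy scalar
    attrs['history'] = [('created', 1)]  # generic object, transparently pickled
    attrs._f_rename('units', 'unit')     # rename keeps the stored value
    print(attrs._f_list('user'))         # -> ['history', 'unit']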
mxklabs/mxklabs-python | mxklabs/dimacs/dimacs.py | 1 | 10444 | import sys
import string
import unittest
class Dimacs(object):
def __init__(self, clauses):
self.clauses = clauses
self.num_clauses = len(self.clauses)
self.num_vars = max([0] + [max([0] + [abs(l) for l in c]) for c in self.clauses])
class DimacsParser(object):
def __init__(self, filename=None, file=None, string=None):
self.in_filename = filename
self.in_file = file
self.in_string = string
self.line_no = None
self.num_vars = 0
self.num_clauses = 0
self.problem_statement_line = 0
self.problem_statement_num_vars_column = 0
self.problem_statement_num_clauses_column = 0
self.seen_clause = False
self.seen_problem_statement = False
self.clauses = []
self.max_var = 0
self.__parse()
def get_num_vars(self):
return self.num_vars
def get_num_clauses(self):
return len(self.clauses)
def get_clauses(self):
return self.clauses
def __process_problem_statement(self, num_vars, num_clauses):
self.num_vars = num_vars
self.num_clauses = num_clauses
self.seen_problem_statement = True
def __process_start_of_clause(self):
self.clauses.append([])
def __process_clause_literal(self, literal):
self.clauses[-1].append(literal)
abs_literal = abs(literal)
if abs_literal > self.max_var:
self.max_var = abs_literal
def __process_end_of_clause(self ):
pass
def __parse(self):
self.line_no = 1
if self.in_filename is not None:
file = open(self.in_filename, 'r')
for line in file:
self.__process_line(line)
file.close()
if self.in_file is not None:
for line in self.in_file:
self.__process_line(line)
if self.in_string is not None:
for line in self.in_string.split('\n'):
self.__process_line(line)
#max_var = max([max([abs(l) for l in c]) for c in self.clauses])
if self.num_vars < self.max_var:
self.__process_error_with_location("the declared number of variables (%d) is smaller than the actual number of variables (%d)" % (
self.num_vars,
self.max_var),
self.problem_statement_line,
self.problem_statement_num_vars_column)
if self.num_clauses != len(self.clauses):
self.__process_error_with_location("the declared number of clauses (%d) does not match the actual number of clauses (%d)" % (
self.num_clauses,
len(self.clauses)),
self.problem_statement_line,
self.problem_statement_num_clauses_column)
if not self.seen_problem_statement:
self.__process_error("missing problem statement")
def __process_line(self, line):
if len(line) > 0:
if line[0] == 'c':
pass
elif not self.seen_problem_statement and line[0] == 'p':
#tokens = line.split()
line_frags = self.__split_string(line)
if len(line_frags) != 4:
self.__raise_syntax_error(self.line_no, 1)
elif line_frags[0][0] != 'p':
self.__raise_syntax_error(self.line_no, line_frags[0][1][0]+1)
elif line_frags[1][0] != 'cnf':
self.__raise_syntax_error(self.line_no, line_frags[1][1][0]+1)
else:
self.problem_statement_line = self.line_no
self.problem_statement_num_vars_column = line_frags[2][1][0]+1
self.problem_statement_num_clauses_column = line_frags[3][1][0]+1
try:
num_vars = int(line_frags[2][0])
try:
num_clauses = int(line_frags[3][0])
self.__process_problem_statement(num_vars, num_clauses)
except ValueError:
self.__raise_syntax_error(self.line_no, line_frags[3][1][0]+1)
except ValueError:
self.__raise_syntax_error(self.line_no, line_frags[2][1][0]+1)
elif self.seen_problem_statement:
line_frags = self.__split_string(line)
for token, (col_start, _) in line_frags:
try:
literal = int(token)
if literal == 0:
if self.seen_clause:
self.__process_end_of_clause()
self.seen_clause = False
else: # literal != 0
if not self.seen_clause:
self.__process_start_of_clause()
self.seen_clause = True
self.__process_clause_literal(literal)
except ValueError:
self.__raise_syntax_error(self.line_no, col_start+1)
else:
self.__process_error_with_location("expected a problem statement or comment on this line", self.line_no, 1)
self.line_no += 1
def __process_error(self, error_msg):
raise Exception("error: %s" % error_msg)
def __process_error_with_location(self, msg, line, col):
self.__process_error("%s (line %d, column %d)" % (msg, line, col))
def __raise_syntax_error(self, line, col):
self.__process_error_with_location("invalid syntax", line, col)
''' Tokenise a string on whitespace. Return a list of tuples of
(word, (start_column, end_column)) for each word found. '''
@staticmethod
def __split_string(s):
result = []
was_in_token = False
token_start = None
for i in range(len(s)):
is_in_token = s[i] not in string.whitespace
if not was_in_token and is_in_token:
token_start = i
elif was_in_token and not is_in_token:
result.append((s[token_start:i], (token_start,i)))
was_in_token = is_in_token
if was_in_token:
result.append((s[token_start:], (token_start, len(s))))
return result
def read(filename=None, file=None, string=None):
dimacs_parser = DimacsParser(filename=filename, file=file, string=string)
return Dimacs(clauses=dimacs_parser.clauses)
class Tests(unittest.TestCase):
''' Check a good instance parses without problems. '''
def test_pos1(self):
string = \
"c simple_v3_c2.cnf\n" + \
"p cnf 3 2\n" + \
"1 -3 0\n" + \
"2 3 -1 0\n"
sat = DimacsParser(string=string)
self.assertEqual(3, sat.get_num_vars())
self.assertEqual(2, sat.get_num_clauses())
self.assertEqual([[1,-3],[2,3,-1]], sat.get_clauses())
''' Invalid first token. '''
def test_invalid_problem_statement_1(self):
string = "pt cnf 3 1\n2 3 -1 0\n"
with self.assertRaises(Exception) as c:
sat = DimacsParser(string=string)
self.assertEqual("error: invalid syntax (line 1, column 1)", str(c.exception))
''' Invalid second token. '''
def test_invalid_problem_statement_2(self):
string = "p dnf 3 1\n2 3 -1 0\n"
with self.assertRaises(Exception) as c:
sat = DimacsParser(string=string)
self.assertEqual("error: invalid syntax (line 1, column 3)", str(c.exception))
''' Letter instead of num_vars. '''
def test_invalid_problem_statement_3(self):
string = "p cnf a 1\n2 3 -1 0\n"
with self.assertRaises(Exception) as c:
sat = DimacsParser(string=string)
self.assertEqual("error: invalid syntax (line 1, column 7)", str(c.exception))
''' Letter instead of num_clauses. '''
def test_invalid_problem_statement_4(self):
string = "p cnf 3 a\n2 3 -1 0\n"
with self.assertRaises(Exception) as c:
sat = DimacsParser(string=string)
self.assertEqual("error: invalid syntax (line 1, column 9)", str(c.exception))
''' Too many tokens. '''
def test_invalid_problem_statement_5(self):
string = "p cnf 3 1 4\n2 3 -1 0\n"
with self.assertRaises(Exception) as c:
sat = DimacsParser(string=string)
self.assertEqual("error: invalid syntax (line 1, column 1)", str(c.exception))
''' Using letters. '''
def test_invalid_syntax_3(self):
string = "p cnf 3 1\n2 a -1 0\n"
with self.assertRaises(Exception) as c:
sat = DimacsParser(string=string)
self.assertEqual("error: invalid syntax (line 2, column 3)", str(c.exception))
''' Num clauses mismatch. '''
def test_invalid_num_clauses(self):
string = "p cnf 3 7\n2 3 -1 0\n"
with self.assertRaises(Exception) as c:
sat = DimacsParser(string=string)
self.assertEqual("error: the declared number of clauses (7) does not match the actual number of clauses (1) (line 1, column 9)", str(c.exception))
''' Invalid variable number. '''
def test_invalid_variable_num(self):
string = "p cnf 3 2\n2 4 -1 0\n2 5 -8 0\n"
with self.assertRaises(Exception) as c:
sat = DimacsParser(string=string)
self.assertEqual("error: the declared number of variables (3) is smaller than the actual number of variables (8) (line 1, column 7)", str(c.exception))
''' Clause before problem statement. '''
def test_clause_before_problem_statement(self):
string = "c comment\n2 -1 0\np cnf 1 2\n1 -2 0\n"
with self.assertRaises(Exception) as c:
sat = DimacsParser(string=string)
self.assertEqual("error: expected a problem statement or comment on this line (line 2, column 1)", str(c.exception))
''' Too many problem statements. '''
def test_too_many_problem_statements(self):
string = "p cnf 3 2\np cnf 3 2\n2 4 -1 0\n2 5 -1 0\n"
with self.assertRaises(Exception) as c:
sat = DimacsParser(string=string)
self.assertEqual("error: invalid syntax (line 2, column 1)", str(c.exception))
''' Empty. '''
def test_missing_problem_statement(self):
string = ""
with self.assertRaises(Exception) as c:
sat = DimacsParser(string=string)
self.assertEqual("error: missing problem statement", str(c.exception))
''' From web site. '''
def test_website_instance(self):
string = \
"c This is an example Boolean Satisfiability instance with 3 variables ('1', '2' and '3') consisting of a\n" + \
"c logical AND of 4 clauses ('1 OR NOT 2', '2 OR NOT 1', '1 OR 3' and '1 OR NOT 3'). A line starting with 'c'\n" + \
"c indicates a comment line whereas the line starting with 'p' declares the size of the Boolean Satisifiability\n" + \
"c problem by stating number of variables (3) and clauses (4) involved. The remaining lines each specify a\n" + \
"c clause (terminated with a '0' character).\n" + \
" \n" + \
" s\n" + \
"p cnf 3 4\n" + \
"1 -2 0\n" + \
"2 -1 0\n" + \
"1 3 0\n" + \
"1 -3 0\n"
sat = DimacsParser(string=string)
self.assertEqual(3, sat.get_num_vars())
self.assertEqual(4, sat.get_num_clauses())
self.assertEqual([[1,-2],[2, -1],[1, 3],[1,-3]], sat.get_clauses())
| gpl-3.0 | 8,759,660,011,890,471,000 | 35.013793 | 155 | 0.619207 | false |
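A minimal usage sketch for the module above, mirroring what its unit tests exercise. The CNF text is an arbitrary two-clause example, and read() is the helper defined above; the import path is inferred from the file location and may differ from the installed package layout.

# Assumes the module above is importable, e.g.:
# from mxklabs.dimacs.dimacs import read
cnf_text = ("c tiny example\n"
            "p cnf 3 2\n"
            "1 -3 0\n"
            "2 3 -1 0\n")

d = read(string=cnf_text)   # parses and validates the DIMACS text
print(d.num_vars)           # -> 3
print(d.num_clauses)        # -> 2
print(d.clauses)            # -> [[1, -3], [2, 3, -1]]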
marzig76/blexplor | block.py | 1 | 1804 | """
This module contains a class for parsing a bitcoin block.
Author: Mike Marzigliano
"""
import datetime
from tx import tx
from blockutil import *
class block(object):
"""This class is for parsing the block header and block data fields."""
def __init__(self, blockstream):
"""
Parse the values for all fields in the block header and block.
Arguments:
blockstream - the stream of data to parse
"""
self.magic_number = parse_int(blockstream, 4)
self.block_size = parse_int(blockstream, 4)
self.version = parse_int(blockstream, 4)
self.prev_hash = parse_hash(blockstream)
self.merkel_root = parse_hash(blockstream)
self.time = parse_int(blockstream, 4)
self.target = parse_int(blockstream, 4)
self.nonce = parse_int(blockstream, 4)
self.txcount = compact_size(blockstream)
self.txs = []
for i in range(0, self.txcount):
self.txs.append(tx(blockstream))
def __str__(self):
"""Build and return a string representing the block data."""
blockstring = (
'\nMagic Number:\t' + hex(self.magic_number) +
'\nBlock Size:\t' + str(self.block_size) +
'\nBlock Version:\t' + str(self.version) +
'\nPrevious Hash:\t' + self.prev_hash +
'\nMerkel Root:\t' + self.merkel_root +
'\nTime:\t' + (
datetime.datetime.fromtimestamp(
self.time
).strftime('%Y-%m-%d %H:%M:%S')) +
'\nTarget:\t' + hex(self.target) +
'\nNonce:\t' + str(self.nonce) +
'\nTransaction Count:\t' + str(self.txcount)
)
for i in self.txs:
blockstring += str(i)
return blockstring
| gpl-3.0 | -5,536,994,658,962,147,000 | 31.214286 | 75 | 0.563747 | false |
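A hedged sketch of driving the parser above: block() only needs a readable binary stream positioned at the start of a serialized block, so a raw block file can be fed to it directly. The file name is a placeholder, and the import path is inferred from this file sitting at the repository root.

from block import block   # inferred import; block.py is at the repo root

# 'rawblock.dat' is a placeholder for a file holding one serialized block.
with open('rawblock.dat', 'rb') as blockstream:
    blk = block(blockstream)   # parses header, tx count and transactions
    print(blk)                 # __str__ renders every parsed field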
NINAnor/QGIS | python/plugins/processing/algs/qgis/VoronoiPolygons.py | 1 | 9670 | # -*- coding: utf-8 -*-
"""
***************************************************************************
VoronoiPolygons.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from sets import Set
from qgis.core import QGis, QgsFeatureRequest, QgsFeature, QgsGeometry, QgsPoint
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterNumber
from processing.core.outputs import OutputVector
import voronoi
from processing.tools import dataobjects, vector
class VoronoiPolygons(GeoAlgorithm):
INPUT = 'INPUT'
BUFFER = 'BUFFER'
OUTPUT = 'OUTPUT'
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Voronoi polygons')
self.group, self.i18n_group = self.trAlgorithm('Vector geometry tools')
self.addParameter(ParameterVector(self.INPUT,
self.tr('Input layer'), [ParameterVector.VECTOR_TYPE_POINT]))
self.addParameter(ParameterNumber(self.BUFFER,
self.tr('Buffer region'), 0.0, 100.0, 0.0))
self.addOutput(OutputVector(self.OUTPUT, self.tr('Voronoi polygons')))
def processAlgorithm(self, progress):
layer = dataobjects.getObjectFromUri(self.getParameterValue(self.INPUT))
buf = self.getParameterValue(self.BUFFER)
writer = self.getOutputFromName(self.OUTPUT).getVectorWriter(
layer.pendingFields().toList(), QGis.WKBPolygon, layer.crs())
inFeat = QgsFeature()
outFeat = QgsFeature()
extent = layer.extent()
extraX = extent.height() * (buf / 100.0)
extraY = extent.width() * (buf / 100.0)
height = extent.height()
width = extent.width()
c = voronoi.Context()
pts = []
ptDict = {}
ptNdx = -1
features = vector.features(layer)
for inFeat in features:
geom = QgsGeometry(inFeat.geometry())
point = geom.asPoint()
x = point.x() - extent.xMinimum()
y = point.y() - extent.yMinimum()
pts.append((x, y))
ptNdx += 1
ptDict[ptNdx] = inFeat.id()
if len(pts) < 3:
raise GeoAlgorithmExecutionException(
self.tr('Input file should contain at least 3 points. Choose '
'another file and try again.'))
uniqueSet = Set(item for item in pts)
ids = [pts.index(item) for item in uniqueSet]
sl = voronoi.SiteList([voronoi.Site(i[0], i[1], sitenum=j) for (j,
i) in enumerate(uniqueSet)])
voronoi.voronoi(sl, c)
inFeat = QgsFeature()
current = 0
total = 100.0 / float(len(c.polygons))
for (site, edges) in c.polygons.iteritems():
request = QgsFeatureRequest().setFilterFid(ptDict[ids[site]])
inFeat = layer.getFeatures(request).next()
lines = self.clip_voronoi(edges, c, width, height, extent, extraX, extraY)
geom = QgsGeometry.fromMultiPoint(lines)
geom = QgsGeometry(geom.convexHull())
outFeat.setGeometry(geom)
outFeat.setAttributes(inFeat.attributes())
writer.addFeature(outFeat)
current += 1
progress.setPercentage(int(current * total))
del writer
def clip_voronoi(self, edges, c, width, height, extent, exX, exY):
"""Clip voronoi function based on code written for Inkscape.
Copyright (C) 2010 Alvin Penner, [email protected]
"""
def clip_line(x1, y1, x2, y2, w, h, x, y):
if x1 < 0 - x and x2 < 0 - x:
return [0, 0, 0, 0]
if x1 > w + x and x2 > w + x:
return [0, 0, 0, 0]
if x1 < 0 - x:
y1 = (y1 * x2 - y2 * x1) / (x2 - x1)
x1 = 0 - x
if x2 < 0 - x:
y2 = (y1 * x2 - y2 * x1) / (x2 - x1)
x2 = 0 - x
if x1 > w + x:
y1 = y1 + (w + x - x1) * (y2 - y1) / (x2 - x1)
x1 = w + x
if x2 > w + x:
y2 = y1 + (w + x - x1) * (y2 - y1) / (x2 - x1)
x2 = w + x
if y1 < 0 - y and y2 < 0 - y:
return [0, 0, 0, 0]
if y1 > h + y and y2 > h + y:
return [0, 0, 0, 0]
if x1 == x2 and y1 == y2:
return [0, 0, 0, 0]
if y1 < 0 - y:
x1 = (x1 * y2 - x2 * y1) / (y2 - y1)
y1 = 0 - y
if y2 < 0 - y:
x2 = (x1 * y2 - x2 * y1) / (y2 - y1)
y2 = 0 - y
if y1 > h + y:
x1 = x1 + (h + y - y1) * (x2 - x1) / (y2 - y1)
y1 = h + y
if y2 > h + y:
x2 = x1 + (h + y - y1) * (x2 - x1) / (y2 - y1)
y2 = h + y
return [x1, y1, x2, y2]
lines = []
hasXMin = False
hasYMin = False
hasXMax = False
hasYMax = False
for edge in edges:
if edge[1] >= 0 and edge[2] >= 0:
# Two vertices
[x1, y1, x2, y2] = clip_line(
c.vertices[edge[1]][0],
c.vertices[edge[1]][1],
c.vertices[edge[2]][0],
c.vertices[edge[2]][1],
width,
height,
exX,
exY,
)
elif edge[1] >= 0:
# Only one vertex
if c.lines[edge[0]][1] == 0:
# Vertical line
xtemp = c.lines[edge[0]][2] / c.lines[edge[0]][0]
if c.vertices[edge[1]][1] > (height + exY) / 2:
ytemp = height + exY
else:
ytemp = 0 - exX
else:
xtemp = width + exX
ytemp = (c.lines[edge[0]][2] - (width + exX)
* c.lines[edge[0]][0]) / c.lines[edge[0]][1]
[x1, y1, x2, y2] = clip_line(
c.vertices[edge[1]][0],
c.vertices[edge[1]][1],
xtemp,
ytemp,
width,
height,
exX,
exY,
)
elif edge[2] >= 0:
# Only one vertex
if c.lines[edge[0]][1] == 0:
# Vertical line
xtemp = c.lines[edge[0]][2] / c.lines[edge[0]][0]
if c.vertices[edge[2]][1] > (height + exY) / 2:
ytemp = height + exY
else:
ytemp = 0.0 - exY
else:
xtemp = 0.0 - exX
ytemp = c.lines[edge[0]][2] / c.lines[edge[0]][1]
[x1, y1, x2, y2] = clip_line(
xtemp,
ytemp,
c.vertices[edge[2]][0],
c.vertices[edge[2]][1],
width,
height,
exX,
exY,
)
if x1 or x2 or y1 or y2:
lines.append(QgsPoint(x1 + extent.xMinimum(), y1
+ extent.yMinimum()))
lines.append(QgsPoint(x2 + extent.xMinimum(), y2
+ extent.yMinimum()))
if 0 - exX in (x1, x2):
hasXMin = True
if 0 - exY in (y1, y2):
hasYMin = True
if height + exY in (y1, y2):
hasYMax = True
if width + exX in (x1, x2):
hasXMax = True
if hasXMin:
if hasYMax:
lines.append(QgsPoint(extent.xMinimum() - exX, height
+ extent.yMinimum() + exY))
if hasYMin:
lines.append(QgsPoint(extent.xMinimum() - exX,
extent.yMinimum() - exY))
if hasXMax:
if hasYMax:
lines.append(QgsPoint(width + extent.xMinimum() + exX, height
+ extent.yMinimum() + exY))
if hasYMin:
lines.append(QgsPoint(width + extent.xMinimum() + exX,
extent.yMinimum() - exY))
return lines
| gpl-2.0 | -1,948,047,915,561,625,900 | 37.525896 | 103 | 0.431748 | false |
jimporter/bfg9000 | bfg9000/tools/cc/compiler.py | 1 | 5277 | from itertools import chain
from ... import options as opts, safe_str
from .flags import optimize_flags
from ..common import BuildCommand
from ...file_types import ObjectFile, PrecompiledHeader
from ...iterutils import iterate
from ...path import Path
from ...versioning import SpecifierSet
class CcBaseCompiler(BuildCommand):
@property
def deps_flavor(self):
return None if self.lang in ('f77', 'f95') else 'gcc'
@property
def needs_libs(self):
return False
@property
def needs_package_options(self):
return True
def search_dirs(self, strict=False):
return self.env.variables.getpaths('CPATH')
def _call(self, cmd, input, output, deps=None, flags=None):
result = list(chain(
cmd, self._always_flags, iterate(flags), ['-c', input]
))
if deps:
result.extend(['-MMD', '-MF', deps])
result.extend(['-o', output])
return result
@property
def _always_flags(self):
flags = ['-x', self._langs[self.lang]]
# Force color diagnostics on Ninja, since it's off by default. See
# <https://github.com/ninja-build/ninja/issues/174> for more
# information.
if self.env.backend == 'ninja':
if self.brand == 'clang':
flags.append('-fcolor-diagnostics')
elif (self.brand == 'gcc' and self.version and
self.version in SpecifierSet('>=4.9')):
flags.append('-fdiagnostics-color')
return flags
def _include_dir(self, directory):
is_default = directory.path in self.env.host_platform.include_dirs
# Don't include default directories as system dirs (e.g. /usr/include).
# Doing so would break GCC 6 when #including stdlib.h:
# <https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70129>.
if directory.system and not is_default:
return ['-isystem', directory.path]
else:
return ['-I' + directory.path]
def flags(self, options, global_options=None, output=None, mode='normal'):
flags = []
for i in options:
if isinstance(i, opts.include_dir):
flags.extend(self._include_dir(i.directory))
elif isinstance(i, opts.define):
if i.value:
flags.append('-D' + i.name + '=' + i.value)
else:
flags.append('-D' + i.name)
elif isinstance(i, opts.std):
flags.append('-std=' + i.value)
elif isinstance(i, opts.warning):
for j in i.value:
if j == opts.WarningValue.disable:
flags.append('-w')
else:
flags.append('-W' + j.name)
elif isinstance(i, opts.debug):
flags.append('-g')
elif isinstance(i, opts.static):
pass
elif isinstance(i, opts.optimize):
for j in i.value:
flags.append(optimize_flags[j])
elif isinstance(i, opts.pthread):
flags.append('-pthread')
elif isinstance(i, opts.pic):
flags.append('-fPIC')
elif isinstance(i, opts.pch):
flags.extend(['-include', i.header.path.stripext()])
elif isinstance(i, opts.sanitize):
flags.append('-fsanitize=address')
elif isinstance(i, safe_str.stringy_types):
flags.append(i)
else:
raise TypeError('unknown option type {!r}'.format(type(i)))
return flags
class CcCompiler(CcBaseCompiler):
_langs = {
'c' : 'c',
'c++' : 'c++',
'objc' : 'objective-c',
'objc++': 'objective-c++',
'f77' : 'f77',
'f95' : 'f95',
'java' : 'java',
}
def __init__(self, builder, env, *, command, flags):
super().__init__(builder, env, command=command, flags=flags)
@property
def accepts_pch(self):
return True
def default_name(self, input, step):
return input.path.stripext().suffix
def output_file(self, name, step):
# XXX: MinGW's object format doesn't appear to be COFF...
return ObjectFile(Path(name + '.o'), self.builder.object_format,
self.lang)
class CcPchCompiler(CcBaseCompiler):
_langs = {
'c' : 'c-header',
'c++' : 'c++-header',
'objc' : 'objective-c-header',
'objc++': 'objective-c++-header',
}
def __init__(self, builder, env, *, command, flags):
if builder.lang not in self._langs:
raise ValueError('{} has no precompiled headers'
.format(builder.lang))
super().__init__(builder, env, command[0] + '_pch', command=command,
flags=flags)
@property
def accepts_pch(self):
# You can't pass a PCH to a PCH compiler!
return False
def default_name(self, input, step):
return input.path.suffix
def output_file(self, name, step):
ext = '.gch' if self.brand == 'gcc' else '.pch'
return PrecompiledHeader(Path(name + ext), self.lang)
| bsd-3-clause | 754,488,575,035,242,800 | 33.266234 | 79 | 0.540838 | false |
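For reference, a rough sketch of how the option-to-flag translation in flags() above behaves. Here `compiler` stands for an already-constructed CcCompiler instance, and the opts constructor signatures are inferred from the attribute accesses above (i.name, i.value), so treat them as assumptions rather than bfg9000's documented API.

# Hedged sketch, not taken from bfg9000's own docs or tests.
# Assumes: from bfg9000 import options as opts
option_list = [
    opts.define('NDEBUG'),   # assumed signature: define(name, value=None)
    opts.std('c++17'),       # assumed signature: std(value)
    opts.pic(),
]
print(compiler.flags(option_list))
# Expected, per the branches in flags() above:
# ['-DNDEBUG', '-std=c++17', '-fPIC']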
recap/pumpkin | examples/fasta/fastainject.py | 1 | 2143 | ###START-CONF
##{
##"object_name": "fastainject",
##"object_poi": "qpwo-2345",
##"auto-load": true,
##"remoting" : false,
##"parameters": [
##
## ],
##"return": [
## {
## "name": "fasta",
## "description": "raw fasta",
## "required": true,
## "type": "FastaString",
## "format": "",
## "state" : "RAW"
## }
##
## ] }
##END-CONF
from os import listdir
from os.path import isfile, join
import pika
from os.path import expanduser
from Bio import SeqIO
from Bio import pairwise2
from Bio.SubsMat import MatrixInfo as matlist
from pumpkin import *
class fastainject(PmkSeed.Seed):
def __init__(self, context, poi=None):
PmkSeed.Seed.__init__(self, context,poi)
def on_load(self):
print "Loading: " + self.__class__.__name__
def run(self, pkt):
matrix = matlist.blosum62
gap_open = -10
gap_extend = -0.5
dir = expanduser("~")+"/fasta/"
onlyfiles = [ f for f in listdir(dir) if isfile(join(dir,f)) ]
for fl in onlyfiles:
fullpath = dir+fl
if( fl[-5:] == "fasta"):
print "File: "+str(fl)
pp = SeqIO.parse(open(fullpath, "rU"), "fasta")
first_record = pp.next()
pp.close()
for second_record in SeqIO.parse(fullpath, "fasta"):
#print "First: "+first_record.seq
#print "Second: "+second_record.seq
#SeqIO.parse(open(fullpath, "rU"), "fasta")
data = str(first_record.seq)+"|,|"+str(second_record.seq)
npkt = self.duplicate_pkt_new_container(pkt)
self.dispatch(npkt, data, "RAW")
#alns = pairwise2.align.globalds(first_record.seq, second_record.seq, matrix, gap_open, gap_extend)
#top_aln = alns[0]
#aln_human, aln_mouse, score, begin, end = top_aln
#print aln_human+'\n'+aln_mouse
| mit | 1,529,830,487,479,376,100 | 27.959459 | 119 | 0.486701 | false |
Unicorn-rzl/pretix | src/pretix/control/middleware.py | 1 | 3347 | from urllib.parse import urlparse
from django.conf import settings
from django.core.urlresolvers import resolve, get_script_prefix
from django.utils.encoding import force_str
from django.shortcuts import resolve_url
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.http import HttpResponseNotFound
from django.utils.translation import ugettext as _
from pretix.base.models import Event, Organizer, EventPermission
class PermissionMiddleware:
"""
This middleware requires a login for all requests to the control app.
Additionally, it ensures that all requests to "control:event." URLs
are for an event the user has basic access to.
"""
EXCEPTIONS = (
"auth.login",
"auth.register"
)
def process_request(self, request):
url = resolve(request.path_info)
url_name = url.url_name
if not request.path.startswith(get_script_prefix() + 'control') or url_name in self.EXCEPTIONS:
return
if not request.user.is_authenticated():
# Taken from django/contrib/auth/decorators.py
path = request.build_absolute_uri()
# urlparse chokes on lazy objects in Python 3, force to str
resolved_login_url = force_str(
resolve_url(settings.LOGIN_URL_CONTROL))
# If the login url is the same scheme and net location then just
# use the path as the "next" url.
login_scheme, login_netloc = urlparse(resolved_login_url)[:2]
current_scheme, current_netloc = urlparse(path)[:2]
if ((not login_scheme or login_scheme == current_scheme) and
(not login_netloc or login_netloc == current_netloc)):
path = request.get_full_path()
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(
path, resolved_login_url, REDIRECT_FIELD_NAME)
request.user.events_cache = request.user.events.current.order_by(
"organizer", "date_from").prefetch_related("organizer")
if 'event' in url.kwargs and 'organizer' in url.kwargs:
try:
request.event = Event.objects.current.filter(
slug=url.kwargs['event'],
permitted__id__exact=request.user.id,
organizer__slug=url.kwargs['organizer'],
).select_related('organizer')[0]
request.eventperm = EventPermission.objects.current.get(
event=request.event,
user=request.user
)
request.organizer = request.event.organizer
except IndexError:
return HttpResponseNotFound(_("The selected event was not found or you "
"have no permission to administrate it."))
elif 'organizer' in url.kwargs:
try:
request.organizer = Organizer.objects.current.filter(
slug=url.kwargs['organizer'],
permitted__id__exact=request.user.id,
)[0]
except IndexError:
return HttpResponseNotFound(_("The selected organizer was not found or you "
"have no permission to administrate it."))
| apache-2.0 | 202,406,622,099,669,630 | 44.849315 | 103 | 0.603824 | false |
ndp-systemes/odoo-addons | bus_integration/models/bus_configuration.py | 1 | 5545 | # -*- coding: utf8 -*-
#
# Copyright (C) 2017 NDP Systèmes (<http://www.ndp-systemes.fr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import ipaddress
import socket
import logging
from urlparse import urlparse
from requests import get, ConnectionError
from openerp import models, fields, api, exceptions, _
_logger = logging.getLogger(__name__)
class BaseUrlMatchException(exceptions.except_orm):
def __init__(self, msg):
super(BaseUrlMatchException, self).__init__(_(u"Error!"), msg)
class BusConfiguration(models.Model):
_name = 'bus.configuration'
_inherit = 'bus.send.message'
name = fields.Char(u"Name")
sender_id = fields.Many2one('bus.base', u"Sender")
bus_configuration_export_ids = fields.One2many('bus.configuration.export', 'configuration_id', string=u"Fields")
reception_treatment = fields.Selection([('simple_reception', u"Simple reception")], u"Message Reception Treatment",
required=True)
code = fields.Selection([('ODOO_SYNCHRONIZATION', u"Odoo synchronization")], u"Code exchange", required=True)
connexion_state = fields.Char(u"Connexion status")
module_disabled_mapping = fields.Char(u"Module disabled mapping", help=u"Module not used for mapping by xml id",
default='__export__')
keep_messages_for = fields.Integer(string=u"Keep messages for", help=u"In days", default=7)
@api.model
def _get_host_name_or_raise(self):
base_url = self.env['ir.config_parameter'].get_param('web.base.url')
if not base_url:
return False
hostname = urlparse(base_url).hostname
ip = socket.gethostbyname(hostname)
        # if the hostname resolves to localhost, base_url matches this host
if ip == '127.0.0.1':
return hostname
# ip might be the public ip
ipify_url = self.env['ir.config_parameter'].get_param('bus_integration.ipify')
try:
public_ip = get(ipify_url).text
except ConnectionError as ex:
            # fall-back behaviour: if ipify.org is not available, don't block
_logger.error(repr(ex))
return hostname
if ip == public_ip:
return hostname
        # if ipify.org returned something that is not an IP address, raise a user-friendly error
try:
ipaddress.ip_address(public_ip)
except ipaddress.AddressValueError as ex:
_logger.error(repr(ex))
            raise BaseUrlMatchException('Public IP of %s has not been retrieved by %s, '
                                        'please try again later' % (hostname, ipify_url))
error_message = 'hostname %s resolved as %s does neither match local nor public IP (127.0.0.1 and %s)'
raise BaseUrlMatchException(error_message % (hostname, ip, public_ip))
# static var
_singleton_host_name = False
@api.multi
def _get_cached_host_name(self):
"""
        Cache the host name in a class attribute so it is only computed once, to speed up repeated calls.
"""
if not BusConfiguration._singleton_host_name:
BusConfiguration._singleton_host_name = self._get_host_name_or_raise()
return BusConfiguration._singleton_host_name
@api.model
def _is_not_allowed_error_message(self, server, login, hostname):
"""
        Check whether this Odoo instance is known by the distant bus; return an error message if it is not.
        :return: error message, or False when the instance is allowed
"""
res = self.send_search_read(server, login,
model='bus.subscriber',
domain=[('url', 'ilike', hostname), ('database', 'ilike', self.env.cr.dbname)],
fields=['url', 'database'])
if res: # no error to return..
return False
return "Connection refused ! The host \"%s\" with db \"%s\" are not known by the distant bus, " \
"please check the bus configuration" % (hostname, self.env.cr.dbname)
@api.multi
def try_connexion(self, raise_error=False):
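        # First check connectivity and credentials via the parent implementation, then verify
        # that this host is actually registered on the distant bus.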
server, result, login = super(BusConfiguration, self).try_connexion(raise_error)
try:
hostname = self._get_cached_host_name()
except BaseUrlMatchException as e:
if raise_error:
raise BaseUrlMatchException(e.value)
else:
return server, e.message, 0
error_message = self._is_not_allowed_error_message(server, login, hostname)
if error_message:
            # web.base.url might be wrong; recompute the cached host name in case the config parameter changes
BusConfiguration._singleton_host_name = False
if raise_error:
raise exceptions.except_orm(_(u"Error!"), error_message)
return server, error_message, 0
# all ok
return server, result, login
| agpl-3.0 | 8,341,019,391,396,605,000 | 41.320611 | 119 | 0.624278 | false |
neuroidss/nupic.vision | nupicvision/mnist/run_mnist_experiment.py | 1 | 9526 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import datetime
import time
import yaml
import numpy
import os
from nupic.bindings.math import GetNTAReal
from nupic.engine import Network
from nupic.research import fdrutilities
from nupicvision.regions.ImageSensor import ImageSensor
"""
Setups a simple Network and runs it on the MNIST dataset. Assumes you have
the mnist data in subdirectories called mnist/training and mnist/testing.
The current network, using a random SP, gets about 95.5% correct on the test set
if trained with the full training set. By no means is this a complete HTM
vision system. There is no sequence memory or temporal pooling here. In
addition there is a single SP whose receptive field is the entire image. A
system with local receptive fields, temporal pooling, and some minimal
hierarchy would be required to get really good invariance and recognition rates.
Best SP params so far:
------------CPP SpatialPooler Parameters ------------------
numInputs = 1024
numColumns = 4096
numActiveColumnsPerInhArea = 240
potentialPct = 0.9
globalInhibition = 1
localAreaDensity = -1
stimulusThreshold = 0
synPermActiveInc = 0
synPermInactiveDec = 0
synPermConnected = 0.2
minPctOverlapDutyCycles = 0.001
minPctActiveDutyCycles = 0.001
dutyCyclePeriod = 1000
maxBoost = 1
wrapAround = 1
CPP SP seed = 1956
"""
DEFAULT_IMAGESENSOR_PARAMS = {
"width": 32,
"height": 32,
"mode": "bw",
"background": 0,
"explorer": yaml.dump(["RandomFlash", {"replacement": False}])
}
DEFAULT_SP_PARAMS = {
"columnCount": 4096,
"spatialImp": "cpp",
"inputWidth": 1024,
"spVerbosity": 1,
"synPermConnected": 0.2,
"synPermActiveInc": 0.0,
"synPermInactiveDec": 0.0,
"seed": 1956,
"numActiveColumnsPerInhArea": 240,
"globalInhibition": 1,
"potentialPct": 0.9,
"maxBoost": 1.0
}
DEFAULT_CLASSIFIER_PARAMS = {
"distThreshold": 0.000001,
"maxCategoryCount": 10,
#"distanceMethod": "rawOverlap", # Default is Euclidean distance
}
def createNetwork():
"""
Set up the following simple network and return it:
ImageSensorRegion -> SP -> KNNClassifier Region
"""
net = Network()
# Register the ImageSensor region with the network
Network.registerRegion(ImageSensor)
# Add the three regions
net.addRegion("sensor", "py.ImageSensor",
yaml.dump(DEFAULT_IMAGESENSOR_PARAMS))
net.addRegion("SP", "py.SPRegion", yaml.dump(DEFAULT_SP_PARAMS))
net.addRegion("classifier","py.KNNClassifierRegion",
yaml.dump(DEFAULT_CLASSIFIER_PARAMS))
# Link up the regions. Note that we need to create a link from the sensor
# to the classifier to send in the category labels.
net.link("sensor", "SP", "UniformLink", "",
srcOutput = "dataOut", destInput = "bottomUpIn")
net.link("SP", "classifier", "UniformLink", "",
srcOutput = "bottomUpOut", destInput = "bottomUpIn")
net.link("sensor", "classifier", "UniformLink", "",
srcOutput = "categoryOut", destInput = "categoryIn")
return net
def trainNetwork(net, networkFile="mnist_net.nta"):
# Some stuff we will need later
sensor = net.regions["sensor"]
sp = net.regions["SP"]
pysp = sp.getSelf()
classifier = net.regions["classifier"]
dutyCycles = numpy.zeros(DEFAULT_SP_PARAMS["columnCount"], dtype=GetNTAReal())
print "============= Loading training images ================="
t1 = time.time()
sensor.executeCommand(["loadMultipleImages", "mnist/training"])
numTrainingImages = sensor.getParameter("numImages")
start = time.time()
print "Load time for training images:",start-t1
print "Number of training images",numTrainingImages
# First train just the SP
print "============= SP training ================="
classifier.setParameter("inferenceMode", 0)
classifier.setParameter("learningMode", 0)
sp.setParameter("learningMode", 1)
sp.setParameter("inferenceMode", 0)
nTrainingIterations = numTrainingImages
for i in range(nTrainingIterations):
net.run(1)
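    # Accumulate how often each SP column wins so we can report duty-cycle statistics below.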
dutyCycles += pysp._spatialPoolerOutput
if i%(nTrainingIterations/100)== 0:
print "Iteration",i,"Category:",sensor.getOutputData("categoryOut")
# Now train just the classifier sequentially on all training images
print "============= Classifier training ================="
sensor.setParameter("explorer",yaml.dump(["Flash"]))
classifier.setParameter("inferenceMode", 0)
classifier.setParameter("learningMode", 1)
sp.setParameter("learningMode", 0)
sp.setParameter("inferenceMode", 1)
for i in range(numTrainingImages):
net.run(1)
if i%(numTrainingImages/100)== 0:
print "Iteration",i,"Category:",sensor.getOutputData("categoryOut")
# Save the trained network
net.save(networkFile)
# Print various statistics
print "============= Training statistics ================="
print "Training time:",time.time() - start
tenPct= nTrainingIterations/10
print "My duty cycles:",fdrutilities.numpyStr(dutyCycles, format="%g")
print "Number of nonzero duty cycles:",len(dutyCycles.nonzero()[0])
print "Mean/Max duty cycles:",dutyCycles.mean(), dutyCycles.max()
print "Number of columns that won for > 10% patterns",\
(dutyCycles>tenPct).sum()
print "Number of columns that won for > 20% patterns",\
(dutyCycles>2*tenPct).sum()
print "Num categories learned",classifier.getParameter("categoryCount")
print "Number of patterns stored",classifier.getParameter("patternCount")
return net
def testNetwork(testPath="mnist/testing", savedNetworkFile="mnist_net.nta"):
net = Network(savedNetworkFile)
sensor = net.regions["sensor"]
sp = net.regions["SP"]
classifier = net.regions["classifier"]
print "Reading test images"
sensor.executeCommand(["loadMultipleImages",testPath])
numTestImages = sensor.getParameter("numImages")
print "Number of test images",numTestImages
start = time.time()
# Various region parameters
sensor.setParameter("explorer", yaml.dump(["RandomFlash",
{"replacement": False}]))
classifier.setParameter("inferenceMode", 1)
classifier.setParameter("learningMode", 0)
sp.setParameter("inferenceMode", 1)
sp.setParameter("learningMode", 0)
numCorrect = 0
for i in range(numTestImages):
net.run(1)
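    # The classifier outputs one score per category; the argmax is the predicted digit.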
inferredCategory = classifier.getOutputData("categoriesOut").argmax()
if sensor.getOutputData("categoryOut") == inferredCategory:
numCorrect += 1
if i%(numTestImages/100)== 0:
print "Iteration",i,"numCorrect=",numCorrect
# Some interesting statistics
print "Testing time:",time.time()-start
print "Number of test images",numTestImages
print "num correct=",numCorrect
print "pct correct=",(100.0*numCorrect) / numTestImages
def checkNet(net):
# DEBUG: Verify we set parameters correctly
# This is the "correct" way to access internal region parameters. It will
# work across all languages
sensor = net.regions["sensor"]
classifier = net.regions["classifier"]
sp = net.regions["SP"]
width = sensor.getParameter("width")
height = sensor.getParameter("height")
print "width/height=",width,height
print "Classifier distance threshold",classifier.getParameter("distThreshold")
print "Log path:",sp.getParameter("logPathInput")
print "min/max phase",net.getMinEnabledPhase(), net.getMaxEnabledPhase()
# This is a convenient method that only works for Python regions.
# Here we get a pointer to the actual Python instance of that region.
pysensor = sensor.getSelf()
print "Python width/height",pysensor.height, pysensor.width
print "Explorer:",pysensor.getParameter("explorer")
print "Filters:",pysensor.getParameter("filters")
if __name__ == "__main__":
net = createNetwork()
datetimestr = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
networkDirName = "networks"
if not os.path.exists(networkDirName):
os.makedirs(networkDirName)
netName = "%s/%s_mnist_net.nta" % (networkDirName, datetimestr)
trainNetwork(net, netName)
checkNet(net)
# As a debugging step, verify we've learned the training set well
# This assumes you have a small subset of the training images in
# mnist/small_training
print "Test on small part of training set"
testNetwork("mnist/small_training", netName)
checkNet(net)
print "Test on full test set"
testNetwork(savedNetworkFile=netName)
| gpl-3.0 | 3,696,308,238,580,711,000 | 33.514493 | 80 | 0.683813 | false |
jamiepg1/clojurehelper | setup.py | 1 | 1132 | from subprocess import call
from distutils.core import setup
from distutils.command.build import build
class build_lein_xml(build):
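    """Custom distutils build command that builds the bundled lein-xml jar before the normal build steps."""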
def run(self):
print "Building lein xml"
call('./build-lein-xml', shell=True)
build.run(self)
setup(
name='clojurehelper',
version='0.2',
scripts=['lein_makepkg', 'lein_build', 'lein_clean', 'lein_builddocs',
'lein_configure', 'lein_create_profile'],
packages=['leinpkg'],
platforms=['any'],
description='Make debian packages from Leiningen projects',
author="Eugenio Cano-Manuel Mendoza",
author_email="[email protected]",
url="https://github.com/Debian/clojurehelper",
cmdclass={'build': build_lein_xml},
package_data={
'leinpkg': ['templates/*']
},
data_files=[('/usr/share/perl5/Debian/Debhelper/Sequence/', ['lein2.pm']),
('/usr/share/java/', ['lein-xml/lein-xml.jar'])],
classifiers=[
"Development Status :: 1 - Planning",
"Programming Language :: Python",
"Topic :: System :: Archiving :: Packaging",
"Topic :: Utilities",
]
)
| mit | -3,171,557,891,851,203,000 | 30.444444 | 78 | 0.617491 | false |
uogbuji/akara | lib/resource/web.py | 1 | 3263 | #Useful: http://docs.python.org/library/wsgiref.html
#
import httplib
import sqlite3
from datetime import datetime
from wsgiref.util import shift_path_info, request_uri
from string import Template
from cStringIO import StringIO
from akara.resource import *
from akara.resource.repository import driver
from akara.resource.index import simple_xpath_index
# Templates
wrapper = Template("""\
<html><head><title>$title</title></head><body>
$body
</body></html>
""")
four_oh_four = Template("""\
<html><body>
<h1>404-ed!</h1>
The requested URL <i>$url</i> was not found.
</body></html>""")
def alias(environ, start_response):
'''
GET - retrieve the resource with the specified alias
POST - create a resource with the specified alias
'''
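    # Note: relies on a module-level APPS mapping (alias path -> WSGI application) being defined elsewhere.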
key = environ['PATH_INFO']
print 'key', key
    if key not in APPS:
#404 error
start_response('404 Not Found', [('content-type', 'text/html')])
response = four_oh_four.substitute(url=request_uri(environ))
return [response]
next = APPS[key]
return next(environ, start_response)
def response(code):
return '%i %s'%(code, httplib.responses[code])
def store(environ, start_response):
dbfile = environ['akara.DBFILE']
drv = driver(sqlite3.connect(dbfile))
def head_resource():
get_resource()
return ''
def get_resource():
key = shift_path_info(environ)
content1, metadata = drv.get_resource(key)
if content1 is None:
#404 error
start_response('404 Not Found', [('content-type', 'text/html')])
response = four_oh_four.substitute(url=request_uri(environ))
return response
start_response('200 OK', [('content-type', str(metadata[CONTENT_TYPE]))])
return content1.encode('utf-8')
def post_resource():
ctype = environ.get('CONTENT_TYPE', 'application/unknown')
        clen = int(environ.get('CONTENT_LENGTH') or 0)  # avoid int(None) when the header is missing
if not clen:
start_response("411 Length Required", [('Content-Type','text/plain')])
return ["Length Required"]
key = shift_path_info(environ)
now = datetime.now().isoformat()
md = {
CREATED: now,
UPDATED: now,
CONTENT_LENGTH: clen,
CONTENT_TYPE: ctype,
}
#md = self.standard_index
content = environ['wsgi.input'].read(clen)
id = drv.create_resource(content, metadata=md)
msg = 'Adding %i' % id
new_uri = str(id)
headers = [('Content-Type', 'text/plain')]
headers.append(('Location', new_uri))
headers.append(('Content-Location', new_uri))
#environ['akara.etag'] = compute_etag(content)
headers.append(('Content-Length', str(len(msg))))
start_response("201 Created", headers)
return msg
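    # Dispatch on the HTTP method; anything not listed here gets a 405 response.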
dispatch = {
'GET': get_resource,
'HEAD': head_resource,
'POST': post_resource,
}
method = dispatch.get(environ['REQUEST_METHOD'])
if not method:
response_headers = [('Content-type','text/plain')]
start_response(response(httplib.METHOD_NOT_ALLOWED), response_headers)
return ['Method Not Allowed']
else:
return [method()]
| apache-2.0 | -8,775,797,675,565,607,000 | 28.396396 | 82 | 0.609562 | false |