def test_hello_world(workspace):
    workspace.src('pom.xml', r'''
<project>
  <modelVersion>4.0.0</modelVersion>
  <groupId>com.mycompany.app</groupId>
  <artifactId>my-app</artifactId>
  <version>1.0-SNAPSHOT</version>
  <properties>
    <maven.compiler.source>1.7</maven.compiler.source>
    <maven.compiler.target>1.7</maven.compiler.target>
  </properties>
  <dependencies>
    <dependency>
      <groupId>junit</groupId>
      <artifactId>junit</artifactId>
      <version>4.12</version>
      <scope>test</scope>
    </dependency>
  </dependencies>
</project>
''')
    workspace.src('src/main/java/com/mycompany/app/App.java', r'''
package com.mycompany.app;

public class App {
    public static void main(String[] args) {
        System.out.print("Hello, World!");
    }
}
''')
    workspace.run('mvn package')
    assert workspace.run('java -cp target/my-app-1.0-SNAPSHOT.jar com.mycompany.app.App').out == 'Hello, World!'
| {
"content_hash": "61a870735974070cd349a7fab78e707c",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 112,
"avg_line_length": 27.972972972972972,
"alnum_prop": 0.5990338164251208,
"repo_name": "imsardine/learning",
"id": "0ddfb41f73a8d1f812f28efc88f88a1a23ad591e",
"size": "1035",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "maven/tests/test_start.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "5865"
},
{
"name": "HTML",
"bytes": "1033"
},
{
"name": "Java",
"bytes": "90086"
},
{
"name": "JavaScript",
"bytes": "2052"
},
{
"name": "Makefile",
"bytes": "17149"
},
{
"name": "Python",
"bytes": "328931"
},
{
"name": "Rust",
"bytes": "899"
},
{
"name": "Shell",
"bytes": "610"
},
{
"name": "Swift",
"bytes": "3004"
}
],
"symlink_target": ""
} |
"""
Ex. 7: Write a python program that creates a list. One of the elements of the list
should be a dictionary with at least two keys. Write this list out to a file
using both YAML and JSON formats. The YAML file should be in the expanded form.
Creates a list. One of the elements of the list is a dictionary with two keys. This list
is written to a YAML file and a JSON file
"""
import yaml
import json
from pprint import pprint
def output_format(my_list, my_str):
'''
Make the output format easier to read
:param my_list: a list of the items to print
:param my_str: the name of the list
:return:
'''
print "\n\n"
print "#" * 3
print "#" * 3 + my_str
print "#" * 3
pprint(my_list)
def main():
'''
Read YAML and JSON files, pretty print to standard out
:return:
'''
yaml_file = "list_output.yml"
json_file = "list_output.json"
with open(yaml_file) as f:
yaml_list=yaml.load(f)
with open(json_file) as f:
json_list=json.load(f)
output_format(yaml_list, " YAML")
output_format(json_list, " JSON")
print "\n"
if __name__ == "__main__":
main()
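# Illustrative sketch (not part of the original file): the exercise text above
# also calls for the companion writer. A minimal version, assuming PyYAML is
# available, could look like the function below; default_flow_style=False asks
# yaml.dump for the expanded (block) form, and the file names match the reader
# in main(). The sample data is made up for demonstration.
def write_sample_list():
    sample_list = ['eth0', 'eth1', {'ip': '10.0.0.1', 'mask': 24}]
    with open("list_output.yml", "w") as f:
        yaml.dump(sample_list, f, default_flow_style=False)
    with open("list_output.json", "w") as f:
        json.dump(sample_list, f)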
| {
"content_hash": "dc34c10373bf8b44e453ee35be5ba32c",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 88,
"avg_line_length": 24.595744680851062,
"alnum_prop": 0.634083044982699,
"repo_name": "jrslocum17/pynet_test",
"id": "4c0495ea9db8deac963b706997d02993225c2cb4",
"size": "1178",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Week1/read_and_print_list.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "73175"
}
],
"symlink_target": ""
} |
from pagediterator import PagedIterator
from unittest import TestCase
from mock import MagicMock, call


class PagedIterator1(PagedIterator):
    def __init__(self):
        PagedIterator.__init__(self)
        self.fetch_page(0)

    @property
    def total_size(self):
        return self._total_size

    @property
    def page_size(self):
        return self._page_size

    def fetch_page(self, page_number):
        self._total_size = 8
        self._page_size = 3
        pages = [
            [1, 2, 3],
            [4, 5, 6],
            [7, 8],
        ]
        return pages[page_number]

    def get_page(self, page_number):
        return self.fetch_page(page_number)


class PagedIteratorTest(TestCase):
    def setUp(self):
        self.iterator = PagedIterator1()

    def test_count_returns_an_integer(self):
        self.assertEqual(len(self.iterator), 8)

    def test_fetching_a_negative_index_throws_an_exception(self):
        self.assertRaises(IndexError, self.iterator.__getitem__, -1)

    def test_fetching_an_out_of_bounds_index_throws_exception(self):
        self.assertRaises(IndexError, self.iterator.__getitem__, 15)

    def test_a_page_size_must_be_set(self):
        self.assertEqual(self.iterator.page_size, 3)

    def test_get_first_element(self):
        self.assertEqual(self.iterator[0], 1)

    def test_get_second_element(self):
        self.assertEqual(self.iterator[1], 2)

    def test_get_first_element_on_second_page(self):
        self.assertEqual(self.iterator[3], 4)

    def test_get_second_element_on_third_page(self):
        self.assertEqual(self.iterator[7], 8)

    def test_fetching_a_string_index_is_not_allowed(self):
        self.assertRaises(ValueError, self.iterator.__getitem__, 'foo')

    def test_offset_that_is_valid_returns_true(self):
        self.assertEqual(self.iterator[0], True)

    def test_offset_that_is_out_of_bounds_returns_false(self):
        self.assertEqual(15 in self.iterator, False)

    def test_offset_that_is_after_the_last_element_returns_false(self):
        self.assertEqual(10 in self.iterator, False)

    def test_values_of_the_same_index_are_cached(self):
        self.iterator.get_page = MagicMock(return_value=[1])
        self.assertEqual(self.iterator[0], 1)
        self.assertEqual(self.iterator[0], 1)
        self.iterator.get_page.assert_called_once_with(0)

    def test_values_of_the_same_pages_are_cached(self):
        self.iterator.get_page = MagicMock(return_value=[1, 2])
        self.assertEqual(self.iterator[0], 1)
        self.assertEqual(self.iterator[1], 2)
        self.iterator.get_page.assert_called_once_with(0)

    def test_values_from_another_page_must_be_requested(self):
        self.iterator.get_page = MagicMock(side_effect=[
            [1, 2, 3],
            [4, 5, 6],
        ])
        self.assertEqual(self.iterator[0], 1)
        self.assertEqual(self.iterator[3], 4)

    def test_values_from_multiple_pages_are_simultaneously_cached(self):
        self.iterator.get_page = MagicMock(side_effect=[
            [1, 2, 3],
            [4, 5, 6],
        ])
        self.assertEqual(self.iterator[0], 1)
        self.assertEqual(self.iterator[3], 4)
        self.assertEqual(self.iterator[0], 1)
        self.assertEqual(self.iterator[3], 4)
        self.iterator.get_page.assert_has_calls([call(0), call(1)])

    def test_traverse_list_in_loop(self):
        result = []
        for item in self.iterator:
            result.append(item)
        self.assertEqual(result, [1, 2, 3, 4, 5, 6, 7, 8])

    def test_traverse_list_in_multiple_loops(self):
        result = []
        for item in self.iterator:
            result.append(item)
        for item in self.iterator:
            result.append(item)
        self.assertEqual(result, [
            1, 2, 3, 4, 5, 6, 7, 8,
            1, 2, 3, 4, 5, 6, 7, 8
        ])

    def test_setting_an_element_raises_an_exception(self):
        self.assertRaises(RuntimeError, self.iterator.__setitem__, 0, 0)

    def test_unsetting_an_element_raises_an_exception(self):
        self.assertRaises(RuntimeError, self.iterator.__delitem__, 0)
| {
"content_hash": "d68812a844208873579b786a44eb12cf",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 72,
"avg_line_length": 33.29838709677419,
"alnum_prop": 0.6224267377088883,
"repo_name": "elliotchance/pagediterator-python",
"id": "b0e7c367853b25e447aa12fee34c7049c9c8e9cd",
"size": "4129",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_pagediterator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9602"
}
],
"symlink_target": ""
} |
from __future__ import division
from vistrails.core.bundles.pyimport import py_import
from vistrails.core.modules.vistrails_module import ModuleError
from ..common import get_numpy, TableObject, Table
def get_xlrd():
try:
return py_import('xlrd', {
'pip': 'xlrd',
'linux-debian': 'python-xlrd',
'linux-ubuntu': 'python-xlrd',
'linux-fedora': 'python-xlrd'},
True)
except ImportError: # pragma: no cover
return None
class ExcelTable(TableObject):
def __init__(self, sheet, header_present):
self.sheet = sheet
self.header_present = header_present
if self.header_present:
self.names = [c.value for c in self.sheet.row(0)]
else:
self.names = None
self.rows = self.sheet.nrows
if self.header_present:
self.rows -= 1
self.columns = self.sheet.ncols
self.column_cache = {}
def get_column(self, index, numeric=False):
if (index, numeric) in self.column_cache:
return self.column_cache[(index, numeric)]
numpy = get_numpy(False)
result = [c.value for c in self.sheet.col(index)]
if self.header_present:
result = result[1:]
if numeric and numpy is not None:
result = numpy.array(result, dtype=numpy.float32)
elif numeric:
result = [float(e) for e in result]
self.column_cache[(index, numeric)] = result
return result
class ExcelSpreadsheet(Table):
"""Reads a table from a Microsoft Excel file.
This module uses xlrd from the python-excel.org project to read a XLS or
XLSX file.
"""
_input_ports = [
('file', '(org.vistrails.vistrails.basic:File)'),
('sheet_name', '(org.vistrails.vistrails.basic:String)',
{'optional': True}),
('sheet_index', '(org.vistrails.vistrails.basic:Integer)',
{'optional': True}),
('header_present', '(org.vistrails.vistrails.basic:Boolean)',
{'optional': True, 'defaults': "['False']"})]
_output_ports = [
('column_count', '(org.vistrails.vistrails.basic:Integer)'),
('column_names', '(org.vistrails.vistrails.basic:String)'),
('value', Table)]
def compute(self):
xlrd = get_xlrd()
if xlrd is None: # pragma: no cover
raise ModuleError(self, "xlrd is not available")
workbook = self.get_input('file')
workbook = xlrd.open_workbook(workbook.name)
if self.has_input('sheet_index'):
sheet_index = self.get_input('sheet_index')
if self.has_input('sheet_name'):
name = self.get_input('sheet_name')
try:
index = workbook.sheet_names().index(name)
except Exception:
raise ModuleError(self, "Sheet name not found")
if self.has_input('sheet_index'):
if sheet_index != index:
raise ModuleError(self,
"Both sheet_name and sheet_index were "
"specified, and they don't agree")
elif self.has_input('sheet_index'):
index = sheet_index
else:
index = 0
sheet = workbook.sheet_by_index(index)
header_present = self.get_input('header_present')
table = ExcelTable(sheet, header_present)
self.set_output('value', table)
if table.names is not None:
self.set_output('column_names', table.names)
self.set_output('column_count', table.columns)
_modules = [ExcelSpreadsheet]
###############################################################################
import itertools
import unittest
from vistrails.tests.utils import execute, intercept_result
from ..identifiers import identifier
from ..common import ExtractColumn
@unittest.skipIf(get_xlrd() is None, "xlrd not available")
class ExcelTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
import os
cls._test_dir = os.path.join(
os.path.dirname(__file__),
os.pardir,
'test_files')
def assertAlmostEqual_lists(self, a, b):
for i, j in itertools.izip(a, b):
self.assertAlmostEqual(i, j, places=5)
def test_xls_numeric(self):
"""Uses ExcelSpreadsheet to load a numeric array.
"""
with intercept_result(ExtractColumn, 'value') as results:
with intercept_result(ExcelSpreadsheet, 'column_count') as cols:
self.assertFalse(execute([
('read|ExcelSpreadsheet', identifier, [
('file', [('File', self._test_dir + '/xl.xls')]),
('sheet_index', [('Integer', '1')]),
('sheet_name', [('String', 'Feuil2')]),
('header_present', [('Boolean', 'False')])
]),
('ExtractColumn', identifier, [
('column_index', [('Integer', '0')]),
('numeric', [('Boolean', 'True')]),
]),
],
[
(0, 'value', 1, 'table'),
]))
self.assertEqual(cols, [1])
self.assertEqual(len(results), 1)
self.assertAlmostEqual_lists(list(results[0]), [1, 2, 2, 3, -7.6])
def test_xls_sheet_mismatch(self):
"""Uses ExcelSpreadsheet with mismatching sheets.
"""
err = execute([
('read|ExcelSpreadsheet', identifier, [
('file', [('File', self._test_dir + '/xl.xls')]),
('sheet_index', [('Integer', '0')]),
('sheet_name', [('String', 'Feuil2')]),
]),
])
self.assertEqual(list(err.keys()), [0])
self.assertEqual(
err[0].msg,
"Both sheet_name and sheet_index were specified, and they "
"don't agree")
def test_xls_sheetname_missing(self):
"""Uses ExcelSpreadsheet with a missing sheet.
"""
err = execute([
('read|ExcelSpreadsheet', identifier, [
('file', [('File', self._test_dir + '/xl.xls')]),
('sheet_name', [('String', 'Sheet12')]),
]),
])
self.assertEqual(list(err.keys()), [0])
self.assertEqual(err[0].msg, "Sheet name not found")
def test_xls_header_nonnumeric(self):
"""Uses ExcelSpreadsheet to load data.
"""
with intercept_result(ExtractColumn, 'value') as results:
with intercept_result(ExcelSpreadsheet, 'column_count') as cols:
self.assertFalse(execute([
('read|ExcelSpreadsheet', identifier, [
('file', [('File', self._test_dir + '/xl.xls')]),
('sheet_name', [('String', 'Feuil1')]),
('header_present', [('Boolean', 'True')])
]),
('ExtractColumn', identifier, [
('column_index', [('Integer', '0')]),
('column_name', [('String', 'data1')]),
('numeric', [('Boolean', 'False')]),
]),
],
[
(0, 'value', 1, 'table'),
]))
self.assertEqual(cols, [2])
self.assertEqual(len(results), 1)
self.assertEqual(list(results[0]), ['here', 'is', 'some', 'text'])
def test_xls_header_numeric(self):
"""Uses ExcelSpreadsheet to load a numeric array.
"""
with intercept_result(ExtractColumn, 'value') as results:
with intercept_result(ExcelSpreadsheet, 'column_count') as cols:
self.assertFalse(execute([
('read|ExcelSpreadsheet', identifier, [
('file', [('File', self._test_dir + '/xl.xls')]),
# Will default to first sheet
('header_present', [('Boolean', 'True')])
]),
('ExtractColumn', identifier, [
('column_name', [('String', 'data2')]),
('numeric', [('Boolean', 'True')]),
]),
],
[
(0, 'value', 1, 'table'),
]))
self.assertEqual(cols, [2])
self.assertEqual(len(results), 1)
self.assertAlmostEqual_lists(list(results[0]), [1, -2.8, 3.4, 3.3])
| {
"content_hash": "d362b6ac9d63a7f0cefa81063fb656b6",
"timestamp": "",
"source": "github",
"line_count": 233,
"max_line_length": 79,
"avg_line_length": 38.3862660944206,
"alnum_prop": 0.48669499105545616,
"repo_name": "hjanime/VisTrails",
"id": "9dcfd6c2a1a82a2f9d39de384bae0804ed3561a3",
"size": "10810",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vistrails/packages/tabledata/read/read_excel.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1421"
},
{
"name": "Inno Setup",
"bytes": "19550"
},
{
"name": "Makefile",
"bytes": "768"
},
{
"name": "Mako",
"bytes": "66613"
},
{
"name": "PHP",
"bytes": "49302"
},
{
"name": "Python",
"bytes": "19803915"
},
{
"name": "R",
"bytes": "782836"
},
{
"name": "Ruby",
"bytes": "875"
},
{
"name": "Shell",
"bytes": "35024"
},
{
"name": "TeX",
"bytes": "145333"
},
{
"name": "XSLT",
"bytes": "1090"
}
],
"symlink_target": ""
} |
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['BoxCox'] , ['Lag1Trend'] , ['Seasonal_DayOfMonth'] , ['SVR'] );
| {
"content_hash": "87f0736102bd8e4f8199b08d61afa15f",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 86,
"avg_line_length": 39.75,
"alnum_prop": 0.710691823899371,
"repo_name": "antoinecarme/pyaf",
"id": "90e7dc5c9c6696e98170384be44352032a9dc698",
"size": "159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_BoxCox/model_control_one_enabled_BoxCox_Lag1Trend_Seasonal_DayOfMonth_SVR.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
"""Commands for reading and manipulating users."""
from googlecloudsdk.calliope import base
@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA)
class Users(base.Group):
"""Read and manipulate Google Compute Engine users."""
Users.detailed_help = {
'brief': 'Read and manipulate Google Compute Engine users',
}
| {
"content_hash": "89b0941ca9b2ef8d9e8a3a865e1e1994",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 68,
"avg_line_length": 28.083333333333332,
"alnum_prop": 0.7566765578635015,
"repo_name": "KaranToor/MA450",
"id": "eed7cc43924f7c62987c13955afd53bc7751c135",
"size": "932",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "google-cloud-sdk/.install/.backup/lib/surface/compute/users/__init__.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3162"
},
{
"name": "CSS",
"bytes": "1930"
},
{
"name": "HTML",
"bytes": "13381"
},
{
"name": "Java",
"bytes": "151442"
},
{
"name": "JavaScript",
"bytes": "4906"
},
{
"name": "Makefile",
"bytes": "1636"
},
{
"name": "Objective-C",
"bytes": "13335"
},
{
"name": "PHP",
"bytes": "9086"
},
{
"name": "Pascal",
"bytes": "62"
},
{
"name": "Python",
"bytes": "19710731"
},
{
"name": "Roff",
"bytes": "2069494"
},
{
"name": "Ruby",
"bytes": "690"
},
{
"name": "Shell",
"bytes": "32272"
},
{
"name": "Smarty",
"bytes": "4968"
},
{
"name": "SourcePawn",
"bytes": "616"
},
{
"name": "Swift",
"bytes": "14225"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import numpy as np
from progressivis.core.module import Module, ReturnRunStep
from progressivis.core.slot import SlotDescriptor
from progressivis.core.bitmap import bitmap
from progressivis.core.utils import indices_len
from ..io import Variable
from ..stats import Min, Max
from ..utils.psdict import PsDict
from . import BaseTable, Table, TableSelectedView
from .module import TableModule
from .hist_index import HistogramIndex
# from .mod_impl import ModuleImpl
from typing import Optional, Any, cast, Iterable
# def _get_physical_table(t):
# return t if t.base is None else _get_physical_table(t.base)
class _Selection:
def __init__(self, values: Optional[bitmap] = None):
self._values = bitmap([]) if values is None else values
def update(self, values: Iterable[int]) -> None:
self._values.update(values)
def remove(self, values: Iterable[int]) -> None:
self._values = self._values - bitmap(values)
def assign(self, values: Iterable[int]) -> None:
self._values = bitmap(values)
def add(self, values: Iterable[int]) -> None:
self._values |= bitmap(values)
class RangeQueryImpl: # (ModuleImpl):
def __init__(self, column: list[str], approximate: bool):
super(RangeQueryImpl, self).__init__()
self._table: Optional[BaseTable] = None
self._column = column
# self.bins = None
# self._hist_index = hist_index
self._approximate = approximate
self.result: Optional[_Selection] = None
self.is_started = False
def resume(
self,
hist_index: HistogramIndex,
lower: float,
upper: float,
limit_changed: bool,
created: Optional[bitmap] = None,
updated: Optional[bitmap] = None,
deleted: Optional[bitmap] = None,
) -> None:
assert self.result
if limit_changed:
new_sel = hist_index.range_query(
lower, upper, approximate=self._approximate
)
self.result.assign(new_sel)
return
if updated:
self.result.remove(updated)
# res = self._eval_to_ids(limit, updated)
res = hist_index.restricted_range_query(
lower, upper, only_locs=updated, approximate=self._approximate
)
self.result.add(res)
if created:
res = hist_index.restricted_range_query(
lower, upper, only_locs=created, approximate=self._approximate
)
self.result.update(res)
if deleted:
self.result.remove(deleted)
def start(
self,
table: BaseTable,
hist_index: HistogramIndex,
lower: float,
upper: float,
limit_changed: bool,
created: Optional[bitmap] = None,
updated: Optional[bitmap] = None,
deleted: Optional[bitmap] = None,
) -> None:
self._table = table
self.result = _Selection()
self.is_started = True
self.resume(hist_index, lower, upper, limit_changed, created, updated, deleted)
class RangeQuery(TableModule):
""" """
parameters = [
("column", np.dtype(object), "unknown"),
("watched_key_lower", np.dtype(object), ""),
("watched_key_upper", np.dtype(object), ""),
# ('hist_index', object, None) # to improve ...
]
inputs = [
SlotDescriptor("table", type=Table, required=True),
SlotDescriptor("lower", type=Table, required=False),
SlotDescriptor("upper", type=Table, required=False),
SlotDescriptor("min", type=PsDict, required=False),
SlotDescriptor("max", type=PsDict, required=False),
SlotDescriptor("hist", type=Table, required=True),
]
outputs = [
SlotDescriptor("min", type=Table, required=False),
SlotDescriptor("max", type=Table, required=False),
]
def __init__(
self,
# hist_index: Optional[HistogramIndex] = None,
approximate: bool = False,
**kwds: Any
) -> None:
super(RangeQuery, self).__init__(**kwds)
self._impl: RangeQueryImpl = RangeQueryImpl(self.params.column, approximate)
# self._hist_index: Optional[HistogramIndex] = hist_index
self._approximate = approximate
self.default_step_size = 1000
self.input_module: Optional[Module] = None
self._min_table: Optional[PsDict] = None
self._max_table: Optional[PsDict] = None
self.hist_index: Optional[HistogramIndex] = None
# @property
# def hist_index(self) -> Optional[HistogramIndex]:
# return self._hist_index
# @hist_index.setter
# def hist_index(self, hi: HistogramIndex) -> None:
# self._hist_index = hi
# self._impl = RangeQueryImpl(self._column, hi, approximate=self._approximate)
@property
def column(self) -> str:
return str(self.params.column)
@property
def watched_key_lower(self) -> str:
return self.params.watched_key_lower or self.column
@property
def watched_key_upper(self) -> str:
return self.params.watched_key_upper or self.column
def create_dependent_modules(
self,
input_module: Module,
input_slot: str,
min_: Optional[Module] = None,
max_: Optional[Module] = None,
min_value: Optional[Module] = None,
max_value: Optional[Module] = None,
hist_index: Optional[HistogramIndex] = None,
**kwds: Any
) -> RangeQuery:
if self.input_module is not None: # test if already called
return self
scheduler = self.scheduler()
params = self.params
self.input_module = input_module
self.input_slot = input_slot
with scheduler:
if hist_index is None:
hist_index = HistogramIndex(
column=params.column, group=self.name, scheduler=scheduler
)
hist_index.input.table = input_module.output[input_slot]
if min_ is None:
min_ = Min(group=self.name, columns=[self.column], scheduler=scheduler)
min_.input.table = hist_index.output.min_out
if max_ is None:
max_ = Max(group=self.name, columns=[self.column], scheduler=scheduler)
max_.input.table = hist_index.output.max_out
if min_value is None:
min_value = Variable(group=self.name, scheduler=scheduler)
min_value.input.like = min_.output.result
if max_value is None:
max_value = Variable(group=self.name, scheduler=scheduler)
max_value.input.like = max_.output.result
range_query = self
range_query.hist_index = hist_index
range_query.input.hist = hist_index.output.result
range_query.input.table = input_module.output[input_slot]
if min_value:
range_query.input.lower = min_value.output.result
if max_value:
range_query.input.upper = max_value.output.result
range_query.input.min = min_.output.result
range_query.input.max = max_.output.result
self.min = min_
self.max = max_
self.min_value = min_value
self.max_value = max_value
return range_query
def _create_min_max(self) -> None:
if self._min_table is None:
self._min_table = PsDict({self.column: np.inf})
if self._max_table is None:
self._max_table = PsDict({self.column: -np.inf})
def _set_minmax_out(self, attr_: str, val: float) -> None:
d = {self.column: val}
if getattr(self, attr_) is None:
setattr(self, attr_, PsDict(d))
else:
getattr(self, attr_).update(d)
def _set_min_out(self, val: float) -> None:
return self._set_minmax_out("_min_table", val)
def _set_max_out(self, val: float) -> None:
return self._set_minmax_out("_max_table", val)
def get_data(self, name: str) -> Any:
if name == "min":
return self._min_table
if name == "max":
return self._max_table
return super(RangeQuery, self).get_data(name)
def run_step(
self, run_number: int, step_size: int, howlong: float
) -> ReturnRunStep:
input_slot = self.get_input_slot("table")
self._create_min_max()
#
# lower/upper
#
lower_slot = self.get_input_slot("lower")
# lower_slot.update(run_number)
upper_slot = self.get_input_slot("upper")
limit_changed = False
if lower_slot.deleted.any():
lower_slot.deleted.next()
if lower_slot.updated.any():
lower_slot.updated.next()
limit_changed = True
if lower_slot.created.any():
lower_slot.created.next()
limit_changed = True
if not (lower_slot is upper_slot):
# upper_slot.update(run_number)
if upper_slot.deleted.any():
upper_slot.deleted.next()
if upper_slot.updated.any():
upper_slot.updated.next()
limit_changed = True
if upper_slot.created.any():
upper_slot.created.next()
limit_changed = True
#
# min/max
#
min_slot = self.get_input_slot("min")
min_slot.clear_buffers()
# min_slot.update(run_number)
# min_slot.created.next()
# min_slot.updated.next()
# min_slot.deleted.next()
max_slot = self.get_input_slot("max")
max_slot.clear_buffers()
# max_slot.update(run_number)
# max_slot.created.next()
# max_slot.updated.next()
# max_slot.deleted.next()
if (
lower_slot.data() is None
or upper_slot.data() is None
or len(lower_slot.data()) == 0
or len(upper_slot.data()) == 0
):
return self._return_run_step(self.state_blocked, steps_run=0)
lower_value = lower_slot.data().get(self.watched_key_lower)
upper_value = upper_slot.data().get(self.watched_key_upper)
if (
lower_slot.data() is None
or upper_slot.data() is None
or min_slot.data() is None
or max_slot.data() is None
or len(min_slot.data()) == 0
or len(max_slot.data()) == 0
):
return self._return_run_step(self.state_blocked, steps_run=0)
minv = min_slot.data().get(self.watched_key_lower)
maxv = max_slot.data().get(self.watched_key_upper)
if lower_value == "*":
lower_value = minv
elif (
lower_value is None
or np.isnan(lower_value)
or lower_value < minv
or lower_value >= maxv
):
lower_value = minv
limit_changed = True
if upper_value == "*":
upper_value = maxv
elif (
upper_value is None
or np.isnan(upper_value)
or upper_value > maxv
or upper_value <= minv
or upper_value <= lower_value
):
upper_value = maxv
limit_changed = True
self._set_min_out(lower_value)
self._set_max_out(upper_value)
# input_slot.update(run_number)
if not input_slot.has_buffered() and not limit_changed:
return self._return_run_step(self.state_blocked, steps_run=0)
# ...
steps = 0
deleted: Optional[bitmap] = None
if input_slot.deleted.any():
deleted = input_slot.deleted.next(length=step_size, as_slice=False)
steps += indices_len(deleted)
created: Optional[bitmap] = None
if input_slot.created.any():
created = input_slot.created.next(length=step_size, as_slice=False)
steps += indices_len(created)
updated: Optional[bitmap] = None
if input_slot.updated.any():
updated = input_slot.updated.next(length=step_size, as_slice=False)
steps += indices_len(updated)
input_table = input_slot.data()
if self.result is None:
self.result = TableSelectedView(input_table, bitmap([]))
assert self._impl
hist_slot = self.get_input_slot("hist")
hist_slot.clear_buffers()
if not self._impl.is_started:
self._impl.start(
input_table,
cast(HistogramIndex, hist_slot.output_module),
lower_value,
upper_value,
limit_changed,
created=created,
updated=updated,
deleted=deleted,
)
else:
self._impl.resume(
cast(HistogramIndex, hist_slot.output_module),
lower_value,
upper_value,
limit_changed,
created=created,
updated=updated,
deleted=deleted,
)
assert self._impl.result
self.selected.selection = self._impl.result._values
return self._return_run_step(self.next_state(input_slot), steps)
| {
"content_hash": "691b619c7ab35fb5aabdeefaa39a7760",
"timestamp": "",
"source": "github",
"line_count": 374,
"max_line_length": 87,
"avg_line_length": 35.68181818181818,
"alnum_prop": 0.5668040464593481,
"repo_name": "jdfekete/progressivis",
"id": "23550942289a2395eaaa486953a05c9838345539",
"size": "13345",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "progressivis/table/range_query.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "51"
},
{
"name": "C++",
"bytes": "13874"
},
{
"name": "CSS",
"bytes": "20470"
},
{
"name": "Cython",
"bytes": "1747"
},
{
"name": "HTML",
"bytes": "34701"
},
{
"name": "JavaScript",
"bytes": "305156"
},
{
"name": "Jupyter Notebook",
"bytes": "277333"
},
{
"name": "Python",
"bytes": "1812925"
},
{
"name": "Shell",
"bytes": "905"
}
],
"symlink_target": ""
} |
from ioant_mysqlhelper.db import db
from ioant import utils
import sys
import logging

logger = logging.getLogger(__name__)


def main(configFile, schemaFile):
    configuration = utils.fetch_json_file_as_dict(configFile)
    schema = utils.fetch_json_file_as_dict(schemaFile)

    db_host = configuration['mysqlDatabase']['host']
    db_user = configuration['mysqlDatabase']['user']
    db_password = configuration['mysqlDatabase']['password']
    db_name = configuration['mysqlDatabase']['name']

    global db_helper
    db_helper = db.DatabaseHelper(db_name,
                                  schema,
                                  db_host,
                                  db_user,
                                  db_password)
    if db_helper.connect_to_mysql_database():
        logger.info("Connected to database")
        db_helper.create_database_tables()


if __name__ == "__main__":
    args = sys.argv
    if len(args) != 3:
        print("Invalid arguments.")
        print("usage: createtables.py <configfile> <schemafile>")
        sys.exit(1)
    main(args[1], args[2])
| {
"content_hash": "ec7e52e1157dcd4ee2963a3133b8cbdd",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 66,
"avg_line_length": 31.6,
"alnum_prop": 0.581374321880651,
"repo_name": "ioants/ioant",
"id": "221818ee6fd694c324f1aab48e99b4af5e1c00d1",
"size": "1344",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "storage/collector-python/createtables.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "435"
},
{
"name": "C",
"bytes": "18467"
},
{
"name": "C++",
"bytes": "122584"
},
{
"name": "CSS",
"bytes": "20261"
},
{
"name": "HTML",
"bytes": "7881"
},
{
"name": "JavaScript",
"bytes": "426649"
},
{
"name": "Makefile",
"bytes": "78"
},
{
"name": "PHP",
"bytes": "8160"
},
{
"name": "Protocol Buffer",
"bytes": "131289"
},
{
"name": "Python",
"bytes": "99623"
},
{
"name": "Shell",
"bytes": "907"
}
],
"symlink_target": ""
} |
"""Tests for the RC4 decrypter object."""
import unittest
from dfvfs.encryption import rc4_decrypter
from tests.encryption import test_lib
class RC4DecrypterTestCase(test_lib.DecrypterTestCase):
"""Tests for the RC4 decrypter object."""
def testInitialize(self):
"""Tests the __init__ method."""
decrypter = rc4_decrypter.RC4Decrypter(key=b'test1')
self.assertIsNotNone(decrypter)
with self.assertRaises(ValueError):
rc4_decrypter.RC4Decrypter()
def testDecrypt(self):
"""Tests the Decrypt method."""
decrypter = rc4_decrypter.RC4Decrypter(key=b'test1')
decrypted_data, _ = decrypter.Decrypt(b'\xef6\xcd\x14\xfe\xf5+y')
expected_decrypted_data = b'\x01\x02\x03\x04\x05\x06\x07\x08'
self.assertEqual(decrypted_data, expected_decrypted_data)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "40bfd391bcf76221d23255f220f15a48",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 69,
"avg_line_length": 27.225806451612904,
"alnum_prop": 0.7085308056872038,
"repo_name": "joachimmetz/dfvfs",
"id": "e5f1f0c3cfb8bb7841a4a0f4b8efc665075bdd04",
"size": "890",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/encryption/rc4_decrypter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14212"
},
{
"name": "Makefile",
"bytes": "122"
},
{
"name": "PowerShell",
"bytes": "1021"
},
{
"name": "Python",
"bytes": "2176548"
},
{
"name": "Shell",
"bytes": "19355"
}
],
"symlink_target": ""
} |
import argparse
import calendar
import json
import os
import sys
import time
import uuid
gcp_utils_dir = os.path.abspath(os.path.join(
os.path.dirname(__file__), '../../gcp/utils'))
sys.path.append(gcp_utils_dir)
import big_query_utils
_PROJECT_ID='grpc-testing'
def _upload_netperf_latency_csv_to_bigquery(dataset_id, table_id, result_file):
with open(result_file, 'r') as f:
(col1, col2, col3) = f.read().split(',')
latency50 = float(col1.strip()) * 1000
latency90 = float(col2.strip()) * 1000
latency99 = float(col3.strip()) * 1000
scenario_result = {
'scenario': {
'name': 'netperf_tcp_rr'
},
'summary': {
'latency50': latency50,
'latency90': latency90,
'latency99': latency99
}
}
bq = big_query_utils.create_big_query()
_create_results_table(bq, dataset_id, table_id)
if not _insert_result(bq, dataset_id, table_id, scenario_result, flatten=False):
print 'Error uploading result to bigquery.'
sys.exit(1)
def _upload_scenario_result_to_bigquery(dataset_id, table_id, result_file):
with open(result_file, 'r') as f:
scenario_result = json.loads(f.read())
bq = big_query_utils.create_big_query()
_create_results_table(bq, dataset_id, table_id)
if not _insert_result(bq, dataset_id, table_id, scenario_result):
print 'Error uploading result to bigquery.'
sys.exit(1)
def _insert_result(bq, dataset_id, table_id, scenario_result, flatten=True):
if flatten:
_flatten_result_inplace(scenario_result)
_populate_metadata_inplace(scenario_result)
row = big_query_utils.make_row(str(uuid.uuid4()), scenario_result)
return big_query_utils.insert_rows(bq,
_PROJECT_ID,
dataset_id,
table_id,
[row])
def _create_results_table(bq, dataset_id, table_id):
with open(os.path.dirname(__file__) + '/scenario_result_schema.json', 'r') as f:
table_schema = json.loads(f.read())
desc = 'Results of performance benchmarks.'
return big_query_utils.create_table2(bq, _PROJECT_ID, dataset_id,
table_id, table_schema, desc)
def _flatten_result_inplace(scenario_result):
"""Bigquery is not really great for handling deeply nested data
and repeated fields. To maintain values of some fields while keeping
the schema relatively simple, we artificially leave some of the fields
as JSON strings.
"""
scenario_result['scenario']['clientConfig'] = json.dumps(scenario_result['scenario']['clientConfig'])
scenario_result['scenario']['serverConfig'] = json.dumps(scenario_result['scenario']['serverConfig'])
scenario_result['latencies'] = json.dumps(scenario_result['latencies'])
for stats in scenario_result['clientStats']:
stats['latencies'] = json.dumps(stats['latencies'])
scenario_result['serverCores'] = json.dumps(scenario_result['serverCores'])
scenario_result['clientSuccess'] = json.dumps(scenario_result['clientSuccess'])
scenario_result['serverSuccess'] = json.dumps(scenario_result['serverSuccess'])
def _populate_metadata_inplace(scenario_result):
"""Populates metadata based on environment variables set by Jenkins."""
# NOTE: Grabbing the Jenkins environment variables will only work if the
# driver is running locally on the same machine where Jenkins has started
# the job. For our setup, this is currently the case, so just assume that.
build_number = os.getenv('BUILD_NUMBER')
build_url = os.getenv('BUILD_URL')
job_name = os.getenv('JOB_NAME')
git_commit = os.getenv('GIT_COMMIT')
# actual commit is the actual head of PR that is getting tested
git_actual_commit = os.getenv('ghprbActualCommit')
utc_timestamp = str(calendar.timegm(time.gmtime()))
metadata = {'created': utc_timestamp}
if build_number:
metadata['buildNumber'] = build_number
if build_url:
metadata['buildUrl'] = build_url
if job_name:
metadata['jobName'] = job_name
if git_commit:
metadata['gitCommit'] = git_commit
if git_actual_commit:
metadata['gitActualCommit'] = git_actual_commit
scenario_result['metadata'] = metadata
argp = argparse.ArgumentParser(description='Upload result to big query.')
argp.add_argument('--bq_result_table', required=True, default=None, type=str,
help='Bigquery "dataset.table" to upload results to.')
argp.add_argument('--file_to_upload', default='scenario_result.json', type=str,
help='Report file to upload.')
argp.add_argument('--file_format',
choices=['scenario_result','netperf_latency_csv'],
default='scenario_result',
help='Format of the file to upload.')
args = argp.parse_args()
dataset_id, table_id = args.bq_result_table.split('.', 2)
if args.file_format == 'netperf_latency_csv':
_upload_netperf_latency_csv_to_bigquery(dataset_id, table_id, args.file_to_upload)
else:
_upload_scenario_result_to_bigquery(dataset_id, table_id, args.file_to_upload)
print 'Successfully uploaded %s to BigQuery.\n' % args.file_to_upload
| {
"content_hash": "2167eeaf428ffb65af7112c50563b2e3",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 103,
"avg_line_length": 36.864285714285714,
"alnum_prop": 0.6688626235225732,
"repo_name": "andrewpollock/grpc",
"id": "2a99499843adccc35bf50a33a018eb717fe76c12",
"size": "6773",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "tools/run_tests/performance/bq_upload_result.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "27784"
},
{
"name": "C",
"bytes": "5899875"
},
{
"name": "C#",
"bytes": "1323346"
},
{
"name": "C++",
"bytes": "1974130"
},
{
"name": "CMake",
"bytes": "65901"
},
{
"name": "DTrace",
"bytes": "147"
},
{
"name": "JavaScript",
"bytes": "338059"
},
{
"name": "M4",
"bytes": "37400"
},
{
"name": "Makefile",
"bytes": "683411"
},
{
"name": "Objective-C",
"bytes": "289415"
},
{
"name": "PHP",
"bytes": "149989"
},
{
"name": "Protocol Buffer",
"bytes": "104798"
},
{
"name": "PureBasic",
"bytes": "147"
},
{
"name": "Python",
"bytes": "1151370"
},
{
"name": "Ruby",
"bytes": "582230"
},
{
"name": "Shell",
"bytes": "55789"
},
{
"name": "Swift",
"bytes": "5418"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
                        unicode_literals, with_statement)

from pants.option.arg_splitter import GLOBAL_SCOPE


class OptionsError(Exception):
  """An options system-related error."""
  pass


class RegistrationError(OptionsError):
  """An error at option registration time."""

  def __init__(self, msg, scope, option):
    super(RegistrationError, self).__init__(
        '{} [option {} in {}].'.format(msg, option,
            'global scope' if scope == GLOBAL_SCOPE else 'scope {}'.format(scope)))


class ParseError(OptionsError):
  """An error at flag parsing time."""
  pass


# Subclasses of RegistrationError. The distinction between them is useful mainly for testing
# that the error we get is the one we expect.
# TODO: Similar thing for ParseError.

def mk_registration_error(msg):
  class Anon(RegistrationError):
    def __init__(self, scope, option, **msg_format_args):
      super(Anon, self).__init__(msg.format(**msg_format_args), scope, option)
  return Anon


BooleanOptionImplicitVal = mk_registration_error('Boolean option cannot specify an implicit value.')
BooleanOptionNameWithNo = mk_registration_error('Boolean option names cannot start with --no.')
BooleanOptionType = mk_registration_error('Boolean option cannot specify a type.')
FrozenRegistration = mk_registration_error('Cannot register an option on a scope after registering '
                                           'on any of its inner scopes.')
ImplicitValIsNone = mk_registration_error('Implicit value cannot be None.')
InvalidAction = mk_registration_error('Invalid action {action}.')
InvalidKwarg = mk_registration_error('Invalid registration kwarg {kwarg}.')
NoOptionNames = mk_registration_error('No option names provided.')
OptionNameDash = mk_registration_error('Option name must begin with a dash.')
OptionNameDoubleDash = mk_registration_error('Long option name must begin with a double-dash.')
RecursiveSubsystemOption = mk_registration_error("Subsystem option cannot specify 'recursive'. "
                                                 "Subsystem options are always recursive.")
Shadowing = mk_registration_error('Option shadows an option in scope {outer_scope}')
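# Illustrative sketch (not part of the original file): the generated subclasses
# behave like ordinary exceptions, which is what makes the per-error distinction
# above useful in tests. The scope, option, and kwarg values below are made up
# purely for demonstration.
if __name__ == '__main__':
  try:
    raise InvalidKwarg('compile.java', '--level', kwarg='bogus')
  except RegistrationError as exc:
    assert isinstance(exc, InvalidKwarg)
    print(exc)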
| {
"content_hash": "ed1b7787563fcdcf58f54c0ff31fa46b",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 100,
"avg_line_length": 45.97959183673469,
"alnum_prop": 0.7141588992454505,
"repo_name": "dturner-tw/pants",
"id": "59c703fc674aede0941b38ffcbe4abc2bdf296a2",
"size": "2400",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/python/pants/option/errors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "11538"
},
{
"name": "Cucumber",
"bytes": "919"
},
{
"name": "GAP",
"bytes": "2459"
},
{
"name": "Go",
"bytes": "1849"
},
{
"name": "HTML",
"bytes": "70358"
},
{
"name": "Java",
"bytes": "293253"
},
{
"name": "JavaScript",
"bytes": "31042"
},
{
"name": "Protocol Buffer",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "4404984"
},
{
"name": "Scala",
"bytes": "85217"
},
{
"name": "Shell",
"bytes": "50774"
},
{
"name": "Thrift",
"bytes": "2919"
}
],
"symlink_target": ""
} |
import pandas as pd
df = pd.read_csv("train_ridership_one_month.csv")
df.head()
# In[64]:
# Below code removes unwanted passenger activity codes for parking
valid_codes = [1, 9, 10, 11, 12]
df = df[df.use_type != 13 ]
df = df[df.use_type != 14 ]
clean_df = df[df.use_type != 15 ]
cols = ["transit_day", "transit_time", "station_id", "use_type", "serial_number"]
clean_df = clean_df[cols]
# In[13]:
# separates entries and exits into two tables
entry_values = [1, 9, 12]
exit_values = [10, 11]
entry_df = clean_df.loc[df['use_type'].isin(entry_values)]
entry_df.rename(columns = {'transit_time':'entry_time'}, inplace = True)
exit_df = clean_df.loc[df['use_type'].isin(exit_values)]
exit_df.rename(columns = {'transit_time':'exit_time'}, inplace = True)
entry_df.head()
# In[60]:
#creates table of entries by day and station and then exports to csv
entry_df = entry_df.drop(['entry_time','use_type'],axis=1)
station_entry = entry_df.groupby(['station_id', 'transit_day']).count()
station_entry.rename(columns = {'serial_number':'traveler_count'}, inplace = True)
station_entry.to_csv('station_entry.csv')
# In[61]:
#creates table of exits by day and station and then exports to csv
exit_df = exit_df.drop(['exit_time','use_type'],axis=1)
station_exit = exit_df.groupby(['station_id', 'transit_day']).count()
station_exit.rename(columns = {'serial_number':'traveler_count'}, inplace = True)
station_exit.to_csv('station_exit.csv')
# In[ ]:
| {
"content_hash": "fa59050353b12fb3ad0fb68c5e4695d9",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 82,
"avg_line_length": 27,
"alnum_prop": 0.6803840877914952,
"repo_name": "loren138/martaOptimize",
"id": "bf160a6c342fc58c7a9df775ccbe1e53a785c716",
"size": "1466",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "summary.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "553"
},
{
"name": "HTML",
"bytes": "15738"
},
{
"name": "JavaScript",
"bytes": "1132"
},
{
"name": "PHP",
"bytes": "93323"
},
{
"name": "Python",
"bytes": "4248"
},
{
"name": "Vue",
"bytes": "563"
}
],
"symlink_target": ""
} |
"""
__ParIndexed.py_____________________________________________________
Automatically generated AToM3 syntactic object (DO NOT MODIFY DIRECTLY)
Author: gehan
Modified: Sat Aug 30 18:23:40 2014
____________________________________________________________________
"""
from ASGNode import *
from ATOM3Type import *
from ATOM3String import *
from graph_ParIndexed import *
class ParIndexed(ASGNode, ATOM3Type):
def __init__(self, parent = None):
ASGNode.__init__(self)
ATOM3Type.__init__(self)
self.superTypes = ['Proc', 'MetaModelElement_T']
self.graphClass_ = graph_ParIndexed
self.isGraphObjectVisual = True
if(hasattr(self, '_setHierarchicalLink')):
self._setHierarchicalLink(False)
if(hasattr(self, '_setHierarchicalNode')):
self._setHierarchicalNode(False)
self.parent = parent
self.cardinality=ATOM3String('1', 20)
self.cardinality=ATOM3String('1', 20)
self.classtype=ATOM3String('t_', 20)
self.classtype=ATOM3String('t_', 20)
self.name=ATOM3String('s_', 20)
self.name=ATOM3String('s_', 20)
self.generatedAttributes = {'cardinality': ('ATOM3String', ),
'cardinality': ('ATOM3String', ),
'classtype': ('ATOM3String', ),
'classtype': ('ATOM3String', ),
'name': ('ATOM3String', ),
'name': ('ATOM3String', ) }
self.realOrder = ['cardinality','cardinality','classtype','classtype','name','name']
self.directEditing = [1,1,1,1,1,1]
def clone(self):
cloneObject = ParIndexed( self.parent )
for atr in self.realOrder:
cloneObject.setAttrValue(atr, self.getAttrValue(atr).clone() )
ASGNode.cloneActions(self, cloneObject)
return cloneObject
def copy(self, other):
ATOM3Type.copy(self, other)
for atr in self.realOrder:
self.setAttrValue(atr, other.getAttrValue(atr) )
ASGNode.copy(self, other)
def preCondition (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.preCondition(actionID, params)
else: return None
def postCondition (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.postCondition(actionID, params)
else: return None
def preAction (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.preAction(actionID, params)
else: return None
def postAction (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.postAction(actionID, params)
else: return None
def QOCA(self, params):
"""
QOCA Constraint Template
NOTE: DO NOT select a POST/PRE action trigger
Constraints will be added/removed in a logical manner by other mechanisms.
"""
return # <---- Remove this to use QOCA
""" Get the high level constraint helper and solver """
from Qoca.atom3constraints.OffsetConstraints import OffsetConstraints
oc = OffsetConstraints(self.parent.qocaSolver)
"""
Example constraint, see Kernel/QOCA/atom3constraints/OffsetConstraints.py
For more types of constraints
"""
oc.fixedWidth(self.graphObject_, self.graphObject_.sizeX)
oc.fixedHeight(self.graphObject_, self.graphObject_.sizeY)
| {
"content_hash": "a5d3b9dc611b5fa35e71671bd0c6d381",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 90,
"avg_line_length": 37.13978494623656,
"alnum_prop": 0.6048060220034742,
"repo_name": "levilucio/SyVOLT",
"id": "3737e03509d8d6775452f2c8c70c39a0ac181660",
"size": "3454",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "UMLRT2Kiltera_MM/ParIndexed.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
} |
import sys
import pymongo


def connect(uri):
    client = pymongo.MongoClient(uri)
    db = client.get_default_database()
    return db


def insert(db, documentName, dataArray):
    document = db[documentName]
    val = document.insert(dataArray)
    if val:
        print("Succeeded!")
    else:
        print("Something went wrong.")


def read(db):
    try:
        data_column = db.data1.find()
        for d in data_column:
            print d
    except Exception, e:
        print str(e)
| {
"content_hash": "1a8b9dbe589e9fd39800d8424dc07548",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 40,
"avg_line_length": 19.153846153846153,
"alnum_prop": 0.606425702811245,
"repo_name": "hasa93/DroneMap",
"id": "92503d8824548e2a3fc1208cc6828bf10afc73ba",
"size": "498",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "db_connection/db_connection.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "952738"
},
{
"name": "HTML",
"bytes": "3151277"
},
{
"name": "JavaScript",
"bytes": "3996758"
},
{
"name": "PHP",
"bytes": "4192"
},
{
"name": "Python",
"bytes": "16215"
}
],
"symlink_target": ""
} |
"""Accuracy tests for ICRS transformations, primarily to/from AltAz.
"""
import numpy as np
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy.time import Time
from astropy.coordinates import (
EarthLocation, ICRS, CIRS, AltAz, HADec, SkyCoord)
from astropy.coordinates.angle_utilities import golden_spiral_grid
from astropy.coordinates import frame_transform_graph
def test_icrs_altaz_consistency():
"""
Check ICRS<->AltAz for consistency with ICRS<->CIRS<->AltAz
The latter is extensively tested in test_intermediate_transformations.py
"""
usph = golden_spiral_grid(200)
dist = np.linspace(0.5, 1, len(usph)) * u.km * 1e5
icoo = SkyCoord(ra=usph.lon, dec=usph.lat, distance=dist)
observer = EarthLocation(28*u.deg, 23*u.deg, height=2000.*u.km)
obstime = Time('J2010')
aa_frame = AltAz(obstime=obstime, location=observer)
# check we are going direct!
trans = frame_transform_graph.get_transform(ICRS, AltAz).transforms
assert(len(trans) == 1)
# check that ICRS-AltAz and ICRS->CIRS->AltAz are consistent
aa1 = icoo.transform_to(aa_frame)
aa2 = icoo.transform_to(CIRS()).transform_to(aa_frame)
assert_allclose(aa1.separation_3d(aa2), 0*u.mm, atol=1*u.mm)
# check roundtrip
roundtrip = icoo.transform_to(aa_frame).transform_to(icoo)
assert_allclose(roundtrip.separation_3d(icoo), 0*u.mm, atol=1*u.mm)
# check there and back via CIRS mish-mash
roundtrip = icoo.transform_to(aa_frame).transform_to(
CIRS()).transform_to(icoo)
assert_allclose(roundtrip.separation_3d(icoo), 0*u.mm, atol=1*u.mm)
def test_icrs_hadec_consistency():
"""
Check ICRS<->HADec for consistency with ICRS<->CIRS<->HADec
"""
usph = golden_spiral_grid(200)
dist = np.linspace(0.5, 1, len(usph)) * u.km * 1e5
icoo = SkyCoord(ra=usph.lon, dec=usph.lat, distance=dist)
observer = EarthLocation(28*u.deg, 23*u.deg, height=2000.*u.km)
obstime = Time('J2010')
hd_frame = HADec(obstime=obstime, location=observer)
# check we are going direct!
trans = frame_transform_graph.get_transform(ICRS, HADec).transforms
assert(len(trans) == 1)
# check that ICRS-HADec and ICRS->CIRS->HADec are consistent
aa1 = icoo.transform_to(hd_frame)
aa2 = icoo.transform_to(CIRS()).transform_to(hd_frame)
assert_allclose(aa1.separation_3d(aa2), 0*u.mm, atol=1*u.mm)
# check roundtrip
roundtrip = icoo.transform_to(hd_frame).transform_to(icoo)
assert_allclose(roundtrip.separation_3d(icoo), 0*u.mm, atol=1*u.mm)
# check there and back via CIRS mish-mash
roundtrip = icoo.transform_to(hd_frame).transform_to(
CIRS()).transform_to(icoo)
assert_allclose(roundtrip.separation_3d(icoo), 0*u.mm, atol=1*u.mm)
| {
"content_hash": "d33ba57a01c47f403b38be70c366211f",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 76,
"avg_line_length": 36.77922077922078,
"alnum_prop": 0.695268361581921,
"repo_name": "aleksandr-bakanov/astropy",
"id": "23150b7ca6fba39a983666ff8f89113a6eba4fa4",
"size": "2896",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "astropy/coordinates/tests/test_icrs_observed_transformations.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "444651"
},
{
"name": "C++",
"bytes": "1057"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Objective-C",
"bytes": "615"
},
{
"name": "Python",
"bytes": "9898093"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
} |
"""autogenerated by genpy from aruco/marker_info.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class marker_info(genpy.Message):
_md5sum = "e5991c0caab369f6cfa37dfa3bc945ad"
_type = "aruco/marker_info"
_has_header = False #flag to mark the presence of a Header object
_full_text = """int8 numberOfMarkers
int8[] id
float32[] area
float32[] perimeter
float32[] centerX
float32[] centerY
float32[] rotX
float32[] rotY
float32[] rotZ
float32[] distance
float32[] trsX
float32[] trsY
"""
__slots__ = ['numberOfMarkers','id','area','perimeter','centerX','centerY','rotX','rotY','rotZ','distance','trsX','trsY']
_slot_types = ['int8','int8[]','float32[]','float32[]','float32[]','float32[]','float32[]','float32[]','float32[]','float32[]','float32[]','float32[]']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
numberOfMarkers,id,area,perimeter,centerX,centerY,rotX,rotY,rotZ,distance,trsX,trsY
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(marker_info, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.numberOfMarkers is None:
self.numberOfMarkers = 0
if self.id is None:
self.id = []
if self.area is None:
self.area = []
if self.perimeter is None:
self.perimeter = []
if self.centerX is None:
self.centerX = []
if self.centerY is None:
self.centerY = []
if self.rotX is None:
self.rotX = []
if self.rotY is None:
self.rotY = []
if self.rotZ is None:
self.rotZ = []
if self.distance is None:
self.distance = []
if self.trsX is None:
self.trsX = []
if self.trsY is None:
self.trsY = []
else:
self.numberOfMarkers = 0
self.id = []
self.area = []
self.perimeter = []
self.centerX = []
self.centerY = []
self.rotX = []
self.rotY = []
self.rotZ = []
self.distance = []
self.trsX = []
self.trsY = []
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
buff.write(_struct_b.pack(self.numberOfMarkers))
length = len(self.id)
buff.write(_struct_I.pack(length))
pattern = '<%sb'%length
buff.write(struct.pack(pattern, *self.id))
length = len(self.area)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(struct.pack(pattern, *self.area))
length = len(self.perimeter)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(struct.pack(pattern, *self.perimeter))
length = len(self.centerX)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(struct.pack(pattern, *self.centerX))
length = len(self.centerY)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(struct.pack(pattern, *self.centerY))
length = len(self.rotX)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(struct.pack(pattern, *self.rotX))
length = len(self.rotY)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(struct.pack(pattern, *self.rotY))
length = len(self.rotZ)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(struct.pack(pattern, *self.rotZ))
length = len(self.distance)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(struct.pack(pattern, *self.distance))
length = len(self.trsX)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(struct.pack(pattern, *self.trsX))
length = len(self.trsY)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(struct.pack(pattern, *self.trsY))
except struct.error as se: self._check_types(se)
except TypeError as te: self._check_types(te)
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 1
(self.numberOfMarkers,) = _struct_b.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sb'%length
start = end
end += struct.calcsize(pattern)
self.id = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
self.area = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
self.perimeter = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
self.centerX = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
self.centerY = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
self.rotX = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
self.rotY = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
self.rotZ = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
self.distance = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
self.trsX = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
self.trsY = struct.unpack(pattern, str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
buff.write(_struct_b.pack(self.numberOfMarkers))
length = len(self.id)
buff.write(_struct_I.pack(length))
pattern = '<%sb'%length
buff.write(self.id.tostring())
length = len(self.area)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(self.area.tostring())
length = len(self.perimeter)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(self.perimeter.tostring())
length = len(self.centerX)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(self.centerX.tostring())
length = len(self.centerY)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(self.centerY.tostring())
length = len(self.rotX)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(self.rotX.tostring())
length = len(self.rotY)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(self.rotY.tostring())
length = len(self.rotZ)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(self.rotZ.tostring())
length = len(self.distance)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(self.distance.tostring())
length = len(self.trsX)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(self.trsX.tostring())
length = len(self.trsY)
buff.write(_struct_I.pack(length))
pattern = '<%sf'%length
buff.write(self.trsY.tostring())
except struct.error as se: self._check_types(se)
except TypeError as te: self._check_types(te)
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 1
(self.numberOfMarkers,) = _struct_b.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sb'%length
start = end
end += struct.calcsize(pattern)
self.id = numpy.frombuffer(str[start:end], dtype=numpy.int8, count=length)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
self.area = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=length)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
self.perimeter = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=length)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
self.centerX = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=length)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
self.centerY = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=length)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
self.rotX = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=length)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
self.rotY = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=length)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
self.rotZ = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=length)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
self.distance = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=length)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
self.trsX = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=length)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sf'%length
start = end
end += struct.calcsize(pattern)
self.trsY = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=length)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_b = struct.Struct("<b")
| {
"content_hash": "c0e5e9658a5d89bcc412c7368a7f18bb",
"timestamp": "",
"source": "github",
"line_count": 383,
"max_line_length": 153,
"avg_line_length": 33.55613577023499,
"alnum_prop": 0.6022408963585434,
"repo_name": "miguelolivaresmendez/ArUco_ROS-QR-code-detection",
"id": "29d7020c85fab897bfdbac97af7ef8e645239d30",
"size": "12852",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/aruco/msg/_marker_info.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2013"
},
{
"name": "C++",
"bytes": "191833"
},
{
"name": "Common Lisp",
"bytes": "27107"
},
{
"name": "Python",
"bytes": "12927"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/mission/quest_item/shared_sindra_lintikoor_q2_needed.iff"
result.attribute_template_id = -1
result.stfName("loot_rori_n","sindra_lintikoor_q2_needed")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "5dcf12ff75824441279f5f057df63c54",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 93,
"avg_line_length": 26.307692307692307,
"alnum_prop": 0.7105263157894737,
"repo_name": "obi-two/Rebelion",
"id": "b3ddec384c790683510548cc55199e92bef5cc38",
"size": "487",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/mission/quest_item/shared_sindra_lintikoor_q2_needed.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
import logging
import queue
import time
logger = logging.getLogger()
def process_queue(q, pool, func):
""" Process a priority queue based on time
The priority within the queue is the time at which the item can execute.
Continually poll the queue, popping an item only when the current time
is greater than the time at which the item is permitted to execute. The
queue is designed in this manner in order to deal with API rate limiting.
Parameters
----------
q : an instance of a priority queue
pool : a thread pool
func : the function to be executed by a thread in the threadpool
Returns
-------
    None
"""
# import global
from fc import (QUEUE_TIMEOUT,
TEST_FLAG)
# get first item in the queue
priority, count, id_val, dt, email = q.get()
# loop through until the queue is empty for __ seconds
while True:
diff = priority - time.time()
# if we have crossed the timing threshold
if diff <= 0:
logger.info(('Submit | email: {_email} id: {_id}'
' | submit {_email} for execution')
.format(_email=email, _id=id_val))
if TEST_FLAG:
# submit to print_email - useful for testing
a = pool.submit(func, email)
else:
# submit to process_one_email
a = pool.submit(func, q, count, id_val, dt, email)
try:
priority, count, id_val, dt, \
email = q.get(timeout=QUEUE_TIMEOUT)
except queue.Empty:
break
# sliding scale for sleeping
# based on an idea from the pause package
# https://pypi.python.org/pypi/pause
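        # Sleep coarsely while the deadline is far away and poll more tightly
        # as it approaches, so items fire near their scheduled time without busy-waiting.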
if diff <= 0.1:
time.sleep(0.001)
elif diff <= 0.5:
time.sleep(0.01)
elif diff <= 1.5:
time.sleep(0.1)
else:
time.sleep(1)
| {
"content_hash": "2fea66f373d3bf387d70daaa8470b5be",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 77,
"avg_line_length": 29.70149253731343,
"alnum_prop": 0.550251256281407,
"repo_name": "curtisalexander/fc",
"id": "a4706b6c7dbd302890d095803d3e8e91de57dc64",
"size": "2009",
"binary": false,
"copies": "1",
"ref": "refs/heads/public",
"path": "fc/scheduler.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32372"
}
],
"symlink_target": ""
} |
import sys
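# Expects alternating letter/digit pairs (e.g. "a3b2") and writes each letter
# repeated by the digit that follows it.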
#variables
new_input = sys.argv[1]
counter = 0
#initialize for loop
for i in new_input:
if counter % 2:
fun = int(new_input[counter]) * new_input[counter - 1]
sys.stdout.write(fun)
counter += 1
print("")
| {
"content_hash": "2eeee609377b70030d3f2d4d000a7893",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 56,
"avg_line_length": 17.153846153846153,
"alnum_prop": 0.672645739910314,
"repo_name": "Ca2Patton/PythonStuff",
"id": "f9243ae637c914b0933c220a5173d9e3a7afb4c8",
"size": "241",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "letters2.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "30997"
}
],
"symlink_target": ""
} |
import os
import dj_database_url
here = lambda * x: os.path.join(os.path.abspath(os.path.dirname(__file__)), *x)
PROJECT_ROOT = here("..")
root = lambda * x: os.path.join(os.path.abspath(PROJECT_ROOT), *x)
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Kim Desrosiers', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': dj_database_url.config()
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Montreal'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = root("..", "static")
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'ha^htba4__ebr7+^^jg%=+b8--i%_mdj9y*8hia*5x21is9*1$'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'wsgi.application'
TEMPLATE_DIRS = (
root("templates"),
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
DJANGO_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.admindocs',
)
THIRD_PARTY_APPS = (
'allauth',
'allauth.account',
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'rest_framework_swagger',
'corsheaders',
)
LOCAL_APPS = (
'recipes',
'menus',
'users',
)
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticated',
],
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
)
}
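# Accept cross-origin requests from any origin; swap for a CORS_ORIGIN_WHITELIST
# once the front-end hosts are fixed.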
CORS_ORIGIN_ALLOW_ALL = True
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| {
"content_hash": "00c43c6d44d4bf71b84fb2b77b6d5e7b",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 88,
"avg_line_length": 30.69148936170213,
"alnum_prop": 0.6960138648180243,
"repo_name": "kimond/miamm",
"id": "0fc4ecf8654f437f197b0b944ac7739541447d57",
"size": "5813",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "miamm/settings/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2712"
},
{
"name": "Python",
"bytes": "44200"
}
],
"symlink_target": ""
} |
import base64
import hashlib
import re
import urllib2
from PyQt4 import QtCore
from urlparse import urlparse
class MessageRenderer(QtCore.QThread):
MESSAGES = {
"alert": u'<div class="alert"><span class="time">[{time}]</span> <span class="author">{user}</span>: {message}</div>',
"image": u'<span id="{url_md5}" class="upload image"><a href="{url}"><img src="data:image/{type};base64,{data}" title="{name}" {attribs} /></a></span><input type="button" onClick=\'{js}\' value="Toggle Image" />',
"image_url": u'<span id="{url_md5}" class="upload image"><a href="{url}"><img src="{url}" title="{name}" {attribs} /></a></span><input id="hide" type=button onClick=\'{js}\' value="Toggle Image" />',
"join": u'<div class="joined">--> {user} joined {room}</div>',
"leave": u'<div class="left"><-- {user} has left {room}</div>',
"message_self": u'<div class="message"><span class="time">[{time}]</span> <span class="author self">{user}</span>: {message}</div>',
"no_time_message_self": u'<div class="message"><span class="author self">{user}</span>: {message}</div>',
"message": u'<div class="message"><span class="time">[{time}]</span> <span class="author">{user}</span>: {message}</div>',
"no_time_message": u'<div class="message"><span class="author">{user}</span>: {message}</div>',
"paste": u'<div class="paste"><pre>{message}</pre></div>',
"upload": u'<span class="upload"><a href="{url}">{name}</a></span>',
"link": u'<a href="{url}">{name}</a>',
"topic": u'<div class="topic">{user} changed topic to <span class="new_topic">{topic}</span></div>',
"tweet": u'<div class="tweet"><a href="{url_user}">{user}</a> <a href="{url}">tweeted</a>: {message}</div>'
}
def __init__(self, apiToken, maximumImageWidth, room, message, live=True, updateRoom=True, showTimestamps=True, alert=False, alertIsDirectPing=False, parent=None):
super(MessageRenderer, self).__init__(parent)
self._apiToken = apiToken
self._maximumImageWidth = maximumImageWidth
self._room = room
self._message = message
self._live = live
self._updateRoom = updateRoom
self._showTimestamps = showTimestamps
self._alert = alert
self._alertIsDirectPing = alertIsDirectPing
def run(self):
html = self.render()
self.emit(QtCore.SIGNAL("render(PyQt_PyObject, PyQt_PyObject, PyQt_PyObject, PyQt_PyObject, PyQt_PyObject, PyQt_PyObject, PyQt_PyObject)"), html, self._room, self._message, self._live, self._updateRoom, self._alert, self._alertIsDirectPing)
def needsThread(self):
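        # Uploads and inline links need a network fetch, so their rendering is
        # deferred to this worker thread instead of blocking the UI.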
return self._message.is_upload() or (self._message.body and self._isInlineLink(self._message.body))
def render(self):
html = None
if self._message.is_joining():
html = self.MESSAGES["join"].format(user=self._message.user.name, room=self._room.name)
elif (self._message.is_leaving() or self._message.is_kick()):
html = self.MESSAGES["leave"].format(user=self._message.user.name, room=self._room.name)
elif self._message.is_text() or self._message.is_upload():
if self._message.body:
body = self._plainTextToHTML(self._message.tweet["tweet"] if self._message.is_tweet() else self._message.body)
if self._message.is_tweet():
body = self.MESSAGES["tweet"].format(
url_user = "http://twitter.com/{user}".format(user=self._message.tweet["user"]),
user = self._message.tweet["user"],
url = self._message.tweet["url"],
message = body
)
elif self._message.is_paste():
body = self.MESSAGES["paste"].format(message=body)
elif self._message.is_upload():
body = self._displayUpload()
elif self._isInlineLink(body):
body = self._displayInline(body)
else:
body = self._autoLink(body)
created = QtCore.QDateTime(
self._message.created_at.year,
self._message.created_at.month,
self._message.created_at.day,
self._message.created_at.hour,
self._message.created_at.minute,
self._message.created_at.second
)
created.setTimeSpec(QtCore.Qt.UTC)
createdFormat = "h:mm ap"
if created.daysTo(QtCore.QDateTime.currentDateTime()):
createdFormat = "MMM d, {createdFormat}".format(createdFormat=createdFormat)
key = "message"
if self._message.is_by_current_user():
if self._showTimestamps:
key = "message_self"
else:
key = "no_time_message_self"
elif self._alert:
key = "alert"
elif not self._showTimestamps:
key = "no_time_message"
html = self.MESSAGES[key].format(
time = created.toLocalTime().toString(createdFormat),
user = self._message.user.name,
message = body
)
elif self._message.is_topic_change():
html = self.MESSAGES["topic"].format(user=self._message.user.name, topic=self._message.body)
return unicode(html)
def _displayInline(self, message_url):
request = urllib2.Request(message_url)
try:
response = urllib2.urlopen(request)
except:
return self._renderInlineLink(message_url, message_url)
headers = response.info()
url = message_url
        if response.getcode() == 200:
url = response.geturl()
meta = {
'name': url,
'type': headers["Content-Type"]
}
return self._renderInline(url=url, meta=meta)
def _displayUpload(self):
request = urllib2.Request(self._message.upload['url'])
auth_header = base64.encodestring('{}:{}'.format(self._apiToken, 'X')).replace('\n', '')
request.add_header("Authorization", "Basic {}".format(auth_header))
try:
response = urllib2.urlopen(request)
except:
return self._renderInlineLink(self._message.upload['url'], self._message.upload['name'])
data = response.read()
meta = {
'name': self._message.upload['name'],
'type': self._message.upload['content_type'],
}
return self._renderInline(url=self._message.upload['url'], data=data, meta=meta)
def _renderInline(self, url=None, data=None, meta=None):
if not url and not data:
raise Exception("Missing image data")
if self._isImage(meta["type"], meta["name"]):
attribs = "style=\"max-width: {maxWidth}px;\" ".format(maxWidth=self._maximumImageWidth)
if data:
url_md5 = hashlib.md5(url).hexdigest()
return self.MESSAGES["image"].format(
type = meta["type"],
data = base64.encodestring(data),
url = url,
url_md5 = url_md5,
name = meta["name"],
js = 'if (document.getElementById("'+url_md5+'").style.visibility == "hidden") { document.getElementById("'+url_md5+'").style.visibility="visible"} else {document.getElementById("'+url_md5+'").style.visibility="hidden"}',
attribs = attribs
)
else:
url_md5 = hashlib.md5(url).hexdigest()
return self.MESSAGES["image_url"].format(
url = url,
name = meta["name"],
url_md5 = url_md5,
js = 'if (document.getElementById("'+url_md5+'").style.visibility == "hidden") { document.getElementById("'+url_md5+'").style.visibility="visible"} else {document.getElementById("'+url_md5+'").style.visibility="hidden"}',
attribs = attribs
)
return self._renderInlineLink(url, meta["name"])
def _renderInlineLink(self, url, name):
return self.MESSAGES["link"].format(url = url, name = name)
def _isImage(self, content_type, name):
if content_type.startswith("image/"):
return True
elif content_type == "application/octet-stream" and re.search(".(gif|jpg|jpeg|png)$", name, re.IGNORECASE):
return True
return False
def _plainTextToHTML(self, string):
        return string.replace("<", "&lt;").replace(">", "&gt;").replace("\n", "<br />")
def _autoLink(self, string):
urlre = re.compile("(\(?https?://[-A-Za-z0-9+&@#/%?=~_()|!:,.;]*[-A-Za-z0-9+&@#/%=~_()|])(\">|</a>)?")
urls = urlre.findall(string)
cleanUrls = []
for url in urls:
if url[1]:
continue
currentUrl = url[0]
if currentUrl[0] == '(' and currentUrl[-1] == ')':
currentUrl = currentUrl[1:-1]
if currentUrl in cleanUrls:
continue
cleanUrls.append(currentUrl)
string = re.sub("(?<!(=\"|\">))" + re.escape(currentUrl),
"<a href=\"" + currentUrl + "\">" + currentUrl + "</a>",
string)
return string
def _isInlineLink(self, string):
try:
url = urlparse(string)
            if url.scheme != '' and url.netloc != '':
return True
except:
pass
return False
| {
"content_hash": "d596abb51f9689c08669c69ab5ec5959",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 248,
"avg_line_length": 44.38990825688074,
"alnum_prop": 0.5489304536529916,
"repo_name": "mariano/snakefire",
"id": "6f0918d095cdb7def616ba2fd20d0b14cae4d327",
"size": "9677",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "snakefire/renderers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4430"
},
{
"name": "Python",
"bytes": "339434"
},
{
"name": "Shell",
"bytes": "871"
}
],
"symlink_target": ""
} |
"""Base16, Base32, Base64 (RFC 3548), Base85 and Ascii85 data encodings"""
# Modified 04-Oct-1995 by Jack Jansen to use binascii module
# Modified 30-Dec-2003 by Barry Warsaw to add full RFC 3548 support
# Modified 22-May-2007 by Guido van Rossum to use bytes everywhere
import re
import struct
import binascii
__all__ = [
# Legacy interface exports traditional RFC 1521 Base64 encodings
'encode', 'decode', 'encodebytes', 'decodebytes',
# Generalized interface for other encodings
'b64encode', 'b64decode', 'b32encode', 'b32decode',
'b16encode', 'b16decode',
# Base85 and Ascii85 encodings
'b85encode', 'b85decode', 'a85encode', 'a85decode',
# Standard Base64 encoding
'standard_b64encode', 'standard_b64decode',
# Some common Base64 alternatives. As referenced by RFC 3458, see thread
# starting at:
#
# http://zgp.org/pipermail/p2p-hackers/2001-September/000316.html
'urlsafe_b64encode', 'urlsafe_b64decode',
]
bytes_types = (bytes, bytearray) # Types acceptable as binary data
def _bytes_from_decode_data(s):
if isinstance(s, str):
try:
return s.encode('ascii')
except UnicodeEncodeError:
raise ValueError('string argument should contain only ASCII characters')
if isinstance(s, bytes_types):
return s
try:
return memoryview(s).tobytes()
except TypeError:
raise TypeError("argument should be a bytes-like object or ASCII "
"string, not %r" % s.__class__.__name__) from None
# Base64 encoding/decoding uses binascii
def b64encode(s, altchars=None):
"""Encode a byte string using Base64.
s is the byte string to encode. Optional altchars must be a byte
string of length 2 which specifies an alternative alphabet for the
'+' and '/' characters. This allows an application to
e.g. generate url or filesystem safe Base64 strings.
The encoded byte string is returned.
"""
# Strip off the trailing newline
encoded = binascii.b2a_base64(s)[:-1]
if altchars is not None:
assert len(altchars) == 2, repr(altchars)
return encoded.translate(bytes.maketrans(b'+/', altchars))
return encoded
def b64decode(s, altchars=None, validate=False):
"""Decode a Base64 encoded byte string.
s is the byte string to decode. Optional altchars must be a
string of length 2 which specifies the alternative alphabet used
instead of the '+' and '/' characters.
The decoded string is returned. A binascii.Error is raised if s is
incorrectly padded.
If validate is False (the default), non-base64-alphabet characters are
discarded prior to the padding check. If validate is True,
non-base64-alphabet characters in the input result in a binascii.Error.
"""
s = _bytes_from_decode_data(s)
if altchars is not None:
altchars = _bytes_from_decode_data(altchars)
assert len(altchars) == 2, repr(altchars)
s = s.translate(bytes.maketrans(altchars, b'+/'))
if validate and not re.match(b'^[A-Za-z0-9+/]*={0,2}$', s):
raise binascii.Error('Non-base64 digit found')
return binascii.a2b_base64(s)
def standard_b64encode(s):
"""Encode a byte string using the standard Base64 alphabet.
s is the byte string to encode. The encoded byte string is returned.
"""
return b64encode(s)
def standard_b64decode(s):
"""Decode a byte string encoded with the standard Base64 alphabet.
s is the byte string to decode. The decoded byte string is
returned. binascii.Error is raised if the input is incorrectly
padded or if there are non-alphabet characters present in the
input.
"""
return b64decode(s)
_urlsafe_encode_translation = bytes.maketrans(b'+/', b'-_')
_urlsafe_decode_translation = bytes.maketrans(b'-_', b'+/')
def urlsafe_b64encode(s):
"""Encode a byte string using a url-safe Base64 alphabet.
s is the byte string to encode. The encoded byte string is
returned. The alphabet uses '-' instead of '+' and '_' instead of
'/'.
"""
return b64encode(s).translate(_urlsafe_encode_translation)
def urlsafe_b64decode(s):
"""Decode a byte string encoded with the standard Base64 alphabet.
s is the byte string to decode. The decoded byte string is
returned. binascii.Error is raised if the input is incorrectly
padded or if there are non-alphabet characters present in the
input.
The alphabet uses '-' instead of '+' and '_' instead of '/'.
"""
s = _bytes_from_decode_data(s)
s = s.translate(_urlsafe_decode_translation)
return b64decode(s)
# Base32 encoding/decoding must be done in Python
_b32alphabet = b'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567'
_b32tab2 = None
_b32rev = None
def b32encode(s):
"""Encode a byte string using Base32.
s is the byte string to encode. The encoded byte string is returned.
"""
global _b32tab2
# Delay the initialization of the table to not waste memory
# if the function is never called
if _b32tab2 is None:
b32tab = [bytes((i,)) for i in _b32alphabet]
_b32tab2 = [a + b for a in b32tab for b in b32tab]
b32tab = None
if not isinstance(s, bytes_types):
s = memoryview(s).tobytes()
leftover = len(s) % 5
# Pad the last quantum with zero bits if necessary
if leftover:
s = s + bytes(5 - leftover) # Don't use += !
encoded = bytearray()
from_bytes = int.from_bytes
b32tab2 = _b32tab2
for i in range(0, len(s), 5):
c = from_bytes(s[i: i + 5], 'big')
encoded += (b32tab2[c >> 30] + # bits 1 - 10
b32tab2[(c >> 20) & 0x3ff] + # bits 11 - 20
b32tab2[(c >> 10) & 0x3ff] + # bits 21 - 30
b32tab2[c & 0x3ff] # bits 31 - 40
)
# Adjust for any leftover partial quanta
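    # 1, 2, 3 or 4 leftover input bytes produce 2, 4, 5 or 7 significant output
    # characters, hence 6, 4, 3 or 1 '=' pad characters respectively.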
if leftover == 1:
encoded[-6:] = b'======'
elif leftover == 2:
encoded[-4:] = b'===='
elif leftover == 3:
encoded[-3:] = b'==='
elif leftover == 4:
encoded[-1:] = b'='
return bytes(encoded)
def b32decode(s, casefold=False, map01=None):
"""Decode a Base32 encoded byte string.
s is the byte string to decode. Optional casefold is a flag
specifying whether a lowercase alphabet is acceptable as input.
For security purposes, the default is False.
RFC 3548 allows for optional mapping of the digit 0 (zero) to the
letter O (oh), and for optional mapping of the digit 1 (one) to
either the letter I (eye) or letter L (el). The optional argument
map01 when not None, specifies which letter the digit 1 should be
mapped to (when map01 is not None, the digit 0 is always mapped to
the letter O). For security purposes the default is None, so that
0 and 1 are not allowed in the input.
The decoded byte string is returned. binascii.Error is raised if
the input is incorrectly padded or if there are non-alphabet
characters present in the input.
"""
global _b32rev
# Delay the initialization of the table to not waste memory
# if the function is never called
if _b32rev is None:
_b32rev = {v: k for k, v in enumerate(_b32alphabet)}
s = _bytes_from_decode_data(s)
if len(s) % 8:
raise binascii.Error('Incorrect padding')
# Handle section 2.4 zero and one mapping. The flag map01 will be either
# False, or the character to map the digit 1 (one) to. It should be
# either L (el) or I (eye).
if map01 is not None:
map01 = _bytes_from_decode_data(map01)
assert len(map01) == 1, repr(map01)
s = s.translate(bytes.maketrans(b'01', b'O' + map01))
if casefold:
s = s.upper()
# Strip off pad characters from the right. We need to count the pad
# characters because this will tell us how many null bytes to remove from
# the end of the decoded string.
l = len(s)
s = s.rstrip(b'=')
padchars = l - len(s)
# Now decode the full quanta
decoded = bytearray()
b32rev = _b32rev
for i in range(0, len(s), 8):
quanta = s[i: i + 8]
acc = 0
try:
for c in quanta:
acc = (acc << 5) + b32rev[c]
except KeyError:
raise binascii.Error('Non-base32 digit found') from None
decoded += acc.to_bytes(5, 'big')
# Process the last, partial quanta
if padchars:
acc <<= 5 * padchars
last = acc.to_bytes(5, 'big')
if padchars == 1:
decoded[-5:] = last[:-1]
elif padchars == 3:
decoded[-5:] = last[:-2]
elif padchars == 4:
decoded[-5:] = last[:-3]
elif padchars == 6:
decoded[-5:] = last[:-4]
else:
raise binascii.Error('Incorrect padding')
return bytes(decoded)
# RFC 3548, Base 16 Alphabet specifies uppercase, but hexlify() returns
# lowercase. The RFC also recommends against accepting input case
# insensitively.
def b16encode(s):
"""Encode a byte string using Base16.
s is the byte string to encode. The encoded byte string is returned.
"""
return binascii.hexlify(s).upper()
def b16decode(s, casefold=False):
"""Decode a Base16 encoded byte string.
s is the byte string to decode. Optional casefold is a flag
specifying whether a lowercase alphabet is acceptable as input.
For security purposes, the default is False.
The decoded byte string is returned. binascii.Error is raised if
s were incorrectly padded or if there are non-alphabet characters
present in the string.
"""
s = _bytes_from_decode_data(s)
if casefold:
s = s.upper()
if re.search(b'[^0-9A-F]', s):
raise binascii.Error('Non-base16 digit found')
return binascii.unhexlify(s)
#
# Ascii85 encoding/decoding
#
_a85chars = None
_a85chars2 = None
_A85START = b"<~"
_A85END = b"~>"
def _85encode(b, chars, chars2, pad=False, foldnuls=False, foldspaces=False):
# Helper function for a85encode and b85encode
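    # Each 4-byte group is read as a big-endian 32-bit word and emitted as five
    # base-85 digits; chars2 holds every two-digit pair (85**2 == 7225,
    # 85**3 == 614125), so a word needs only three table lookups.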
if not isinstance(b, bytes_types):
b = memoryview(b).tobytes()
padding = (-len(b)) % 4
if padding:
b = b + b'\0' * padding
words = struct.Struct('!%dI' % (len(b) // 4)).unpack(b)
chunks = [b'z' if foldnuls and not word else
b'y' if foldspaces and word == 0x20202020 else
(chars2[word // 614125] +
chars2[word // 85 % 7225] +
chars[word % 85])
for word in words]
if padding and not pad:
if chunks[-1] == b'z':
chunks[-1] = chars[0] * 5
chunks[-1] = chunks[-1][:-padding]
return b''.join(chunks)
def a85encode(b, *, foldspaces=False, wrapcol=0, pad=False, adobe=False):
"""Encode a byte string using Ascii85.
b is the byte string to encode. The encoded byte string is returned.
foldspaces is an optional flag that uses the special short sequence 'y'
instead of 4 consecutive spaces (ASCII 0x20) as supported by 'btoa'. This
feature is not supported by the "standard" Adobe encoding.
wrapcol controls whether the output should have newline ('\\n') characters
added to it. If this is non-zero, each output line will be at most this
many characters long.
pad controls whether the input string is padded to a multiple of 4 before
encoding. Note that the btoa implementation always pads.
adobe controls whether the encoded byte sequence is framed with <~ and ~>,
which is used by the Adobe implementation.
"""
global _a85chars, _a85chars2
# Delay the initialization of tables to not waste memory
# if the function is never called
if _a85chars is None:
_a85chars = [bytes((i,)) for i in range(33, 118)]
_a85chars2 = [(a + b) for a in _a85chars for b in _a85chars]
result = _85encode(b, _a85chars, _a85chars2, pad, True, foldspaces)
if adobe:
result = _A85START + result
if wrapcol:
wrapcol = max(2 if adobe else 1, wrapcol)
chunks = [result[i: i + wrapcol]
for i in range(0, len(result), wrapcol)]
if adobe:
if len(chunks[-1]) + 2 > wrapcol:
chunks.append(b'')
result = b'\n'.join(chunks)
if adobe:
result += _A85END
return result
def a85decode(b, *, foldspaces=False, adobe=False, ignorechars=b' \t\n\r\v'):
"""Decode an Ascii85 encoded byte string.
s is the byte string to decode.
foldspaces is a flag that specifies whether the 'y' short sequence should be
accepted as shorthand for 4 consecutive spaces (ASCII 0x20). This feature is
not supported by the "standard" Adobe encoding.
adobe controls whether the input sequence is in Adobe Ascii85 format (i.e.
is framed with <~ and ~>).
ignorechars should be a byte string containing characters to ignore from the
input. This should only contain whitespace characters, and by default
contains all whitespace characters in ASCII.
"""
b = _bytes_from_decode_data(b)
if adobe:
if not (b.startswith(_A85START) and b.endswith(_A85END)):
raise ValueError("Ascii85 encoded byte sequences must be bracketed "
"by {!r} and {!r}".format(_A85START, _A85END))
b = b[2:-2] # Strip off start/end markers
#
# We have to go through this stepwise, so as to ignore spaces and handle
# special short sequences
#
packI = struct.Struct('!I').pack
decoded = []
decoded_append = decoded.append
curr = []
curr_append = curr.append
curr_clear = curr.clear
for x in b + b'u' * 4:
if b'!'[0] <= x <= b'u'[0]:
curr_append(x)
if len(curr) == 5:
acc = 0
for x in curr:
acc = 85 * acc + (x - 33)
try:
decoded_append(packI(acc))
except struct.error:
raise ValueError('Ascii85 overflow') from None
curr_clear()
elif x == b'z'[0]:
if curr:
raise ValueError('z inside Ascii85 5-tuple')
decoded_append(b'\0\0\0\0')
elif foldspaces and x == b'y'[0]:
if curr:
raise ValueError('y inside Ascii85 5-tuple')
decoded_append(b'\x20\x20\x20\x20')
elif x in ignorechars:
# Skip whitespace
continue
else:
raise ValueError('Non-Ascii85 digit found: %c' % x)
result = b''.join(decoded)
padding = 4 - len(curr)
if padding:
# Throw away the extra padding
result = result[:-padding]
return result
# The following code is originally taken (with permission) from Mercurial
_b85alphabet = (b"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
b"abcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~")
_b85chars = None
_b85chars2 = None
_b85dec = None
def b85encode(b, pad=False):
"""Encode an ASCII-encoded byte array in base85 format.
If pad is true, the input is padded with "\\0" so its length is a multiple of
4 characters before encoding.
"""
global _b85chars, _b85chars2
# Delay the initialization of tables to not waste memory
# if the function is never called
if _b85chars is None:
_b85chars = [bytes((i,)) for i in _b85alphabet]
_b85chars2 = [(a + b) for a in _b85chars for b in _b85chars]
return _85encode(b, _b85chars, _b85chars2, pad)
def b85decode(b):
"""Decode base85-encoded byte array"""
global _b85dec
# Delay the initialization of tables to not waste memory
# if the function is never called
if _b85dec is None:
_b85dec = [None] * 256
for i, c in enumerate(_b85alphabet):
_b85dec[c] = i
b = _bytes_from_decode_data(b)
padding = (-len(b)) % 5
b = b + b'~' * padding
out = []
packI = struct.Struct('!I').pack
for i in range(0, len(b), 5):
chunk = b[i:i + 5]
acc = 0
try:
for c in chunk:
acc = acc * 85 + _b85dec[c]
except TypeError:
for j, c in enumerate(chunk):
if _b85dec[c] is None:
raise ValueError('bad base85 character at position %d'
% (i + j)) from None
raise
try:
out.append(packI(acc))
except struct.error:
raise ValueError('base85 overflow in hunk starting at byte %d'
% i) from None
result = b''.join(out)
if padding:
result = result[:-padding]
return result
# Legacy interface. This code could be cleaned up since I don't believe
# binascii has any line length limitations. It just doesn't seem worth it
# though. The files should be opened in binary mode.
MAXLINESIZE = 76 # Excluding the CRLF
MAXBINSIZE = (MAXLINESIZE//4)*3
def encode(input, output):
"""Encode a file; input and output are binary files."""
while True:
s = input.read(MAXBINSIZE)
if not s:
break
while len(s) < MAXBINSIZE:
ns = input.read(MAXBINSIZE-len(s))
if not ns:
break
s += ns
line = binascii.b2a_base64(s)
output.write(line)
def decode(input, output):
"""Decode a file; input and output are binary files."""
while True:
line = input.readline()
if not line:
break
s = binascii.a2b_base64(line)
output.write(s)
def _input_type_check(s):
try:
m = memoryview(s)
except TypeError as err:
msg = "expected bytes-like object, not %s" % s.__class__.__name__
raise TypeError(msg) from err
if m.format not in ('c', 'b', 'B'):
msg = ("expected single byte elements, not %r from %s" %
(m.format, s.__class__.__name__))
raise TypeError(msg)
if m.ndim != 1:
msg = ("expected 1-D data, not %d-D data from %s" %
(m.ndim, s.__class__.__name__))
raise TypeError(msg)
def encodebytes(s):
"""Encode a bytestring into a bytestring containing multiple lines
of base-64 data."""
_input_type_check(s)
pieces = []
for i in range(0, len(s), MAXBINSIZE):
chunk = s[i : i + MAXBINSIZE]
pieces.append(binascii.b2a_base64(chunk))
return b"".join(pieces)
def encodestring(s):
"""Legacy alias of encodebytes()."""
import warnings
warnings.warn("encodestring() is a deprecated alias, use encodebytes()",
DeprecationWarning, 2)
return encodebytes(s)
def decodebytes(s):
"""Decode a bytestring of base-64 data into a bytestring."""
_input_type_check(s)
return binascii.a2b_base64(s)
def decodestring(s):
"""Legacy alias of decodebytes()."""
import warnings
warnings.warn("decodestring() is a deprecated alias, use decodebytes()",
DeprecationWarning, 2)
return decodebytes(s)
# Usable as a script...
def main():
"""Small main program"""
import sys, getopt
try:
opts, args = getopt.getopt(sys.argv[1:], 'deut')
except getopt.error as msg:
sys.stdout = sys.stderr
print(msg)
print("""usage: %s [-d|-e|-u|-t] [file|-]
-d, -u: decode
-e: encode (default)
-t: encode and decode string 'Aladdin:open sesame'"""%sys.argv[0])
sys.exit(2)
func = encode
for o, a in opts:
if o == '-e': func = encode
if o == '-d': func = decode
if o == '-u': func = decode
if o == '-t': test(); return
if args and args[0] != '-':
with open(args[0], 'rb') as f:
func(f, sys.stdout.buffer)
else:
func(sys.stdin.buffer, sys.stdout.buffer)
def test():
s0 = b"Aladdin:open sesame"
print(repr(s0))
s1 = encodebytes(s0)
print(repr(s1))
s2 = decodebytes(s1)
print(repr(s2))
assert s0 == s2
if __name__ == '__main__':
main()
| {
"content_hash": "2faf62f39dda2f45ba1008e536727ddd",
"timestamp": "",
"source": "github",
"line_count": 600,
"max_line_length": 84,
"avg_line_length": 33.57333333333333,
"alnum_prop": 0.6076747418586179,
"repo_name": "Kamik423/uni_plan",
"id": "eede5e8406b1ca60fbbde30a4346465dc188e581",
"size": "20167",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "plan/plan/lib64/python3.4/base64.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "55214"
},
{
"name": "CSS",
"bytes": "21814"
},
{
"name": "HTML",
"bytes": "4516"
},
{
"name": "JavaScript",
"bytes": "12528"
},
{
"name": "Makefile",
"bytes": "128416"
},
{
"name": "Python",
"bytes": "11543363"
},
{
"name": "Shell",
"bytes": "3586"
}
],
"symlink_target": ""
} |
import os
import pytest
import pushbullet
from binascii import a2b_base64
API_KEY = os.environ["PUSHBULLET_API_KEY"]
def test_decryption():
pb = pushbullet.Pushbullet(API_KEY, encryption_password="hunter2")
pb._encryption_key = a2b_base64("1sW28zp7CWv5TtGjlQpDHHG4Cbr9v36fG5o4f74LsKg=")
test_data = "MSfJxxY5YdjttlfUkCaKA57qU9SuCN8+ZhYg/xieI+lDnQ=="
decrypted = pb._decrypt_data(test_data)
assert decrypted == "meow!"
| {
"content_hash": "3ef750b695e6758dd578ed1eeb5a348b",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 83,
"avg_line_length": 27.6875,
"alnum_prop": 0.7471783295711061,
"repo_name": "Saturn/pushbullet.py",
"id": "7221dcdcccef4b6acec15560657134c03682d92d",
"size": "443",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_e2e.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33649"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('mooc', '0007_lesson_order'),
]
operations = [
migrations.CreateModel(
name='Classification',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128, unique=True)),
],
),
migrations.AddField(
model_name='course',
name='classification',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, related_name='classification', to='mooc.Classification'),
preserve_default=False,
),
]
| {
"content_hash": "2eb35640abea22055d96aab92cba3c67",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 149,
"avg_line_length": 31.296296296296298,
"alnum_prop": 0.6011834319526628,
"repo_name": "Zing22/Moogle",
"id": "294cd2383665770fa7c4aa3ae3638960ddd23330",
"size": "915",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moogle/mooc/migrations/0008_auto_20161115_1552.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "778393"
},
{
"name": "HTML",
"bytes": "17299"
},
{
"name": "JavaScript",
"bytes": "785695"
},
{
"name": "Python",
"bytes": "22978"
}
],
"symlink_target": ""
} |
__author__ = "Simone Campagna"
__all__ = [
'RubikTestInputOutput',
]
import numpy as np
from rubik.cubes import api as cb
from rubik.shape import Shape
from ...rubik_test_case import RubikTestCase, testmethod
class RubikTestStats(RubikTestCase):
METHOD_NAMES = []
def impl_stats_random_file(self, shape, dtype, buffer_size):
dtype = cb.get_dtype(dtype)
shape = Shape(shape)
file_format = 'raw'
filename_format = "stats_random_{shape}_{dtype}.{format}"
filename = filename_format.format(shape=shape, dtype=dtype, format=file_format)
dmin = 3
for d in shape:
assert d >= dmin, "d={} < {}".format(d, dmin)
cube = cb.random_cube(shape=shape, dtype=dtype)
stats_info_cube = cb.stats_info(cube)
cube.tofile(filename)
self.assertFileExistsAndHasShape(filename, shape=shape, dtype=dtype)
stats_info_oc = cb.stats_file(filename, shape=shape, dtype=dtype, file_format=file_format,
out_of_core=False)
self.assertAlmostEqualStatsInfo(stats_info_oc, stats_info_cube)
stats_info_ooc = cb.stats_file(filename, shape=shape, dtype=dtype, file_format=file_format,
out_of_core=True, progress_frequency=-1.0, buffer_size=buffer_size)
self.assertAlmostEqualStatsInfo(stats_info_ooc, stats_info_cube)
self.assertEqual(stats_info_oc.report(), stats_info_ooc.report())
def impl_stats_const_file(self, shape, dtype, buffer_size):
dtype = cb.get_dtype(dtype)
shape = Shape(shape)
file_format = 'raw'
filename_format = "stats_const_{shape}_{dtype}.{format}"
filename = filename_format.format(shape=shape, dtype=dtype, format=file_format)
dmin = 3
for d in shape:
assert d >= dmin, "d={} < {}".format(d, dmin)
cube_max_index = tuple(0 for i in shape)
cube_min_index = tuple(1 for i in shape)
cube_zero_index = tuple(2 for i in shape)
cube_value = 1.0
cube_max = 10.0
cube_min = -23.0
cube_zero = 0.0
cube_sum = cube_max + cube_min + cube_value * (shape.count() - dmin)
cube_ave = cube_sum / float(shape.count())
cube = cb.const_cube(shape=shape, dtype=dtype, value=cube_value)
cube[cube_max_index] = cube_max
cube[cube_min_index] = cube_min
cube[cube_zero_index] = cube_zero
cube_count_zero = 1
cube_count_nonzero = shape.count() - cube_count_zero
cube_count_nan = 0
cube_count_inf = 0
stats_info_cube = cb.stats_info(cube)
self.assertEqual(stats_info_cube.cube_sum, cube_sum)
self.assertEqual(stats_info_cube.cube_ave, cube_ave)
self.assertEqual(stats_info_cube.cube_max, cube_max)
self.assertEqual(stats_info_cube.cube_min, cube_min)
self.assertEqual(stats_info_cube.cube_max_index, cube_max_index)
self.assertEqual(stats_info_cube.cube_min_index, cube_min_index)
self.assertEqual(stats_info_cube.cube_count_zero, cube_count_zero)
self.assertEqual(stats_info_cube.cube_count_nonzero, cube_count_nonzero)
self.assertEqual(stats_info_cube.cube_count_nan, cube_count_nan)
self.assertEqual(stats_info_cube.cube_count_inf, cube_count_inf)
cube.tofile(filename)
self.assertFileExistsAndHasShape(filename, shape=shape, dtype=dtype)
stats_info_oc = cb.stats_file(filename, shape=shape, dtype=dtype, file_format=file_format,
out_of_core=False)
self.assertEqual(stats_info_oc, stats_info_cube)
stats_info_ooc = cb.stats_file(filename, shape=shape, dtype=dtype, file_format=file_format,
out_of_core=True, progress_frequency=-1.0, buffer_size=buffer_size)
self.assertEqual(stats_info_ooc, stats_info_cube)
def get_buffer_size(self, shape, dtype, buffer_size=None, chunks=2):
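        # Size the buffer to roughly 1/chunks of the cube so the out-of-core
        # statistics pass has to iterate over several reads.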
if buffer_size is None:
buffer_size = int(shape.count() * dtype().itemsize / chunks)
return buffer_size
### tests
# random
# 4x4, float32, buffer_size=(total_size // 2)
@testmethod
def stats_random_file_4x4_float32_2chunks(self):
dtype = np.float32
shape = Shape("4x4")
self.impl_stats_random_file(shape=shape, dtype=dtype,
buffer_size=self.get_buffer_size(shape=shape, dtype=dtype, chunks=2))
# 4x4, float64, buffer_size=(total_size // 3)
@testmethod
def stats_random_file_4x4_float64_2chunks(self):
dtype = np.float64
shape = Shape("4x4")
self.impl_stats_random_file(shape=shape, dtype=dtype,
buffer_size=self.get_buffer_size(shape=shape, dtype=dtype, chunks=3))
# 12x8x19x5, float32, buffer_size=(total_size // 2)
@testmethod
def stats_random_file_12x8x19x5_float32_2chunks(self):
dtype = np.float32
shape = Shape("12x8x19x5")
self.impl_stats_random_file(shape=shape, dtype=dtype,
buffer_size=self.get_buffer_size(shape=shape, dtype=dtype, chunks=2))
# 12x8x19x5, float64, buffer_size=(total_size // 3)
@testmethod
def stats_random_file_12x8x19x5_float64_3chunks(self):
dtype = np.float64
shape = Shape("12x8x19x5")
self.impl_stats_random_file(shape=shape, dtype=dtype,
buffer_size=self.get_buffer_size(shape=shape, dtype=dtype, chunks=3))
# const
# 4x4, float32, buffer_size=(total_size // 2)
@testmethod
def stats_const_file_4x4_float32_2chunks(self):
dtype = np.float32
shape = Shape("4x4")
self.impl_stats_const_file(shape=shape, dtype=dtype,
buffer_size=self.get_buffer_size(shape=shape, dtype=dtype, chunks=2))
# 4x4, float64, buffer_size=(total_size // 3)
@testmethod
def stats_const_file_4x4_float64_3chunks(self):
dtype = np.float64
shape = Shape("4x4")
self.impl_stats_const_file(shape=shape, dtype=dtype,
buffer_size=self.get_buffer_size(shape=shape, dtype=dtype, chunks=3))
# 12x8x19x5, float32, buffer_size=(total_size // 2)
@testmethod
def stats_const_file_12x8x19x5_float32_2chunks(self):
dtype = np.float32
shape = Shape("12x8x19x5")
self.impl_stats_const_file(shape=shape, dtype=dtype,
buffer_size=self.get_buffer_size(shape=shape, dtype=dtype, chunks=2))
# 12x8x19x5, float64, buffer_size=(total_size // 3)
@testmethod
def stats_const_file_12x8x19x5_float64_3chunks(self):
dtype = np.float64
shape = Shape("12x8x19x5")
self.impl_stats_const_file(shape=shape, dtype=dtype,
buffer_size=self.get_buffer_size(shape=shape, dtype=dtype, chunks=3))
| {
"content_hash": "18d0375910433ab399bb384b7fa5945c",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 106,
"avg_line_length": 40.34117647058824,
"alnum_prop": 0.6259842519685039,
"repo_name": "simone-campagna/rubik",
"id": "4cf5adeefe6ba19c99f10388ce1e42be9476d3e0",
"size": "7465",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testing/rubik_testing/tests/test_cubes/rubik_test_stats.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "546787"
}
],
"symlink_target": ""
} |
"""Invertible 1x1 Convolution used in GLOW."""
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import bijector
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.math.linalg import lu_reconstruct
from tensorflow_probability.python.math.linalg import lu_reconstruct_assertions
from tensorflow_probability.python.math.linalg import lu_solve
from tensorflow.python.util import deprecation # pylint: disable=g-direct-tensorflow-import
__all__ = [
'MatvecLU', # Deprecated
'ScaleMatvecLU',
]
class ScaleMatvecLU(bijector.AutoCompositeTensorBijector):
"""Matrix-vector multiply using LU decomposition.
This bijector is identical to the 'Convolution1x1' used in Glow
[(Kingma and Dhariwal, 2018)[1].
#### Examples
Here's an example of initialization via random weights matrix:
```python
def trainable_lu_factorization(
event_size, batch_shape=(), seed=None, dtype=tf.float32, name=None):
with tf.name_scope(name or 'trainable_lu_factorization'):
event_size = tf.convert_to_tensor(
event_size, dtype_hint=tf.int32, name='event_size')
batch_shape = tf.convert_to_tensor(
batch_shape, dtype_hint=event_size.dtype, name='batch_shape')
random_matrix = tf.random.uniform(
shape=tf.concat([batch_shape, [event_size, event_size]], axis=0),
dtype=dtype,
seed=seed)
random_orthonormal = tf.linalg.qr(random_matrix)[0]
lower_upper, permutation = tf.linalg.lu(random_orthonormal)
lower_upper = tf.Variable(
initial_value=lower_upper,
trainable=True,
name='lower_upper')
# Initialize a non-trainable variable for the permutation indices so
# that its value isn't re-sampled from run-to-run.
permutation = tf.Variable(
initial_value=permutation,
trainable=False,
name='permutation')
return lower_upper, permutation
channels = 3
conv1x1 = tfb.ScaleMatvecLU(*trainable_lu_factorization(channels),
validate_args=True)
x = tf.random.uniform(shape=[2, 28, 28, channels])
fwd = conv1x1.forward(x)
rev_fwd = conv1x1.inverse(fwd)
# ==> x
```
To initialize this variable outside of TensorFlow, one can also use SciPy,
e.g.,
```python
def lu_factorized_random_orthonormal_matrix(channels, dtype=np.float32):
random_matrix = np.random.rand(channels, channels).astype(dtype)
lower_upper = scipy.linalg.qr(random_matrix)[0]
permutation = scipy.linalg.lu(lower_upper, overwrite_a=True)[0]
permutation = np.argmax(permutation, axis=-2)
return lower_upper, permutation
```
#### References
[1]: Diederik P. Kingma, Prafulla Dhariwal. Glow: Generative Flow with
Invertible 1x1 Convolutions. _arXiv preprint arXiv:1807.03039_, 2018.
https://arxiv.org/abs/1807.03039
"""
def __init__(self,
lower_upper,
permutation,
validate_args=False,
name=None):
"""Creates the ScaleMatvecLU bijector.
Args:
lower_upper: The LU factorization as returned by `tf.linalg.lu`.
permutation: The LU factorization permutation as returned by
`tf.linalg.lu`.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
Default value: `False`.
name: Python `str` name given to ops managed by this object.
Default value: `None` (i.e., 'ScaleMatvecLU').
Raises:
ValueError: If both/neither `channels` and `lower_upper`/`permutation` are
specified.
"""
parameters = dict(locals())
with tf.name_scope(name or 'ScaleMatvecLU') as name:
self._lower_upper = tensor_util.convert_nonref_to_tensor(
lower_upper, dtype_hint=tf.float32, name='lower_upper')
self._permutation = tensor_util.convert_nonref_to_tensor(
permutation, dtype_hint=tf.int32, name='permutation')
super(ScaleMatvecLU, self).__init__(
dtype=self._lower_upper.dtype,
is_constant_jacobian=True,
forward_min_event_ndims=1,
validate_args=validate_args,
parameters=parameters,
name=name)
@classmethod
def _parameter_properties(cls, dtype):
# pylint: disable=g-long-lambda
return dict(
lower_upper=parameter_properties.ParameterProperties(
event_ndims=2,
shape_fn=lambda sample_shape: ps.concat(
[sample_shape, sample_shape[-1:]], axis=0),
),
permutation=parameter_properties.ParameterProperties(
event_ndims=1,
default_constraining_bijector_fn=parameter_properties
.BIJECTOR_NOT_IMPLEMENTED))
# pylint: enable=g-long-lambda
@property
def lower_upper(self):
return self._lower_upper
@property
def permutation(self):
return self._permutation
def _broadcast_params(self):
lower_upper = tf.convert_to_tensor(self.lower_upper)
perm = tf.convert_to_tensor(self.permutation)
shape = ps.broadcast_shape(ps.shape(lower_upper)[:-1],
ps.shape(perm))
lower_upper = tf.broadcast_to(
lower_upper, ps.concat([shape, shape[-1:]], 0))
perm = tf.broadcast_to(perm, shape)
return lower_upper, perm
def _forward(self, x):
lu, perm = self._broadcast_params()
w = lu_reconstruct(lower_upper=lu,
perm=perm,
validate_args=self.validate_args)
return tf.linalg.matvec(w, x)
def _inverse(self, y):
lu, perm = self._broadcast_params()
return lu_solve(
lower_upper=lu,
perm=perm,
rhs=y[..., tf.newaxis],
validate_args=self.validate_args)[..., 0]
def _forward_log_det_jacobian(self, unused_x):
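    # W = P @ L @ U with P a permutation and L unit lower triangular, so
    # |det W| = prod|diag(U)| and the log-det is a sum of log-abs diagonal
    # entries, independent of x.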
return tf.reduce_sum(
tf.math.log(tf.abs(tf.linalg.diag_part(self.lower_upper))),
axis=-1)
def _parameter_control_dependencies(self, is_init):
if not self.validate_args:
return []
lu, perm = None, None
assertions = []
if (is_init != tensor_util.is_ref(self.lower_upper) or
is_init != tensor_util.is_ref(self.permutation)):
lu, perm = self._broadcast_params()
assertions.extend(lu_reconstruct_assertions(
lu, perm, self.validate_args))
if is_init != tensor_util.is_ref(self.lower_upper):
lu = tf.convert_to_tensor(self.lower_upper) if lu is None else lu
assertions.append(assert_util.assert_none_equal(
tf.linalg.diag_part(lu), tf.zeros([], dtype=lu.dtype),
message='Invertible `lower_upper` must have nonzero diagonal.'))
return assertions
class MatvecLU(ScaleMatvecLU):
"""Matrix-vector multiply using LU decomposition.
This bijector is identical to the 'Convolution1x1' used in Glow
[(Kingma and Dhariwal, 2018)[1].
#### Examples
Here's an example of initialization via random weights matrix:
```python
def trainable_lu_factorization(
event_size, batch_shape=(), seed=None, dtype=tf.float32, name=None):
with tf.name_scope(name or 'trainable_lu_factorization'):
event_size = tf.convert_to_tensor(
event_size, dtype_hint=tf.int32, name='event_size')
batch_shape = tf.convert_to_tensor(
batch_shape, dtype_hint=event_size.dtype, name='batch_shape')
random_matrix = tf.random.uniform(
shape=tf.concat([batch_shape, [event_size, event_size]], axis=0),
dtype=dtype,
seed=seed)
random_orthonormal = tf.linalg.qr(random_matrix)[0]
lower_upper, permutation = tf.linalg.lu(random_orthonormal)
lower_upper = tf.Variable(
initial_value=lower_upper,
trainable=True,
name='lower_upper')
# Initialize a non-trainable variable for the permutation indices so
# that its value isn't re-sampled from run-to-run.
permutation = tf.Variable(
initial_value=permutation,
trainable=False,
name='permutation')
return lower_upper, permutation
channels = 3
conv1x1 = tfb.MatvecLU(*trainable_lu_factorization(channels),
validate_args=True)
x = tf.random.uniform(shape=[2, 28, 28, channels])
fwd = conv1x1.forward(x)
rev_fwd = conv1x1.inverse(fwd)
# ==> x
```
To initialize this variable outside of TensorFlow, one can also use SciPy,
e.g.,
```python
def lu_factorized_random_orthonormal_matrix(channels, dtype=np.float32):
random_matrix = np.random.rand(channels, channels).astype(dtype)
lower_upper = scipy.linalg.qr(random_matrix)[0]
permutation = scipy.linalg.lu(lower_upper, overwrite_a=True)[0]
permutation = np.argmax(permutation, axis=-2)
return lower_upper, permutation
```
#### References
[1]: Diederik P. Kingma, Prafulla Dhariwal. Glow: Generative Flow with
Invertible 1x1 Convolutions. _arXiv preprint arXiv:1807.03039_, 2018.
https://arxiv.org/abs/1807.03039
"""
@deprecation.deprecated(
'2020-01-01',
'`MatvecLU` has been deprecated and renamed `ScaleMatvecLU`; please use '
'that symbol instead.')
def __init__(self,
lower_upper,
permutation,
validate_args=False,
name=None):
"""Creates the MatvecLU bijector.
Args:
lower_upper: The LU factorization as returned by `tf.linalg.lu`.
permutation: The LU factorization permutation as returned by
`tf.linalg.lu`.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
Default value: `False`.
name: Python `str` name given to ops managed by this object.
Default value: `None` (i.e., 'MatvecLU').
Raises:
ValueError: If both/neither `channels` and `lower_upper`/`permutation` are
specified.
"""
    super(MatvecLU, self).__init__(
        lower_upper, permutation, validate_args=validate_args,
        name=name or 'MatvecLU')
| {
"content_hash": "af8b4603268eef7b5b4738e45d044354",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 92,
"avg_line_length": 35.83157894736842,
"alnum_prop": 0.6555033294163729,
"repo_name": "tensorflow/probability",
"id": "28f48d2bdff459aead1de6af4f565eeae513f4f9",
"size": "10890",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tensorflow_probability/python/bijectors/scale_matvec_lu.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "55552121"
},
{
"name": "Python",
"bytes": "17339674"
},
{
"name": "Shell",
"bytes": "24852"
},
{
"name": "Starlark",
"bytes": "663851"
}
],
"symlink_target": ""
} |
from __future__ import print_function
try: # Python 3
import http.client as httplib
except ImportError: # Python 2
import httplib
import json
import re
import base64
import sys
import os
import os.path
settings = {}
##### Switch endian-ness #####
def hex_switchEndian(s):
""" Switches the endianness of a hex string (in pairs of hex chars) """
pairList = [s[i:i+2].encode() for i in range(0, len(s), 2)]
return b''.join(pairList[::-1]).decode()
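# Example (illustrative only): hex_switchEndian('1a2b3c') -> '3c2b1a'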
class BitcoinRPC:
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
authpair = authpair.encode('utf-8')
self.authhdr = b"Basic " + base64.b64encode(authpair)
self.conn = httplib.HTTPConnection(host, port=port, timeout=30)
def execute(self, obj):
try:
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
except ConnectionRefusedError:
print('RPC connection refused. Check RPC settings and the server status.',
file=sys.stderr)
return None
resp = self.conn.getresponse()
if resp is None:
print("JSON-RPC: no response", file=sys.stderr)
return None
body = resp.read().decode('utf-8')
resp_obj = json.loads(body)
return resp_obj
@staticmethod
def build_request(idx, method, params):
obj = { 'version' : '1.1',
'method' : method,
'id' : idx }
if params is None:
obj['params'] = []
else:
obj['params'] = params
return obj
@staticmethod
def response_is_error(resp_obj):
return 'error' in resp_obj and resp_obj['error'] is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
height = settings['min_height']
while height < settings['max_height']+1:
num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
batch = []
for x in range(num_blocks):
batch.append(rpc.build_request(x, 'getblockhash', [height + x]))
reply = rpc.execute(batch)
if reply is None:
print('Cannot continue. Program will halt.')
return None
for x,resp_obj in enumerate(reply):
if rpc.response_is_error(resp_obj):
print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
exit(1)
assert(resp_obj['id'] == x) # assume replies are in-sequence
if settings['rev_hash_bytes'] == 'true':
resp_obj['result'] = hex_switchEndian(resp_obj['result'])
print(resp_obj['result'])
height += num_blocks
def get_rpc_cookie():
# Open the cookie file
with open(os.path.join(os.path.expanduser(settings['datadir']), '.cookie'), 'r') as f:
combined = f.readline()
combined_split = combined.split(":")
settings['rpcuser'] = combined_split[0]
settings['rpcpassword'] = combined_split[1]
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-hashes.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 12705
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 313000
if 'rev_hash_bytes' not in settings:
settings['rev_hash_bytes'] = 'false'
use_userpass = True
use_datadir = False
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
use_userpass = False
if 'datadir' in settings and not use_userpass:
use_datadir = True
if not use_userpass and not use_datadir:
print("Missing datadir or username and/or password in cfg file", file=stderr)
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
# Force hash byte format setting to be lowercase to make comparisons easier.
settings['rev_hash_bytes'] = settings['rev_hash_bytes'].lower()
# Get the rpc user and pass from the cookie if the datadir is set
if use_datadir:
get_rpc_cookie()
get_block_hashes(settings)
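# A minimal example configuration file (illustrative values; rpcuser/rpcpassword
# may be replaced by a datadir entry so the credentials are read from .cookie):
#
#   host=127.0.0.1
#   port=12705
#   rpcuser=someuser
#   rpcpassword=somepassword
#   min_height=0
#   max_height=313000
#   rev_hash_bytes=false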
| {
"content_hash": "9a8a7fa25e39d654da8142f1ee57007e",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 90,
"avg_line_length": 28.91891891891892,
"alnum_prop": 0.666588785046729,
"repo_name": "ionomy/ion",
"id": "14c6524140edc438ad1601ca08aed2c33a900730",
"size": "4630",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/linearize/linearize-hashes.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "1392620"
},
{
"name": "C++",
"bytes": "6603677"
},
{
"name": "CMake",
"bytes": "41658"
},
{
"name": "CSS",
"bytes": "44782"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30291"
},
{
"name": "M4",
"bytes": "201267"
},
{
"name": "Makefile",
"bytes": "119240"
},
{
"name": "Objective-C",
"bytes": "13448"
},
{
"name": "Objective-C++",
"bytes": "6627"
},
{
"name": "Python",
"bytes": "1137651"
},
{
"name": "QMake",
"bytes": "26274"
},
{
"name": "Shell",
"bytes": "73927"
}
],
"symlink_target": ""
} |
import re
import requests
from bs4 import BeautifulSoup
from metro.parser.base import BaseRuDataProvider
class DataProvider(BaseRuDataProvider):
metro_data_src = "http://ru.wikipedia.org/wiki/\
Список_станций_Киевского_метрополитена"
def download_all(self):
self.parse_usual_big_table()
| {
"content_hash": "0b228197210709b09b16a93019d8cd76",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 62,
"avg_line_length": 25.384615384615383,
"alnum_prop": 0.7121212121212122,
"repo_name": "xfenix/django-metro",
"id": "6e9556c73586ebcd994983242e80f2c48181a987",
"size": "389",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "metro/parser/providers/kiev.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27822"
}
],
"symlink_target": ""
} |
"""FDTD cell phantom with tilted axis of rotation
The *in silico* data set was created with the
:abbr:`FDTD (Finite Difference Time Domain)` software `meep`_. The data
are 2D projections of a 3D refractive index phantom that is rotated
about an axis which is tilted by 0.2 rad (11.5 degrees) with respect
to the imaging plane. The example showcases the method
:func:`odtbrain.backpropagate_3d_tilted` which takes into account
such a tilted axis of rotation. The data are downsampled by a factor
of two. A total of 220 projections are used for the reconstruction.
Note that the information required for reconstruction decreases as the
tilt angle increases. If the tilt angle is 90 degrees w.r.t. the
imaging plane, then we get a rotating image of a cell (not images of a
rotating cell) and tomographic reconstruction is impossible. A brief
description of this algorithm is given in :cite:`Mueller2015tilted`.
The first column shows the measured phase, visualizing the
tilt (compare to other examples). The second column shows a
reconstruction that does not take into account the tilted axis of
rotation; the result is a blurry reconstruction. The third column
shows the improved reconstruction; the known tilted axis of rotation
is used in the reconstruction process.
.. _`meep`: http://ab-initio.mit.edu/wiki/index.php/Meep
"""
import matplotlib.pylab as plt
import numpy as np
import odtbrain as odt
from example_helper import load_data
sino, angles, phantom, cfg = \
load_data("fdtd_3d_sino_A220_R6.500_tiltyz0.2.tar.lzma")
A = angles.shape[0]
print("Example: Backpropagation from 3D FDTD simulations")
print("Refractive index of medium:", cfg["nm"])
print("Measurement position from object center:", cfg["lD"])
print("Wavelength sampling:", cfg["res"])
print("Axis tilt in y-z direction:", cfg["tilt_yz"])
print("Number of projections:", A)
print("Performing normal backpropagation.")
# Apply the Rytov approximation
sinoRytov = odt.sinogram_as_rytov(sino)
# Perform naive backpropagation
f_naiv = odt.backpropagate_3d(uSin=sinoRytov,
angles=angles,
res=cfg["res"],
nm=cfg["nm"],
lD=cfg["lD"]
)
print("Performing tilted backpropagation.")
# Determine tilted axis
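# (The axis of rotation is a unit vector in the y-z plane: for tilt_yz = 0 it
# is the y axis [0, 1, 0]; the cosine/sine components below tilt it towards z
# by the angle stored in the simulation config.)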
tilted_axis = [0, np.cos(cfg["tilt_yz"]), np.sin(cfg["tilt_yz"])]
# Perform tilted backpropagation
f_tilt = odt.backpropagate_3d_tilted(uSin=sinoRytov,
angles=angles,
res=cfg["res"],
nm=cfg["nm"],
lD=cfg["lD"],
tilted_axis=tilted_axis,
)
# compute refractive index n from object function
n_naiv = odt.odt_to_ri(f_naiv, res=cfg["res"], nm=cfg["nm"])
n_tilt = odt.odt_to_ri(f_tilt, res=cfg["res"], nm=cfg["nm"])
sx, sy, sz = n_tilt.shape
px, py, pz = phantom.shape
sino_phase = np.angle(sino)
# compare phantom and reconstruction in plot
fig, axes = plt.subplots(2, 3, figsize=(8, 4.5))
kwri = {"vmin": n_tilt.real.min(), "vmax": n_tilt.real.max()}
kwph = {"vmin": sino_phase.min(), "vmax": sino_phase.max(),
"cmap": "coolwarm"}
# Sinogram
axes[0, 0].set_title("phase projection")
phmap = axes[0, 0].imshow(sino_phase[A // 2, :, :], **kwph)
axes[0, 0].set_xlabel("detector x")
axes[0, 0].set_ylabel("detector y")
axes[1, 0].set_title("sinogram slice")
axes[1, 0].imshow(sino_phase[:, :, sino.shape[2] // 2],
aspect=sino.shape[1] / sino.shape[0], **kwph)
axes[1, 0].set_xlabel("detector y")
axes[1, 0].set_ylabel("angle [rad]")
# set y ticks for sinogram
labels = np.linspace(0, 2 * np.pi, len(axes[1, 1].get_yticks()))
labels = ["{:.2f}".format(i) for i in labels]
axes[1, 0].set_yticks(np.linspace(0, len(angles), len(labels)))
axes[1, 0].set_yticklabels(labels)
axes[0, 1].set_title("normal (center)")
rimap = axes[0, 1].imshow(n_naiv[sx // 2].real, **kwri)
axes[0, 1].set_xlabel("x")
axes[0, 1].set_ylabel("y")
axes[1, 1].set_title("normal (nucleolus)")
axes[1, 1].imshow(n_naiv[int(sx / 2 + 2 * cfg["res"])].real, **kwri)
axes[1, 1].set_xlabel("x")
axes[1, 1].set_ylabel("y")
axes[0, 2].set_title("tilt correction (center)")
axes[0, 2].imshow(n_tilt[sx // 2].real, **kwri)
axes[0, 2].set_xlabel("x")
axes[0, 2].set_ylabel("y")
axes[1, 2].set_title("tilt correction (nucleolus)")
axes[1, 2].imshow(n_tilt[int(sx / 2 + 2 * cfg["res"])].real, **kwri)
axes[1, 2].set_xlabel("x")
axes[1, 2].set_ylabel("y")
# color bars
cbkwargs = {"fraction": 0.045,
"format": "%.3f"}
plt.colorbar(phmap, ax=axes[0, 0], **cbkwargs)
plt.colorbar(phmap, ax=axes[1, 0], **cbkwargs)
plt.colorbar(rimap, ax=axes[0, 1], **cbkwargs)
plt.colorbar(rimap, ax=axes[1, 1], **cbkwargs)
plt.colorbar(rimap, ax=axes[0, 2], **cbkwargs)
plt.colorbar(rimap, ax=axes[1, 2], **cbkwargs)
plt.tight_layout()
plt.show()
| {
"content_hash": "ee4b4927fbcaf9187b92196dea23b455",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 71,
"avg_line_length": 37.1865671641791,
"alnum_prop": 0.6449929761188039,
"repo_name": "paulmueller/ODTbrain",
"id": "c5238e9ab45131c890005bb4bfc93793b03d11be",
"size": "4983",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/backprop_from_fdtd_3d_tilted.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "150751"
}
],
"symlink_target": ""
} |
import os
from oslo_privsep import capabilities as c
from oslo_privsep import priv_context
capabilities = [c.CAP_SYS_ADMIN]
# On virtual environments libraries are not owned by the Daemon user (root), so
# the Daemon needs the capability to bypass file read permission checks in
# order to dynamically load the code to run.
if os.environ.get('VIRTUAL_ENV'):
capabilities.append(c.CAP_DAC_READ_SEARCH)
# It is expected that most (if not all) os-brick operations can be
# executed with these privileges.
default = priv_context.PrivContext(
__name__,
cfg_section='privsep_osbrick',
pypath=__name__ + '.default',
capabilities=capabilities,
)
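# A minimal usage sketch (not part of this module; the function name and path
# below are hypothetical): privileged helpers elsewhere in os-brick decorate
# themselves with this context's entrypoint, e.g.
#
#   from os_brick import privileged
#
#   @privileged.default.entrypoint
#   def read_protected(path):
#       with open(path, 'rb') as f:
#           return f.read()
#
# Calls made from the unprivileged service process are then forwarded to the
# privsep daemon, which executes them with the capabilities listed above.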
| {
"content_hash": "3f21be46a810e5e38dce1e873c5d94da",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 79,
"avg_line_length": 30.136363636363637,
"alnum_prop": 0.7405731523378583,
"repo_name": "openstack/os-brick",
"id": "c41ff533c2a4b16d28ca70710a1fbe94fdbe60d6",
"size": "1236",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "os_brick/privileged/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1136394"
},
{
"name": "Shell",
"bytes": "3226"
}
],
"symlink_target": ""
} |
from cliff import show
from gnocchiclient import utils
class CliCapabilitiesList(show.ShowOne):
"""List capabilities."""
def take_action(self, parsed_args):
caps = utils.get_client(self).capabilities.list()
return self.dict2columns(caps)
| {
"content_hash": "d87eeab8145711af5a0b2c55dde38f8c",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 57,
"avg_line_length": 24.181818181818183,
"alnum_prop": 0.7105263157894737,
"repo_name": "gnocchixyz/python-gnocchiclient",
"id": "bc22dd9b89113f40de4b60b82fe712a4adbe1177",
"size": "841",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gnocchiclient/v1/capabilities_cli.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "196841"
},
{
"name": "Shell",
"bytes": "645"
}
],
"symlink_target": ""
} |
def parse_args():
""" Parsing commandline arguments. """
from argparse import ArgumentParser
parser = ArgumentParser("./cluster.py")
parser.add_argument("input", help="input file")
parser.add_argument("output",
nargs="?",
help="output file",
default="out.txt")
parser.add_argument("labels", type=int, help="number of labels")
parser.add_argument("iterations",
type=int,
help="number of iterations")
parser.add_argument("alpha",
type=float,
help="transition hyperparameter")
parser.add_argument("beta", type=float, help="emission hyperparameter")
return parser.parse_args()
def change_count(matrix, x, y, i):
""" Change the count in a matrix.
Arguments:
matrix - transition or emission matrix
x - emission or label
y - label
i - change in count
"""
matrix["%s|%s" % (x, y)] += i
matrix["%s" % y] += i
def get_value(matrix, *args):
""" Returns the value to a key.
Arguments:
*args arbitrary number of arguments
"""
if len(args) == 2:
return matrix["%s|%s" % (args[0], args[1])]
elif len(args) == 1:
return matrix["%s" % (args[0])]
else:
raise Exception("Invalid argument list: " + str(args))
| {
"content_hash": "b940170b6f39e781d2b7838e71a58c43",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 75,
"avg_line_length": 30.456521739130434,
"alnum_prop": 0.5438972162740899,
"repo_name": "akullpp/ClusterPy",
"id": "dc5529defee43d430266ec3f6b6cda309f65a18a",
"size": "1401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/utility.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11009"
}
],
"symlink_target": ""
} |
"""
@package mi.dataset.driver.pco2w_abc.imodem
@file mi-dataset/mi/dataset/driver/pco2w_abc/imodem/pco2w_abc_imodem_recovered_driver.py
@author Mark Worden
@brief Driver for the pco2w_abc_imodem instrument
Release notes:
Initial Release
"""
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.dataset_driver import SimpleDatasetDriver
from mi.dataset.parser.pco2w_abc_imodem import Pco2wAbcImodemParser
from mi.dataset.parser.pco2w_abc_particles import \
Pco2wAbcParticleClassKey, \
Pco2wAbcImodemInstrumentBlankRecoveredDataParticle, \
Pco2wAbcImodemInstrumentRecoveredDataParticle, \
Pco2wAbcImodemPowerRecoveredDataParticle, \
Pco2wAbcImodemControlRecoveredDataParticle, \
Pco2wAbcImodemMetadataRecoveredDataParticle
from mi.core.versioning import version
@version("15.6.0")
def parse(basePythonCodePath, sourceFilePath, particleDataHdlrObj):
"""
This is the method called by Uframe
:param basePythonCodePath This is the file system location of mi-dataset
:param sourceFilePath This is the full path and filename of the file to be parsed
:param particleDataHdlrObj Java Object to consume the output of the parser
:return particleDataHdlrObj
"""
with open(sourceFilePath, 'rU') as stream_handle:
driver = Pco2wAbcImodemRecoveredDriver(basePythonCodePath, stream_handle, particleDataHdlrObj)
driver.processFileStream()
return particleDataHdlrObj
class Pco2wAbcImodemRecoveredDriver(SimpleDatasetDriver):
"""
All this needs to do is create a concrete _build_parser method
"""
def _build_parser(self, stream_handle):
parser_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.pco2w_abc_particles',
DataSetDriverConfigKeys.PARTICLE_CLASS: None,
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
Pco2wAbcParticleClassKey.METADATA_PARTICLE_CLASS:
Pco2wAbcImodemMetadataRecoveredDataParticle,
Pco2wAbcParticleClassKey.POWER_PARTICLE_CLASS:
Pco2wAbcImodemPowerRecoveredDataParticle,
Pco2wAbcParticleClassKey.INSTRUMENT_PARTICLE_CLASS:
Pco2wAbcImodemInstrumentRecoveredDataParticle,
Pco2wAbcParticleClassKey.INSTRUMENT_BLANK_PARTICLE_CLASS:
Pco2wAbcImodemInstrumentBlankRecoveredDataParticle,
Pco2wAbcParticleClassKey.CONTROL_PARTICLE_CLASS:
Pco2wAbcImodemControlRecoveredDataParticle,
}
}
parser = Pco2wAbcImodemParser(parser_config,
stream_handle,
self._exception_callback)
return parser
| {
"content_hash": "b0932ffc4ce60922559fc523b77b2906",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 102,
"avg_line_length": 38.732394366197184,
"alnum_prop": 0.7232727272727273,
"repo_name": "JeffRoy/mi-dataset",
"id": "5f1479526d8c661c2495a177096ae9970017575f",
"size": "2773",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mi/dataset/driver/pco2w_abc/imodem/pco2w_abc_imodem_recovered_driver.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "3610231"
}
],
"symlink_target": ""
} |
from nose.tools import *
from game.bin.map import *
def test_room():
gold = Room("GoldRoom",
"""This room has gold in it you can grab. There's a
door to the north.""")
assert_equal(gold.name, "GoldRoom")
assert_equal(gold.paths, {})
def test_room_paths():
center = Room("Center", "Test room in the center.")
north = Room("North", "Test room in the north.")
south = Room("South", "Test room in the south.")
center.add_paths({'north': north, 'south': south})
assert_equal(center.go('north'), north)
assert_equal(center.go('south'), south)
def test_map():
start = Room("Start", "You can go west and down a hole.")
west = Room("Trees", "There are trees here, you can go east.")
down = Room("Dungeon", "It's dark down here, you can go up.")
start.add_paths({'west': west, 'down': down})
west.add_paths({'east': start})
down.add_paths({'up': start})
assert_equal(start.go('west'), west)
assert_equal(start.go('west').go('east'), start)
assert_equal(start.go('down').go('up'), start)
def test_gothon_game_map():
assert_equal(START.go('shoot!'), generic_death)
assert_equal(START.go('dodge!'), generic_death)
room = START.go('tell a joke')
assert_equal(room, laser_weapon_armory)
| {
"content_hash": "9e43afaf0672ba1038ef2b4f3ad3bbe1",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 67,
"avg_line_length": 34.21052631578947,
"alnum_prop": 0.6161538461538462,
"repo_name": "pwittchen/learn-python-the-hard-way",
"id": "6f69602289dda57334178dce9d8c58b3f4d4eb05",
"size": "1300",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "exercises/exercise52/game/tests/map_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "996"
},
{
"name": "Python",
"bytes": "62483"
},
{
"name": "Shell",
"bytes": "144"
}
],
"symlink_target": ""
} |
"""
A sub-package for efficiently dealing with polynomials.
Within the documentation for this sub-package, a "finite power series,"
i.e., a polynomial (also referred to simply as a "series") is represented
by a 1-D numpy array of the polynomial's coefficients, ordered from lowest
order term to highest. For example, array([1,2,3]) represents
``P_0 + 2*P_1 + 3*P_2``, where P_n is the n-th order basis polynomial
applicable to the specific module in question, e.g., `polynomial` (which
"wraps" the "standard" basis) or `chebyshev`. For optimal performance,
all operations on polynomials, including evaluation at an argument, are
implemented as operations on the coefficients. Additional (module-specific)
information can be found in the docstring for the module of interest.
"""
from __future__ import division, absolute_import, print_function
from .polynomial import Polynomial
from .chebyshev import Chebyshev
from .legendre import Legendre
from .hermite import Hermite
from .hermite_e import HermiteE
from .laguerre import Laguerre
from numpy.testing import _numpy_tester
test = _numpy_tester().test
bench = _numpy_tester().bench
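# Example of the coefficient convention described in the module docstring
# (illustrative only):
#
#   >>> from numpy.polynomial import Polynomial
#   >>> p = Polynomial([1, 2, 3])   # represents 1 + 2*x + 3*x**2
#   >>> p(2)
#   17.0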
| {
"content_hash": "1f11cf024934b300412120b3c85d834d",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 76,
"avg_line_length": 42.22222222222222,
"alnum_prop": 0.7736842105263158,
"repo_name": "ryfeus/lambda-packs",
"id": "ae5b1f078dd71da24bd974ca97c11f9ad362903a",
"size": "1140",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "Spacy/source2.7/numpy/polynomial/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
} |
ID = "chatevent"
permission = 3
privmsgEnabled = False
def timer(self, channels):
self.sendChatMessage(self.send, channels[0], "pong")
def chatEvent(self, channels, userdata, message, currChannel):
#print channels
#print currChannel
if channels != False:
if currChannel in channels:
self.sendChatMessage(self.send, currChannel, str(len(message)))
if "start" in message:
self.sendChatMessage(self.send, currChannel, "Starting time event")
self.events["time"].addEvent("TimerTest", 10, timer, [currChannel], from_event = True)
if "end" in message:
self.sendChatMessage(self.send, currChannel, "Ending time event")
self.events["time"].removeEvent("TimerTest", from_event = True)
def execute(self, name, params, channel, userdata, rank):
#print "running"
if len(params) == 1 and params[0] == "on":
if not self.events["chat"].doesExist("TestFunc"):
self.sendChatMessage(self.send, channel, "Turning chatevent on.")
self.timerChannel = channel
self.events["chat"].addEvent("TestFunc", chatEvent)
else:
self.sendChatMessage(self.send, channel, "chatevent is already running.")
elif len(params) == 1 and params[0] == "off":
if self.events["chat"].doesExist("TestFunc"):
self.sendChatMessage(self.send, channel, "Turning chatevent off.")
self.events["chat"].removeEvent("TestFunc")
else:
self.sendChatMessage(self.send, channel, "chatevent isn't running!")
elif len(params) == 2 and params[0] == "add":
channel = self.retrieveTrueCase(params[1])
if channel != False:
self.sendChatMessage(self.send, channel, "added")
self.events["chat"].addChannel("TestFunc", channel)
elif len(params) == 2 and params[0] == "rem":
channel = self.retrieveTrueCase(params[1])
if channel != False:
self.sendChatMessage(self.send, channel, "removed")
self.events["chat"].removeChannel("TestFunc", channel) | {
"content_hash": "5583291cf43aa9db92b6e4cfdd3af3db",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 102,
"avg_line_length": 42.03846153846154,
"alnum_prop": 0.6052150045745655,
"repo_name": "NightKev/Renol-IRC",
"id": "a5e6824fd987336aff9007f95e0d05b73a9b4e3f",
"size": "2186",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "commands/example_chatAndTimerEventTest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "201288"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import print_function
from spdx import checksum
from spdx import document
from spdx import utils
import hashlib
class FileType(object):
SOURCE = 1
BINARY = 2
ARCHIVE = 3
OTHER = 4
class File(object):
"""Representation of SPDX file.
Fields:
name - File name, str mandatory one.
comment - File comment str, Optional zero or one.
type - one of FileType.SOURCE, FileType.BINARY, FileType.ARCHIVE
and FileType.OTHER, optional zero or one.
chk_sum - SHA1, Mandatory one.
conc_lics - Mandatory one. document.License or utils.NoAssert or utils.SPDXNone.
licenses_in_file - list of licenses found in file, mandatory one or more.
document.License or utils.SPDXNone or utils.NoAssert.
license_comment - Optional.
copyright - Copyright text, Mandatory one. utils.NoAssert or utils.SPDXNone or str.
notice - optional One, str.
contributers - List of strings.
dependencies - list of file locations.
artifact_of_project_name - list of project names, possibly empty.
artifact_of_project_home - list of project home page, possibly empty.
artifact_of_project_uri - list of project uris, possibly empty.
"""
def __init__(self, name, chk_sum = None):
super(File, self).__init__()
self.name = name
self.comment = None
self.type = None
self.chk_sum = chk_sum
self.conc_lics = None
self.licenses_in_file = []
self.license_comment = None
self.copyright = None
self.notice = None
self.contributers = []
self.dependencies = []
self.artifact_of_project_name = []
self.artifact_of_project_home = []
self.artifact_of_project_uri = []
def add_lics(self, lics):
"""Appends lics to licenses_in_file."""
self.licenses_in_file.append(lics)
def add_contrib(self, contrib):
"""Appends contrib to contributers."""
self.contributers.append(contrib)
def add_depend(self, depend):
"""Appends depend to dependencies."""
self.dependencies.append(depend)
def add_artifact(self, symbol, value):
"""Adds value as artifact_of_project{symbol}."""
expr = 'self.artifact_of_project_{0}.append(value)'.format(symbol)
eval(expr)
def validate(self, messages):
"""Validates the fields and appends user friendly messages
to messages parameter if there are errors.
"""
return (self.validate_lic_conc(messages) and
self.validate_type(messages) and
self.validate_chksum(messages) and
self.validate_licenses_in_file(messages) and
self.validate_copyright(messages) and
self.validate_artifacts(messages))
def validate_copyright(self, messages):
if type(self.copyright) in [str, unicode, utils.NoAssert, utils.SPDXNone]:
return True
else:
messages.append('File copyright must be str or unicode or utils.NoAssert or utils.SPDXNone')
return False
def validate_artifacts(self, messages):
if (len(self.artifact_of_project_home) >=
max(len(self.artifact_of_project_uri), len(self.artifact_of_project_name))):
return True
else:
messages.append('File must have as much artifact of project as uri or homepage')
return False
def validate_licenses_in_file(self, messages):
if len(self.licenses_in_file) == 0:
messages.append('File must have at least one license in file.')
return False
else:
return True
def validate_lic_conc(self, messages):
if type(self.conc_lics) in [utils.NoAssert,
utils.SPDXNone] or isinstance(self.conc_lics, document.License):
return True
else:
messages.append('File concluded license must be one of document.License, utils.NoAssert or utils.SPDXNone')
return False
def validate_type(self, messages):
if self.type in [None, FileType.SOURCE, FileType.OTHER, FileType.BINARY,
FileType.ARCHIVE]:
return True
else:
messages.append('File type must be one of the constants defined in class spdx.file.FileType')
return False
def validate_chksum(self, messages):
if isinstance(self.chk_sum, checksum.Algorithm):
if self.chk_sum.identifier == 'SHA1':
return True
else:
messages.append('File checksum algorithm must be SHA1')
return False
else:
messages.append('File checksum must be instance of spdx.checksum.Algorithm')
return False
def calc_chksum(self):
BUFFER_SIZE = 65536
file_sha1 = hashlib.sha1()
with open(self.name, 'rb') as file_handle:
while True:
data = file_handle.read(BUFFER_SIZE)
if not data:
break
file_sha1.update(data)
return file_sha1.hexdigest()
def has_optional_field(self, field):
expr = 'self.{0} is not None'.format(field)
return eval(expr)
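# A minimal usage sketch (added for illustration; the file name and checksum
# value are hypothetical):
#
#   f = File('src/hello.c', chk_sum=checksum.Algorithm('SHA1', 'c0ffee...'))
#   f.conc_lics = utils.NoAssert()
#   f.add_lics(utils.NoAssert())
#   f.copyright = utils.SPDXNone()
#   messages = []
#   f.validate(messages)  # returns True/False and appends readable errors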
| {
"content_hash": "1f3f685b6021c72269f4732aee2559db",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 119,
"avg_line_length": 35.593333333333334,
"alnum_prop": 0.6210900917774864,
"repo_name": "bmwcarit/spdx-tools-python",
"id": "5613ea6835e49313639063f81e6e24f4ce170d5b",
"size": "5918",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spdx/file.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "331997"
}
],
"symlink_target": ""
} |
import numpy as np
class run:
def __init__(self,
instance):
#begin
print ' '
        print 'subtract_average_of_positive_points_along_y script running'
        #create array to hold the average of the positive-delay points for each x slice
        positive_delay_averages = np.zeros(len(instance.xi))
        #collect averages
        cutoff = 80 #fs
for i in range(len(instance.xi)):
num_points = 0
current_slice_value = 0
for j in range(len(instance.yi)):
if instance.yi[j] > cutoff:
num_points = num_points + 1
current_slice_value = current_slice_value + instance.zi[j][i]
else:
pass
positive_delay_averages[i] = current_slice_value / num_points
#normalize by averages
for i in range(len(instance.xi)):
for j in range(len(instance.yi)):
instance.zi[j][i] = instance.zi[j][i] - positive_delay_averages[i]
#re-scale data zmin, zmax
instance.zmax = instance.zi.max()
instance.zmin = instance.zi.min()
instance.znull = 0
#finish
print ' done'
| {
"content_hash": "b3ba9a141d608e52625b6cb75a502c9e",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 82,
"avg_line_length": 33.4,
"alnum_prop": 0.49326347305389223,
"repo_name": "untzag/datplot",
"id": "68ba5c8e62260287f890987775935656d70a8c32",
"size": "1526",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "extensions/subtract_average_of_positive_points_along_y.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "124949"
},
{
"name": "HTML",
"bytes": "3608947"
},
{
"name": "Makefile",
"bytes": "1513"
},
{
"name": "Python",
"bytes": "260766"
},
{
"name": "Shell",
"bytes": "620"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
setup(
name='wilmu-linux-toolkit',
version='0.0.14',
description='Linux toolkit for Wilmington University',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Utilities'
],
keywords='linux utility wilmu',
url='https://github.com/jpwhite3/wilmu-linux-toolkit',
author='JP White',
author_email='[email protected]',
license='MIT',
packages=find_packages(),
# install_requires=['requests', 'unittest2', 'importlib', 'colorama', 'pyfiglet', 'argparse'],
entry_points = {
'console_scripts': [
'checklab=lab_toolkit.command_line:checklab',
'submitlab=lab_toolkit.command_line:submitlab',
'update_toolkit=lab_toolkit.command_line:update_toolkit'
],
},
include_package_data=True,
zip_safe=False
) | {
"content_hash": "1081f0142bec9085fdf25706b8178a26",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 98,
"avg_line_length": 34.03333333333333,
"alnum_prop": 0.6209598432908913,
"repo_name": "jpwhite3/wilmu-linux-toolkit",
"id": "f0bf74b5cab4f104fe6cb7dcbabb1dde2da291cb",
"size": "1021",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PHP",
"bytes": "599"
},
{
"name": "Python",
"bytes": "1842104"
},
{
"name": "Shell",
"bytes": "4449"
}
],
"symlink_target": ""
} |
from ._models_py3 import ElasticSan
from ._models_py3 import ElasticSanList
from ._models_py3 import ElasticSanOperationDisplay
from ._models_py3 import ElasticSanOperationListResult
from ._models_py3 import ElasticSanRPOperation
from ._models_py3 import ElasticSanUpdate
from ._models_py3 import Error
from ._models_py3 import ErrorAdditionalInfo
from ._models_py3 import ErrorResponse
from ._models_py3 import IscsiTargetInfo
from ._models_py3 import NetworkRuleSet
from ._models_py3 import Resource
from ._models_py3 import SKUCapability
from ._models_py3 import Sku
from ._models_py3 import SkuInformation
from ._models_py3 import SkuInformationList
from ._models_py3 import SkuLocationInfo
from ._models_py3 import SourceCreationData
from ._models_py3 import SystemData
from ._models_py3 import TrackedResource
from ._models_py3 import VirtualNetworkRule
from ._models_py3 import Volume
from ._models_py3 import VolumeGroup
from ._models_py3 import VolumeGroupList
from ._models_py3 import VolumeGroupUpdate
from ._models_py3 import VolumeList
from ._models_py3 import VolumeUpdate
from ._elastic_san_management_enums import CreatedByType
from ._elastic_san_management_enums import EncryptionType
from ._elastic_san_management_enums import OperationalStatus
from ._elastic_san_management_enums import ProvisioningStates
from ._elastic_san_management_enums import SkuName
from ._elastic_san_management_enums import SkuTier
from ._elastic_san_management_enums import State
from ._elastic_san_management_enums import StorageTargetType
from ._patch import __all__ as _patch_all
from ._patch import * # type: ignore # pylint: disable=unused-wildcard-import
from ._patch import patch_sdk as _patch_sdk
__all__ = [
"ElasticSan",
"ElasticSanList",
"ElasticSanOperationDisplay",
"ElasticSanOperationListResult",
"ElasticSanRPOperation",
"ElasticSanUpdate",
"Error",
"ErrorAdditionalInfo",
"ErrorResponse",
"IscsiTargetInfo",
"NetworkRuleSet",
"Resource",
"SKUCapability",
"Sku",
"SkuInformation",
"SkuInformationList",
"SkuLocationInfo",
"SourceCreationData",
"SystemData",
"TrackedResource",
"VirtualNetworkRule",
"Volume",
"VolumeGroup",
"VolumeGroupList",
"VolumeGroupUpdate",
"VolumeList",
"VolumeUpdate",
"CreatedByType",
"EncryptionType",
"OperationalStatus",
"ProvisioningStates",
"SkuName",
"SkuTier",
"State",
"StorageTargetType",
]
__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk()
| {
"content_hash": "2080a08cfe8c0a95c40949af986f164d",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 78,
"avg_line_length": 32.32911392405063,
"alnum_prop": 0.7533281127642913,
"repo_name": "Azure/azure-sdk-for-python",
"id": "760d42042c24a6d77f223fcfa56058c288e567b2",
"size": "3022",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/elasticsan/azure-mgmt-elasticsan/azure/mgmt/elasticsan/models/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import lintreview.github as github
import logging
log = logging.getLogger(__name__)
class GithubRepository(object):
"""Abstracting wrapper for the
various interactions we have with github.
This will make swapping in other hosting systems
a tiny bit easier in the future.
"""
def __init__(self, config, user, repo_name):
self.config = config
self.user = user
self.repo_name = repo_name
def repository(self):
"""Get the underlying repository model
"""
self.repo = github.get_repository(
self.config,
self.user,
self.repo_name)
return self.repo
def pull_request(self, number):
"""Get a pull request by number.
"""
pull = self.repository().pull_request(number)
return GithubPullRequest(pull)
def ensure_label(self, label):
"""Create label if it doesn't exist yet
"""
repo = self.repository()
if not repo.label(label):
repo.create_label(
name=label,
color="bfe5bf", # a nice light green
)
def create_status(self, sha, state, description):
"""Create a commit status
"""
context = self.config.get('APP_NAME', 'lintreview')
repo = self.repository()
repo.create_status(
sha,
state,
None,
description,
context)
class GithubPullRequest(object):
"""Abstract the underlying github models.
This makes other code simpler, and enables
the ability to add other hosting services later.
"""
def __init__(self, pull_request):
self.pull = pull_request
@property
def display_name(self):
data = self.pull.as_dict()
return u'%s#%s' % (data['head']['repo']['full_name'],
data['number'])
@property
def number(self):
return self.pull.number
@property
def is_private(self):
data = self.pull.as_dict()
return data['head']['repo']['private']
@property
def head(self):
data = self.pull.as_dict()
return data['head']['sha']
@property
def clone_url(self):
data = self.pull.as_dict()
return data['head']['repo']['clone_url']
@property
def base_repo_url(self):
data = self.pull.as_dict()
return data['base']['repo']['clone_url']
@property
def target_branch(self):
data = self.pull.as_dict()
return data['base']['ref']
def commits(self):
return self.pull.commits()
def review_comments(self):
return self.pull.review_comments()
def files(self):
return list(self.pull.files())
def remove_label(self, label_name):
issue = self.pull.issue()
labels = issue.labels()
if not any(label_name == label.name for label in labels):
return
log.debug("Removing issue label '%s'", label_name)
issue.remove_label(label_name)
def add_label(self, label_name):
issue = self.pull.issue()
issue.add_labels(label_name)
def create_comment(self, body):
self.pull.create_comment(body)
def create_review_comment(self, body, commit_id, path, position):
self.pull.create_review_comment(body, commit_id, path, position)
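# A minimal usage sketch (added for illustration; user, repository and pull
# request number are hypothetical):
#
#   repo = GithubRepository(config, 'someuser', 'somerepo')
#   pull = repo.pull_request(42)
#   log.info('Reviewing %s at commit %s', pull.display_name, pull.head)
#   pull.add_label('lint-error')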
| {
"content_hash": "cce24cb359fe1d66f9f6558842234217",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 72,
"avg_line_length": 26.64566929133858,
"alnum_prop": 0.5750591016548463,
"repo_name": "adrianmoisey/lint-review",
"id": "3fba7c2aa718316640ab61895515aff4a6a0294d",
"size": "3384",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lintreview/repo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "177403"
},
{
"name": "Ruby",
"bytes": "1057"
}
],
"symlink_target": ""
} |
import logging, time
from logic.smboolmanager import SMBoolManagerPlando as SMBoolManager
from logic.smbool import SMBool, smboolFalse
from logic.helpers import Bosses
from rom.romloader import RomLoader
from rom.rom_patches import RomPatches
from graph.graph import AccessGraphSolver as AccessGraph
from utils.utils import PresetLoader
from solver.conf import Conf
from graph.graph_utils import vanillaTransitions, vanillaBossesTransitions, vanillaEscapeTransitions, GraphUtils, getAccessPoint
from utils.parameters import easy, medium, hard, harder, hardcore, mania, infinity
from utils.doorsmanager import DoorsManager
from utils.objectives import Objectives
from logic.logic import Logic
from graph.location import define_location
class CommonSolver(object):
def loadRom(self, rom, interactive=False, magic=None, startLocation=None):
self.scavengerOrder = []
self.plandoScavengerOrder = []
self.additionalETanks = 0
# startLocation param is only use for seedless
if rom is None:
# TODO::add a --logic parameter for seedless
Logic.factory('vanilla')
self.romFileName = 'seedless'
self.majorsSplit = 'Full'
self.masterMajorsSplit = 'Full'
self.areaRando = True
self.bossRando = True
self.escapeRando = False
self.escapeTimer = "03:00"
self.startLocation = startLocation
RomPatches.setDefaultPatches(startLocation)
self.startArea = getAccessPoint(startLocation).Start['solveArea']
# in seedless load all the vanilla transitions
self.areaTransitions = vanillaTransitions[:]
self.bossTransitions = vanillaBossesTransitions[:]
self.escapeTransition = [vanillaEscapeTransitions[0]]
# in seedless we allow mixing of area and boss transitions
self.hasMixedTransitions = True
self.curGraphTransitions = self.bossTransitions + self.areaTransitions + self.escapeTransition
self.locations = Logic.locations
for loc in self.locations:
loc.itemName = 'Nothing'
# set doors related to default patches
DoorsManager.setDoorsColor()
self.doorsRando = False
self.hasNothing = False
self.objectives.setVanilla()
self.tourian = 'Vanilla'
self.majorUpgrades = []
self.splitLocsByArea = {}
else:
self.romFileName = rom
self.romLoader = RomLoader.factory(rom, magic)
Logic.factory(self.romLoader.readLogic())
self.romLoader.loadSymbols()
self.locations = Logic.locations
(self.majorsSplit, self.masterMajorsSplit) = self.romLoader.assignItems(self.locations)
(self.startLocation, self.startArea, startPatches) = self.romLoader.getStartAP()
if not GraphUtils.isStandardStart(self.startLocation) and self.majorsSplit != 'Full':
# update major/chozo locs in non standard start
self.romLoader.updateSplitLocs(self.majorsSplit, self.locations)
(self.areaRando, self.bossRando, self.escapeRando, hasObjectives, self.tourian) = self.romLoader.loadPatches()
RomPatches.ActivePatches += startPatches
self.escapeTimer = self.romLoader.getEscapeTimer()
self.doorsRando = self.romLoader.loadDoorsColor()
self.hasNothing = self.checkLocsForNothing()
if self.majorsSplit == 'Scavenger':
self.scavengerOrder = self.romLoader.loadScavengerOrder(self.locations)
if hasObjectives:
self.romLoader.loadObjectives(self.objectives)
if interactive:
# load event bit masks for auto tracker
self.eventsBitMasks = self.romLoader.loadEventBitMasks()
else:
if self.majorsSplit == "Scavenger":
# add scav hunt
self.objectives.setScavengerHunt()
self.objectives.tourianRequired = not bool(self.romLoader.readOption('escapeTrigger'))
if self.objectives.tourianRequired:
# add G4 on top of scav hunt
self.objectives.setVanilla()
else:
# only G4
self.objectives.setVanilla()
self.majorUpgrades = self.romLoader.loadMajorUpgrades()
self.splitLocsByArea = self.romLoader.getSplitLocsByArea(self.locations)
self.objectives.setSolverMode(self)
if self.mode == 'plando':
self.additionalETanks = self.romLoader.getAdditionalEtanks()
if interactive == False:
print("ROM {}\nmajors: {} area: {} boss: {} escape: {}\npatches: {}".format(rom, self.majorsSplit, self.areaRando, self.bossRando, self.escapeRando, sorted(self.romLoader.getPatches())))
else:
print("majors: {} area: {} boss: {} escape: {}".format(self.majorsSplit, self.areaRando, self.bossRando, self.escapeRando))
(self.areaTransitions, self.bossTransitions, self.escapeTransition, self.hasMixedTransitions) = self.romLoader.getTransitions(self.tourian)
if interactive == True and self.debug == False:
# in interactive area mode we build the graph as we play along
if self.areaRando == True and self.bossRando == True:
self.curGraphTransitions = []
elif self.areaRando == True:
self.curGraphTransitions = self.bossTransitions[:]
elif self.bossRando == True:
self.curGraphTransitions = self.areaTransitions[:]
else:
self.curGraphTransitions = self.bossTransitions + self.areaTransitions
if self.escapeRando == False:
self.curGraphTransitions += self.escapeTransition
else:
self.curGraphTransitions = self.bossTransitions + self.areaTransitions + self.escapeTransition
self.smbm = SMBoolManager()
self.buildGraph()
# store at each step how many locations are available
self.nbAvailLocs = []
if self.log.getEffectiveLevel() == logging.DEBUG:
self.log.debug("Display items at locations:")
for loc in self.locations:
self.log.debug('{:>50}: {:>16}'.format(loc.Name, loc.itemName))
def buildGraph(self):
self.areaGraph = AccessGraph(Logic.accessPoints, self.curGraphTransitions)
Objectives.setGraph(self.areaGraph, infinity)
def loadPreset(self, presetFileName):
presetLoader = PresetLoader.factory(presetFileName)
presetLoader.load()
self.smbm.createKnowsFunctions()
if self.log.getEffectiveLevel() == logging.DEBUG:
presetLoader.printToScreen()
def getLoc(self, locName):
for loc in self.locations:
if loc.Name == locName:
return loc
def getNextDifficulty(self, difficulty):
nextDiffs = {
0: easy,
easy: medium,
medium: hard,
hard: harder,
harder: hardcore,
hardcore: mania,
mania: infinity
}
return nextDiffs[difficulty]
def checkLocsForNothing(self):
# for the auto tracker, need to know if we have to track nothing items
return any(loc.itemName == "Nothing" for loc in self.locations)
def computeLocationsDifficulty(self, locations, phase="major"):
difficultyTarget = Conf.difficultyTarget
nextLocations = locations
# before looping on all diff targets, get only the available locations with diff target infinity
if difficultyTarget != infinity:
self.areaGraph.getAvailableLocations(nextLocations, self.smbm, infinity, self.lastAP)
nextLocations = [loc for loc in nextLocations if loc.difficulty]
while True:
self.areaGraph.getAvailableLocations(nextLocations, self.smbm, difficultyTarget, self.lastAP)
# check post available functions too
for loc in nextLocations:
loc.evalPostAvailable(self.smbm)
self.areaGraph.useCache(True)
# also check if we can come back to current AP from the location
for loc in nextLocations:
loc.evalComeBack(self.smbm, self.areaGraph, self.lastAP)
self.areaGraph.useCache(False)
nextLocations = [loc for loc in nextLocations if not loc.difficulty]
if not nextLocations:
break
if difficultyTarget == infinity:
# we've tested all the difficulties
break
# start a new loop with next difficulty
difficultyTarget = self.getNextDifficulty(difficultyTarget)
if self.log.getEffectiveLevel() == logging.DEBUG:
self.log.debug("available {} locs:".format(phase))
for loc in locations:
if loc.difficulty.bool == True:
print("{:>48}: {:>8}".format(loc.Name, round(loc.difficulty.difficulty, 2)))
print(" smbool: {}".format(loc.difficulty))
print(" path: {}".format([ap.Name for ap in loc.path]))
def collectMajor(self, loc, itemName=None):
self.majorLocations.remove(loc)
self.visitedLocations.append(loc)
self.collectItem(loc, itemName)
return loc
def collectMinor(self, loc):
self.minorLocations.remove(loc)
self.visitedLocations.append(loc)
self.collectItem(loc)
return loc
def collectItem(self, loc, item=None):
if item == None:
item = loc.itemName
if self.vcr != None:
self.vcr.addLocation(loc.Name, item)
if self.firstLogFile is not None:
if item not in self.collectedItems:
self.firstLogFile.write("{};{};{};{}\n".format(item, loc.Name, loc.Area, loc.GraphArea))
if item not in Conf.itemsForbidden:
self.collectedItems.append(item)
if self.checkDuplicateMajor == True:
if item not in ['Nothing', 'NoEnergy', 'Missile', 'Super', 'PowerBomb', 'ETank', 'Reserve']:
if self.smbm.haveItem(item):
print("WARNING: {} has already been picked up".format(item))
self.smbm.addItem(item)
else:
# update the name of the item
item = "-{}-".format(item)
loc.itemName = item
self.collectedItems.append(item)
# we still need the boss difficulty
if not loc.isBoss():
loc.difficulty = smboolFalse
if self.log.getEffectiveLevel() == logging.DEBUG:
print("---------------------------------------------------------------")
print("collectItem: {:<16} at {:<48} diff {}".format(item, loc.Name, loc.difficulty))
print("---------------------------------------------------------------")
# last loc is used as root node for the graph.
# when loading a plando we can load locations from non connected areas, so they don't have an access point.
if loc.accessPoint is not None:
self.lastAP = loc.accessPoint
self.lastArea = loc.SolveArea
def getLocIndex(self, locName):
for (i, loc) in enumerate(self.visitedLocations):
if loc.Name == locName:
return i
def removeItemAt(self, locNameWeb):
locName = self.locNameWeb2Internal(locNameWeb)
locIndex = self.getLocIndex(locName)
if locIndex is None:
self.errorMsg = "Location '{}' has not been visited".format(locName)
return
loc = self.visitedLocations.pop(locIndex)
# removeItemAt is only used from the tracker, so all the locs are in majorLocations
self.majorLocations.append(loc)
# access point
if len(self.visitedLocations) == 0:
self.lastAP = self.startLocation
self.lastArea = self.startArea
else:
self.lastAP = self.visitedLocations[-1].accessPoint
self.lastArea = self.visitedLocations[-1].SolveArea
# delete location params which are set when the location is available
if loc.difficulty is not None:
loc.difficulty = None
if loc.distance is not None:
loc.distance = None
if loc.accessPoint is not None:
loc.accessPoint = None
if loc.path is not None:
loc.path = None
# item
item = loc.itemName
if self.mode in ['seedless', 'race', 'debug']:
# in seedless remove the first nothing found as collectedItems is not ordered
self.collectedItems.remove(item)
else:
self.collectedItems.pop(locIndex)
# if multiple majors in plando mode, remove it from smbm only when it's the last occurence of it
if self.smbm.isCountItem(item):
self.smbm.removeItem(item)
else:
if item not in self.collectedItems:
self.smbm.removeItem(item)
def cancelLastItems(self, count):
if self.vcr != None:
self.vcr.addRollback(count)
if self.interactive == False:
self.nbAvailLocs = self.nbAvailLocs[:-count]
for _ in range(count):
if len(self.visitedLocations) == 0:
return
loc = self.visitedLocations.pop()
if self.majorsSplit == 'Full' or loc.isClass(self.majorsSplit) or loc.isBoss():
self.majorLocations.append(loc)
else:
self.minorLocations.append(loc)
# access point
if len(self.visitedLocations) == 0:
self.lastAP = self.startLocation
self.lastArea = self.startArea
else:
self.lastAP = self.visitedLocations[-1].accessPoint
if self.lastAP is None:
# default to location first access from access point
self.lastAP = list(self.visitedLocations[-1].AccessFrom.keys())[0]
self.lastArea = self.visitedLocations[-1].SolveArea
# delete location params which are set when the location is available
if loc.difficulty is not None:
loc.difficulty = None
if loc.distance is not None:
loc.distance = None
if loc.accessPoint is not None:
loc.accessPoint = None
if loc.path is not None:
loc.path = None
# item
item = loc.itemName
if item == self.collectedItems[-1]:
self.collectedItems.pop()
else:
raise Exception("Item of last collected loc {}: {} is different from last collected item: {}".format(loc.Name, item, self.collectedItems[-1]))
# in plando we have to remove the last added item,
# else it could be used in computing the postAvailable of a location
if self.mode in ['plando', 'seedless', 'race', 'debug']:
loc.itemName = 'Nothing'
# if multiple majors in plando mode, remove it from smbm only when it's the last occurence of it
if self.smbm.isCountItem(item):
self.smbm.removeItem(item)
else:
if item not in self.collectedItems:
self.smbm.removeItem(item)
def cancelObjectives(self, cur):
while self.completedObjectives and self.completedObjectives[-1][0] > cur:
goalCur, goalName = self.completedObjectives.pop()
self.log.debug("rollback objective {}".format(goalName))
self.objectives.setGoalCompleted(goalName, False)
def printLocs(self, locs, phase):
if len(locs) > 0:
print("{}:".format(phase))
print('{:>48} {:>12} {:>8} {:>8} {:>34} {:>10}'.format("Location Name", "Difficulty", "Distance", "ComeBack", "SolveArea", "AreaWeight"))
for loc in locs:
print('{:>48} {:>12} {:>8} {:>8} {:>34} {:>10}'.
format(loc.Name, round(loc.difficulty[1], 2), round(loc.distance, 2),
loc.comeBack, loc.SolveArea, loc.areaWeight if loc.areaWeight is not None else -1))
def getAvailableItemsList(self, locations, threshold):
# locations without distance are not available
locations = [loc for loc in locations if loc.distance is not None]
if len(locations) == 0:
return []
mandatoryBosses = Objectives.getMandatoryBosses()
        # add nocomeback locations which have been selected by the comeback step (areaWeight == 1)
around = [loc for loc in locations if( (loc.areaWeight is not None and loc.areaWeight == 1)
or ((loc.SolveArea == self.lastArea or loc.distance < 3)
and loc.difficulty.difficulty <= threshold
and (not Bosses.areaBossDead(self.smbm, self.lastArea)
and (self.lastArea not in Bosses.areaBosses
or Bosses.areaBosses[self.lastArea] in mandatoryBosses))
and loc.comeBack is not None and loc.comeBack == True)
or (loc.Name == self.escapeLocName) )]
outside = [loc for loc in locations if not loc in around]
if self.log.getEffectiveLevel() == logging.DEBUG:
self.printLocs(around, "around1")
self.printLocs(outside, "outside1")
around.sort(key=lambda loc: (
# end game loc
0 if loc.Name == self.escapeLocName else 1,
# locs in the same area
0 if loc.SolveArea == self.lastArea else 1,
# nearest locs
loc.distance,
# beating a boss
0 if loc.isBoss() else 1,
# easiest first
loc.difficulty.difficulty
)
)
if self.log.getEffectiveLevel() == logging.DEBUG:
self.printLocs(around, "around2")
# we want to sort the outside locations by putting the ones in the same area first,
# then we sort the remaining areas starting whith boss dead status.
# we also want to sort by range of difficulty and not only with the difficulty threshold.
ranged = {
"areaWeight": [],
"easy": [],
"medium": [],
"hard": [],
"harder": [],
"hardcore": [],
"mania": [],
"noComeBack": []
}
for loc in outside:
if loc.areaWeight is not None:
ranged["areaWeight"].append(loc)
elif loc.comeBack is None or loc.comeBack == False:
ranged["noComeBack"].append(loc)
else:
difficulty = loc.difficulty.difficulty
if difficulty < medium:
ranged["easy"].append(loc)
elif difficulty < hard:
ranged["medium"].append(loc)
elif difficulty < harder:
ranged["hard"].append(loc)
elif difficulty < hardcore:
ranged["harder"].append(loc)
elif difficulty < mania:
ranged["hardcore"].append(loc)
else:
ranged["mania"].append(loc)
for key in ranged:
ranged[key].sort(key=lambda loc: (
# first locs in the same area
0 if loc.SolveArea == self.lastArea else 1,
# first nearest locs
loc.distance,
# beating a boss
loc.difficulty.difficulty if (not Bosses.areaBossDead(self.smbm, loc.Area)
and loc.isBoss())
else 100000,
# areas with boss still alive
loc.difficulty.difficulty if (not Bosses.areaBossDead(self.smbm, loc.Area))
else 100000,
loc.difficulty.difficulty))
if self.log.getEffectiveLevel() == logging.DEBUG:
for key in ["areaWeight", "easy", "medium", "hard", "harder", "hardcore", "mania", "noComeBack"]:
self.printLocs(ranged[key], "outside2:{}".format(key))
outside = []
for key in ["areaWeight", "easy", "medium", "hard", "harder", "hardcore", "mania", "noComeBack"]:
outside += ranged[key]
locs = around + outside
# special case for newbie like presets and VARIA tweaks, when both Phantoon and WS Etank are available,
# if phantoon is visited first then WS Etank is no longer available as newbie can't pass sponge bath.
# do the switch only if phantoon and ws etank have the same comeback, in boss rando we can have
# phantoon comeback and ws etank nocomeback and it would fail to solve in that case.
if locs and locs[0].Name == 'Phantoon':
for i, loc in enumerate(locs):
if loc.Name == 'Energy Tank, Wrecked Ship' and locs[0].comeBack == loc.comeBack:
self.log.debug("switch Phantoon and WS Etank")
locs[i] = locs[0]
locs[0] = loc
break
return locs
def nextDecision(self, majorsAvailable, minorsAvailable, hasEnoughMinors, diffThreshold):
# first take end game location to end the run
if (len(majorsAvailable) > 0
and majorsAvailable[0].Name == self.escapeLocName):
return self.collectMajor(majorsAvailable.pop(0))
# next take major items of acceptable difficulty in the current area
elif (len(majorsAvailable) > 0
and majorsAvailable[0].SolveArea == self.lastArea
and majorsAvailable[0].difficulty.difficulty <= diffThreshold
and majorsAvailable[0].comeBack == True):
return self.collectMajor(majorsAvailable.pop(0))
# next item decision
elif len(minorsAvailable) == 0 and len(majorsAvailable) > 0:
self.log.debug('MAJOR')
return self.collectMajor(majorsAvailable.pop(0))
elif len(majorsAvailable) == 0 and len(minorsAvailable) > 0:
            # we don't check for hasEnoughMinors here, because we would be stuck, so pick up
# what we can and hope it gets better
self.log.debug('MINOR')
return self.collectMinor(minorsAvailable.pop(0))
elif len(majorsAvailable) > 0 and len(minorsAvailable) > 0:
self.log.debug('BOTH|M={}, m={}'.format(majorsAvailable[0].Name, minorsAvailable[0].Name))
# if both are available, decide based on area, difficulty and comeBack
nextMajDifficulty = majorsAvailable[0].difficulty.difficulty
nextMinDifficulty = minorsAvailable[0].difficulty.difficulty
nextMajArea = majorsAvailable[0].SolveArea
nextMinArea = minorsAvailable[0].SolveArea
nextMajComeBack = majorsAvailable[0].comeBack
nextMinComeBack = minorsAvailable[0].comeBack
nextMajDistance = majorsAvailable[0].distance
nextMinDistance = minorsAvailable[0].distance
maxAreaWeigth = 10000
nextMajAreaWeight = majorsAvailable[0].areaWeight if majorsAvailable[0].areaWeight is not None else maxAreaWeigth
            nextMinAreaWeight = minorsAvailable[0].areaWeight if minorsAvailable[0].areaWeight is not None else maxAreaWeigth
if self.log.getEffectiveLevel() == logging.DEBUG:
print(" : {:>4} {:>32} {:>4} {:>4} {:>6}".format("diff", "area", "back", "dist", "weight"))
print("major: {:>4} {:>32} {:>4} {:>4} {:>6}".format(round(nextMajDifficulty, 2), nextMajArea, nextMajComeBack, round(nextMajDistance, 2), nextMajAreaWeight))
print("minor: {:>4} {:>32} {:>4} {:>4} {:>6}".format(round(nextMinDifficulty, 2), nextMinArea, nextMinComeBack, round(nextMinDistance, 2), nextMinAreaWeight))
if hasEnoughMinors == True and self.haveAllMinorTypes() == True and self.smbm.haveItem('Charge') and nextMajAreaWeight != maxAreaWeigth:
# we have charge, no longer need minors
self.log.debug("we have charge, no longer need minors, take major")
return self.collectMajor(majorsAvailable.pop(0))
else:
# respect areaweight first
if nextMajAreaWeight != nextMinAreaWeight:
self.log.debug("maj/min != area weight")
if nextMajAreaWeight < nextMinAreaWeight:
return self.collectMajor(majorsAvailable.pop(0))
else:
return self.collectMinor(minorsAvailable.pop(0))
# then take item from loc where you can come back
elif nextMajComeBack != nextMinComeBack:
self.log.debug("maj/min != combeback")
if nextMajComeBack == True:
return self.collectMajor(majorsAvailable.pop(0))
else:
return self.collectMinor(minorsAvailable.pop(0))
# difficulty over area (this is a difficulty estimator, not a speedrunning simulator)
elif nextMinDifficulty <= diffThreshold and nextMajDifficulty <= diffThreshold:
# take the closer one
if nextMajDistance != nextMinDistance:
self.log.debug("!= distance and <= diffThreshold")
if nextMajDistance < nextMinDistance:
return self.collectMajor(majorsAvailable.pop(0))
else:
return self.collectMinor(minorsAvailable.pop(0))
# take the easier
elif nextMinDifficulty < nextMajDifficulty:
self.log.debug("min easier and not enough minors")
return self.collectMinor(minorsAvailable.pop(0))
elif nextMajDifficulty < nextMinDifficulty:
self.log.debug("maj easier")
return self.collectMajor(majorsAvailable.pop(0))
# same difficulty and distance for minor and major, take major first
else:
return self.collectMajor(majorsAvailable.pop(0))
# if not all the minors type are collected, start with minors
elif nextMinDifficulty <= diffThreshold and not self.haveAllMinorTypes():
self.log.debug("not all minors types")
return self.collectMinor(minorsAvailable.pop(0))
elif nextMinArea == self.lastArea and nextMinDifficulty <= diffThreshold:
self.log.debug("not enough minors")
return self.collectMinor(minorsAvailable.pop(0))
elif nextMinDifficulty > diffThreshold and nextMajDifficulty > diffThreshold:
# take the easier
if nextMinDifficulty < nextMajDifficulty:
self.log.debug("min easier and not enough minors")
return self.collectMinor(minorsAvailable.pop(0))
elif nextMajDifficulty < nextMinDifficulty:
self.log.debug("maj easier")
return self.collectMajor(majorsAvailable.pop(0))
# take the closer one
elif nextMajDistance != nextMinDistance:
self.log.debug("!= distance and > diffThreshold")
if nextMajDistance < nextMinDistance:
return self.collectMajor(majorsAvailable.pop(0))
else:
return self.collectMinor(minorsAvailable.pop(0))
# same difficulty and distance for minor and major, take major first
else:
return self.collectMajor(majorsAvailable.pop(0))
else:
if nextMinDifficulty < nextMajDifficulty:
self.log.debug("min easier and not enough minors")
return self.collectMinor(minorsAvailable.pop(0))
else:
self.log.debug("maj easier")
return self.collectMajor(majorsAvailable.pop(0))
raise Exception("Can't take a decision")
def canRelaxEnd(self):
        # sometimes you can't get all locations because of restricted locs, so allow going to Mother Brain
if self.endGameLoc.Name == 'Mother Brain' and Conf.itemsPickup == 'all':
self.relaxedEndCheck = True
self.computeLocationsDifficulty(self.majorLocations)
self.relaxedEndCheck = False
return self.endGameLoc.difficulty == True
else:
return False
def getGunship(self):
# add gunship location and try to go back to it
solver = self
def GunshipAccess(sm):
nonlocal solver
return SMBool(solver.objectives.allGoalsCompleted())
def GunshipAvailable(_, sm):
nonlocal solver
if solver.relaxedEndCheck:
return SMBool(True)
else:
hasEnoughMinors = solver.pickup.enoughMinors(sm, solver.minorLocations)
hasEnoughMajors = solver.pickup.enoughMajors(sm, solver.majorLocations)
hasEnoughItems = hasEnoughMajors and hasEnoughMinors
return SMBool(hasEnoughItems)
gunship = define_location(
Area="Crateria",
GraphArea="Crateria",
SolveArea="Crateria Landing Site",
Name="Gunship",
# to display bigger gunship image in spoiler log
Class=["Boss"],
CanHidden=False,
Address=-1,
Id=None,
Visibility="Hidden",
Room='Landing Site',
AccessFrom = {
'Landing Site': GunshipAccess
},
Available = GunshipAvailable
)
gunship.itemName = 'Gunship'
return gunship
def computeDifficulty(self):
# loop on the available locations depending on the collected items.
# before getting a new item, loop on all of them and get their difficulty,
# the next collected item is the one with the smallest difficulty,
# if equality between major and minor, take major first.
mbLoc = self.getLoc('Mother Brain')
if self.objectives.tourianRequired:
# update mother brain to handle all end game conditions, allow MB loc to access solver data
solver = self
def MotherBrainAccess(sm):
nonlocal solver
return SMBool(solver.objectives.allGoalsCompleted())
def MotherBrainAvailable(sm):
nonlocal solver
tourian = sm.enoughStuffTourian()
# can't check all locations
if solver.relaxedEndCheck:
return tourian
else:
hasEnoughMinors = solver.pickup.enoughMinors(sm, solver.minorLocations)
hasEnoughMajors = solver.pickup.enoughMajors(sm, solver.majorLocations)
hasEnoughItems = hasEnoughMajors and hasEnoughMinors
return sm.wand(tourian, SMBool(hasEnoughItems))
mbLoc.AccessFrom['Golden Four'] = MotherBrainAccess
mbLoc.Available = MotherBrainAvailable
self.endGameLoc = mbLoc
self.escapeLocName = 'Mother Brain'
else:
# remove mother brain location and replace it with gunship loc
self.locations.remove(mbLoc)
gunship = self.getGunship()
self.locations.append(gunship)
self.endGameLoc = gunship
self.escapeLocName = 'Gunship'
if self.majorsSplit == 'Major':
self.majorLocations = [loc for loc in self.locations if loc.isMajor() or loc.isBoss()]
self.minorLocations = [loc for loc in self.locations if loc.isMinor()]
elif self.majorsSplit == 'Chozo':
self.majorLocations = [loc for loc in self.locations if loc.isChozo() or loc.isBoss()]
self.minorLocations = [loc for loc in self.locations if not loc.isChozo() and not loc.isBoss()]
elif self.majorsSplit == 'Scavenger':
self.majorLocations = [loc for loc in self.locations if loc.isScavenger() or loc.isBoss()]
self.minorLocations = [loc for loc in self.locations if not loc.isScavenger() and not loc.isBoss()]
else:
# Full
self.majorLocations = self.locations[:] # copy
self.minorLocations = self.majorLocations
self.visitedLocations = []
self.collectedItems = []
self.log.debug("{}: available major: {}, available minor: {}, visited: {}".format(Conf.itemsPickup, len(self.majorLocations), len(self.minorLocations), len(self.visitedLocations)))
endDifficulty = mania
diffThreshold = self.getDiffThreshold()
self.relaxedEndCheck = False
self.aborted = False
self.completedObjectives = []
while self.endGameLoc not in self.visitedLocations:
# check time limit
if self.runtimeLimit_s > 0:
if time.process_time() - self.startTime > self.runtimeLimit_s:
self.log.debug("time limit exceeded ({})".format(self.runtimeLimit_s))
return (-1, False)
self.log.debug("Current AP/Area: {}/{}".format(self.lastAP, self.lastArea))
# check if a new objective can be completed
goals = self.objectives.checkGoals(self.smbm, self.lastAP)
if any([possible for possible in goals.values()]):
for goalName, possible in goals.items():
if possible:
self.log.debug("complete objective {}".format(goalName))
self.objectives.setGoalCompleted(goalName, True)
self.completedObjectives.append((len(self.collectedItems), goalName))
break
continue
# compute the difficulty of all the locations
self.computeLocationsDifficulty(self.majorLocations)
if self.majorsSplit != 'Full':
self.computeLocationsDifficulty(self.minorLocations, phase="minor")
# keep only the available locations
majorsAvailable = [loc for loc in self.majorLocations if loc.difficulty is not None and loc.difficulty.bool == True]
minorsAvailable = [loc for loc in self.minorLocations if loc.difficulty is not None and loc.difficulty.bool == True]
self.nbAvailLocs.append(len(self.getAllLocs(majorsAvailable, minorsAvailable)))
# remove next scavenger locs before checking if we're stuck
if self.majorsSplit == 'Scavenger':
majorsAvailable = self.filterScavengerLocs(majorsAvailable)
# check if we're stuck
if len(majorsAvailable) == 0 and len(minorsAvailable) == 0:
self.log.debug("STUCK MAJORS and MINORS")
if not self.endGameLoc.difficulty and self.canRelaxEnd():
self.log.debug("Can't collect 100% but Mother Brain is available in relax end")
majorsAvailable.append(self.endGameLoc)
elif self.comeBack.rewind(len(self.collectedItems)) == True:
self.log.debug("Rewind as we're stuck")
continue
else:
                    # we're really stuck
self.log.debug("STUCK CAN'T REWIND")
self.aborted = True
break
# handle no comeback locations
rewindRequired = self.comeBack.handleNoComeBack(majorsAvailable, minorsAvailable,
len(self.collectedItems))
if rewindRequired == True:
if self.comeBack.rewind(len(self.collectedItems)) == True:
continue
else:
                    # we're really stuck
self.log.debug("STUCK CAN'T REWIND")
self.aborted = True
break
# sort them on difficulty and proximity
self.log.debug("getAvailableItemsList majors")
majorsAvailable = self.getAvailableItemsList(majorsAvailable, diffThreshold)
if self.majorsSplit == 'Full':
minorsAvailable = majorsAvailable
else:
self.log.debug("getAvailableItemsList minors")
minorsAvailable = self.getAvailableItemsList(minorsAvailable, diffThreshold)
# choose one to pick up
hasEnoughMinors = self.pickup.enoughMinors(self.smbm, self.minorLocations)
self.nextDecision(majorsAvailable, minorsAvailable, hasEnoughMinors, diffThreshold)
self.comeBack.cleanNoComeBack(self.getAllLocs(self.majorLocations, self.minorLocations))
# compute difficulty value
(difficulty, itemsOk) = self.computeDifficultyValue()
if self.log.getEffectiveLevel() == logging.DEBUG:
self.log.debug("difficulty={}".format(difficulty))
self.log.debug("itemsOk={}".format(itemsOk))
self.log.debug("{}: remaining major: {}, remaining minor: {}, visited: {}".format(Conf.itemsPickup, len(self.majorLocations), len(self.minorLocations), len(self.visitedLocations)))
self.log.debug("remaining majors:")
for loc in self.majorLocations:
self.log.debug("{} ({})".format(loc.Name, loc.itemName))
self.log.debug("bosses: {}".format([(boss, Bosses.bossDead(self.smbm, boss)) for boss in Bosses.Golden4()]))
return (difficulty, itemsOk)
def haveAllMinorTypes(self):
        # the first minor of each type can be seen as a major, so check for them first before going too far into Zebes
hasPB = 'PowerBomb' in self.collectedItems
hasSuper = 'Super' in self.collectedItems
hasMissile = 'Missile' in self.collectedItems
return (hasPB and hasSuper and hasMissile)
def getAllLocs(self, majorsAvailable, minorsAvailable):
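        # in Full split the major and minor lists are the same object, so return only one of them to avoid counting locations twice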
if self.majorsSplit == 'Full':
return majorsAvailable
else:
return majorsAvailable+minorsAvailable
def computeDifficultyValue(self):
if self.aborted:
# we have aborted
return (-1, False)
else:
# return the maximum difficulty
difficultyMax = 0
for loc in self.visitedLocations:
difficultyMax = max(difficultyMax, loc.difficulty.difficulty)
difficulty = difficultyMax
# check if we have taken all the requested items
if (self.pickup.enoughMinors(self.smbm, self.minorLocations)
and self.pickup.enoughMajors(self.smbm, self.majorLocations)):
return (difficulty, True)
else:
# can finish but can't take all the requested items
return (difficulty, False)
def getScavengerHuntState(self):
# check where we are in the scavenger hunt
huntInProgress = False
for index, loc in enumerate(self.scavengerOrder):
if loc not in self.visitedLocations:
huntInProgress = True
break
return (huntInProgress, index)
def filterScavengerLocs(self, majorsAvailable):
huntInProgress, index = self.getScavengerHuntState()
if huntInProgress and index < len(self.scavengerOrder)-1:
self.log.debug("Scavenger hunt in progress, {}/{}".format(index, len(self.scavengerOrder)-1))
# remove all next locs in the hunt
nextHuntLocs = self.scavengerOrder[index+1:]
for loc in nextHuntLocs:
self.log.debug("Scavenger hunt, try to remove loc {}".format(loc.Name))
try:
majorsAvailable.remove(loc)
except:
pass
return majorsAvailable
def scavengerHuntComplete(self, smbm=None, ap=None):
if self.masterMajorsSplit != 'Scavenger':
return SMBool(True)
else:
# check that last loc from the scavenger hunt list has been visited
lastLoc = self.scavengerOrder[-1]
return SMBool(lastLoc in self.visitedLocations)
def getPriorityArea(self):
        # for scavenger mode, return the solve area of the next loc in the hunt
if self.majorsSplit != 'Scavenger':
return None
else:
huntInProgress, index = self.getScavengerHuntState()
if huntInProgress and index < len(self.scavengerOrder)-1:
return self.scavengerOrder[index].SolveArea
else:
return None
| {
"content_hash": "2b37010992bd1d3c356398b4973a25b6",
"timestamp": "",
"source": "github",
"line_count": 882,
"max_line_length": 202,
"avg_line_length": 47.39229024943311,
"alnum_prop": 0.5842822966507177,
"repo_name": "theonlydude/RandomMetroidSolver",
"id": "34f9140ce2bcb2c6f30a69f6107b4607ceb64ff8",
"size": "41800",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "solver/commonSolver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "323310"
},
{
"name": "CSS",
"bytes": "70414"
},
{
"name": "Dockerfile",
"bytes": "3915"
},
{
"name": "HTML",
"bytes": "916237"
},
{
"name": "JavaScript",
"bytes": "975366"
},
{
"name": "Lua",
"bytes": "2247"
},
{
"name": "Makefile",
"bytes": "3217"
},
{
"name": "Perl",
"bytes": "1680"
},
{
"name": "Python",
"bytes": "2240472"
},
{
"name": "Shell",
"bytes": "227851"
}
],
"symlink_target": ""
} |
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
import re
import os
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
def get_requires(filename):
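    # collect the non-comment lines of a requirements file into a list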
requirements = []
with open(filename, 'rt') as req_file:
for line in req_file.read().splitlines():
if not line.strip().startswith("#"):
requirements.append(line)
return requirements
def load_version():
"""Loads a file content"""
filename = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)),
"cpt", "__init__.py"))
with open(filename, "rt") as version_file:
conan_init = version_file.read()
version = re.search("__version__ = '([0-9a-z.-]+)'", conan_init).group(1)
return version
project_requirements = get_requires("cpt/requirements.txt")
setup(
name='conan_package_tools',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=load_version(),
description='Packaging tools for Conan C/C++ package manager',
# The project's main homepage.
url='https://github.com/conan-io/conan-package-tools',
# Author details
author='JFrog LTD. Luis Martinez de Bartolome',
author_email='[email protected]',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
],
# What does your project relate to?
keywords=['conan', 'C/C++', 'package', 'libraries', 'developer', 'manager',
'dependency', 'tool', 'c', 'c++', 'cpp'],
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=project_requirements,
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
'cpt': ['*.txt'],
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'run_create_in_docker=cpt.run_in_docker:run',
],
},
)
| {
"content_hash": "bc51844229cd0e3085397daf6799f321",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 94,
"avg_line_length": 35.36283185840708,
"alnum_prop": 0.6504004004004004,
"repo_name": "cstb/conan-package-tools",
"id": "74ce48c93da3749095d0121774c3b1e15963e3e8",
"size": "3996",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "289"
},
{
"name": "Python",
"bytes": "255403"
},
{
"name": "Shell",
"bytes": "1315"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hermes.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| {
"content_hash": "bcca9b79cf14bc1f879ae88cb8acda1c",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 70,
"avg_line_length": 25.22222222222222,
"alnum_prop": 0.7092511013215859,
"repo_name": "renanalencar/hermes",
"id": "7cf771168dd44a5266e11a98d049e2a60c256d52",
"size": "249",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "45248"
},
{
"name": "HTML",
"bytes": "48827"
},
{
"name": "JavaScript",
"bytes": "87491"
},
{
"name": "Python",
"bytes": "158986"
}
],
"symlink_target": ""
} |
import re
import traceback
class Interpreter:
def __init__(self):
self._inblock = False
self._last_empty = False
self._aggregate = []
self._environment = dict()
self.comments = re.compile(r'#.*$', re.MULTILINE)
self.identifier = re.compile(r'(?:^|\s)((?:[^\d\W]\w*\.)*(?:[^\d\W]\w*))$')
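        # captures the trailing dotted identifier chain (e.g. "os.pa") so it can be completed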
# blockopeners are :({[ and """,'''
# currently, we find them by explicitly looking for compilation errors
# TODO: find better way to do this
self.blockopeners = (
'EOF while scanning triple-quoted string literal',
'unexpected EOF while parsing'
)
def inblock(self):
"""
Returns whether a block is open currently.
:return: True if a block is open
"""
return self._inblock
def eval(self, line):
"""
Evaluates line, looks for blocks and eventually executes statements...
:param line: Line to be fed
:return:
"""
stripped_line = self._strip(line)
if len(stripped_line) == 0:
# leave block if:
# - line completely empty
# - or two empty tab lines
if self._inblock:
only_spaces = len(line.strip()) == 0
# if block is left, execute aggregation
# leave if line empty or last empty and only spaces
if len(line) == 0 or (self._last_empty and only_spaces):
code = '\n'.join(self._aggregate)
compilation = self._try_compile(code)
if compilation is False:
return
self._aggregate = []
self._inblock = False
self._last_empty = False
self._exec(compilation)
elif only_spaces:
# only spaces, so we need two such lines
self._last_empty = True
else:
self._last_empty = False
return # if empty line
if not self._inblock:
compilation = self._try_compile(line)
self._last_empty = False
if self._inblock:
self._aggregate.append(line)
else:
self._exec(compilation)
def _try_compile(self, line):
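        # returns a compiled code object on success; on failure it returns False, setting _inblock when the error indicates an unterminated block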
try:
return compile(line, '<input>', 'single')
except SyntaxError as e:
if e.msg in self.blockopeners:
self._inblock = True
else:
traceback.print_exc()
except Exception as e:
traceback.print_exc()
return False
def _exec(self, compilation):
"""
Executes some code and catches exceptions.
        :param compilation: Compiled code object to execute
:return:
"""
try:
exec(compilation, self._environment)
except Exception as e:
traceback.print_exc()
def _strip(self, line):
"""
Strips away comments and spaces.
:param line: The line to strip
:return: The stripped line
"""
return self.comments.sub('', line).strip()
def complete(self, line):
"""
A preliminary auto complete function.
Only works for things in the global environment, so far!
And only up to a depth of 1.
:param line: the current line
:return: completion in case of success, False otherwise
"""
match = self.identifier.search(line)
if match is not None:
parts = match.group(1).split('.')
environ = self._environment
if len(parts) > 0:
last_part = parts[-1]
# submodules
if len(parts) > 1:
if parts[-2] not in environ:
return False
environ = dir(environ[parts[-2]]) # update environ
matches = [key for key in environ if key.startswith(last_part)]
if len(matches) == 1:
return matches[0][len(last_part):]
return False | {
"content_hash": "3a29324e18d15a1e04b5885b82b5c075",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 83,
"avg_line_length": 32.74603174603175,
"alnum_prop": 0.5053320407174018,
"repo_name": "paberr/ppython",
"id": "2c0c863fde781b1e6c251cc3e564435a95a975a2",
"size": "4126",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ppython/interpreter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19424"
}
],
"symlink_target": ""
} |
import os
import random
import logging
import stat
_logger = logging.getLogger("cwltool")
def abspath(src, basedir):
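    # strip a file:// URI prefix; otherwise resolve relative paths against basedir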
if src.startswith("file://"):
ab = src[7:]
else:
ab = src if os.path.isabs(src) else os.path.join(basedir, src)
return ab
class PathMapper(object):
"""Mapping of files from relative path provided in the file to a tuple of
(absolute local path, absolute container path)"""
def __init__(self, referenced_files, basedir):
self._pathmap = {}
for src in referenced_files:
ab = abspath(src, basedir)
self._pathmap[src] = (ab, ab)
def mapper(self, src):
return self._pathmap[src]
def files(self):
return self._pathmap.keys()
def reversemap(self, target):
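        # look up the (source, local path) pair whose container path equals target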
for k,v in self._pathmap.items():
if v[1] == target:
return (k, v[0])
class DockerPathMapper(PathMapper):
def __init__(self, referenced_files, basedir):
self._pathmap = {}
self.dirs = {}
for src in referenced_files:
ab = abspath(src, basedir)
dir, fn = os.path.split(ab)
subdir = False
for d in self.dirs:
if dir.startswith(d):
subdir = True
break
if not subdir:
for d in list(self.dirs):
if d.startswith(dir):
# 'dir' is a parent of 'd'
del self.dirs[d]
self.dirs[dir] = True
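        # give every kept directory a unique /tmp name inside the container, using a random per-job prefix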
prefix = "job" + str(random.randint(1, 1000000000)) + "_"
names = set()
for d in self.dirs:
name = os.path.join("/tmp", prefix + os.path.basename(d))
i = 1
while name in names:
i += 1
name = os.path.join("/tmp", prefix + os.path.basename(d) + str(i))
names.add(name)
self.dirs[d] = name
for src in referenced_files:
ab = abspath(src, basedir)
deref = ab
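            # follow symlink chains so the real file gets mapped into the container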
st = os.lstat(deref)
while stat.S_ISLNK(st.st_mode):
rl = os.readlink(deref)
deref = rl if os.path.isabs(rl) else os.path.join(os.path.dirname(deref), rl)
st = os.lstat(deref)
for d in self.dirs:
if ab.startswith(d):
self._pathmap[src] = (deref, os.path.join(self.dirs[d], ab[len(d)+1:]))
| {
"content_hash": "ea8368f8b7faff8cd71a579cbdfd880f",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 93,
"avg_line_length": 30.6375,
"alnum_prop": 0.507547939616483,
"repo_name": "brainstorm/common-workflow-language",
"id": "ef739cf85d3a615bad5b3f982e2e35d655698e8b",
"size": "2451",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reference/cwltool/pathmapper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "96185"
},
{
"name": "Python",
"bytes": "97848"
},
{
"name": "Shell",
"bytes": "5941"
}
],
"symlink_target": ""
} |
import random
import numpy
from matplotlib import pyplot ## equivalent to: import matplotlib.pyplot as pyplot
## Hive + Enemy
num_bees = 1000
num_warrior = numpy.random.randint(1, num_bees+1)
num_worker = num_bees - num_warrior
alert=False
print "num warrior", num_warrior
print "num worker", num_worker
print "\n"
class Queen_Bee:
hp=1000
dead = False
nutrition=0
def make_bees(self,worker_bee, warrior_bee):
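        # spend all stored nutrition: produce warrior bees while the hive is on alert, worker bees otherwise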
while self.nutrition>0:
if alert==True:
warrior_bee.append(Warrior_Bee())
self.nutrition=self.nutrition-1
else:
worker_bee.append(Worker_Bee())
self.nutrition=self.nutrition-1
# warrior and worker bees effectively have hp == 1: the enemy removes them outright
class Warrior_Bee:
at=2
dead = False
def attack(self,enemy):
enemy.hp=enemy.hp-self.at
class Worker_Bee:
dead = False
def work(self, queen):
queen.nutrition=queen.nutrition+1
class Enemy:
hp=10000
at=400
dead = False
def attack(self, worker_bee, warrior_bee, queen):
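        # kill up to 'at' bees per attack, warriors first then workers; any leftover damage hits the queen directly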
if len(warrior_bee)>=self.at:
del warrior_bee[0:self.at]
elif self.at<=len(warrior_bee)+len(worker_bee) and len(warrior_bee)<self.at:
del worker_bee[0:self.at-len(warrior_bee)]
del warrior_bee[:]
elif self.at>len(warrior_bee)+len(worker_bee):
queen.hp=queen.hp-self.at+len(warrior_bee)+len(worker_bee)
del warrior_bee[:]
del worker_bee[:]
## Bee, Enemy creation
queen=Queen_Bee()
warrior_bee=[]
worker_bee=[]
for index in range(0, num_warrior):
bee=Warrior_Bee()
warrior_bee.append(bee)
for index in range(0, num_worker):
bee=Worker_Bee()
worker_bee.append(bee)
enemy = Enemy()
## Fight
steps= []
enemy_hps = []
queen_hps=[]
num_worker=[]
num_warrior=[]
step = 0
while enemy.hp>0 and queen.hp>0:
for worker in worker_bee:
worker.work(queen)
print "nutrition before ¾Ë", queen.nutrition
for warrior in warrior_bee:
warrior.attack(enemy)
new_warrior_bee=[]
for warrior in warrior_bee:
if warrior.dead==False:
new_warrior_bee.append(warrior)
warrior_bee=new_warrior_bee
print "enemy hp", enemy.hp
print "num warrior", len(warrior_bee)
print "num worker", len(worker_bee)
enemy.attack(worker_bee, warrior_bee, queen)
print "num warrior", len(warrior_bee)
print "num worker", len(worker_bee)
print "qeen hp", queen.hp
if len(warrior_bee)<=0:
alert=True
else:
alert=False
print "alert", alert
queen.make_bees(worker_bee, warrior_bee)
print "nutrition after ¾Ë", queen.nutrition
print "num warrior", len(warrior_bee)
print "num worker", len(worker_bee), "\n"
step=step+1
steps.append(step)
enemy_hps.append(enemy.hp)
queen_hps.append(queen.hp)
num_worker.append(len(worker_bee))
num_warrior.append(len(warrior_bee))
if enemy.hp<=0:
    print "bees win"
elif queen.hp<=0:
    print "enemy wins"
# print steps, "\n", enemy_hps, "\n" , queen_hps, "\n" , num_worker, "\n", num_warrior
pyplot.plot(steps, enemy_hps, label="enemy_hps")
pyplot.plot(steps, queen_hps, label="queen_hps")
pyplot.plot(steps, num_worker, label="# of worker bee")
pyplot.plot(steps, num_warrior, label="# of warrior bee")
pyplot.legend()
pyplot.show()
| {
"content_hash": "565b62a60ca9d4b182b2512480da3a17",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 89,
"avg_line_length": 20.347058823529412,
"alnum_prop": 0.6082682856316854,
"repo_name": "omsktransmash/ScientificPythonTutorial",
"id": "7566cf85d72e67ba7994350523f5da633b293cc9",
"size": "3459",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Hyeokkoo/0701 (bee HW).py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "84355"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import sys
import lockfile
import psutil
from lockfile import pidlockfile
logger = logging.getLogger(__name__)
class OwnerPrintingPIDLockFile(pidlockfile.PIDLockFile):
def acquire(self, timeout=None):
# If the lock is held, attempt to determine holder and print a message before waiting.
# If owner process cannot be found, go ahead and kill the orphaned lock file before waiting.
if self.is_locked():
try:
pid = self.read_pid()
cmd = self.cmdline_for_pid(pid)
if cmd is not None:
print('Waiting on pants process {0} ({1}) to complete'.format(pid, cmd), file=sys.stderr)
else:
self.break_lock()
except Exception as e:
logger.warn('Error while determining lock owner: {0}'.format(e))
return pidlockfile.PIDLockFile.acquire(self, timeout)
@staticmethod
def cmdline_for_pid(pid):
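    # best-effort lookup of a process command line; returns None if the process no longer exists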
try:
process = psutil.Process(pid)
return ' '.join(process.cmdline)
except psutil.NoSuchProcess:
return None
| {
"content_hash": "526abffa7ac515cfd73343bfd68f7afb",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 99,
"avg_line_length": 31.89189189189189,
"alnum_prop": 0.6754237288135593,
"repo_name": "tejal29/pants",
"id": "2f7780dc51a1834ee64e407b01bc94147a3186a0",
"size": "1327",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/pants/process/pidlock.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "10977"
},
{
"name": "GAP",
"bytes": "4810"
},
{
"name": "HTML",
"bytes": "75563"
},
{
"name": "Java",
"bytes": "47798"
},
{
"name": "JavaScript",
"bytes": "10157"
},
{
"name": "Protocol Buffer",
"bytes": "5348"
},
{
"name": "Python",
"bytes": "2364916"
},
{
"name": "Scala",
"bytes": "5556"
},
{
"name": "Shell",
"bytes": "39930"
},
{
"name": "Thrift",
"bytes": "1841"
},
{
"name": "XML",
"bytes": "8658"
}
],
"symlink_target": ""
} |
"""
Test the parallel module.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2010-2011 Gael Varoquaux
# License: BSD Style, 3 clauses.
import time
try:
import cPickle as pickle
PickleError = TypeError
except:
import pickle
PickleError = pickle.PicklingError
from ..parallel import Parallel, delayed, SafeFunction, WorkerInterrupt, \
multiprocessing, cpu_count
from ..my_exceptions import JoblibException
import nose
################################################################################
def division(x, y):
return x/y
def square(x):
return x**2
def exception_raiser(x):
if x == 7:
raise ValueError
return x
def interrupt_raiser(x):
time.sleep(.05)
raise KeyboardInterrupt
def f(x, y=0, z=0):
""" A module-level function so that it can be spawn with
multiprocessing.
"""
return x**2 + y + z
################################################################################
def test_cpu_count():
assert cpu_count() > 0
################################################################################
# Test parallel
def test_simple_parallel():
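    # the serial list comprehension and Parallel must produce identical results for each n_jobs setting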
X = range(10)
for n_jobs in (1, 2, -1):
yield (nose.tools.assert_equal, [square(x) for x in X],
               Parallel(n_jobs=n_jobs)(delayed(square)(x) for x in X))
def test_parallel_kwargs():
""" Check the keyword argument processing of pmap.
"""
lst = range(10)
for n_jobs in (1, 4):
yield (nose.tools.assert_equal,
[f(x, y=1) for x in lst],
Parallel(n_jobs=n_jobs)(delayed(f)(x, y=1) for x in lst)
)
def test_parallel_pickling():
""" Check that pmap captures the errors when it is passed an object
that cannot be pickled.
"""
def g(x):
return x**2
nose.tools.assert_raises(PickleError,
Parallel(),
(delayed(g)(x) for x in range(10))
)
def test_error_capture():
""" Check that error are captured, and that correct exceptions
are raised.
"""
if multiprocessing is not None:
# A JoblibException will be raised only if there is indeed
# multiprocessing
nose.tools.assert_raises(JoblibException,
Parallel(n_jobs=2),
[delayed(division)(x, y) for x, y in zip((0, 1), (1, 0))],
)
nose.tools.assert_raises(WorkerInterrupt,
Parallel(n_jobs=2),
[delayed(interrupt_raiser)(x) for x in (1, 0)],
)
else:
nose.tools.assert_raises(KeyboardInterrupt,
Parallel(n_jobs=2),
[delayed(interrupt_raiser)(x) for x in (1, 0)],
)
nose.tools.assert_raises(ZeroDivisionError,
Parallel(n_jobs=2),
[delayed(division)(x, y) for x, y in zip((0, 1), (1, 0))],
)
try:
Parallel(n_jobs=1)(
delayed(division)(x, y) for x, y in zip((0, 1), (1, 0)))
except Exception, e:
pass
nose.tools.assert_false(isinstance(e, JoblibException))
class Counter(object):
def __init__(self, list1, list2):
self.list1 = list1
self.list2 = list2
def __call__(self, i):
self.list1.append(i)
nose.tools.assert_equal(len(self.list1), len(self.list2))
def consumer(queue, item):
queue.append('Consumed %s' % item)
def test_dispatch_one_job():
""" Test that with only one job, Parallel does act as a iterator.
"""
queue = list()
def producer():
for i in range(6):
queue.append('Produced %i' % i)
yield i
Parallel(n_jobs=1)(delayed(consumer)(queue, x) for x in producer())
nose.tools.assert_equal(queue,
['Produced 0', 'Consumed 0',
'Produced 1', 'Consumed 1',
'Produced 2', 'Consumed 2',
'Produced 3', 'Consumed 3',
'Produced 4', 'Consumed 4',
'Produced 5', 'Consumed 5']
)
nose.tools.assert_equal(len(queue), 12)
def test_dispatch_multiprocessing():
""" Check that using pre_dispatch Parallel does indeed dispatch items
lazily.
"""
if multiprocessing is None:
return
manager = multiprocessing.Manager()
queue = manager.list()
def producer():
for i in range(6):
queue.append('Produced %i' % i)
yield i
Parallel(n_jobs=2, pre_dispatch=3)(delayed(consumer)(queue, i)
for i in producer())
nose.tools.assert_equal(list(queue)[:4],
['Produced 0', 'Produced 1', 'Produced 2',
'Consumed 0', ])
nose.tools.assert_equal(len(queue), 12)
def test_exception_dispatch():
"Make sure that exception raised during dispatch are indeed captured"
nose.tools.assert_raises(
ValueError,
Parallel(n_jobs=6, pre_dispatch=16, verbose=0),
(delayed(exception_raiser)(i) for i in range(30)),
)
################################################################################
# Test helpers
def test_joblib_exception():
# Smoke-test the custom exception
e = JoblibException('foobar')
# Test the repr
repr(e)
# Test the pickle
pickle.dumps(e)
def test_safe_function():
safe_division = SafeFunction(division)
nose.tools.assert_raises(JoblibException, safe_division, 1, 0)
| {
"content_hash": "3cb2dbeae44b1d828630694525b405ee",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 80,
"avg_line_length": 30.067357512953368,
"alnum_prop": 0.5121488885059452,
"repo_name": "ominux/scikit-learn",
"id": "2599acb393439c3a178716305fcaa0591e02449e",
"size": "5803",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sklearn/externals/joblib/test/test_parallel.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "455969"
},
{
"name": "C++",
"bytes": "240380"
},
{
"name": "Makefile",
"bytes": "1411"
},
{
"name": "Python",
"bytes": "2064853"
},
{
"name": "Shell",
"bytes": "486"
}
],
"symlink_target": ""
} |
import time
from oslo_log import log as logging
from oslo_service import loopingcall
from cinder import coordination
from cinder import exception
from cinder.i18n import _
from cinder.volume.drivers.dell_emc.vmax import utils
LOG = logging.getLogger(__name__)
WRITE_DISABLED = "Write Disabled"
UNLINK_INTERVAL = 15
UNLINK_RETRIES = 30
class VMAXProvision(object):
"""Provisioning Class for Dell EMC VMAX volume drivers.
It supports VMAX arrays.
"""
def __init__(self, rest):
self.utils = utils.VMAXUtils()
self.rest = rest
def create_storage_group(
self, array, storagegroup_name, srp, slo, workload,
extra_specs, do_disable_compression=False):
"""Create a new storage group.
:param array: the array serial number
:param storagegroup_name: the group name (String)
:param srp: the SRP (String)
:param slo: the SLO (String)
:param workload: the workload (String)
:param extra_specs: additional info
:param do_disable_compression: disable compression flag
:returns: storagegroup - storage group object
"""
start_time = time.time()
@coordination.synchronized("emc-sg-{storage_group}")
def do_create_storage_group(storage_group):
# Check if storage group has been recently created
storagegroup = self.rest.get_storage_group(
array, storagegroup_name)
if storagegroup is None:
storagegroup = self.rest.create_storage_group(
array, storage_group, srp, slo, workload, extra_specs,
do_disable_compression)
LOG.debug("Create storage group took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(start_time,
time.time())})
LOG.info("Storage group %(sg)s created successfully.",
{'sg': storagegroup_name})
else:
LOG.info("Storage group %(sg)s already exists.",
{'sg': storagegroup_name})
return storagegroup
return do_create_storage_group(storagegroup_name)
def create_volume_from_sg(self, array, volume_name, storagegroup_name,
volume_size, extra_specs):
"""Create a new volume in the given storage group.
:param array: the array serial number
:param volume_name: the volume name (String)
:param storagegroup_name: the storage group name
:param volume_size: volume size (String)
:param extra_specs: the extra specifications
:returns: dict -- volume_dict - the volume dict
"""
@coordination.synchronized("emc-sg-{storage_group}")
def do_create_volume_from_sg(storage_group):
start_time = time.time()
volume_dict = self.rest.create_volume_from_sg(
array, volume_name, storage_group,
volume_size, extra_specs)
LOG.debug("Create volume from storage group "
"took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(start_time,
time.time())})
return volume_dict
return do_create_volume_from_sg(storagegroup_name)
def delete_volume_from_srp(self, array, device_id, volume_name):
"""Delete a volume from the srp.
:param array: the array serial number
:param device_id: the volume device id
:param volume_name: the volume name
"""
start_time = time.time()
LOG.debug("Delete volume %(volume_name)s from srp.",
{'volume_name': volume_name})
self.rest.delete_volume(array, device_id)
LOG.debug("Delete volume took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(
start_time, time.time())})
def create_volume_snapvx(self, array, source_device_id,
snap_name, extra_specs):
"""Create a snapVx of a volume.
:param array: the array serial number
:param source_device_id: source volume device id
:param snap_name: the snapshot name
:param extra_specs: the extra specifications
"""
start_time = time.time()
LOG.debug("Create Snap Vx snapshot of: %(source)s.",
{'source': source_device_id})
self.rest.create_volume_snap(
array, snap_name, source_device_id, extra_specs)
LOG.debug("Create volume snapVx took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(start_time,
time.time())})
def create_volume_replica(
self, array, source_device_id, target_device_id,
snap_name, extra_specs, create_snap=False):
"""Create a snap vx of a source and copy to a target.
:param array: the array serial number
:param source_device_id: source volume device id
:param target_device_id: target volume device id
:param snap_name: the name for the snap shot
:param extra_specs: extra specifications
:param create_snap: Flag for create snapvx
"""
start_time = time.time()
if create_snap:
self.create_volume_snapvx(array, source_device_id,
snap_name, extra_specs)
# Link source to target
self.rest.modify_volume_snap(
array, source_device_id, target_device_id, snap_name,
extra_specs, link=True)
LOG.debug("Create element replica took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(start_time,
time.time())})
def break_replication_relationship(
self, array, target_device_id, source_device_id, snap_name,
extra_specs):
"""Unlink a snapshot from its target volume.
:param array: the array serial number
:param source_device_id: source volume device id
:param target_device_id: target volume device id
:param snap_name: the name for the snap shot
:param extra_specs: extra specifications
"""
LOG.debug("Break snap vx link relationship between: %(src)s "
"and: %(tgt)s.",
{'src': source_device_id, 'tgt': target_device_id})
self._unlink_volume(array, source_device_id, target_device_id,
snap_name, extra_specs)
def _unlink_volume(
self, array, source_device_id, target_device_id, snap_name,
extra_specs, list_volume_pairs=None):
"""Unlink a target volume from its source volume.
:param array: the array serial number
:param source_device_id: the source device id
:param target_device_id: the target device id
:param snap_name: the snap name
:param extra_specs: extra specifications
:param list_volume_pairs: list of volume pairs, optional
:return: return code
"""
def _unlink_vol():
"""Called at an interval until the synchronization is finished.
:raises: loopingcall.LoopingCallDone
"""
retries = kwargs['retries']
try:
kwargs['retries'] = retries + 1
if not kwargs['modify_vol_success']:
self.rest.modify_volume_snap(
array, source_device_id, target_device_id, snap_name,
extra_specs, unlink=True,
list_volume_pairs=list_volume_pairs)
kwargs['modify_vol_success'] = True
except exception.VolumeBackendAPIException:
pass
if kwargs['retries'] > UNLINK_RETRIES:
LOG.error("_unlink_volume failed after %(retries)d "
"tries.", {'retries': retries})
raise loopingcall.LoopingCallDone(retvalue=30)
if kwargs['modify_vol_success']:
raise loopingcall.LoopingCallDone()
kwargs = {'retries': 0,
'modify_vol_success': False}
timer = loopingcall.FixedIntervalLoopingCall(_unlink_vol)
rc = timer.start(interval=UNLINK_INTERVAL).wait()
return rc
def delete_volume_snap(self, array, snap_name,
source_device_id, restored=False):
"""Delete a snapVx snapshot of a volume.
:param array: the array serial number
:param snap_name: the snapshot name
:param source_device_id: the source device id
:param restored: Flag to indicate if restored session is being deleted
"""
LOG.debug("Delete SnapVx: %(snap_name)s for volume %(vol)s.",
{'vol': source_device_id, 'snap_name': snap_name})
self.rest.delete_volume_snap(
array, snap_name, source_device_id, restored)
def is_restore_complete(self, array, source_device_id,
snap_name, extra_specs):
"""Check and wait for a restore to complete
:param array: the array serial number
:param source_device_id: source device id
:param snap_name: snapshot name
:param extra_specs: extra specification
:returns: bool
"""
def _wait_for_restore():
"""Called at an interval until the restore is finished.
:raises: loopingcall.LoopingCallDone
:raises: VolumeBackendAPIException
"""
retries = kwargs['retries']
try:
kwargs['retries'] = retries + 1
if not kwargs['wait_for_restore_called']:
if self._is_restore_complete(
array, source_device_id, snap_name):
kwargs['wait_for_restore_called'] = True
except Exception:
exception_message = (_("Issue encountered waiting for "
"restore."))
LOG.exception(exception_message)
raise exception.VolumeBackendAPIException(
data=exception_message)
if kwargs['wait_for_restore_called']:
raise loopingcall.LoopingCallDone()
if kwargs['retries'] > int(extra_specs[utils.RETRIES]):
LOG.error("_wait_for_restore failed after %(retries)d "
"tries.", {'retries': retries})
raise loopingcall.LoopingCallDone(
retvalue=int(extra_specs[utils.RETRIES]))
kwargs = {'retries': 0,
'wait_for_restore_called': False}
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_restore)
rc = timer.start(interval=int(extra_specs[utils.INTERVAL])).wait()
return rc
def _is_restore_complete(self, array, source_device_id, snap_name):
"""Helper function to check if restore is complete.
:param array: the array serial number
:param source_device_id: source device id
:param snap_name: the snapshot name
:returns: restored -- bool
"""
restored = False
snap_details = self.rest.get_volume_snap(
array, source_device_id, snap_name)
if snap_details:
linked_devices = snap_details.get("linkedDevices", [])
for linked_device in linked_devices:
if ('targetDevice' in linked_device and
source_device_id == linked_device['targetDevice']):
if ('state' in linked_device and
linked_device['state'] == "Restored"):
restored = True
return restored
def delete_temp_volume_snap(self, array, snap_name, source_device_id):
"""Delete the temporary snapshot created for clone operations.
There can be instances where the source and target both attempt to
delete a temp snapshot simultaneously, so we must lock the snap and
then double check it is on the array.
:param array: the array serial number
:param snap_name: the snapshot name
:param source_device_id: the source device id
"""
@coordination.synchronized("emc-snapvx-{snapvx_name}")
def do_delete_temp_snap(snapvx_name):
# Ensure snap has not been recently deleted
if self.rest.get_volume_snap(
array, source_device_id, snapvx_name):
self.delete_volume_snap(array, snapvx_name, source_device_id)
do_delete_temp_snap(snap_name)
def delete_volume_snap_check_for_links(self, array, snap_name,
source_devices, extra_specs):
"""Check if a snap has any links before deletion.
If a snapshot has any links, break the replication relationship
before deletion.
:param array: the array serial number
:param snap_name: the snapshot name
:param source_devices: the source device ids
:param extra_specs: the extra specifications
"""
list_device_pairs = []
if not isinstance(source_devices, list):
source_devices = [source_devices]
for source_device in source_devices:
LOG.debug("Check for linked devices to SnapVx: %(snap_name)s "
"for volume %(vol)s.",
{'vol': source_device, 'snap_name': snap_name})
linked_list = self.rest.get_snap_linked_device_list(
array, source_device, snap_name)
for link in linked_list:
target_device = link['targetDevice']
list_device_pairs.append((source_device, target_device))
if list_device_pairs:
self._unlink_volume(array, "", "", snap_name, extra_specs,
list_volume_pairs=list_device_pairs)
self.delete_volume_snap(array, snap_name, source_devices)
def extend_volume(self, array, device_id, new_size, extra_specs,
rdf_group=None):
"""Extend a volume.
:param array: the array serial number
:param device_id: the volume device id
:param new_size: the new size (GB)
:param extra_specs: the extra specifications
:param rdf_group: the rdf group number, if required
:returns: status_code
"""
start_time = time.time()
if rdf_group:
@coordination.synchronized('emc-rg-{rdf_group}')
def _extend_replicated_volume(rdf_group):
self.rest.extend_volume(array, device_id,
new_size, extra_specs)
_extend_replicated_volume(rdf_group)
else:
self.rest.extend_volume(array, device_id, new_size, extra_specs)
LOG.debug("Extend VMAX volume took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(start_time,
time.time())})
def get_srp_pool_stats(self, array, array_info):
"""Get the srp capacity stats.
:param array: the array serial number
:param array_info: the array dict
:returns: total_capacity_gb
:returns: remaining_capacity_gb
:returns: subscribed_capacity_gb
:returns: array_reserve_percent
"""
total_capacity_gb = 0
remaining_capacity_gb = 0
subscribed_capacity_gb = 0
array_reserve_percent = 0
srp = array_info['srpName']
LOG.debug(
"Retrieving capacity for srp %(srpName)s on array %(array)s.",
{'srpName': srp, 'array': array})
srp_details = self.rest.get_srp_by_name(array, srp)
if not srp_details:
LOG.error("Unable to retrieve srp instance of %(srpName)s on "
"array %(array)s.",
{'srpName': srp, 'array': array})
            return 0, 0, 0, 0
try:
total_capacity_gb = srp_details['total_usable_cap_gb']
try:
used_capacity_gb = srp_details['total_used_cap_gb']
remaining_capacity_gb = float(
total_capacity_gb - used_capacity_gb)
except KeyError:
remaining_capacity_gb = srp_details['fba_free_capacity']
subscribed_capacity_gb = srp_details['total_subscribed_cap_gb']
array_reserve_percent = srp_details['reserved_cap_percent']
except KeyError:
pass
return (total_capacity_gb, remaining_capacity_gb,
subscribed_capacity_gb, array_reserve_percent)
def verify_slo_workload(self, array, slo, workload, srp):
"""Check if SLO and workload values are valid.
:param array: the array serial number
:param slo: Service Level Object e.g bronze
:param workload: workload e.g DSS
:param srp: the storage resource pool name
:returns: boolean
"""
is_valid_slo, is_valid_workload = False, False
if workload and workload.lower() == 'none':
workload = None
if not workload:
is_valid_workload = True
if slo and slo.lower() == 'none':
slo = None
valid_slos = self.rest.get_slo_list(array)
valid_workloads = self.rest.get_workload_settings(array)
for valid_slo in valid_slos:
if slo == valid_slo:
is_valid_slo = True
break
for valid_workload in valid_workloads:
if workload == valid_workload:
is_valid_workload = True
break
if not slo:
is_valid_slo = True
if workload:
is_valid_workload = False
if not is_valid_slo:
LOG.error(
"SLO: %(slo)s is not valid. Valid values are: "
"%(valid_slos)s.", {'slo': slo, 'valid_slos': valid_slos})
if not is_valid_workload:
LOG.error(
"Workload: %(workload)s is not valid. Valid values are "
"%(valid_workloads)s. Note you cannot "
"set a workload without an SLO.",
{'workload': workload, 'valid_workloads': valid_workloads})
return is_valid_slo, is_valid_workload
def get_slo_workload_settings_from_storage_group(
self, array, sg_name):
"""Get slo and workload settings from a storage group.
:param array: the array serial number
:param sg_name: the storage group name
:returns: storage group slo settings
"""
slo = 'NONE'
workload = 'NONE'
storage_group = self.rest.get_storage_group(array, sg_name)
if storage_group:
try:
slo = storage_group['slo']
workload = storage_group['workload']
except KeyError:
pass
else:
exception_message = (_(
"Could not retrieve storage group %(sg_name)s. ") %
{'sg_name': sg_name})
LOG.error(exception_message)
raise exception.VolumeBackendAPIException(data=exception_message)
return '%(slo)s+%(workload)s' % {'slo': slo, 'workload': workload}
@coordination.synchronized('emc-rg-{rdf_group}')
def break_rdf_relationship(self, array, device_id, target_device,
rdf_group, rep_extra_specs, state):
"""Break the rdf relationship between a pair of devices.
:param array: the array serial number
:param device_id: the source device id
:param target_device: target device id
:param rdf_group: the rdf group number
:param rep_extra_specs: replication extra specs
:param state: the state of the rdf pair
"""
LOG.info("Suspending rdf pair: source device: %(src)s "
"target device: %(tgt)s.",
{'src': device_id, 'tgt': target_device})
if state.lower() == utils.RDF_SYNCINPROG_STATE:
self.rest.wait_for_rdf_consistent_state(
array, device_id, target_device,
rep_extra_specs, state)
if state.lower() == utils.RDF_SUSPENDED_STATE:
LOG.info("RDF pair is already suspended")
else:
self.rest.modify_rdf_device_pair(
array, device_id, rdf_group, rep_extra_specs, suspend=True)
self.delete_rdf_pair(array, device_id, rdf_group,
target_device, rep_extra_specs)
def break_metro_rdf_pair(self, array, device_id, target_device,
rdf_group, rep_extra_specs, metro_grp):
"""Delete replication for a Metro device pair.
Need to suspend the entire group before we can delete a single pair.
:param array: the array serial number
:param device_id: the device id
:param target_device: the target device id
:param rdf_group: the rdf group number
:param rep_extra_specs: the replication extra specifications
:param metro_grp: the metro storage group name
"""
# Suspend I/O on the RDF links...
LOG.info("Suspending I/O for all volumes in the RDF group: %(rdfg)s",
{'rdfg': rdf_group})
self.disable_group_replication(
array, metro_grp, rdf_group, rep_extra_specs)
self.delete_rdf_pair(array, device_id, rdf_group,
target_device, rep_extra_specs)
def delete_rdf_pair(
self, array, device_id, rdf_group, target_device, extra_specs):
"""Delete an rdf pairing.
If the replication mode is synchronous, only one attempt is required
to delete the pair. Otherwise, we need to wait until all the tracks
are cleared before the delete will be successful. As there is
currently no way to track this information, we keep attempting the
operation until it is successful.
:param array: the array serial number
:param device_id: source volume device id
:param rdf_group: the rdf group number
:param target_device: the target device
:param extra_specs: extra specifications
"""
LOG.info("Deleting rdf pair: source device: %(src)s "
"target device: %(tgt)s.",
{'src': device_id, 'tgt': target_device})
if (extra_specs.get(utils.REP_MODE) and
extra_specs.get(utils.REP_MODE) == utils.REP_SYNC):
return self.rest.delete_rdf_pair(array, device_id, rdf_group)
def _delete_pair():
"""Delete a rdf volume pair.
Called at an interval until all the tracks are cleared
and the operation is successful.
:raises: loopingcall.LoopingCallDone
"""
retries = kwargs['retries']
try:
kwargs['retries'] = retries + 1
if not kwargs['delete_pair_success']:
self.rest.delete_rdf_pair(
array, device_id, rdf_group)
kwargs['delete_pair_success'] = True
except exception.VolumeBackendAPIException:
pass
if kwargs['retries'] > UNLINK_RETRIES:
LOG.error("Delete volume pair failed after %(retries)d "
"tries.", {'retries': retries})
raise loopingcall.LoopingCallDone(retvalue=30)
if kwargs['delete_pair_success']:
raise loopingcall.LoopingCallDone()
kwargs = {'retries': 0,
'delete_pair_success': False}
timer = loopingcall.FixedIntervalLoopingCall(_delete_pair)
rc = timer.start(interval=UNLINK_INTERVAL).wait()
return rc
def get_or_create_volume_group(self, array, group, extra_specs):
"""Get or create a volume group.
Sometimes it may be necessary to recreate a volume group on the
backend - for example, when the last member volume has been removed
from the group, but the cinder group object has not been deleted.
:param array: the array serial number
:param group: the group object
:param extra_specs: the extra specifications
:return: group name
"""
vol_grp_name = self.utils.update_volume_group_name(group)
return self.get_or_create_group(array, vol_grp_name, extra_specs)
def get_or_create_group(self, array, group_name, extra_specs):
"""Get or create a generic volume group.
:param array: the array serial number
:param group_name: the group name
:param extra_specs: the extra specifications
:return: group name
"""
storage_group = self.rest.get_storage_group(array, group_name)
if not storage_group:
self.create_volume_group(array, group_name, extra_specs)
return group_name
def create_volume_group(self, array, group_name, extra_specs):
"""Create a generic volume group.
:param array: the array serial number
:param group_name: the name of the group
:param extra_specs: the extra specifications
:returns: volume_group
"""
return self.create_storage_group(array, group_name,
None, None, None, extra_specs)
def create_group_replica(
self, array, source_group, snap_name, extra_specs):
"""Create a replica (snapVx) of a volume group.
:param array: the array serial number
:param source_group: the source group name
:param snap_name: the name for the snap shot
:param extra_specs: extra specifications
"""
LOG.debug("Creating Snap Vx snapshot of storage group: %(srcGroup)s.",
{'srcGroup': source_group})
# Create snapshot
self.rest.create_storagegroup_snap(
array, source_group, snap_name, extra_specs)
def delete_group_replica(self, array, snap_name, source_group_name,
src_dev_ids, extra_specs):
"""Delete the snapshot.
:param array: the array serial number
:param snap_name: the name for the snap shot
:param source_group_name: the source group name
:param src_dev_ids: the list of source device ids
:param extra_specs: extra specifications
"""
# Delete snapvx snapshot
LOG.debug("Deleting Snap Vx snapshot: source group: %(srcGroup)s "
"snapshot: %(snap_name)s.",
{'srcGroup': source_group_name, 'snap_name': snap_name})
self.delete_volume_snap_check_for_links(
array, snap_name, src_dev_ids, extra_specs)
def link_and_break_replica(self, array, source_group_name,
target_group_name, snap_name, extra_specs,
list_volume_pairs, delete_snapshot=False):
"""Links a group snap and breaks the relationship.
:param array: the array serial
:param source_group_name: the source group name
:param target_group_name: the target group name
:param snap_name: the snapshot name
:param extra_specs: extra specifications
:param list_volume_pairs: the list of volume pairs
:param delete_snapshot: delete snapshot flag
"""
LOG.debug("Linking Snap Vx snapshot: source group: %(srcGroup)s "
"targetGroup: %(tgtGroup)s.",
{'srcGroup': source_group_name,
'tgtGroup': target_group_name})
# Link the snapshot
self.rest.modify_volume_snap(
array, None, None, snap_name, extra_specs, link=True,
list_volume_pairs=list_volume_pairs)
# Unlink the snapshot
LOG.debug("Unlinking Snap Vx snapshot: source group: %(srcGroup)s "
"targetGroup: %(tgtGroup)s.",
{'srcGroup': source_group_name,
'tgtGroup': target_group_name})
self._unlink_volume(array, None, None, snap_name, extra_specs,
list_volume_pairs=list_volume_pairs)
# Delete the snapshot if necessary
if delete_snapshot:
LOG.debug("Deleting Snap Vx snapshot: source group: %(srcGroup)s "
"snapshot: %(snap_name)s.",
{'srcGroup': source_group_name,
'snap_name': snap_name})
source_devices = [a for a, b in list_volume_pairs]
self.delete_volume_snap(array, snap_name, source_devices)
def enable_group_replication(self, array, storagegroup_name,
rdf_group_num, extra_specs, establish=False):
"""Resume rdf replication on a storage group.
Replication is enabled by default. This allows resuming
replication on a suspended group.
:param array: the array serial number
:param storagegroup_name: the storagegroup name
:param rdf_group_num: the rdf group number
:param extra_specs: the extra specifications
:param establish: flag to indicate 'establish' instead of 'resume'
"""
action = "Establish" if establish is True else "Resume"
self.rest.modify_storagegroup_rdf(
array, storagegroup_name, rdf_group_num, action, extra_specs)
def disable_group_replication(self, array, storagegroup_name,
rdf_group_num, extra_specs):
"""Suspend rdf replication on a storage group.
        This does not delete the rdf pairs; that can only be done
by deleting the group. This method suspends all i/o activity
on the rdf links.
:param array: the array serial number
:param storagegroup_name: the storagegroup name
:param rdf_group_num: the rdf group number
:param extra_specs: the extra specifications
"""
action = "Suspend"
self.rest.modify_storagegroup_rdf(
array, storagegroup_name, rdf_group_num, action, extra_specs)
def failover_group(self, array, storagegroup_name,
rdf_group_num, extra_specs, failover=True):
"""Failover or failback replication on a storage group.
:param array: the array serial number
:param storagegroup_name: the storagegroup name
:param rdf_group_num: the rdf group number
:param extra_specs: the extra specifications
:param failover: flag to indicate failover/ failback
"""
action = "Failover" if failover else "Failback"
self.rest.modify_storagegroup_rdf(
array, storagegroup_name, rdf_group_num, action, extra_specs)
def delete_group_replication(self, array, storagegroup_name,
rdf_group_num, extra_specs):
"""Split replication for a group and delete the pairs.
:param array: the array serial number
:param storagegroup_name: the storage group name
:param rdf_group_num: the rdf group number
:param extra_specs: the extra specifications
"""
group_details = self.rest.get_storage_group_rep(
array, storagegroup_name)
if (group_details and group_details.get('rdf')
and group_details['rdf'] is True):
action = "Split"
LOG.debug("Splitting remote replication for group %(sg)s",
{'sg': storagegroup_name})
self.rest.modify_storagegroup_rdf(
array, storagegroup_name, rdf_group_num, action, extra_specs)
LOG.debug("Deleting remote replication for group %(sg)s",
{'sg': storagegroup_name})
self.rest.delete_storagegroup_rdf(
array, storagegroup_name, rdf_group_num)
def revert_volume_snapshot(self, array, source_device_id,
snap_name, extra_specs):
"""Revert a volume snapshot
:param array: the array serial number
:param source_device_id: device id of the source
:param snap_name: snapvx snapshot name
:param extra_specs: the extra specifications
"""
start_time = time.time()
self.rest.modify_volume_snap(
array, source_device_id, "", snap_name, extra_specs, restore=True)
LOG.debug("Restore volume snapshot took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(start_time,
time.time())})
| {
"content_hash": "27a6170ffc5a6fb0b07e72158a389cda",
"timestamp": "",
"source": "github",
"line_count": 772,
"max_line_length": 78,
"avg_line_length": 42.6800518134715,
"alnum_prop": 0.5756472123584935,
"repo_name": "phenoxim/cinder",
"id": "b356bbd4ca2719990a8da6ae8986c6d81ea62209",
"size": "33599",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cinder/volume/drivers/dell_emc/vmax/provision.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "621"
},
{
"name": "Python",
"bytes": "20325688"
},
{
"name": "Shell",
"bytes": "16353"
}
],
"symlink_target": ""
} |
import argparse
import re
import sys
import os
import difflib
import const
def main(argv):
# define return code const value
const.CheckPassFail = 1
const.CheckPassOK = 0
const.GeneralError = -1
const.UnknownArguments = -2
const.MissingResult = -3
# Parse the command line
parser = argparse.ArgumentParser()
required = parser.add_argument_group('required arguments')
required.add_argument('-b', '--base-result-path', type=str, required=True,
help='full path to base result')
required.add_argument('-d', '--diff-result-path', type=str, required=True,
help='full path to diff result')
args, unknown = parser.parse_known_args(argv)
if unknown:
print('Unknown argument(s): ', ', '.join(unknown))
return const.UnknownArguments
try:
# Collect a list of summary result file relative path in base result
base_files = []
for root, dirs, files in os.walk(args.base_result_path):
for file in files:
if file.endswith('sum.txt'):
relative_path = os.path.relpath(root, args.base_result_path)
relative_path = os.path.join(relative_path, file)
base_files.append(relative_path)
# Collect a list of summary result file relative path in diff result
diff_files = []
for root, dirs, files in os.walk(args.diff_result_path):
for file in files:
if file.endswith('sum.txt'):
relative_path = os.path.relpath(root, args.diff_result_path)
relative_path = os.path.join(relative_path, file)
diff_files.append(relative_path)
except:
e = sys.exc_info()[0]
print('Error: CheckPass failed due to ', e)
return const.GeneralError
# Check if results are empty
if len(base_files) == 0:
print('Error: base result is empty')
return const.MissingResult
if len(diff_files) == 0:
print('Error: diff result is empty')
return const.MissingResult
# Counting the newly failed or passed test cases
count_new_failed = 0
count_new_passed = 0
print('Checking started.')
for file in base_files:
if file in diff_files:
try:
base_file_path = os.path.join(args.base_result_path, file)
diff_file_path = os.path.join(args.diff_result_path, file)
with open(base_file_path, 'r') as bases, open(diff_file_path, 'r') as diffs:
diff = difflib.ndiff(bases.readlines(),diffs.readlines())
for line in diff:
if re.search('- Successfully read ', line):
count_new_failed = count_new_failed + 1
if re.search('\+ Successfully read ', line):
count_new_passed = count_new_passed + 1
except:
e = sys.exc_info()[0]
print('Error: CheckPass failed due to ', e)
return const.GeneralError
else:
missing_result_file = os.path.join(args.diff_result_path, file)
            print('Error: diff result does not contain result file ', missing_result_file)
return const.MissingResult
print('CheckPass: ', count_new_failed, ' test cases passed in base result but failed in diff result.')
print('CheckPass: ', count_new_passed, ' test cases failed in base result but passed in diff result.')
if count_new_failed == 0:
print('CheckPass Passed.')
return const.CheckPassOK
else:
print('CheckPass Failed.\n')
return const.CheckPassFail
if __name__ == '__main__':
return_code = main(sys.argv[1:])
sys.exit(return_code)
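# Example invocation (hypothetical result directories; the script walks both
# trees, diffs every *sum.txt file and reports newly failed/passed cases):
#
#   python llilc_checkpass.py -b /results/base -d /results/diff
#
# Exit code 0 (CheckPassOK) means no regressions, 1 (CheckPassFail) means at
# least one test case passed in base but failed in diff.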
| {
"content_hash": "1884a5b8b647f01eec8041cdb4954bfa",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 107,
"avg_line_length": 39.214285714285715,
"alnum_prop": 0.581056466302368,
"repo_name": "libengu/llilc",
"id": "7336c6e1eb2aae34f51cb3ee36bcaeac1c6e6cf0",
"size": "4974",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/llilc_checkpass.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "83708"
},
{
"name": "C++",
"bytes": "936901"
},
{
"name": "CMake",
"bytes": "19945"
},
{
"name": "PowerShell",
"bytes": "33960"
},
{
"name": "Python",
"bytes": "27935"
},
{
"name": "Shell",
"bytes": "310"
}
],
"symlink_target": ""
} |
import numpy as np
import tvm
from tvm import relay
from tvm.relay import transform
def run_opt_pass(expr, opt_pass):
assert isinstance(opt_pass, transform.Pass)
mod = relay.Module.from_expr(expr)
mod = opt_pass(mod)
entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
def test_fold_const():
c_data = np.array([1, 2, 3]).astype("float32")
t = relay.TensorType([1, 2, 3], "float32")
def before():
c = relay.const(c_data)
x = relay.var("x", t)
y = relay.add(c, c)
y = relay.multiply(y, relay.const(2, "float32"))
y = relay.add(x, y)
z = relay.add(y, c)
return relay.Function([x], z)
def expected():
x = relay.var("x", t)
c_folded = (c_data + c_data) * 2
y = relay.add(x, relay.const(c_folded))
z = relay.add(y, relay.const(c_data))
return relay.Function([x], z)
def fail(x):
raise RuntimeError()
# the fold constant should work on any context.
with tvm.build_config(add_lower_pass=[(0, fail)]):
with tvm.target.create("cuda"):
zz = run_opt_pass(before(), transform.FoldConstant())
zexpected = run_opt_pass(expected(), transform.InferType())
assert relay.analysis.alpha_equal(zz, zexpected)
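# In essence FoldConstant pre-computes subexpressions that depend only on
# constants, so the test above expects (c + c) * 2 to be replaced by a single
# constant tensor. A minimal stand-alone sketch of the same idea (not one of
# the test cases below):
#
#   c = relay.const(np.ones((1, 2, 3), "float32"))
#   f = relay.Function([], relay.add(c, c))
#   folded = run_opt_pass(f, transform.FoldConstant())
#   # folded.body is now a relay.Constant holding 2.0 in every element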
def test_fold_let():
c_data = np.array(1).astype("float32")
t = relay.TensorType([1], "float32")
def before():
sb = relay.ScopeBuilder()
x = relay.var("x", t)
t1 = sb.let("t1", relay.const(c_data))
t2 = sb.let("t2", relay.add(t1, t1))
t3 = sb.let("t3", relay.add(t2, x))
sb.ret(t3)
return relay.Function([x], sb.get())
def expected():
sb = relay.ScopeBuilder()
x = relay.var("x", t)
c_folded = (c_data + c_data)
t3 = sb.let("t3", relay.add(relay.const(c_folded), x))
sb.ret(t3)
return relay.Function([x], sb.get())
zz = run_opt_pass(before(), transform.FoldConstant())
zexpected = run_opt_pass(expected(), transform.InferType())
assert relay.analysis.graph_equal(zz, zexpected)
def test_fold_tuple():
c_data = np.array(1).astype("float32")
t = relay.TensorType([1], "float32")
def before():
c = relay.const(c_data)
x = relay.var("x", t)
y = relay.Tuple([x, c])
z = relay.add(y[1], c)
z = relay.add(z, y[0])
return relay.Function([x], z)
def expected():
c = relay.const(c_data + c_data)
x = relay.var("x", t)
z = relay.add(c, x)
return relay.Function([x], z)
zz = run_opt_pass(before(), transform.FoldConstant())
zexpected = run_opt_pass(expected(), transform.InferType())
assert relay.analysis.graph_equal(zz, zexpected)
def test_fold_concat():
c_data = np.array([[1, 2, 3]]).astype("float32")
def before():
a = relay.const(c_data)
b = relay.const(c_data)
y = relay.concatenate((a, b), axis=0)
return relay.Function([], y)
def expected():
y_data = np.concatenate((c_data, c_data), axis=0)
y = relay.const(y_data)
return relay.Function([], y)
zz = run_opt_pass(before(), transform.FoldConstant())
zexpected = run_opt_pass(expected(), transform.InferType())
assert relay.analysis.graph_equal(zz, zexpected)
def test_fold_shape_of():
c_shape = (8, 9, 10)
def before(dtype):
x = relay.var("x", shape=c_shape, dtype="float32")
y = relay.var("y", shape=c_shape, dtype="float32")
z = relay.shape_of(x + y, dtype)
return relay.Function([x, y], z)
def expected(dtype):
x = relay.var("x", shape=c_shape, dtype="float32")
y = relay.var("y", shape=c_shape, dtype="float32")
z = relay.const(np.array(c_shape).astype(dtype), dtype=dtype)
func = relay.Function([x, y], z)
return func
for dtype in ["int32", "float32"]:
zz = run_opt_pass(before(dtype), transform.FoldConstant())
zexpected = run_opt_pass(expected(dtype), transform.InferType())
assert relay.analysis.graph_equal(zz, zexpected)
if __name__ == "__main__":
test_fold_const()
test_fold_let()
test_fold_tuple()
test_fold_concat()
test_fold_shape_of()
| {
"content_hash": "43c0b6be18c13005db9a6f0f8fb2107c",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 72,
"avg_line_length": 30.985507246376812,
"alnum_prop": 0.5762394761459307,
"repo_name": "Huyuwei/tvm",
"id": "97b20c6b9219a91ec21f2076a681a84b1f5fd1a5",
"size": "5061",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/python/relay/test_pass_fold_constant.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "6056"
},
{
"name": "C",
"bytes": "95567"
},
{
"name": "C++",
"bytes": "5569606"
},
{
"name": "CMake",
"bytes": "67305"
},
{
"name": "Go",
"bytes": "112376"
},
{
"name": "HTML",
"bytes": "8625"
},
{
"name": "Java",
"bytes": "173219"
},
{
"name": "JavaScript",
"bytes": "49801"
},
{
"name": "Makefile",
"bytes": "50818"
},
{
"name": "Objective-C",
"bytes": "15264"
},
{
"name": "Objective-C++",
"bytes": "46673"
},
{
"name": "Python",
"bytes": "6775044"
},
{
"name": "Rust",
"bytes": "182027"
},
{
"name": "Scala",
"bytes": "184105"
},
{
"name": "Shell",
"bytes": "96633"
},
{
"name": "Tcl",
"bytes": "53645"
},
{
"name": "Verilog",
"bytes": "30605"
}
],
"symlink_target": ""
} |
import os, platform
import dynamixel
import time
import options
import math
import serial
ticks_per_rad = 4096.0/(math.pi*2)
############################################
# _______ __ ______ __
# /_ __(_)_ _ / // / __ \_ _____ ________/ /
# / / / / ' \ / _ / /_/ / |/|/ / _ `/ __/ _ /
# /_/ /_/_/_/_/ /_//_/\____/|__,__/\_,_/_/ \_,_/
############################################
myActuators = list()
def forwardKinematics(theta1, theta2, l1, l2):
return [l1*math.cos(theta1)+l2*(math.cos(theta1+theta2)),
l1*math.sin(theta1)+l2*(math.sin(theta1+theta2))]
#Given: xE,yE, l1, l2
#Return: theta1,theta2
def inverseKinematics(xIn, yIn, l1, l2):
myTheta2 = 2*math.atan2(math.sqrt(((l1+l2)**2-(xIn**2+yIn**2))),math.sqrt((xIn**2+yIn**2.0)-(l1-l2)**2))
myTheta1 = math.atan2(yIn,xIn)-math.atan2(l2*math.sin(myTheta2),l1+l2*math.cos(myTheta2))
return (scaleToCircle(myTheta1), scaleToCircle(myTheta2))
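# Quick sanity check of the kinematics pair (hypothetical link lengths, not
# the values from options.py):
#
#   forwardKinematics(0.0, math.pi/2, 10.0, 5.0)   -> [10.0, 5.0]
#   inverseKinematics(10.0, 5.0, 10.0, 5.0)        -> (~0.0, ~pi/2)
#
# i.e. inverseKinematics recovers joint angles that forwardKinematics maps
# back to the requested end-effector position.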
def computeAltIK(x, y, theta1, theta2):
#theta1 and 2 are IK outputs
t2 = -theta2
angle_to_endpoint = scaleToCircle(math.atan2(y,x))
if angle_to_endpoint > theta1:
t1 = theta1 + 2*(angle_to_endpoint-theta1)
elif angle_to_endpoint < theta1:
t1 = theta1 + 2*(angle_to_endpoint-theta1)
else:
t1 = theta1
return (t1, t2)
def scaleToCircle(radianvalue):
return radianvalue % (2*math.pi)
def boundWithinGoal(value, upper, lower):
if value > upper:
bounded = upper
elif value < lower:
bounded = lower
else:
bounded = value
return bounded
def boundWithinRobotReach(x, y, radius):
if math.sqrt(math.pow(x,2)+math.pow(y,2)) > radius:
angle = math.atan2(y,x)
return (0.98*radius*math.cos(angle), 0.98*radius*math.sin(angle))
else:
return (x,y)
def withinThreshold(difference, thresh):
if abs(difference) <= thresh:
return True
elif abs(abs(difference)-2*math.pi) <= thresh:
return True
else:
return False
def actuatorsMoving(actuators):
for actuator in actuators:
if actuator.cache[dynamixel.defs.REGISTER['Moving']]:
return True
return False
if platform.dist()[0] == 'Ubuntu':
portName = options.ubuntu_port
elif os.name == "posix":
portName = options.unix_port
else:
portName = options.windows_port
serial = dynamixel.serial_stream.SerialStream( port=portName, baudrate=options.baudrate, timeout=1)
net = dynamixel.dynamixel_network.DynamixelNetwork( serial )
net.scan( 1, options.num_servos )
print "Scanning for Dynamixels...",
for dyn in net.get_dynamixels():
print dyn.id,
myActuators.append(net[dyn.id])
print "FOUND:" + str(myActuators)
for actuator in myActuators:
actuator.moving_speed = options.servo_speed
actuator.synchronized = True
actuator.torque_enable = True
actuator.torque_control_enable = False
actuator.torque_limit = 1024
actuator.max_torque = 1024
class Arm(object):
def __init__(self, shoulder, elbow, params):
self.params = params
self.shoulder = shoulder
self.elbow = elbow
self.elbow_angle = 0
self.shoulder_angle = 0
#motors
def update(self):
net.synchronize()
self.shoulder.read_all()
self.elbow.read_all()
def moveToXY(self,x,y):
theta1, theta2 = inverseKinematics(x,y, self.params.l1, self.params.l2)
(shoulderCurr, elbowCurr) = self.returnCurrentPositions()
(shoulderCurrNOMOD, elbowCurrNOMOD) = self.returnCurrentPositionsNOMOD()
alpha = shoulderCurr - theta1
if abs(alpha) > abs(shoulderCurr - (theta1+2*math.pi)):
alpha = shoulderCurr - (theta1+2*math.pi)
if abs(alpha) > abs(shoulderCurr - (theta1-2*math.pi)):
alpha = shoulderCurr - (theta1-2*math.pi)
beta = elbowCurr - theta2
if abs(beta) > abs(elbowCurr - (theta2+2*math.pi)):
beta = elbowCurr - (theta2+2*math.pi)
if abs(beta) > abs(elbowCurr - (theta2-2*math.pi)):
beta = elbowCurr - (theta2-2*math.pi)
self.moveToTheta(shoulderCurrNOMOD-alpha, elbowCurrNOMOD-beta)
def moveToXYGoal(self, x, y):
x, y = Arm.transformGoaltoRobot(self,x,y)
x, y = boundWithinRobotReach(x,y, self.params.l1+self.params.l2)
x = boundWithinGoal(x, self.params.max_x, self.params.min_x)
y = boundWithinGoal(y, self.params.max_y, self.params.min_y)
self.moveToXY(x,y)
def transformGoaltoRobot(self,x,y):
return (x-self.params.horizontal_offset, y-self.params.vertical_offset)
def moveToTheta(self, t1, t2):
#print t1, t2
self.shoulder_angle = t1
self.elbow_angle = t2
self.shoulder.goal_position = int((self.shoulder_angle*ticks_per_rad)+self.params.shoulder_offset)
self.elbow.goal_position = int(((self.elbow_angle*ticks_per_rad) +self.params.elbow_offset)/2)
def isMoving(self):
for actuator in [self.shoulder, self.elbow]:
if actuator.cache[dynamixel.defs.REGISTER['Moving']]:
return True
return False
def returnCurrentPositions(self):
theta1 = (self.shoulder.cache[dynamixel.defs.REGISTER['CurrentPosition']]-self.params.shoulder_offset)/ticks_per_rad
theta2 = (self.elbow.cache[dynamixel.defs.REGISTER['CurrentPosition']]-self.params.elbow_offset)/ticks_per_rad*2
theta1 = scaleToCircle(theta1)
theta2 = scaleToCircle(theta2)
return (theta1, theta2)
def returnCurrentPositionsNOMOD(self):
theta1 = (self.shoulder.cache[dynamixel.defs.REGISTER['CurrentPosition']]-self.params.shoulder_offset)/ticks_per_rad
theta2 = (self.elbow.cache[dynamixel.defs.REGISTER['CurrentPosition']]-self.params.elbow_offset)/ticks_per_rad*2
return (theta1, theta2)
def nearGoalPosition(self):
shoulder, elbow = Arm.returnCurrentPositions(self)
if withinThreshold(scaleToCircle(shoulder-self.shoulder_angle),self.params.angle_threshold) and withinThreshold(scaleToCircle(elbow-self.elbow_angle),self.params.angle_threshold):
return True
else:
return False
a = Arm(myActuators[0], myActuators[1], options.left_arm)
b = Arm(myActuators[2], myActuators[3], options.right_arm)
a.update()
b.update()
points = [[55,0],[50,0],[45,0],[40,0],[35,0],[30,0],[25,0],[20,0],[15,0],[10,0],[5,0]]
goal = [60,0]
t = time.time()
raw_input("Press any key to start")
while True:
try:
(theta1_left, theta2_left) = a.returnCurrentPositions()
(theta1_right, theta2_right) = b.returnCurrentPositions()
currXY_left = forwardKinematics(theta1_left, theta2_left, options.left_arm.l1, options.left_arm.l2) #in robot coords
currXY_left_world = [currXY_left[0]+options.left_arm.horizontal_offset, currXY_left[1]+options.left_arm.vertical_offset]
gamma_left = math.atan2(goal[1]-currXY_left_world[1], goal[0]-currXY_left_world[0])
currXY_right = forwardKinematics(theta1_right, theta2_right, options.right_arm.l1, options.right_arm.l2) #in robot coords
currXY_right_world = [currXY_right[0]+options.right_arm.horizontal_offset, currXY_right[1]+options.right_arm.vertical_offset]
gamma_right = math.atan2(goal[1]-currXY_right_world[1], goal[0]-currXY_right_world[0])
l_left=4
l_right=4
if( ((goal[1]-currXY_left_world[1])**2 + (goal[0]-currXY_left_world[0])**2) < l_left**2):
l_left = math.sqrt((goal[1]-currXY_left_world[1])**2 + (goal[0]-currXY_left_world[0])**2)
if ( ((goal[1]-currXY_right_world[1])**2 + (goal[0]-currXY_right_world[0])**2) < l_right**2):
l_right = math.sqrt((goal[1]-currXY_right_world[1])**2 + (goal[0]-currXY_right_world[0])**2)
a.moveToXYGoal(currXY_left_world[0]+l_left*math.cos(gamma_left), currXY_left_world[1]+l_left*math.sin(gamma_left))
b.moveToXYGoal(currXY_right_world[0]+l_right*math.cos(gamma_right), currXY_right_world[1]+l_right*math.sin(gamma_right))
a.update()
b.update()
'''
if time.time() > 1+t:
goal = points.pop()
t = time.time()
'''
except KeyboardInterrupt:
break
| {
"content_hash": "76f3fae479463a7d15e9a840bb7e7d10",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 181,
"avg_line_length": 33.004329004329,
"alnum_prop": 0.6798268625393494,
"repo_name": "skyleradams/tim-howard",
"id": "65d9f24001b36ba44760cb925e4fd74451e9073b",
"size": "7624",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/dynamixel_control.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "12766"
},
{
"name": "Matlab",
"bytes": "943"
},
{
"name": "Python",
"bytes": "50167"
}
],
"symlink_target": ""
} |
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import pyxb.binding.generate
import pyxb.binding.datatypes as xs
import pyxb.binding.basis
import pyxb.utils.domutils
import os.path
xsd='''<?xml version="1.0" encoding="UTF-8"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:complexType name="tEmpty">
<xs:attribute name="units" type="xs:string" use="required"/>
</xs:complexType>
<xs:element name="Empty" type="tEmpty"/>
<xs:complexType name="tMixed" mixed="true">
<xs:attribute name="units" type="xs:string" use="required"/>
</xs:complexType>
<xs:element name="Mixed" type="tMixed"/>
<xs:complexType name="tSimple">
<xs:simpleContent>
<xs:extension base="xs:double">
<xs:attribute name="units" type="xs:string" use="required"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="Simple" type="tSimple" nillable="true"/>
<xs:element name="Something"/>
</xs:schema>'''
code = pyxb.binding.generate.GeneratePython(schema_text=xsd)
#open('code.py', 'w').write(code)
#print code
rv = compile(code, 'test', 'exec')
eval(rv)
from pyxb.exceptions_ import *
import unittest
class TestTrac_200907231705 (unittest.TestCase):
def testParsing (self):
xml = '<Empty units="m"/>'
instance = CreateFromDocument(xml)
self.assertEqual(pyxb.binding.basis.complexTypeDefinition._CT_EMPTY, instance._ContentTypeTag)
self.assertTrue(instance.validateBinding())
xml = '<Empty units="m">5</Empty>'
self.assertRaises(pyxb.MixedContentError, CreateFromDocument, xml)
xml = '<Mixed units="m"/>'
instance = CreateFromDocument(xml)
self.assertEqual(pyxb.binding.basis.complexTypeDefinition._CT_MIXED, instance._ContentTypeTag)
xml = '<Mixed units="m">5</Mixed>'
instance = CreateFromDocument(xml)
self.assertEqual(pyxb.binding.basis.complexTypeDefinition._CT_MIXED, instance._ContentTypeTag)
self.assertEqual(u'5', instance.orderedContent()[0].value)
xml = '<Mixed units="m">5<Something/>4</Mixed>'
self.assertRaises(pyxb.UnrecognizedContentError, CreateFromDocument, xml)
xml = '<Simple units="m"/>'
self.assertRaises(pyxb.SimpleContentAbsentError, CreateFromDocument, xml)
def testCtorEmpty (self):
instance = Empty()
self.assertRaises(pyxb.AttributeValidationError, instance.validateBinding)
instance = Empty(units='m')
self.assertTrue(instance.validateBinding())
self.assertRaises(pyxb.MixedContentError, Empty, 4, units='m')
def testCtorMixed (self):
instance = Mixed()
self.assertRaises(pyxb.AttributeValidationError, instance.validateBinding)
instance = Mixed(units='m')
self.assertTrue(instance.validateBinding())
instance = Mixed(4, units='m')
self.assertTrue(instance.validateBinding())
self.assertEqual(u'4', instance.orderedContent()[0].value)
instance = Mixed(xs.int(4), units='m')
self.assertTrue(instance.validateBinding())
self.assertEqual(u'4', instance.orderedContent()[0].value)
def testCtorSimple (self):
self.assertRaises(pyxb.SimpleContentAbsentError, Simple)
instance = Simple(4)
self.assertRaises(pyxb.AttributeValidationError, instance.validateBinding)
self.assertRaises(pyxb.SimpleContentAbsentError, Simple, units='m')
instance = Simple(4.5, units='m')
self.assertEqual(4.5, instance.value())
def testParsingNil (self):
xml = '<Simple xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:nil="true" units="m"/>'
instance = CreateFromDocument(xml)
self.assertEqual(pyxb.binding.basis.complexTypeDefinition._CT_SIMPLE, instance._ContentTypeTag)
self.assertTrue(instance.validateBinding())
self.assertTrue(instance.value() is None)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "d085b759fb0a47b4f7d6d3da86a791ec",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 104,
"avg_line_length": 40.80612244897959,
"alnum_prop": 0.6799199799949988,
"repo_name": "jonfoster/pyxb-upstream-mirror",
"id": "0c06be75895c8c0e80ac308190077614d1cf0f16",
"size": "4023",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/bugs/test-200907231705.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "6286"
},
{
"name": "Python",
"bytes": "1854695"
},
{
"name": "Shell",
"bytes": "37524"
}
],
"symlink_target": ""
} |
import os
from django.http import FileResponse
from wsgiref.util import FileWrapper
from settings.static import MEDIA_URL
# from django.core.servers.basehttp import FileWrapper
from django.views.generic import TemplateView
from django.shortcuts import render_to_response, render, redirect, get_object_or_404
from django.core.mail import send_mail
from django.http import HttpResponse
from django.template import RequestContext
from django.http import HttpResponseRedirect
from pangolinfog.forms import *
# from pangolinfog.recaptcha.forms import *
from django.template.loader import get_template
from django.core.mail import EmailMessage
from django.template import Context
from product.models import Category
from product.models import Product, Accessory
from content.models import Slide
from django.core.urlresolvers import reverse_lazy
from django.views.generic import FormView
from nocaptcha_recaptcha.fields import NoReCaptchaField
def contact(request):
form_class = ContactForm
success_url = reverse_lazy('success')
args = {}
background_image = get_object_or_404(Slide, header_about=1)
args['menu'] = "contact"
categories_main_menu = Category.objects.filter(published_in_menu=1).order_by('ordering')
args['categories_main_menu'] = categories_main_menu
args['form'] = form_class
args['background_image'] = background_image
def form_valid(self, form):
return super(form_class, self).form_valid(form)
if request.method == 'POST':
form = ContactForm(request.POST)
if form.is_valid():
contact_name = request.POST.get(
'contact_name'
, '')
contact_email = request.POST.get(
'contact_email'
, '')
contact_phone = request.POST.get(
'contact_phone'
, '')
form_content = request.POST.get('content', '')
# Email the profile with the
# contact information
template = get_template('contact_template.txt')
context = Context({
'contact_name': contact_name,
'contact_email': contact_email,
'contact_phone': contact_phone,
'form_content': form_content,
})
content = template.render(context)
email = EmailMessage(
"Pangolin Fog",
content,
"Pangolin Fog" +'',
['[email protected]'],
headers = {'Reply-To': contact_email }
)
email.send()
return redirect(request.META.get('HTTP_REFERER', '/'))
return render(request, 'contact.html', args)
def jq_subsc(request):
return render(request, 'jq_subsc.html')
def download_file(request):
_file = 'manualtourhazer2.pdf.zip'
filename = os.path.basename(_file)
# python 3
# response = FileResponse(FileWrapper(open(filename, 'rb')), content_type='application/x-zip-compressed')
# python 2
response = FileResponse(FileWrapper(file(filename, 'rb')), content_type='application/x-zip-compressed')
response['Content-Disposition'] = "attachment; filename=%s" % _file
return response
def download_mp3(request):
_file = 'Last_Summer_in_Yalta.mp3.zip'
filename = os.path.basename(_file)
# python 3
# response = FileResponse(FileWrapper(open(filename, 'rb')), content_type='application/x-zip-compressed')
# python 2
response = FileResponse(FileWrapper(file(filename, 'rb')), content_type='application/x-zip-compressed')
response['Content-Disposition'] = "attachment; filename=%s" % _file
return response
def main(request):
args = {}
slides = Slide.objects.filter(published_main=1).order_by('ordering')
categories_main_menu = Category.objects.filter(published_in_menu=1).order_by('ordering')
products_main = Product.objects.filter(published_main=1)
args['products_main'] = products_main
args['categories_main_menu'] = categories_main_menu
args['slides'] = slides
return render_to_response("home.html", args)
def news(request):
args = {}
slides = Slide.objects.filter(published_portfolio=1).order_by('ordering')
news = Slide.objects.filter(published_news=1).order_by('ordering')
background_image = get_object_or_404(Slide, header_about=1)
args['news'] = news
args['menu'] = "news"
args['slides'] = slides
args['background_image'] = background_image
return render_to_response("news.html", args)
def about(request):
args = {}
slides = Slide.objects.filter(published_portfolio=1).order_by('ordering')
news = Slide.objects.filter(published_news=1).order_by('ordering')
background_image = get_object_or_404(Slide, header_about=1)
args['news'] = news
args['menu'] = "about"
args['slides'] = slides
args['background_image'] = background_image
return render_to_response("about.html", args)
| {
"content_hash": "9e82a8819166fd3d58452cc7ce540c64",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 109,
"avg_line_length": 30.664596273291924,
"alnum_prop": 0.6631557626088718,
"repo_name": "skylifewww/pangolin-fog",
"id": "bcd55a954f444fac9d69945cc831a2aa6b45537a",
"size": "4937",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pangolinfog/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "126434"
},
{
"name": "HTML",
"bytes": "154546"
},
{
"name": "JavaScript",
"bytes": "174324"
},
{
"name": "Makefile",
"bytes": "1483"
},
{
"name": "Nginx",
"bytes": "641"
},
{
"name": "Python",
"bytes": "177394"
}
],
"symlink_target": ""
} |
"""Helpers used to render lists.
"""
__authors__ = [
'"Daniel Hans" <[email protected]>',
'"Chen Lunpeng" <[email protected]>',
'"Sverre Rabbelier" <[email protected]>',
'"Pawel Solyga" <[email protected]>',
]
from django.utils import simplejson
from soc.logic import dicts
URL_PATTERN = '<a href="%(url)s"%(target)s%(nofollow)s>%(name)s</a>'
class IsNonEmptyRequest(object):
"""Request to check whether the list is non-empty."""
def __init__(self, idx):
self.idx = idx
self.GET = {}
self.POST = {}
def urlize(url, name=None, target="_blank", nofollow=True):
"""Make an url clickable.
Args:
url: the actual url, such as '/user/list'
name: the display name, such as 'List Users', defaults to url
target: the 'target' attribute of the <a> element
nofollow: whether to add the 'rel="nofollow"' attribute
"""
if not url:
return ''
from django.utils.safestring import mark_safe
from django.utils.html import escape
safe_url = escape(url)
safe_name = escape(name)
link = URL_PATTERN % {
'url': safe_url,
'name': safe_name if name else safe_url,
'target': ' target="%s"' % target if target else '',
'nofollow': ' rel="nofollow"' if nofollow else "",
}
return mark_safe(link)
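# For example (output shape only, not taken from a real request):
#
#   urlize('/user/list', 'List Users')
#   -> '<a href="/user/list" target="_blank" rel="nofollow">List Users</a>'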
def entityToRowDict(entity, key_order, no_filter, extra_cols_func,
button_ops_func, row_ops_func, args):
"""Returns the row dict for the specified entity.
"""
extra_cols = extra_cols_func(entity, *args)
button_ops = button_ops_func(entity, *args)
row_ops = row_ops_func(entity, *args)
fields = set(key_order).difference(set(extra_cols))
columns = entity.toDict(list(fields))
columns.update(extra_cols)
columns['key'] = str(entity.key().id_or_name())
filter_fields = [i for i in columns.keys() if i not in no_filter]
columns = dicts.cleanDict(columns, filter_fields)
operations = {
"row": row_ops,
"buttons": button_ops,
}
result = {
"columns": columns,
"operations": operations,
}
return result
def keyToColumnProperties(key, col_props, hidden):
"""Returns the column properties for the specified key.
"""
props = {
'name': key,
'index': key,
'resizable': True,
}
if key == 'key' or key in hidden:
props['hidden'] = True
if key in hidden:
props['searchoptions'] = {"searchhidden": True}
extra_props = col_props.get(key, {})
props.update(extra_props)
return props
def getKeyOrderAndColNames(params, visibility):
"""Retrieve key order and col names
"""
key_order = ["key"] + params.get('%s_field_keys' % visibility)
col_names = ["Key"] + params.get('%s_field_names' % visibility)
ignore = params.get('%s_field_ignore' % visibility, [])
for field in ignore:
if field not in key_order:
continue
pos = key_order.index(field)
key_order = key_order[:pos] + key_order[pos+1:]
col_names = col_names[:pos] + col_names[pos+1:]
if not (key_order and col_names):
key_order = col_names = ['kind']
return key_order, col_names
def isJsonRequest(request):
"""Returns true iff the request is a JSON request.
"""
if request.GET.get('fmt') == 'json':
return True
return False
def isDataRequest(request):
"""Returns true iff the request is a data request.
"""
if isJsonRequest(request):
return True
return False
def isNonEmptyRequest(request):
"""Returns true iff the request is a non-empty request.
"""
return isinstance(request, IsNonEmptyRequest)
def getListIndex(request):
"""Returns the index of the requested list.
"""
if isNonEmptyRequest(request):
return request.idx
idx = request.GET.get('idx', '')
idx = int(idx) if idx.isdigit() else -1
return idx
def getErrorResponse(request, msg):
"""Returns an error appropriate for the request type.
"""
from soc.views.helper import responses
if isJsonRequest(request):
return responses.jsonErrorResponse(request, msg)
raise Exception(msg)
def getResponse(request, contents):
"""Returns a response appropriate for the request type.
"""
from soc.views.helper import responses
if isJsonRequest(request):
json = simplejson.dumps(contents)
return responses.jsonResponse(request, json)
if isNonEmptyRequest(request):
return contents
# TODO(SRabbelier): this is probably the best way to handle this
return contents
def getListConfiguration(request, params, visibility, order):
"""Returns the list data for the specified params.
Args:
visibility: determines which list will be used
order: the order the data should be sorted in
"""
key_order, col_names = getKeyOrderAndColNames(params, visibility)
conf_extra = params.get('%s_conf_extra' % visibility, {})
conf_min_num = params.get('%s_conf_min_num' % visibility, 0)
button_global = params.get('%s_button_global' % visibility, [])
row_action = params.get('%s_row_action' % visibility, {})
col_props = params.get('%s_field_props' % visibility, {})
hidden = params.get('%s_field_hidden' % visibility, [])
col_model = [keyToColumnProperties(i, col_props, hidden) for i in key_order]
rowList = [5, 10, 20, 50, 100, 500, 1000]
rowList = [i for i in rowList if i >= conf_min_num]
rowNum = min(rowList)
sortorder = "asc"
sortname = order[0] if order else "key"
if sortname and sortname[0] == '-':
sortorder = "desc"
sortname = sortname[1:]
configuration = {
"autowidth": True,
"colModel": col_model,
"colNames": col_names,
"height": "auto",
"rowList": rowList,
"rowNum": max(1, rowNum),
"sortname": sortname,
"sortorder": sortorder,
"toolbar": [True, "top"],
"multiselect": False,
}
configuration.update(conf_extra)
operations = {
"buttons": button_global,
"row": row_action,
}
contents = {
'configuration': configuration,
'operations': operations,
}
return contents
def getListData(request, params, fields, visibility=None, args=[]):
"""Returns the list data for the specified params.
Args:
fields: a filter that should be applied to this list
visibility: determines which list will be used
args: list of arguments to be passed to extract funcs
"""
if not visibility:
visibility = 'public'
if not fields:
fields = {}
logic = params['logic']
if isNonEmptyRequest(request):
query = logic.getQueryForFields(filter=fields)
return query.count(1) > 0
get_args = request.GET
start = get_args.get('start', '')
limit = params.get('%s_conf_limit' % visibility)
if not limit:
limit = get_args.get('limit', 50)
limit = int(limit)
if start:
start_entity = logic.getFromKeyNameOrID(start)
if not start_entity:
return {'data': {start: []}}
fields['__key__ >'] = start_entity.key()
key_order, _ = getKeyOrderAndColNames(params, visibility)
column = params.get('%s_field_extra' % visibility, lambda *args: {})
row = params.get('%s_row_extra' % visibility, lambda *args: {})
button = params.get('%s_button_extra' % visibility, lambda *args: {})
no_filter = params.get('%s_field_no_filter' % visibility, [])
prefetch = params.get('%s_field_prefetch' % visibility, [])
entities = logic.getForFields(filter=fields, limit=limit, prefetch=prefetch)
extract_args = [key_order, no_filter, column, button, row, args]
columns = [entityToRowDict(i, *extract_args) for i in entities]
data = {
start: columns,
}
contents = {'data': data}
return contents
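# Note on paging: getListData uses keyset pagination - the client passes the
# key name of the last entity it received as 'start', the query then filters
# on '__key__ >' that key and fetches at most 'limit' further entities, so no
# offset scanning is needed. A hypothetical request/response pair:
#
#   GET ...?fmt=json&idx=0&start=<last_key>&limit=50
#   -> {'data': {'<last_key>': [<up to 50 row dicts>]}}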
def getListGenerator(request, params, visibility=None, order=[], idx=0):
"""Returns a dict with fields used for rendering lists.
Args:
request: the Django HTTP request object
params: a dict with params for the View this list belongs to
idx: the index of this list
"""
if not visibility:
visibility = 'public'
configuration = getListConfiguration(request, params, visibility, order)
content = {
'idx': idx,
'configuration': simplejson.dumps(configuration),
'description': params['list_description'],
}
return content
| {
"content_hash": "c0b197e214df7ad8ee9f00fa27aa2d42",
"timestamp": "",
"source": "github",
"line_count": 339,
"max_line_length": 78,
"avg_line_length": 24.014749262536874,
"alnum_prop": 0.6552020636285468,
"repo_name": "SRabbelier/Melange",
"id": "7fab20130bb619b796330a2ccb81e75c20b79c31",
"size": "8751",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/soc/views/helper/lists.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "400472"
},
{
"name": "C++",
"bytes": "20"
},
{
"name": "Java",
"bytes": "1496"
},
{
"name": "JavaScript",
"bytes": "1623582"
},
{
"name": "PHP",
"bytes": "1032"
},
{
"name": "Perl",
"bytes": "177565"
},
{
"name": "Python",
"bytes": "15317793"
},
{
"name": "Ruby",
"bytes": "59"
},
{
"name": "Shell",
"bytes": "15303"
}
],
"symlink_target": ""
} |
import logging
import os
import tempfile
from os.path import dirname, exists, join
from shutil import copy
from easyprocess import EasyProcess
from entrypoint2 import entrypoint
from PIL import Image # type: ignore
from eagexp import __version__
from eagexp.cmd import EagleError, command_eagle
from eagexp.util import norm_path, read_text, write_text
log = logging.getLogger(__name__)
log.debug("version=" + __version__)
@entrypoint
def export_image3d(
input, output, size=(800, 600), pcb_rotate=(0, 0, 0), timeout=20, showgui=False
):
"""
Exporting eagle .brd file into 3D image file
using Eagle3D and povray.
    If the export is blocked somehow (e.g. a popup window is displayed) then the operation is canceled with an exception after the timeout.
    The problem can be investigated by setting the 'showgui' flag.
:param input: eagle .brd file name
:param output: image file name (.png)
:param timeout: operation is canceled after this timeout (sec)
:param showgui: eagle GUI is displayed
:param size: tuple(width, size), image size
:rtype: None
"""
input = norm_path(input)
output = norm_path(output)
if not output.endswith(".png"):
raise ValueError("use .png extension!")
ext = os.path.splitext(input)[1]
if ext not in [".brd"]:
raise ValueError('Input extension is not ".brd", brd=' + str(input))
commands = []
eagle3d = join(dirname(__file__), "eagle3d")
ulp = norm_path(join(eagle3d, "3d50.ulp"))
if not exists(ulp):
raise EagleError("missing file:%s" % ulp)
commands += ["RUN " + ulp]
commands += ["QUIT"]
def render(dir, f):
# povray has strange file access policy,
# better to generate under tmp
# cli doc:
# http://library.thinkquest.org/3285/language/cmdln.html
templ = "#local pcb_rotate_%s = %s"
pov = f.replace(".brd", ".pov")
if not exists(pov):
raise EagleError("missing pov file: %s" % pov)
# log.debug("pov file %s content: %s", pov, pov.read_text())
if pcb_rotate != (0, 0, 0):
s = read_text(pov)
s = s.replace(templ % ("x", 0), templ % ("x", pcb_rotate[0]))
s = s.replace(templ % ("y", 0), templ % ("y", pcb_rotate[1]))
s = s.replace(templ % ("z", 0), templ % ("z", pcb_rotate[2]))
write_text(pov, s)
fpng = f.replace(".brd", ".png")
cmd = []
cmd += ["povray"]
cmd += ["-d"] # no display
cmd += ["-a"] # anti-aliasing
cmd += ["+W" + str(size[0])] # width
cmd += ["+H" + str(size[1])] # height
cmd += ["-o" + fpng]
cmd += ["-L" + eagle3d]
cmd += [pov]
p = EasyProcess(cmd).call()
if not exists(fpng):
raise EagleError("povray error, proc=%s" % p)
copy(fpng, output)
command_eagle(
input=input,
timeout=timeout,
commands=commands,
showgui=showgui,
callback=render,
)
def pil_image3d(
input, size=(800, 600), pcb_rotate=(0, 0, 0), timeout=20, showgui=False
):
"""
    same as export_image3d, but instead of writing an output file a PIL Image object is returned
"""
with tempfile.TemporaryDirectory() as temp_dir:
output = join(temp_dir, "out.png")
export_image3d(
input,
output=output,
size=size,
pcb_rotate=pcb_rotate,
timeout=timeout,
showgui=showgui,
)
im = Image.open(output)
return im
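# Example usage (hypothetical board file; assumes Eagle, Eagle3D and povray
# are installed and on PATH):
#
#   export_image3d("board.brd", "board.png", size=(640, 480))
#   im = pil_image3d("board.brd", size=(640, 480))   # returns a PIL Image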
| {
"content_hash": "797c6c631d52a3b8bc22da49ba9eac10",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 122,
"avg_line_length": 30.878260869565217,
"alnum_prop": 0.5775837792171219,
"repo_name": "ponty/eagexp",
"id": "4146100ad9d19cb20a3d502752661017a021ac80",
"size": "3551",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eagexp/image3d.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "POV-Ray SDL",
"bytes": "1002650"
},
{
"name": "Python",
"bytes": "35571"
},
{
"name": "Ruby",
"bytes": "458"
},
{
"name": "Shell",
"bytes": "3259"
}
],
"symlink_target": ""
} |
import sys
import os
from glob import glob
import platform
def running_under_virtualenv():
if hasattr(sys, 'real_prefix'):
return True
elif sys.prefix != getattr(sys, "base_prefix", sys.prefix):
return True
if os.getenv('VIRTUAL_ENV', False):
return True
return False
if os.environ.get('USE_SETUPTOOLS'):
from setuptools import setup
setup_kwargs = dict(zip_safe=0)
else:
from distutils.core import setup
setup_kwargs = dict()
if os.name == 'nt':
pgm_files = os.environ["ProgramFiles"]
base_files = os.path.join(pgm_files, 'diamond')
data_files = [
(base_files, ['LICENSE', 'version.txt']),
(os.path.join(base_files, 'user_scripts'), []),
(os.path.join(base_files, 'conf'), glob('conf/*.conf.*')),
(os.path.join(base_files, 'collectors'), glob('conf/collectors/*')),
(os.path.join(base_files, 'handlers'), glob('conf/handlers/*')),
]
install_requires = ['configobj', 'psutil', ],
else:
data_files = [
('share/diamond', ['LICENSE', 'version.txt']),
('share/diamond/user_scripts', []),
]
distro = platform.dist()[0]
distro_major_version = platform.dist()[1].split('.')[0]
if not distro:
if 'amzn' in platform.uname()[2]:
distro = 'centos'
if running_under_virtualenv():
data_files.append(('etc/diamond',
glob('conf/*.conf.*')))
data_files.append(('etc/diamond/collectors',
glob('conf/collectors/*')))
data_files.append(('etc/diamond/handlers',
glob('conf/handlers/*')))
else:
data_files.append(('/etc/diamond',
glob('conf/*.conf.*')))
data_files.append(('/etc/diamond/collectors',
glob('conf/collectors/*')))
data_files.append(('/etc/diamond/handlers',
glob('conf/handlers/*')))
if distro == 'Ubuntu':
data_files.append(('/etc/init',
['debian/diamond.upstart']))
if distro in ['centos', 'redhat', 'debian', 'fedora', 'oracle']:
data_files.append(('/etc/init.d',
['bin/init.d/diamond']))
data_files.append(('/var/log/diamond',
['.keep']))
        if int(distro_major_version) >= 7 and distro != 'debian':
data_files.append(('/usr/lib/systemd/system',
['rpm/systemd/diamond.service']))
        elif int(distro_major_version) >= 6 and distro != 'debian':
data_files.append(('/etc/init',
['rpm/upstart/diamond.conf']))
# Support packages being called differently on different distros
# Are we in a virtenv?
if running_under_virtualenv():
install_requires = ['configobj', 'psutil', ]
else:
        if distro in ['debian', 'ubuntu']:
install_requires = ['python-configobj', 'python-psutil', ]
# Default back to pip style requires
else:
install_requires = ['configobj', 'psutil', ]
def get_version():
"""
Read the version.txt file to get the new version string
Generate it if version.txt is not available. Generation
is required for pip installs
"""
try:
f = open('version.txt')
except IOError:
os.system("./version.sh > version.txt")
f = open('version.txt')
version = ''.join(f.readlines()).rstrip()
f.close()
return version
def pkgPath(root, path, rpath="/"):
"""
Package up a path recursively
"""
global data_files
if not os.path.exists(path):
return
files = []
for spath in os.listdir(path):
# Ignore test directories
if spath == 'test':
continue
subpath = os.path.join(path, spath)
spath = os.path.join(rpath, spath)
if os.path.isfile(subpath):
files.append(subpath)
if os.path.isdir(subpath):
pkgPath(root, subpath, spath)
data_files.append((root + rpath, files))
if os.name == 'nt':
pkgPath(os.path.join(base_files, 'collectors'), 'src/collectors', '\\')
else:
pkgPath('share/diamond/collectors', 'src/collectors')
version = get_version()
setup(
name='diamond',
version=version,
url='https://github.com/python-diamond/Diamond',
author='The Diamond Team',
author_email='[email protected]',
license='MIT License',
description='Smart data producer for graphite graphing package',
package_dir={'': 'src'},
packages=['diamond', 'diamond.handler', 'diamond.utils'],
scripts=['bin/diamond', 'bin/diamond-setup'],
data_files=data_files,
install_requires=install_requires,
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
],
** setup_kwargs
)
| {
"content_hash": "3cbae3eba688723c8de4e27ea3395d06",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 76,
"avg_line_length": 32.80921052631579,
"alnum_prop": 0.5564467615801083,
"repo_name": "gg7/diamond",
"id": "0c4f2df06630896aaa0064db6c1ec6d3dd07de17",
"size": "5025",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "21681"
},
{
"name": "Makefile",
"bytes": "4465"
},
{
"name": "Python",
"bytes": "1594105"
},
{
"name": "Roff",
"bytes": "23868"
},
{
"name": "Ruby",
"bytes": "230"
},
{
"name": "Shell",
"bytes": "12795"
}
],
"symlink_target": ""
} |
session.journalOptions.setValues(replayGeometry=COORDINATE,recoverGeometry=COORDINATE)
#-----------------------------------------------------
# Create a model.
modelName='GuoxiSuspensionBridge'
myModel = mdb.Model(name=modelName)
#-----------------------------------------------------
import GuoxiPackage.CreatePart
cp=GuoxiPackage.CreatePart.CreatePart(myModel)
#-----------------------------------------------------
| {
"content_hash": "282630538427eb795b0e8fb84217794e",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 86,
"avg_line_length": 26.5625,
"alnum_prop": 0.5129411764705882,
"repo_name": "zjkl19/AbaqusPython",
"id": "d1be97c7ff9fef4aa20d5064d8ce2a0a80bc8912",
"size": "627",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GuoxiSuspensionBridge/GuoxiSuspensionBridge.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "512"
},
{
"name": "Python",
"bytes": "387298"
}
],
"symlink_target": ""
} |
import os
import shutil
import subprocess
import sys
import tempfile
def setup_git_repo(worktree=False):
git_repo_dir = tempfile.mkdtemp()
to_rm = [git_repo_dir]
try:
subprocess.check_output(["git", "init", "."], cwd=git_repo_dir)
with open(f"{git_repo_dir}/committed", "w") as committed_f:
committed_f.write("normal committed file\n")
subprocess.check_output(["git", "add", "committed"], cwd=git_repo_dir)
with open(f"{git_repo_dir}/committed-ignored", "w") as gitignore_f:
gitignore_f.write("this file is gitignored, but committed already")
subprocess.check_output(["git", "add", "committed-ignored"], cwd=git_repo_dir)
with open(f"{git_repo_dir}/.gitignore", "w") as gitignore_f:
gitignore_f.write("ignored\n" "committed-ignored\n")
subprocess.check_output(["git", "add", ".gitignore"], cwd=git_repo_dir)
# NOTE: explicitly set the author so this test passes in the CI.
subprocess.check_output(
[
"git",
"-c",
"user.name=Unit Test",
"-c",
"[email protected]",
"commit",
"-m",
"initial commit",
],
cwd=git_repo_dir,
)
if worktree:
worktree_dir = tempfile.mkdtemp()
to_rm.append(worktree_dir)
subprocess.check_output(["git", "worktree", "add", worktree_dir], cwd=git_repo_dir)
git_repo_dir = worktree_dir
with open(f"{git_repo_dir}/ignored", "w") as gitignore_f:
gitignore_f.write("this file is gitignored")
with open(f"{git_repo_dir}/added-to-index", "w") as added_f:
added_f.write("only added to git index\n")
subprocess.check_output(["git", "add", "added-to-index"], cwd=git_repo_dir)
with open(f"{git_repo_dir}/ignored-added-to-index", "w") as ignored_f:
ignored_f.write("this file is gitignored but in the index already\n")
subprocess.check_output(["git", "add", "-f", "ignored-added-to-index"], cwd=git_repo_dir)
with open(f"{git_repo_dir}/untracked", "w") as untracked_f:
untracked_f.write("this file is untracked\n")
os.mkdir(f"{git_repo_dir}/subdir")
with open(f"{git_repo_dir}/subdir/untracked", "w") as untracked_f:
untracked_f.write("this file is untracked\n")
with open(f"{git_repo_dir}/subdir/untracked2", "w") as untracked_f:
untracked_f.write("this file is also untracked\n")
return git_repo_dir, to_rm
except Exception:
for rm_dir in to_rm:
shutil.rmtree(rm_dir)
raise
def run_test(repo_path, passed_files, filtered_files):
test_input = (
"\n".join(
passed_files
+ filtered_files
+ [f"./{f}" for f in passed_files]
+ [f"./{f}" for f in filtered_files]
)
+ "\n"
)
test_script_dir = f"{repo_path}/test-script-dir"
os.mkdir(test_script_dir)
filter_script_path = f"{test_script_dir}/filter_untracked.py"
test_script_dirname = os.path.dirname(__file__) or os.getcwd()
shutil.copy(
os.path.realpath(f"{test_script_dirname}/../../lint/filter_untracked.py"),
filter_script_path,
)
filter_proc = subprocess.Popen(
[sys.executable, filter_script_path],
cwd=repo_path,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding="utf-8",
)
filter_output, _ = filter_proc.communicate(test_input)
filter_output_lines = [l for l in filter_output.split("\n") if l]
for pass_f in passed_files:
assert (
pass_f in filter_output_lines
), f"expected in filter output: {pass_f}\filter output: {filter_output}"
assert (
f"./{pass_f}" in filter_output_lines
), f"expected in filter output: ./{pass_f}\filter output: {filter_output}"
for filter_f in filtered_files:
assert (
filter_f not in filter_output_lines
), f"expected not in filter output: {filter_f}\nfilter_output: {filter_output}"
assert (
f"./{filter_f}" not in filter_output_lines
), f"expected not in filter output: ./{filter_f}\nfilter_output: {filter_output}"
assert len(filter_output_lines) == 2 * len(
passed_files
), f"expected {len(filter_output_lines)} == 2 * {len(passed_files)}"
def test_filter_untracked():
repo_path, to_rm = setup_git_repo()
try:
passed_files = [
"committed",
"committed-ignored",
"added-to-index",
"ignored-added-to-index",
]
filtered_files = [
"ignored",
"untracked",
"subdir/untracked",
"subdir/untracked2",
]
run_test(repo_path, passed_files, filtered_files)
finally:
for rm_dir in to_rm:
shutil.rmtree(rm_dir)
def test_worktree():
repo_path, to_rm = setup_git_repo(worktree=True)
try:
passed_files = [
"committed",
"committed-ignored",
"added-to-index",
"ignored-added-to-index",
]
filtered_files = [
"ignored",
"untracked",
"subdir/untracked",
"subdir/untracked2",
".git",
]
run_test(repo_path, passed_files, filtered_files)
finally:
for rm_dir in to_rm:
shutil.rmtree(rm_dir)
if __name__ == "__main__":
test_filter_untracked()
test_worktree()
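# The script under test simply echoes back the paths it reads on stdin,
# dropping anything git treats as untracked or ignored. A hypothetical manual
# run from a repository root might look like:
#
#   printf "committed\nuntracked\n" | python tests/lint/filter_untracked.py
#   # prints only "committed"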
| {
"content_hash": "ee3f85b9c144076d30d96ba9d158ba56",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 97,
"avg_line_length": 31.75977653631285,
"alnum_prop": 0.55584872471416,
"repo_name": "tqchen/tvm",
"id": "6080a4193dc0d169d3c95783cf2bb1fac79a84da",
"size": "6471",
"binary": false,
"copies": "5",
"ref": "refs/heads/main",
"path": "tests/python/unittest/test_filter_untracked.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4104"
},
{
"name": "C",
"bytes": "205781"
},
{
"name": "C++",
"bytes": "8124041"
},
{
"name": "CMake",
"bytes": "135007"
},
{
"name": "Cuda",
"bytes": "6677"
},
{
"name": "Go",
"bytes": "111558"
},
{
"name": "HTML",
"bytes": "2664"
},
{
"name": "Java",
"bytes": "200193"
},
{
"name": "JavaScript",
"bytes": "15075"
},
{
"name": "Makefile",
"bytes": "48206"
},
{
"name": "Objective-C",
"bytes": "18506"
},
{
"name": "Objective-C++",
"bytes": "56786"
},
{
"name": "Python",
"bytes": "10300435"
},
{
"name": "RenderScript",
"bytes": "1895"
},
{
"name": "Rust",
"bytes": "327078"
},
{
"name": "Shell",
"bytes": "157176"
},
{
"name": "TypeScript",
"bytes": "94435"
}
],
"symlink_target": ""
} |
from __main__ import vtk, qt, ctk, slicer
import string
import collections
import FeatureWidgetHelperLib
class CheckableTabWidget(qt.QTabWidget):
def __init__(self, parent=None):
super(CheckableTabWidget, self).__init__(parent)
self.featureClassFeatureWidgets = collections.OrderedDict()
# hack ( QTabWidget.setTabBar() and tabBar() are protected )
self.tab_bar = self.findChildren(qt.QTabBar)[0]
# Bold font style
self.boldFont = qt.QFont()
self.boldFont.setBold(True)
self.tab_bar.setFont(self.boldFont)
self.tab_bar.setContextMenuPolicy(3)
self.tab_bar.installEventFilter(self)
def addTab(self, widget, featureClass, featureWidgets, checkStatus=True):
qt.QTabWidget.addTab(self, widget, featureClass)
checkBox = FeatureWidgetHelperLib.FeatureWidget()
checkBox.Setup(featureName=featureClass, featureClassFlag=True, checkStatus=checkStatus)
self.featureClassFeatureWidgets[featureClass] = checkBox
self.tab_bar.setTabButton(self.tab_bar.count-1, qt.QTabBar.LeftSide, checkBox)
self.connect(checkBox, qt.SIGNAL('stateChanged(int)'), lambda checkState: self.stateChanged(checkBox, checkState, featureWidgets))
def isChecked(self, index):
return self.tab_bar.tabButton(index, qt.QTabBar.LeftSide).checkState() != 0
def setCheckState(self, index, checkState):
self.tab_bar.tabButton(index, qt.QTabBar.LeftSide).setCheckState(checkState)
def stateChanged(self, checkBox, checkState, featureWidgets):
# uncheck all checkboxes in QObject # may not need to pass list?
index = self.featureClassFeatureWidgets.values().index(checkBox)
if checkState == 0:
for widget in featureWidgets:
widget.checked = False
elif checkState == 2:
for widget in featureWidgets:
widget.checked = True
def eventFilter(self, object, event):
# context menu request (right-click) on QTabBar is forwarded to the QCheckBox (FeatureWidget)
if object == self.tab_bar and event.type() == qt.QEvent.ContextMenu:
      tabIndex = object.tabAt(event.pos())
      if tabIndex > -1:
        # map the click position from the tab bar into the checkbox widget
        # before forwarding the context-menu event to it
        pos = self.featureClassFeatureWidgets.values()[tabIndex].mapFrom(self.tab_bar, event.pos())
        qt.QCoreApplication.sendEvent(self.featureClassFeatureWidgets.values()[tabIndex], qt.QContextMenuEvent(0, pos))
      return True
return False
def getFeatureClassWidgets(self):
return(self.featureClassFeatureWidgets.values())
def addParameter(self, featureClass, parameter):
self.featureClassFeatureWidgets[featureClass].addParameter(parameter)
class FeatureWidget(qt.QCheckBox):
def __init__(self, parent=None):
super(FeatureWidget, self).__init__(parent)
def Setup(self, featureName="", featureClassFlag=False, checkStatus=True):
self.featureName = featureName
self.checked = checkStatus
if featureClassFlag:
self.descriptionLabel = FeatureWidgetHelperLib.FeatureClassDescriptionLabel()
self.descriptionLabel.setDescription(self.featureName)
else:
self.descriptionLabel = FeatureWidgetHelperLib.FeatureDescriptionLabel()
self.descriptionLabel.setDescription(self.featureName)
self.setText(self.featureName)
self.setContextMenuPolicy(3)
self.widgetMenu = FeatureWidgetHelperLib.ContextMenu(self)
self.widgetMenu.Setup(self.featureName, self.descriptionLabel)
self.customContextMenuRequested.connect(lambda point: self.connectMenu(point))
def connectMenu(self, pos):
self.widgetMenu.popup(self.mapToGlobal(pos))
def addParameter(self, parameterName):
self.widgetMenu.addParameter(parameterName)
def getParameterDict(self):
parameterDict = collections.OrderedDict()
for k,v in self.widgetMenu.parameters.items():
value = v['Edit Window'].getValue()
parameterDict[k] = value
return (parameterDict)
def getParameterEditWindow(self, parameterName):
return(self.widgetMenu.parameters[parameterName]['Edit Window'])
def getName(self):
return(self.featureName)
class ContextMenu(qt.QMenu):
def __init__(self, parent=None):
super(ContextMenu, self).__init__(parent)
def Setup(self, featureName, descriptionLabel="Description:"):
self.featureName = featureName
self.descriptionLabel = descriptionLabel
self.parameters = collections.OrderedDict()
self.descriptionAction = qt.QWidgetAction(self)
self.descriptionAction.setDefaultWidget(self.descriptionLabel)
self.closeAction = qt.QAction("Close", self)
self.reloadActions()
def reloadActions(self):
self.addAction(self.descriptionAction)
for parameter in self.parameters:
self.addAction(self.parameters[parameter]['Action'])
self.addAction(self.closeAction)
def addParameter(self, parameterName):
self.parameters[parameterName] = {}
self.parameters[parameterName]['Action'] = qt.QAction(('Edit %s' %parameterName), self)
self.parameters[parameterName]['Edit Window'] = FeatureWidgetHelperLib.ParameterEditWindow(self, self.featureName, parameterName)
self.parameters[parameterName]['Action'].connect('triggered()', lambda parameterName=parameterName: self.parameters[parameterName]['Edit Window'].showWindow())
self.reloadActions()
def getParameters(self):
return(self.parameters)
class ParameterEditWindow(qt.QInputDialog):
def __init__(self, parent=None, featureName="", parameterName=""):
super(ParameterEditWindow, self).__init__(parent)
self.featureName = featureName
self.parameterName = parameterName
self.helpString = "Edit " + parameterName + " (" + self.featureName + ")"
self.setLabelText(self.helpString + "\nCurrent Value = " + str(self.getValue()) + ": ")
self.setInputMode(1) #integer input only #make this modifiable
def showWindow(self):
self.resetLabel()
self.open()
def resetLabel(self):
self.setLabelText(self.helpString + " (Current Value = " + str(self.getValue()) + "): ")
def getValue(self):
return(self.intValue())
| {
"content_hash": "5d0c4640b4f6a410dd9b92b0c590d9af",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 163,
"avg_line_length": 38.58024691358025,
"alnum_prop": 0.70304,
"repo_name": "vnarayan13/Slicer-OpenCAD",
"id": "7b39afd50fd70b46cd2d7ef963752e86da8fa42a",
"size": "6250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "HeterogeneityCAD/FeatureWidgetHelperLib/FeatureWidgets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CMake",
"bytes": "2870"
},
{
"name": "Python",
"bytes": "146475"
}
],
"symlink_target": ""
} |
"""This module provides some more Pythonic support for SSL.
Object types:
SSLSocket -- subtype of socket.socket which does SSL over the socket
Exceptions:
SSLError -- exception raised for I/O errors
Functions:
cert_time_to_seconds -- convert time string used for certificate
notBefore and notAfter functions to integer
seconds past the Epoch (the time values
returned from time.time())
fetch_server_certificate (HOST, PORT) -- fetch the certificate provided
by the server running on HOST at port PORT. No
validation of the certificate is performed.
Integer constants:
SSL_ERROR_ZERO_RETURN
SSL_ERROR_WANT_READ
SSL_ERROR_WANT_WRITE
SSL_ERROR_WANT_X509_LOOKUP
SSL_ERROR_SYSCALL
SSL_ERROR_SSL
SSL_ERROR_WANT_CONNECT
SSL_ERROR_EOF
SSL_ERROR_INVALID_ERROR_CODE
The following group define certificate requirements that one side is
allowing/requiring from the other side:
CERT_NONE - no certificates from the other side are required (or will
be looked at if provided)
CERT_OPTIONAL - certificates are not required, but if provided will be
validated, and if validation fails, the connection will
also fail
CERT_REQUIRED - certificates are required, and will be validated, and
if validation fails, the connection will also fail
The following constants identify various SSL protocol variants:
PROTOCOL_SSLv2
PROTOCOL_SSLv3
PROTOCOL_SSLv23
PROTOCOL_TLSv1
PROTOCOL_TLSv1_1
PROTOCOL_TLSv1_2
The following constants identify various SSL alert message descriptions as per
http://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-6
ALERT_DESCRIPTION_CLOSE_NOTIFY
ALERT_DESCRIPTION_UNEXPECTED_MESSAGE
ALERT_DESCRIPTION_BAD_RECORD_MAC
ALERT_DESCRIPTION_RECORD_OVERFLOW
ALERT_DESCRIPTION_DECOMPRESSION_FAILURE
ALERT_DESCRIPTION_HANDSHAKE_FAILURE
ALERT_DESCRIPTION_BAD_CERTIFICATE
ALERT_DESCRIPTION_UNSUPPORTED_CERTIFICATE
ALERT_DESCRIPTION_CERTIFICATE_REVOKED
ALERT_DESCRIPTION_CERTIFICATE_EXPIRED
ALERT_DESCRIPTION_CERTIFICATE_UNKNOWN
ALERT_DESCRIPTION_ILLEGAL_PARAMETER
ALERT_DESCRIPTION_UNKNOWN_CA
ALERT_DESCRIPTION_ACCESS_DENIED
ALERT_DESCRIPTION_DECODE_ERROR
ALERT_DESCRIPTION_DECRYPT_ERROR
ALERT_DESCRIPTION_PROTOCOL_VERSION
ALERT_DESCRIPTION_INSUFFICIENT_SECURITY
ALERT_DESCRIPTION_INTERNAL_ERROR
ALERT_DESCRIPTION_USER_CANCELLED
ALERT_DESCRIPTION_NO_RENEGOTIATION
ALERT_DESCRIPTION_UNSUPPORTED_EXTENSION
ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
ALERT_DESCRIPTION_UNRECOGNIZED_NAME
ALERT_DESCRIPTION_BAD_CERTIFICATE_STATUS_RESPONSE
ALERT_DESCRIPTION_BAD_CERTIFICATE_HASH_VALUE
ALERT_DESCRIPTION_UNKNOWN_PSK_IDENTITY
"""
import textwrap
import re
import sys
import os
from collections import namedtuple
from contextlib import closing
import _ssl # if we can't import it, let the error propagate
from _ssl import OPENSSL_VERSION_NUMBER, OPENSSL_VERSION_INFO, OPENSSL_VERSION
from _ssl import _SSLContext
from _ssl import (
SSLError, SSLZeroReturnError, SSLWantReadError, SSLWantWriteError,
SSLSyscallError, SSLEOFError,
)
from _ssl import CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED
from _ssl import txt2obj as _txt2obj, nid2obj as _nid2obj
from _ssl import RAND_status, RAND_add
try:
from _ssl import RAND_egd
except ImportError:
# LibreSSL does not provide RAND_egd
pass
def _import_symbols(prefix):
for n in dir(_ssl):
if n.startswith(prefix):
globals()[n] = getattr(_ssl, n)
_import_symbols('OP_')
_import_symbols('ALERT_DESCRIPTION_')
_import_symbols('SSL_ERROR_')
_import_symbols('PROTOCOL_')
_import_symbols('VERIFY_')
from _ssl import HAS_SNI, HAS_ECDH, HAS_NPN, HAS_ALPN
from _ssl import _OPENSSL_API_VERSION
_PROTOCOL_NAMES = {value: name for name, value in globals().items() if name.startswith('PROTOCOL_')}
try:
_SSLv2_IF_EXISTS = PROTOCOL_SSLv2
except NameError:
_SSLv2_IF_EXISTS = None
from socket import socket, _fileobject, _delegate_methods, error as socket_error
if sys.platform == "win32":
from _ssl import enum_certificates, enum_crls
from socket import socket, AF_INET, SOCK_STREAM, create_connection
from socket import SOL_SOCKET, SO_TYPE
import base64 # for DER-to-PEM translation
import errno
import warnings
if _ssl.HAS_TLS_UNIQUE:
CHANNEL_BINDING_TYPES = ['tls-unique']
else:
CHANNEL_BINDING_TYPES = []
# Disable weak or insecure ciphers by default
# (OpenSSL's default setting is 'DEFAULT:!aNULL:!eNULL')
# Enable a better set of ciphers by default
# This list has been explicitly chosen to:
# * Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE)
# * Prefer ECDHE over DHE for better performance
# * Prefer any AES-GCM over any AES-CBC for better performance and security
# * Then Use HIGH cipher suites as a fallback
# * Then Use 3DES as fallback which is secure but slow
# * Disable NULL authentication, NULL encryption, and MD5 MACs for security
# reasons
_DEFAULT_CIPHERS = (
'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+HIGH:'
'DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES:!aNULL:'
'!eNULL:!MD5'
)
# Restricted and more secure ciphers for the server side
# This list has been explicitly chosen to:
# * Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE)
# * Prefer ECDHE over DHE for better performance
# * Prefer any AES-GCM over any AES-CBC for better performance and security
# * Then Use HIGH cipher suites as a fallback
# * Then Use 3DES as fallback which is secure but slow
# * Disable NULL authentication, NULL encryption, MD5 MACs, DSS, and RC4 for
# security reasons
_RESTRICTED_SERVER_CIPHERS = (
'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+HIGH:'
'DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES:!aNULL:'
'!eNULL:!MD5:!DSS:!RC4'
)
class CertificateError(ValueError):
pass
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
pieces = dn.split(r'.')
leftmost = pieces[0]
remainder = pieces[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
        # than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
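# Illustrative sketch (not part of the original module): how the RFC 6125
# matching above behaves for a few representative, hypothetical names.
#   _dnsname_match('www.example.com', 'WWW.example.com')  # True (case-insensitive, no wildcard)
#   _dnsname_match('*.example.com', 'www.example.com')    # truthy (wildcard matches one label)
#   _dnsname_match('*.example.com', 'a.b.example.com')    # falsy (wildcard never spans a dot)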
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate, match_hostname needs a "
"SSL socket or SSL context with either "
"CERT_OPTIONAL or CERT_REQUIRED")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")
DefaultVerifyPaths = namedtuple("DefaultVerifyPaths",
"cafile capath openssl_cafile_env openssl_cafile openssl_capath_env "
"openssl_capath")
def get_default_verify_paths():
"""Return paths to default cafile and capath.
"""
parts = _ssl.get_default_verify_paths()
# environment vars shadow paths
cafile = os.environ.get(parts[0], parts[1])
capath = os.environ.get(parts[2], parts[3])
return DefaultVerifyPaths(cafile if os.path.isfile(cafile) else None,
capath if os.path.isdir(capath) else None,
*parts)
class _ASN1Object(namedtuple("_ASN1Object", "nid shortname longname oid")):
"""ASN.1 object identifier lookup
"""
__slots__ = ()
def __new__(cls, oid):
return super(_ASN1Object, cls).__new__(cls, *_txt2obj(oid, name=False))
@classmethod
def fromnid(cls, nid):
"""Create _ASN1Object from OpenSSL numeric ID
"""
return super(_ASN1Object, cls).__new__(cls, *_nid2obj(nid))
@classmethod
def fromname(cls, name):
"""Create _ASN1Object from short name, long name or OID
"""
return super(_ASN1Object, cls).__new__(cls, *_txt2obj(name, name=True))
class Purpose(_ASN1Object):
"""SSLContext purpose flags with X509v3 Extended Key Usage objects
"""
Purpose.SERVER_AUTH = Purpose('1.3.6.1.5.5.7.3.1')
Purpose.CLIENT_AUTH = Purpose('1.3.6.1.5.5.7.3.2')
class SSLContext(_SSLContext):
"""An SSLContext holds various SSL-related configuration options and
data, such as certificates and possibly a private key."""
__slots__ = ('protocol', '__weakref__')
_windows_cert_stores = ("CA", "ROOT")
def __new__(cls, protocol, *args, **kwargs):
self = _SSLContext.__new__(cls, protocol)
if protocol != _SSLv2_IF_EXISTS:
self.set_ciphers(_DEFAULT_CIPHERS)
return self
def __init__(self, protocol):
self.protocol = protocol
def wrap_socket(self, sock, server_side=False,
do_handshake_on_connect=True,
suppress_ragged_eofs=True,
server_hostname=None):
return SSLSocket(sock=sock, server_side=server_side,
do_handshake_on_connect=do_handshake_on_connect,
suppress_ragged_eofs=suppress_ragged_eofs,
server_hostname=server_hostname,
_context=self)
def set_npn_protocols(self, npn_protocols):
protos = bytearray()
for protocol in npn_protocols:
b = protocol.encode('ascii')
if len(b) == 0 or len(b) > 255:
raise SSLError('NPN protocols must be 1 to 255 in length')
protos.append(len(b))
protos.extend(b)
self._set_npn_protocols(protos)
def set_alpn_protocols(self, alpn_protocols):
protos = bytearray()
for protocol in alpn_protocols:
b = protocol.encode('ascii')
if len(b) == 0 or len(b) > 255:
raise SSLError('ALPN protocols must be 1 to 255 in length')
protos.append(len(b))
protos.extend(b)
self._set_alpn_protocols(protos)
def _load_windows_store_certs(self, storename, purpose):
certs = bytearray()
try:
for cert, encoding, trust in enum_certificates(storename):
# CA certs are never PKCS#7 encoded
if encoding == "x509_asn":
if trust is True or purpose.oid in trust:
certs.extend(cert)
except OSError:
warnings.warn("unable to enumerate Windows certificate store")
if certs:
self.load_verify_locations(cadata=certs)
return certs
def load_default_certs(self, purpose=Purpose.SERVER_AUTH):
if not isinstance(purpose, _ASN1Object):
raise TypeError(purpose)
if sys.platform == "win32":
for storename in self._windows_cert_stores:
self._load_windows_store_certs(storename, purpose)
self.set_default_verify_paths()
def create_default_context(purpose=Purpose.SERVER_AUTH, cafile=None,
capath=None, cadata=None):
"""Create a SSLContext object with default settings.
NOTE: The protocol and settings may change anytime without prior
deprecation. The values represent a fair balance between maximum
compatibility and security.
"""
if not isinstance(purpose, _ASN1Object):
raise TypeError(purpose)
context = SSLContext(PROTOCOL_SSLv23)
# SSLv2 considered harmful.
context.options |= OP_NO_SSLv2
# SSLv3 has problematic security and is only required for really old
# clients such as IE6 on Windows XP
context.options |= OP_NO_SSLv3
# disable compression to prevent CRIME attacks (OpenSSL 1.0+)
context.options |= getattr(_ssl, "OP_NO_COMPRESSION", 0)
if purpose == Purpose.SERVER_AUTH:
# verify certs and host name in client mode
context.verify_mode = CERT_REQUIRED
context.check_hostname = True
elif purpose == Purpose.CLIENT_AUTH:
# Prefer the server's ciphers by default so that we get stronger
# encryption
context.options |= getattr(_ssl, "OP_CIPHER_SERVER_PREFERENCE", 0)
# Use single use keys in order to improve forward secrecy
context.options |= getattr(_ssl, "OP_SINGLE_DH_USE", 0)
context.options |= getattr(_ssl, "OP_SINGLE_ECDH_USE", 0)
# disallow ciphers with known vulnerabilities
context.set_ciphers(_RESTRICTED_SERVER_CIPHERS)
if cafile or capath or cadata:
context.load_verify_locations(cafile, capath, cadata)
elif context.verify_mode != CERT_NONE:
# no explicit cafile, capath or cadata but the verify mode is
# CERT_OPTIONAL or CERT_REQUIRED. Let's try to load default system
# root CA certificates for the given purpose. This may fail silently.
context.load_default_certs(purpose)
return context
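# Illustrative usage sketch (not part of the original module): a typical
# client-side connection using the defaults above. The host name is a
# hypothetical example; create_connection() is imported from socket above.
#   ctx = create_default_context()
#   sock = create_connection(("www.python.org", 443))
#   ssl_sock = ctx.wrap_socket(sock, server_hostname="www.python.org")
#   ssl_sock.getpeercert()  # certificate chain and host name already verified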
def _create_unverified_context(protocol=PROTOCOL_SSLv23, cert_reqs=None,
check_hostname=False, purpose=Purpose.SERVER_AUTH,
certfile=None, keyfile=None,
cafile=None, capath=None, cadata=None):
"""Create a SSLContext object for Python stdlib modules
All Python stdlib modules shall use this function to create SSLContext
objects in order to keep common settings in one place. The configuration
    is less restrictive than create_default_context()'s to increase backward
compatibility.
"""
if not isinstance(purpose, _ASN1Object):
raise TypeError(purpose)
context = SSLContext(protocol)
# SSLv2 considered harmful.
context.options |= OP_NO_SSLv2
# SSLv3 has problematic security and is only required for really old
# clients such as IE6 on Windows XP
context.options |= OP_NO_SSLv3
if cert_reqs is not None:
context.verify_mode = cert_reqs
context.check_hostname = check_hostname
if keyfile and not certfile:
raise ValueError("certfile must be specified")
if certfile or keyfile:
context.load_cert_chain(certfile, keyfile)
# load CA root certs
if cafile or capath or cadata:
context.load_verify_locations(cafile, capath, cadata)
elif context.verify_mode != CERT_NONE:
# no explicit cafile, capath or cadata but the verify mode is
# CERT_OPTIONAL or CERT_REQUIRED. Let's try to load default system
# root CA certificates for the given purpose. This may fail silently.
context.load_default_certs(purpose)
return context
# Backwards compatibility alias, even though it's not a public name.
_create_stdlib_context = _create_unverified_context
# PEP 493: Verify HTTPS by default, but allow envvar to override that
_https_verify_envvar = 'PYTHONHTTPSVERIFY'
def _get_https_context_factory():
if not sys.flags.ignore_environment:
config_setting = os.environ.get(_https_verify_envvar)
if config_setting == '0':
return _create_unverified_context
return create_default_context
_create_default_https_context = _get_https_context_factory()
# PEP 493: "private" API to configure HTTPS defaults without monkeypatching
def _https_verify_certificates(enable=True):
"""Verify server HTTPS certificates by default?"""
global _create_default_https_context
if enable:
_create_default_https_context = create_default_context
else:
_create_default_https_context = _create_unverified_context
class SSLSocket(socket):
"""This class implements a subtype of socket.socket that wraps
the underlying OS socket in an SSL context when necessary, and
provides read and write methods over that channel."""
def __init__(self, sock=None, keyfile=None, certfile=None,
server_side=False, cert_reqs=CERT_NONE,
ssl_version=PROTOCOL_SSLv23, ca_certs=None,
do_handshake_on_connect=True,
family=AF_INET, type=SOCK_STREAM, proto=0, fileno=None,
suppress_ragged_eofs=True, npn_protocols=None, ciphers=None,
server_hostname=None,
_context=None):
self._makefile_refs = 0
if _context:
self._context = _context
else:
if server_side and not certfile:
raise ValueError("certfile must be specified for server-side "
"operations")
if keyfile and not certfile:
raise ValueError("certfile must be specified")
if certfile and not keyfile:
keyfile = certfile
self._context = SSLContext(ssl_version)
self._context.verify_mode = cert_reqs
if ca_certs:
self._context.load_verify_locations(ca_certs)
if certfile:
self._context.load_cert_chain(certfile, keyfile)
if npn_protocols:
self._context.set_npn_protocols(npn_protocols)
if ciphers:
self._context.set_ciphers(ciphers)
self.keyfile = keyfile
self.certfile = certfile
self.cert_reqs = cert_reqs
self.ssl_version = ssl_version
self.ca_certs = ca_certs
self.ciphers = ciphers
# Can't use sock.type as other flags (such as SOCK_NONBLOCK) get
# mixed in.
if sock.getsockopt(SOL_SOCKET, SO_TYPE) != SOCK_STREAM:
raise NotImplementedError("only stream sockets are supported")
socket.__init__(self, _sock=sock._sock)
# The initializer for socket overrides the methods send(), recv(), etc.
        # in the instance, which we don't need -- but we want to provide the
# methods defined in SSLSocket.
for attr in _delegate_methods:
try:
delattr(self, attr)
except AttributeError:
pass
if server_side and server_hostname:
raise ValueError("server_hostname can only be specified "
"in client mode")
if self._context.check_hostname and not server_hostname:
raise ValueError("check_hostname requires server_hostname")
self.server_side = server_side
self.server_hostname = server_hostname
self.do_handshake_on_connect = do_handshake_on_connect
self.suppress_ragged_eofs = suppress_ragged_eofs
# See if we are connected
try:
self.getpeername()
except socket_error as e:
if e.errno != errno.ENOTCONN:
raise
connected = False
else:
connected = True
self._closed = False
self._sslobj = None
self._connected = connected
if connected:
# create the SSL object
try:
self._sslobj = self._context._wrap_socket(self._sock, server_side,
server_hostname, ssl_sock=self)
if do_handshake_on_connect:
timeout = self.gettimeout()
if timeout == 0.0:
# non-blocking
raise ValueError("do_handshake_on_connect should not be specified for non-blocking sockets")
self.do_handshake()
except (OSError, ValueError):
self.close()
raise
@property
def context(self):
return self._context
@context.setter
def context(self, ctx):
self._context = ctx
self._sslobj.context = ctx
def dup(self):
        raise NotImplementedError("Can't dup() %s instances" %
                                  self.__class__.__name__)
def _checkClosed(self, msg=None):
# raise an exception here if you wish to check for spurious closes
pass
def _check_connected(self):
if not self._connected:
# getpeername() will raise ENOTCONN if the socket is really
# not connected; note that we can be connected even without
# _connected being set, e.g. if connect() first returned
# EAGAIN.
self.getpeername()
def read(self, len=1024, buffer=None):
"""Read up to LEN bytes and return them.
Return zero-length string on EOF."""
self._checkClosed()
if not self._sslobj:
raise ValueError("Read on closed or unwrapped SSL socket.")
try:
if buffer is not None:
v = self._sslobj.read(len, buffer)
else:
v = self._sslobj.read(len)
return v
except SSLError as x:
if x.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs:
if buffer is not None:
return 0
else:
return b''
else:
raise
def write(self, data):
"""Write DATA to the underlying SSL channel. Returns
number of bytes of DATA actually transmitted."""
self._checkClosed()
if not self._sslobj:
raise ValueError("Write on closed or unwrapped SSL socket.")
return self._sslobj.write(data)
def getpeercert(self, binary_form=False):
"""Returns a formatted version of the data in the
certificate provided by the other end of the SSL channel.
Return None if no certificate was provided, {} if a
certificate was provided, but not validated."""
self._checkClosed()
self._check_connected()
return self._sslobj.peer_certificate(binary_form)
def selected_npn_protocol(self):
self._checkClosed()
if not self._sslobj or not _ssl.HAS_NPN:
return None
else:
return self._sslobj.selected_npn_protocol()
def selected_alpn_protocol(self):
self._checkClosed()
if not self._sslobj or not _ssl.HAS_ALPN:
return None
else:
return self._sslobj.selected_alpn_protocol()
def cipher(self):
self._checkClosed()
if not self._sslobj:
return None
else:
return self._sslobj.cipher()
def compression(self):
self._checkClosed()
if not self._sslobj:
return None
else:
return self._sslobj.compression()
def send(self, data, flags=0):
self._checkClosed()
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to send() on %s" %
self.__class__)
try:
v = self._sslobj.write(data)
except SSLError as x:
if x.args[0] == SSL_ERROR_WANT_READ:
return 0
elif x.args[0] == SSL_ERROR_WANT_WRITE:
return 0
else:
raise
else:
return v
else:
return self._sock.send(data, flags)
def sendto(self, data, flags_or_addr, addr=None):
self._checkClosed()
if self._sslobj:
raise ValueError("sendto not allowed on instances of %s" %
self.__class__)
elif addr is None:
return self._sock.sendto(data, flags_or_addr)
else:
return self._sock.sendto(data, flags_or_addr, addr)
def sendall(self, data, flags=0):
self._checkClosed()
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to sendall() on %s" %
self.__class__)
amount = len(data)
count = 0
while (count < amount):
v = self.send(data[count:])
count += v
return amount
else:
return socket.sendall(self, data, flags)
def recv(self, buflen=1024, flags=0):
self._checkClosed()
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to recv() on %s" %
self.__class__)
return self.read(buflen)
else:
return self._sock.recv(buflen, flags)
def recv_into(self, buffer, nbytes=None, flags=0):
self._checkClosed()
if buffer and (nbytes is None):
nbytes = len(buffer)
elif nbytes is None:
nbytes = 1024
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to recv_into() on %s" %
self.__class__)
return self.read(nbytes, buffer)
else:
return self._sock.recv_into(buffer, nbytes, flags)
def recvfrom(self, buflen=1024, flags=0):
self._checkClosed()
if self._sslobj:
raise ValueError("recvfrom not allowed on instances of %s" %
self.__class__)
else:
return self._sock.recvfrom(buflen, flags)
def recvfrom_into(self, buffer, nbytes=None, flags=0):
self._checkClosed()
if self._sslobj:
raise ValueError("recvfrom_into not allowed on instances of %s" %
self.__class__)
else:
return self._sock.recvfrom_into(buffer, nbytes, flags)
def pending(self):
self._checkClosed()
if self._sslobj:
return self._sslobj.pending()
else:
return 0
def shutdown(self, how):
self._checkClosed()
self._sslobj = None
socket.shutdown(self, how)
def close(self):
if self._makefile_refs < 1:
self._sslobj = None
socket.close(self)
else:
self._makefile_refs -= 1
def unwrap(self):
if self._sslobj:
s = self._sslobj.shutdown()
self._sslobj = None
return s
else:
raise ValueError("No SSL wrapper around " + str(self))
def _real_close(self):
self._sslobj = None
socket._real_close(self)
def do_handshake(self, block=False):
"""Perform a TLS/SSL handshake."""
self._check_connected()
timeout = self.gettimeout()
try:
if timeout == 0.0 and block:
self.settimeout(None)
self._sslobj.do_handshake()
finally:
self.settimeout(timeout)
if self.context.check_hostname:
if not self.server_hostname:
raise ValueError("check_hostname needs server_hostname "
"argument")
match_hostname(self.getpeercert(), self.server_hostname)
def _real_connect(self, addr, connect_ex):
if self.server_side:
raise ValueError("can't connect in server-side mode")
# Here we assume that the socket is client-side, and not
# connected at the time of the call. We connect it, then wrap it.
if self._connected:
raise ValueError("attempt to connect already-connected SSLSocket!")
self._sslobj = self.context._wrap_socket(self._sock, False, self.server_hostname, ssl_sock=self)
try:
if connect_ex:
rc = socket.connect_ex(self, addr)
else:
rc = None
socket.connect(self, addr)
if not rc:
self._connected = True
if self.do_handshake_on_connect:
self.do_handshake()
return rc
except (OSError, ValueError):
self._sslobj = None
raise
def connect(self, addr):
"""Connects to remote ADDR, and then wraps the connection in
an SSL channel."""
self._real_connect(addr, False)
def connect_ex(self, addr):
"""Connects to remote ADDR, and then wraps the connection in
an SSL channel."""
return self._real_connect(addr, True)
def accept(self):
"""Accepts a new connection from a remote client, and returns
a tuple containing that new connection wrapped with a server-side
SSL channel, and the address of the remote client."""
newsock, addr = socket.accept(self)
newsock = self.context.wrap_socket(newsock,
do_handshake_on_connect=self.do_handshake_on_connect,
suppress_ragged_eofs=self.suppress_ragged_eofs,
server_side=True)
return newsock, addr
def makefile(self, mode='r', bufsize=-1):
"""Make and return a file-like object that
works with the SSL connection. Just use the code
from the socket module."""
self._makefile_refs += 1
# close=True so as to decrement the reference count when done with
# the file-like object.
return _fileobject(self, mode, bufsize, close=True)
def get_channel_binding(self, cb_type="tls-unique"):
"""Get channel binding data for current connection. Raise ValueError
if the requested `cb_type` is not supported. Return bytes of the data
or None if the data is not available (e.g. before the handshake).
"""
if cb_type not in CHANNEL_BINDING_TYPES:
raise ValueError("Unsupported channel binding type")
if cb_type != "tls-unique":
raise NotImplementedError(
"{0} channel binding type not implemented"
.format(cb_type))
if self._sslobj is None:
return None
return self._sslobj.tls_unique_cb()
def version(self):
"""
Return a string identifying the protocol version used by the
current SSL channel, or None if there is no established channel.
"""
if self._sslobj is None:
return None
return self._sslobj.version()
def wrap_socket(sock, keyfile=None, certfile=None,
server_side=False, cert_reqs=CERT_NONE,
ssl_version=PROTOCOL_SSLv23, ca_certs=None,
do_handshake_on_connect=True,
suppress_ragged_eofs=True,
ciphers=None):
return SSLSocket(sock=sock, keyfile=keyfile, certfile=certfile,
server_side=server_side, cert_reqs=cert_reqs,
ssl_version=ssl_version, ca_certs=ca_certs,
do_handshake_on_connect=do_handshake_on_connect,
suppress_ragged_eofs=suppress_ragged_eofs,
ciphers=ciphers)
# some utility functions
def cert_time_to_seconds(cert_time):
"""Return the time in seconds since the Epoch, given the timestring
representing the "notBefore" or "notAfter" date from a certificate
in ``"%b %d %H:%M:%S %Y %Z"`` strptime format (C locale).
"notBefore" or "notAfter" dates must use UTC (RFC 5280).
Month is one of: Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec
UTC should be specified as GMT (see ASN1_TIME_print())
"""
from time import strptime
from calendar import timegm
months = (
"Jan","Feb","Mar","Apr","May","Jun",
"Jul","Aug","Sep","Oct","Nov","Dec"
)
time_format = ' %d %H:%M:%S %Y GMT' # NOTE: no month, fixed GMT
try:
month_number = months.index(cert_time[:3].title()) + 1
except ValueError:
raise ValueError('time data %r does not match '
'format "%%b%s"' % (cert_time, time_format))
else:
# found valid month
tt = strptime(cert_time[3:], time_format)
# return an integer, the previous mktime()-based implementation
# returned a float (fractional seconds are always zero here).
return timegm((tt[0], month_number) + tt[2:6])
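# Illustrative example (hypothetical timestring, not part of the original module):
#   cert_time_to_seconds("Jun 26 21:41:46 2021 GMT")  # -> 1624743706
# i.e. the integer number of seconds since the Epoch for that UTC timestamp.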
PEM_HEADER = "-----BEGIN CERTIFICATE-----"
PEM_FOOTER = "-----END CERTIFICATE-----"
def DER_cert_to_PEM_cert(der_cert_bytes):
"""Takes a certificate in binary DER format and returns the
PEM version of it as a string."""
f = base64.standard_b64encode(der_cert_bytes).decode('ascii')
return (PEM_HEADER + '\n' +
textwrap.fill(f, 64) + '\n' +
PEM_FOOTER + '\n')
def PEM_cert_to_DER_cert(pem_cert_string):
"""Takes a certificate in ASCII PEM format and returns the
DER-encoded version of it as a byte sequence"""
if not pem_cert_string.startswith(PEM_HEADER):
raise ValueError("Invalid PEM encoding; must start with %s"
% PEM_HEADER)
if not pem_cert_string.strip().endswith(PEM_FOOTER):
raise ValueError("Invalid PEM encoding; must end with %s"
% PEM_FOOTER)
d = pem_cert_string.strip()[len(PEM_HEADER):-len(PEM_FOOTER)]
return base64.decodestring(d.encode('ASCII', 'strict'))
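# Illustrative note (not part of the original module): the two helpers above are
# inverses for any DER-encoded certificate `der_bytes` (hypothetical name):
#   PEM_cert_to_DER_cert(DER_cert_to_PEM_cert(der_bytes)) == der_bytes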
def get_server_certificate(addr, ssl_version=PROTOCOL_SSLv23, ca_certs=None):
"""Retrieve the certificate from the server at the specified address,
and return it as a PEM-encoded string.
If 'ca_certs' is specified, validate the server cert against it.
If 'ssl_version' is specified, use it in the connection attempt."""
host, port = addr
if ca_certs is not None:
cert_reqs = CERT_REQUIRED
else:
cert_reqs = CERT_NONE
context = _create_stdlib_context(ssl_version,
cert_reqs=cert_reqs,
cafile=ca_certs)
with closing(create_connection(addr)) as sock:
with closing(context.wrap_socket(sock)) as sslsock:
dercert = sslsock.getpeercert(True)
return DER_cert_to_PEM_cert(dercert)
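# Illustrative usage sketch (host and file names are hypothetical):
#   pem = get_server_certificate(("www.python.org", 443), ca_certs="cacert.pem")
#   with open("server.pem", "w") as f:
#       f.write(pem)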
def get_protocol_name(protocol_code):
return _PROTOCOL_NAMES.get(protocol_code, '<unknown>')
# a replacement for the old socket.ssl function
def sslwrap_simple(sock, keyfile=None, certfile=None):
"""A replacement for the old socket.ssl function. Designed
    for compatibility with Python 2.5 and earlier. Will disappear in
Python 3.0."""
if hasattr(sock, "_sock"):
sock = sock._sock
ctx = SSLContext(PROTOCOL_SSLv23)
if keyfile or certfile:
ctx.load_cert_chain(certfile, keyfile)
ssl_sock = ctx._wrap_socket(sock, server_side=False)
try:
sock.getpeername()
except socket_error:
# no, no connection yet
pass
else:
# yes, do the handshake
ssl_sock.do_handshake()
return ssl_sock
| {
"content_hash": "1831a4641dfc50fd1fadcfb45a24b397",
"timestamp": "",
"source": "github",
"line_count": 1034,
"max_line_length": 116,
"avg_line_length": 36.09090909090909,
"alnum_prop": 0.6124926309019776,
"repo_name": "franekp/millandict",
"id": "60703808793345f08ad3f4b71b61a47bba837ce4",
"size": "37434",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "ankidict/thirdparty/ssl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "913"
},
{
"name": "CSS",
"bytes": "1117"
},
{
"name": "Python",
"bytes": "86639"
},
{
"name": "QMake",
"bytes": "158"
},
{
"name": "VimL",
"bytes": "31"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from numpy import pi, arange, sin
import numpy as np
import time
from bokeh.browserlib import view
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.glyphs import Circle
from bokeh.objects import (
Plot, DataRange1d, DatetimeAxis,
ColumnDataSource, PanTool, WheelZoomTool
)
from bokeh.resources import INLINE
x = arange(-2 * pi, 2 * pi, 0.1)
y = sin(x)
# Create an array of times, starting at the current time, and extending
# for len(x) number of hours.
times = np.arange(len(x)) * 3600000 + time.time()
source = ColumnDataSource(
data=dict(x=x, y=y, times=times)
)
xdr = DataRange1d(sources=[source.columns("times")])
ydr = DataRange1d(sources=[source.columns("y")])
plot = Plot(x_range=xdr, y_range=ydr, min_border=80)
circle = Circle(x="times", y="y", fill_color="red", size=5, line_color="black")
plot.add_glyph(source, circle)
plot.add_layout(DatetimeAxis(), 'below')
plot.add_layout(DatetimeAxis(), 'left')
plot.add_tools(PanTool(), WheelZoomTool())
doc = Document()
doc.add(plot)
if __name__ == "__main__":
filename = "dateaxis.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "Date Axis Example"))
print("Wrote %s" % filename)
view(filename)
| {
"content_hash": "7a73c61087ce9ae6f66480c854ae96bb",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 79,
"avg_line_length": 26.26530612244898,
"alnum_prop": 0.6985236985236986,
"repo_name": "jakevdp/bokeh",
"id": "30132385802ae5f0c02e2c34fb856423422f4476",
"size": "1287",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/glyphs/dateaxis.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""Create pidstore branch."""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "f615cee99600"
down_revision = None
branch_labels = ("invenio_pidstore",)
depends_on = "dbdbc1b19cf2"
def upgrade():
"""Upgrade database."""
def downgrade():
"""Downgrade database."""
| {
"content_hash": "488ccc909e6a78d57417828841ef7f0e",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 40,
"avg_line_length": 18.166666666666668,
"alnum_prop": 0.7003058103975535,
"repo_name": "inveniosoftware/invenio-pidstore",
"id": "500943ad5553fc16af108949cca820b79db84e19",
"size": "562",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "invenio_pidstore/alembic/f615cee99600_create_pidstore_branch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "126932"
},
{
"name": "Shell",
"bytes": "839"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from optparse import OptionParser
from os import path
import pipes
from sys import stderr
from util import check_output
def mvn(action):
return ['mvn', '--file', path.join(root, 'fake_pom_%s.xml' % action)]
opts = OptionParser()
opts.add_option('--repository', help='maven repository id')
opts.add_option('--url', help='maven repository url')
opts.add_option('-o', help='output file for the install/deploy summary')
opts.add_option('-g', help='maven group id')
opts.add_option('-a', help='action (valid actions are: install,deploy)')
opts.add_option('-v', help='package version')
opts.add_option('-s', action='append', help='triplet of artifactId:type:path')
args, ctx = opts.parse_args()
if not args.g:
print('group is empty', file=stderr)
exit(1)
if not args.v:
print('version is empty', file=stderr)
exit(1)
common = [
'-DgroupId=%s' % args.g,
'-Dversion=%s' % args.v,
]
root = path.abspath(__file__)
while not path.exists(path.join(root, '.buckconfig')):
root = path.dirname(root)
if 'install' == args.a:
cmd = mvn(args.a) + ['install:install-file'] + common
elif 'deploy' == args.a:
cmd = mvn(args.a) + [
'deploy:deploy-file',
'-DrepositoryId=%s' % args.repository,
'-Durl=%s' % args.url,
] + common
else:
print("unknown action -a %s" % args.a, file=stderr)
exit(1)
for spec in args.s:
artifact, packaging_type, src = spec.split(':')
cmds = cmd + [
'-DartifactId=%s' % artifact,
'-Dpackaging=%s' % packaging_type,
'-Dfile=%s' % src,
]
try:
check_output(cmds)
except Exception as e:
cmds_str = ' '.join(pipes.quote(c) for c in cmds)
print("%s command failed: `%s`: %s" % (args.a, cmds_str, e), file=stderr)
exit(1)
with open(args.o, 'w') as fd:
if args.repository:
print('Repository: %s' % args.repository, file=fd)
if args.url:
print('URL: %s' % args.url, file=fd)
print('Version: %s' % args.v, file=fd)
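# Illustrative invocation sketch (coordinates and paths are hypothetical):
#   python mvn.py -a install -g com.example.group -v 1.0 \
#       -s my-artifact:jar:/path/to/my-artifact.jar -o mvn_install.log
# For '-a deploy', additionally pass --repository <id> and --url <repository url>.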
| {
"content_hash": "dfc55a842d699cc13aba4d806fb4f254",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 78,
"avg_line_length": 27.569444444444443,
"alnum_prop": 0.6392947103274559,
"repo_name": "JulienGenoud/gitiles",
"id": "107db609da7a5b0f546758f5420e5b55005e5924",
"size": "2599",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bucklets/tools/mvn.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16459"
},
{
"name": "Java",
"bytes": "556704"
},
{
"name": "Python",
"bytes": "17149"
}
],
"symlink_target": ""
} |
"""Test BIP65 (CHECKLOCKTIMEVERIFY).
Test that the CHECKLOCKTIMEVERIFY soft-fork activates at (regtest) block height
1351.
"""
from test_framework.blocktools import create_coinbase, create_block, create_transaction
from test_framework.messages import CTransaction, msg_block, ToHex
from test_framework.p2p import P2PInterface
from test_framework.script import CScript, OP_1NEGATE, OP_CHECKLOCKTIMEVERIFY, OP_DROP, CScriptNum
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
hex_str_to_bytes,
)
from io import BytesIO
CLTV_HEIGHT = 1351
VB_TOP_BITS = 0x20000000
def cltv_invalidate(tx):
'''Modify the signature in vin 0 of the tx to fail CLTV
Prepends -1 CLTV DROP in the scriptSig itself.
TODO: test more ways that transactions using CLTV could be invalid (eg
locktime requirements fail, sequence time requirements fail, etc).
'''
tx.vin[0].scriptSig = CScript([OP_1NEGATE, OP_CHECKLOCKTIMEVERIFY, OP_DROP] +
list(CScript(tx.vin[0].scriptSig)))
def cltv_validate(node, tx, height):
'''Modify the signature in vin 0 of the tx to pass CLTV
Prepends <height> CLTV DROP in the scriptSig, and sets
the locktime to height'''
tx.vin[0].nSequence = 0
tx.nLockTime = height
# Need to re-sign, since nSequence and nLockTime changed
signed_result = node.signrawtransactionwithwallet(ToHex(tx))
new_tx = CTransaction()
new_tx.deserialize(BytesIO(hex_str_to_bytes(signed_result['hex'])))
new_tx.vin[0].scriptSig = CScript([CScriptNum(height), OP_CHECKLOCKTIMEVERIFY, OP_DROP] +
list(CScript(new_tx.vin[0].scriptSig)))
return new_tx
class BIP65Test(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [[
            '-whitelist=noban@127.0.0.1',
'-par=1', # Use only one script thread to get the exact reject reason for testing
'-acceptnonstdtxn=1', # cltv_invalidate is nonstandard
'-vbparams=mweb:-2:0',
]]
self.setup_clean_chain = True
self.rpc_timeout = 480
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def test_cltv_info(self, *, is_active):
assert_equal(self.nodes[0].getblockchaininfo()['softforks']['bip65'],
{
"active": is_active,
"height": CLTV_HEIGHT,
"type": "buried",
},
)
def run_test(self):
peer = self.nodes[0].add_p2p_connection(P2PInterface())
self.test_cltv_info(is_active=False)
self.log.info("Mining %d blocks", CLTV_HEIGHT - 2)
self.coinbase_txids = [self.nodes[0].getblock(b)['tx'][0] for b in self.nodes[0].generate(CLTV_HEIGHT - 2)]
self.nodeaddress = self.nodes[0].getnewaddress()
self.log.info("Test that an invalid-according-to-CLTV transaction can still appear in a block")
spendtx = create_transaction(self.nodes[0], self.coinbase_txids[0],
self.nodeaddress, amount=1.0)
cltv_invalidate(spendtx)
spendtx.rehash()
tip = self.nodes[0].getbestblockhash()
block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
block = create_block(int(tip, 16), create_coinbase(CLTV_HEIGHT - 1), block_time)
block.nVersion = VB_TOP_BITS
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
self.test_cltv_info(is_active=False) # Not active as of current tip and next block does not need to obey rules
peer.send_and_ping(msg_block(block))
self.test_cltv_info(is_active=True) # Not active as of current tip, but next block must obey rules
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
self.log.info("Test that blocks must now be at least version VB_TOP_BITS")
tip = block.sha256
block_time += 1
block = create_block(tip, create_coinbase(CLTV_HEIGHT), block_time)
block.nVersion = 3
block.solve()
with self.nodes[0].assert_debug_log(expected_msgs=['{}, bad-version(0x00000003)'.format(block.hash)]):
peer.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
peer.sync_with_ping()
self.log.info("Test that invalid-according-to-cltv transactions cannot appear in a block")
block.nVersion = VB_TOP_BITS
spendtx = create_transaction(self.nodes[0], self.coinbase_txids[1],
self.nodeaddress, amount=1.0)
cltv_invalidate(spendtx)
spendtx.rehash()
# First we show that this tx is valid except for CLTV by getting it
# rejected from the mempool for exactly that reason.
assert_equal(
[{'txid': spendtx.hash, 'allowed': False, 'reject-reason': 'non-mandatory-script-verify-flag (Negative locktime)'}],
self.nodes[0].testmempoolaccept(rawtxs=[spendtx.serialize().hex()], maxfeerate=0)
)
# Now we verify that a block with this transaction is also invalid.
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
with self.nodes[0].assert_debug_log(expected_msgs=['CheckInputScripts on {} failed with non-mandatory-script-verify-flag (Negative locktime)'.format(block.vtx[-1].hash)]):
peer.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
peer.sync_with_ping()
self.log.info("Test that a version VB_TOP_BITS block with a valid-according-to-CLTV transaction is accepted")
spendtx = cltv_validate(self.nodes[0], spendtx, CLTV_HEIGHT - 1)
spendtx.rehash()
block.vtx.pop(1)
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
self.test_cltv_info(is_active=True) # Not active as of current tip, but next block must obey rules
peer.send_and_ping(msg_block(block))
self.test_cltv_info(is_active=True) # Active as of current tip
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
if __name__ == '__main__':
BIP65Test().main()
| {
"content_hash": "840a79612debd108f09db18969da36a4",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 179,
"avg_line_length": 40.782051282051285,
"alnum_prop": 0.6468091795033009,
"repo_name": "litecoin-project/litecoin",
"id": "07ee6e61e28fcde478ebf240118cb66e9d372e8b",
"size": "6576",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/feature_cltv.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "898000"
},
{
"name": "C",
"bytes": "1594708"
},
{
"name": "C++",
"bytes": "8860047"
},
{
"name": "CMake",
"bytes": "29310"
},
{
"name": "HTML",
"bytes": "21833"
},
{
"name": "Java",
"bytes": "30291"
},
{
"name": "M4",
"bytes": "226003"
},
{
"name": "Makefile",
"bytes": "123607"
},
{
"name": "Objective-C++",
"bytes": "5489"
},
{
"name": "Python",
"bytes": "2267056"
},
{
"name": "QMake",
"bytes": "798"
},
{
"name": "Sage",
"bytes": "31382"
},
{
"name": "Scheme",
"bytes": "7554"
},
{
"name": "Shell",
"bytes": "150309"
}
],
"symlink_target": ""
} |
from oslo_log import log as logging
from f5_openstack_agent.lbaasv2.drivers.bigip import resource_helper
from f5_openstack_agent.lbaasv2.drivers.bigip import ssl_profile
from requests import HTTPError
from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5_ex
LOG = logging.getLogger(__name__)
class ListenerServiceBuilder(object):
u"""Create LBaaS v2 Listener on BIG-IPs.
Handles requests to create, update, delete LBaaS v2 listener
objects on one or more BIG-IP systems. Maps LBaaS listener
defined in service object to a BIG-IP virtual server.
"""
def __init__(self, service_adapter, cert_manager, parent_ssl_profile=None):
self.cert_manager = cert_manager
self.parent_ssl_profile = parent_ssl_profile
self.vs_helper = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.virtual)
self.service_adapter = service_adapter
LOG.debug("ListenerServiceBuilder: using parent_ssl_profile %s ",
parent_ssl_profile)
def create_listener(self, service, bigips, esd=None):
u"""Create listener on set of BIG-IPs.
Create a BIG-IP virtual server to represent an LBaaS
Listener object.
:param service: Dictionary which contains a both a listener
and load balancer definition.
:param bigips: Array of BigIP class instances to create Listener.
"""
loadbalancer = service.get('loadbalancer', dict())
listener = service.get('listener', dict())
network_id = loadbalancer.get('network_id', "")
vip = self.service_adapter.get_virtual(service)
tls = self.service_adapter.get_tls(service)
if tls:
tls['name'] = vip['name']
tls['partition'] = vip['partition']
persist = listener.get("session_persistence", None)
error = None
for bigip in bigips:
self.service_adapter.get_vlan(vip, bigip, network_id)
if tls:
self.add_ssl_profile(tls, vip, bigip)
if persist and persist.get('type', "") == "APP_COOKIE":
self._add_cookie_persist_rule(vip, persist, bigip)
try:
self.vs_helper.create(bigip, vip)
except HTTPError as err:
if err.response.status_code == 409:
LOG.debug("Virtual server already exists...updating")
try:
self.vs_helper.update(bigip, vip)
except Exception as err:
error = f5_ex.VirtualServerUpdateException(
err.message)
LOG.error("Virtual server update error: %s" %
error.message)
else:
error = f5_ex.VirtualServerCreationException(
err.message)
LOG.error("Virtual server creation error: %s" %
error.message)
except Exception as err:
error = f5_ex.VirtualServerCreationException(
err.message)
LOG.error("Virtual server creation error: %s" %
error.message)
if not persist:
try:
self._remove_cookie_persist_rule(vip, bigip)
except HTTPError as err:
LOG.exception(err.message)
return error
def get_listener(self, service, bigip):
u"""Retrieve BIG-IP virtual from a single BIG-IP system.
:param service: Dictionary which contains a both a listener
and load balancer definition.
:param bigip: Array of BigIP class instances to create Listener.
"""
vip = self.service_adapter.get_virtual_name(service)
obj = self.vs_helper.load(bigip=bigip,
name=vip["name"],
partition=vip["partition"])
return obj
def delete_listener(self, service, bigips):
u"""Delete Listener from a set of BIG-IP systems.
Delete virtual server that represents a Listener object.
:param service: Dictionary which contains a both a listener
and load balancer definition.
:param bigips: Array of BigIP class instances to delete Listener.
"""
vip = self.service_adapter.get_virtual_name(service)
tls = self.service_adapter.get_tls(service)
if tls:
tls['name'] = vip['name']
tls['partition'] = vip['partition']
error = None
for bigip in bigips:
try:
self.vs_helper.delete(bigip,
name=vip["name"],
partition=vip["partition"])
except HTTPError as err:
if err.response.status_code != 404:
error = f5_ex.VirtualServerDeleteException(err.message)
LOG.error("Virtual server delete error: %s",
error.message)
except Exception as err:
error = f5_ex.VirtualServerDeleteException(err.message)
LOG.error("Virtual server delete error: %s",
error.message)
# delete ssl profiles
self.remove_ssl_profiles(tls, bigip)
# delete cookie perist rules
try:
self._remove_cookie_persist_rule(vip, bigip)
except HTTPError as err:
LOG.exception(err.message)
return error
def add_ssl_profile(self, tls, vip, bigip):
if "default_tls_container_id" in tls:
container_ref = tls["default_tls_container_id"]
self._create_ssl_profile(
container_ref, bigip, vip, True)
if "sni_containers" in tls and tls["sni_containers"]:
for container in tls["sni_containers"]:
container_ref = container["tls_container_id"]
self._create_ssl_profile(container_ref, bigip, vip, False)
def _create_ssl_profile(
self, container_ref, bigip, vip, sni_default=False):
cert = self.cert_manager.get_certificate(container_ref)
intermediates = self.cert_manager.get_intermediates(container_ref)
key = self.cert_manager.get_private_key(container_ref)
key_passphrase = self.cert_manager.get_private_key_passphrase(
container_ref)
chain = None
if intermediates:
chain = '\n'.join(list(intermediates))
name = self.cert_manager.get_name(container_ref,
self.service_adapter.prefix)
try:
# upload cert/key and create SSL profile
ssl_profile.SSLProfileHelper.create_client_ssl_profile(
bigip, name, cert, key, key_passphrase=key_passphrase,
sni_default=sni_default, intermediates=chain,
parent_profile=self.parent_ssl_profile)
except HTTPError as err:
if err.response.status_code != 409:
LOG.error("SSL profile creation error: %s" %
err.message)
finally:
del key_passphrase
del cert
del chain
del key
# add ssl profile to virtual server
if 'profiles' not in vip:
vip['profiles'] = list()
client_ssl_profile = {'name': name, 'context': "clientside"}
if client_ssl_profile not in vip['profiles']:
vip['profiles'].append(client_ssl_profile)
def remove_ssl_profiles(self, tls, bigip):
if "default_tls_container_id" in tls and \
tls["default_tls_container_id"]:
container_ref = tls["default_tls_container_id"]
try:
i = container_ref.rindex("/") + 1
except ValueError as error:
LOG.exception(error.message)
else:
name = self.service_adapter.prefix + container_ref[i:]
self._remove_ssl_profile(name, bigip)
if "sni_containers" in tls and tls["sni_containers"]:
for container in tls["sni_containers"]:
container_ref = container["tls_container_id"]
try:
i = container_ref.rindex("/") + 1
except ValueError as error:
LOG.exception(error.message)
else:
name = self.service_adapter.prefix + container_ref[i:]
self._remove_ssl_profile(name, bigip)
def _remove_ssl_profile(self, name, bigip):
"""Delete profile.
:param name: Name of profile to delete.
:param bigip: Single BigIP instances to update.
"""
try:
ssl_client_profile = bigip.tm.ltm.profile.client_ssls.client_ssl
if ssl_client_profile.exists(name=name, partition='Common'):
obj = ssl_client_profile.load(name=name, partition='Common')
obj.delete()
except Exception as err:
# Not necessarily an error -- profile might be referenced
# by another virtual server.
LOG.warning(
"Unable to delete profile %s. "
"Response message: %s." % (name, err.message))
def delete_orphaned_listeners(self, service, bigips):
if 'listeners' not in service:
ip_address = service['loadbalancer']['vip_address']
if str(ip_address).endswith('%0'):
ip_address = ip_address[:-2]
for bigip in bigips:
vses = bigip.tm.ltm.virtuals.get_collection()
for vs in vses:
if str(vs.destination).startswith(ip_address):
vs.delete()
else:
listeners = service['listeners']
for listener in listeners:
svc = {"loadbalancer": service["loadbalancer"],
"listener": listener}
vip = self.service_adapter.get_virtual(svc)
for bigip in bigips:
vses = bigip.tm.ltm.virtuals.get_collection()
orphaned = True
for vs in vses:
if vip['destination'] == vs.destination:
if vip['name'] == vs.name:
orphaned = False
else:
orphaned = False
if orphaned:
for vs in vses:
if vip['name'] == vs.name:
vs.delete()
def _add_cookie_persist_rule(self, vip, persistence, bigip):
"""Add cookie persist rules to virtual server instance.
:param vip: Dictionary which contains name and partition of
virtual server.
:param persistence: Persistence definition.
:param bigip: Single BigIP instances to update.
"""
        LOG.debug("Adding cookie persist rule: %s -- %s",
                  persistence, vip)
cookie_name = persistence.get('cookie_name', None)
if not cookie_name:
return
rule_name = 'app_cookie_' + vip['name']
rule_def = self._create_app_cookie_persist_rule(cookie_name)
r = bigip.tm.ltm.rules.rule
if not r.exists(name=rule_name, partition=vip["partition"]):
try:
r.create(name=rule_name,
apiAnonymous=rule_def,
partition=vip["partition"])
LOG.debug("Created rule %s" % rule_name)
except Exception as err:
                LOG.error("Failed to create rule %s: %s", rule_name, err)
u = bigip.tm.ltm.persistence.universals.universal
if not u.exists(name=rule_name, partition=vip["partition"]):
try:
u.create(name=rule_name,
rule=rule_name,
partition=vip["partition"])
LOG.debug("Created persistence universal %s" % rule_name)
except Exception as err:
LOG.error("Failed to create persistence universal %s" %
rule_name)
LOG.exception(err)
def _create_app_cookie_persist_rule(self, cookiename):
"""Create cookie persistence rule.
:param cookiename: Name to substitute in rule.
"""
rule_text = "when HTTP_REQUEST {\n"
rule_text += " if { [HTTP::cookie " + str(cookiename)
rule_text += "] ne \"\" }{\n"
rule_text += " persist uie [string tolower [HTTP::cookie \""
rule_text += cookiename + "\"]] 3600\n"
rule_text += " }\n"
rule_text += "}\n\n"
rule_text += "when HTTP_RESPONSE {\n"
rule_text += " if { [HTTP::cookie \"" + str(cookiename)
rule_text += "\"] ne \"\" }{\n"
rule_text += " persist add uie [string tolower [HTTP::cookie \""
rule_text += cookiename + "\"]] 3600\n"
rule_text += " }\n"
rule_text += "}\n\n"
return rule_text
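    # Illustrative note (not part of the original class): for a hypothetical
    # cookie name "JSESSIONID", the method above emits an iRule of roughly this
    # shape (whitespace condensed):
    #   when HTTP_REQUEST  { if { [HTTP::cookie JSESSIONID] ne "" }{
    #       persist uie [string tolower [HTTP::cookie "JSESSIONID"]] 3600 } }
    #   when HTTP_RESPONSE { if { [HTTP::cookie "JSESSIONID"] ne "" }{
    #       persist add uie [string tolower [HTTP::cookie "JSESSIONID"]] 3600 } }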
def _remove_cookie_persist_rule(self, vip, bigip):
"""Delete cookie persist rule.
:param vip: Dictionary which contains name and partition of
virtual server.
:param bigip: Single BigIP instances to update.
"""
rule_name = 'app_cookie_' + vip['name']
u = bigip.tm.ltm.persistence.universals.universal
if u.exists(name=rule_name, partition=vip["partition"]):
obj = u.load(name=rule_name, partition=vip["partition"])
obj.delete()
LOG.debug("Deleted persistence universal %s" % rule_name)
r = bigip.tm.ltm.rules.rule
if r.exists(name=rule_name, partition=vip["partition"]):
obj = r.load(name=rule_name, partition=vip["partition"])
obj.delete()
LOG.debug("Deleted rule %s" % rule_name)
def get_stats(self, service, bigips, stat_keys):
"""Return stat values for a single virtual.
Stats to collect are defined as an array of strings in input stats.
Values are summed across one or more BIG-IPs defined in input bigips.
:param service: Has listener name/partition
:param bigips: One or more BIG-IPs to get listener stats from.
:param stat_keys: Array of strings that define which stats to collect.
:return: A dict with key/value pairs for each stat defined in
input stats.
"""
collected_stats = {}
for stat_key in stat_keys:
collected_stats[stat_key] = 0
virtual = self.service_adapter.get_virtual(service)
part = virtual["partition"]
for bigip in bigips:
try:
vs_stats = self.vs_helper.get_stats(
bigip,
name=virtual["name"],
partition=part,
stat_keys=stat_keys)
for stat_key in stat_keys:
if stat_key in vs_stats:
collected_stats[stat_key] += vs_stats[stat_key]
except Exception as e:
# log error but continue on
LOG.error("Error getting virtual server stats: %s", e.message)
return collected_stats
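    # Illustrative example (the stat key names below are hypothetical, not
    # taken from this driver): with stat_keys = ['clientside.curConns',
    # 'clientside.totConns'] and two BIG-IPs reporting (5, 100) and (3, 40)
    # for those keys, get_stats() would return
    # {'clientside.curConns': 8, 'clientside.totConns': 140}.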
| {
"content_hash": "50cdef07fc3e78df91dfa54e9f549869",
"timestamp": "",
"source": "github",
"line_count": 386,
"max_line_length": 79,
"avg_line_length": 40.012953367875646,
"alnum_prop": 0.5467788928455811,
"repo_name": "F5Networks/f5-openstack-agent",
"id": "ebb49a64e5b83af05fbe67d25b3b1c78e432409e",
"size": "16053",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2220"
},
{
"name": "Makefile",
"bytes": "853"
},
{
"name": "Python",
"bytes": "1395055"
},
{
"name": "Ruby",
"bytes": "78"
},
{
"name": "Shell",
"bytes": "15836"
}
],
"symlink_target": ""
} |
from django.db import models
from django.contrib.auth.models import User
class BlogPost(models.Model):
post_date = models.DateTimeField(auto_now_add=True)
update_date = models.DateTimeField(auto_now=True)
edited = models.BooleanField()
draft = models.BooleanField()
author = models.ForeignKey(User)
title = models.CharField(max_length=80)
content = models.TextField()
| {
"content_hash": "245ec5c34752a795835ba41355d83921",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 55,
"avg_line_length": 36.09090909090909,
"alnum_prop": 0.7355163727959698,
"repo_name": "d3matt/d3matt.com",
"id": "78c1b4ce0ce334876860ae6dcf0fdd7c272d2910",
"size": "397",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/d3matt/blog/models.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "5722"
},
{
"name": "Shell",
"bytes": "1267"
}
],
"symlink_target": ""
} |
from .users import *
from .departments import *
from .auth import * | {
"content_hash": "5dcc04231b0b64e3d0687ae9cf8cf3a0",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 26,
"avg_line_length": 22.333333333333332,
"alnum_prop": 0.746268656716418,
"repo_name": "MishaGarbuz/WinVault",
"id": "542b26ac771a46f2c3bf9c32ed45dfd24dafecfd",
"size": "67",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Application/controllers/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1499300"
},
{
"name": "HTML",
"bytes": "846"
},
{
"name": "JavaScript",
"bytes": "1500793"
},
{
"name": "Python",
"bytes": "2377"
}
],
"symlink_target": ""
} |
class SanicMeta(type):
@classmethod
def __prepare__(metaclass, name, bases, **kwds):
cls = super().__prepare__(metaclass, name, bases, **kwds)
cls["__slots__"] = ()
return cls
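# Illustrative effect (sketch, not part of this file): a class declared as
#     class Foo(metaclass=SanicMeta):
#         pass
# ends up with __slots__ == (), because __prepare__ pre-populates the class
# namespace; a class body that assigns its own __slots__ overrides this.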
| {
"content_hash": "ec6209f530dcbe77daaed8f58055544d",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 65,
"avg_line_length": 34.666666666666664,
"alnum_prop": 0.5528846153846154,
"repo_name": "ashleysommer/sanic",
"id": "2c6870c2598422fb8e2588d725a10a2e4bce0744",
"size": "208",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sanic/base/meta.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "159"
},
{
"name": "Go",
"bytes": "482"
},
{
"name": "HTML",
"bytes": "1173"
},
{
"name": "Makefile",
"bytes": "2412"
},
{
"name": "Python",
"bytes": "962491"
}
],
"symlink_target": ""
} |
from django.db import models
from django.db.models import Q
from django.contrib.auth.models import User
from django.core.paginator import ObjectPaginator, InvalidPage
import re
import urlparse
from datetime import datetime, timedelta
from gabaluu.form_choices import COMMENT_TYPES
class Site(object):
pass
class Tag(models.Model):
name = models.CharField(maxlength=100)
count = models.IntegerField()
def _get_font_size(self):
count = self.count + 10
return "font-size: %spx;" % count
font_size = property(_get_font_size)
@classmethod
def find_all(cls, site):
return Tag.objects.all()
@classmethod
def find_or_create(cls, name, new_synopsis=True):
tag, created = Tag.objects.get_or_create(name=name, defaults={'count': 0 })
if new_synopsis or (not new_synopsis and created):
tag.count = tag.count + 1
return tag
@classmethod
def find_by(cls, name):
tags = Tag.objects.filter(name=str(name).strip())
if tags.count() == 0:
return None
return tags[0]
@classmethod
def parse(cls, tags):
"""
        Parses a comma-separated or space-separated list of tags
        into individual tag names.
Todo...Enhance to support more formats (in quotes)
"""
return re.split('[,\\s]+', tags)
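    # Illustrative example: Tag.parse("python, django web") returns
    # ['python', 'django', 'web'].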
class Synopsis(models.Model):
"""
A synopsis of a web resource.
"""
title = models.CharField(maxlength=50)
permalink = models.CharField(maxlength=100)
text = models.CharField(maxlength=5000)
article_site_url = models.CharField(maxlength=500)
article_url = models.CharField(maxlength=500)
pub_date = models.DateTimeField(editable=False)
updated_date = models.DateTimeField(editable=False, blank=True, null=True)
author = models.ForeignKey(User, related_name='author_id')
tags = models.ManyToManyField(Tag)
score = models.IntegerField()
def __str__( self ):
return self.title
def get_absolute_url(self):
return "http://zenopsis.com/synopsis/%s/" % self.permalink
def is_author_of_synopsis(self, user):
if self.author.username == user.username:
return True
return False
@classmethod
def create_permalink(cls, title, article_url):
url_parts = urlparse.urlparse(article_url)
title = Synopsis._remove_special_char(title)
return "_".join((url_parts[1].replace('.', '_'), title))
@classmethod
    def _remove_special_char(cls, string):
        string = string.strip().replace(' ', '_')
        # Strip characters that are not permalink friendly, then map dashes to underscores.
        string = re.sub(r"['$&<>*@.:|~`()%#^?/{},;!+=]", "", string)
        return string.replace("-", "_")
@classmethod
def _run_query(cls, query, pagination=False, current_page=0, items_per_page=10):
if pagination:
paginator = ObjectPaginator(query, items_per_page)
synopsis_list = paginator.get_page(current_page)
else:
paginator = None
synopsis_list = query
return synopsis_list, paginator
@classmethod
def most_recent(cls, pagination=False, current_page=0, items_per_page=10):
return Synopsis._run_query(Synopsis.objects.all().order_by('-pub_date'),
pagination=pagination,
current_page=current_page,
items_per_page=items_per_page)
@classmethod
def popular(cls, pagination=False, current_page=0, items_per_page=10):
return Synopsis._run_query(Synopsis.objects.all().order_by('-score', '-pub_date'),
pagination=pagination,
current_page=current_page,
items_per_page=items_per_page)
@classmethod
def by_tag(cls, tag, pagination=False, current_page=0, items_per_page=10):
return Synopsis._run_query(Synopsis.objects.filter(tags__name__exact=tag).order_by('-pub_date'),
pagination=pagination,
current_page=current_page,
items_per_page=items_per_page)
@classmethod
def user_posted_synopsis_list(cls, user):
return Synopsis.objects.filter(author=user).order_by('-pub_date')
@classmethod
def user_voted_synopsis_list(cls, user):
election_list = Election.objects.filter(user=user).order_by('-date')
return [election.synopsis for election in election_list]
@classmethod
def user_commented_synopsis_list(cls, user):
comment_list = Comment.objects.filter(posted_by=user).order_by('-date')
return [comment.synopsis for comment in comment_list]
@classmethod
def query(cls, query):
# todo -- improve search logic to search across all model fields
return Synopsis.objects.filter(title__icontains=query)
@classmethod
def by_article_site(cls, site):
article_site_url = "http://%s" % site.replace('_', '.')
return Synopsis.objects.filter(article_site_url=article_site_url).order_by('-pub_date')
def _published(self):
"""Return the synopsis formated published date."""
return self.pub_date.strftime('%B %d, %Y')
published = property(_published)
def _tags_as_string(self):
tags_string = ""
for tag in self.tags.all():
tags_string = ' '.join((tags_string, tag.name))
return tags_string.strip()
tags_as_string = property(_tags_as_string)
def _short_text(self):
text = "%s <a href='/synopsis/%s'>More >> </a>" % (self.text[:200],
self.permalink)
return text
short_text = property(_short_text)
def _short_text_preview(self):
text = "<h2>%s</h2> %s <a href='/synopsis/%s' target='_top'>More >> </a>" % (self.title,
self.text[:200],
self.permalink)
return text
short_text_preview = property(_short_text_preview)
def _get_comments(self):
"""Return list of comments on this synopsis. """
return Comment.objects.filter(synopsis=self, type_of="1", reply_to__isnull=True)
comments = property(_get_comments)
def _number_of_comments(self):
count = len(Comment.objects.filter(synopsis=self, type_of="1"))
if count == 1:
return str(count) + " comment"
else:
return str(count) + " comments"
number_of_comments = property(_number_of_comments)
def _get_revisions(self):
"""Return list of revision comments on this synopsis. """
return Comment.objects.filter(synopsis=self, type_of="2")
revisions = property(_get_revisions)
def _get_votes(self):
count = self.score
if count == 1:
return str(count) + " vote"
else:
return str(count) + " votes"
votes = property(_get_votes)
@classmethod
def article_site_list(cls):
from django.db import connection
cursor = connection.cursor()
sql = """select article_site_url, count(article_site_url) from synopsis_synopsis
group by article_site_url having count(article_site_url) >= 1 """
cursor.execute(sql)
rows = cursor.fetchall()
site_list = []
for row in rows:
url_parts = urlparse.urlparse(row[0])
site = Site()
site.key = url_parts[1].replace('.', '_')
site.url = url_parts[1]
site.count = row[1]
site_list.append(site)
return site_list
@classmethod
def tag_and_article_site_list(cls):
site_list = Synopsis.article_site_list()
tag_list = Tag.objects.all()
tags = []
for t in tag_list:
t.uri = "/tags/%s/" % t.name
tags.append(t)
for site in site_list:
tag = Tag()
tag.uri = "/sites/%s/" % site.key
tag.name = site.key.replace('_', '.')
tag.name = tag.name.replace('www.', '')
tag.count = site.count
tags.append(tag)
return tags
class Election(models.Model):
"""
    Records whether a user has voted on a synopsis.
"""
synopsis = models.ForeignKey(Synopsis)
user = models.ForeignKey(User)
date = models.DateTimeField(editable=False)
@classmethod
def has_voted(cls, user, synopsis):
try:
election = Election.objects.get(user__id=user.id,
synopsis__id=synopsis.id)
except:
return False
return True
@classmethod
def vote(cls, synopsis, user):
election = Election.objects.create(user=user,
synopsis=synopsis,
date=datetime.now())
synopsis.score = synopsis.score + 1
synopsis.save()
return synopsis.votes
class Comment(models.Model):
posted_by = models.ForeignKey(User, related_name='posted_by_id')
text = models.CharField(maxlength=5000)
date = models.DateTimeField(editable=False)
type_of = models.CharField(maxlength=50)
synopsis = models.ForeignKey(Synopsis)
reply_to = models.IntegerField(blank=True, null=True)
def _get_formated_dated(self):
"""Return formated date. """
return self.date.strftime('%m/%d/%Y')
formated_date = property(_get_formated_dated)
def _get_comments(self):
"""Return list of comments on this comment. """
return Comment.objects.filter(reply_to=self.id)
comments = property(_get_comments)
def _get_comment_type(self):
"""Return comment type. """
comment_type = "None"
for t in COMMENT_TYPES:
if t[0] == int(self.type_of):
comment_type = t[1]
return comment_type
comment_type = property(_get_comment_type)
def _short_text(self):
text = "%s..<br/><a href='/synopsis/%s#comments-section'>More >></a>" % (self.text[:80],
self.synopsis.permalink)
return text
short_text = property(_short_text)
def is_revision(self):
if self.type_of == "2":
return True
@classmethod
def most_recent(cls):
return Comment.objects.all().order_by('-date')[:8]
@classmethod
def type_from_friendly_name(cls, name):
for t in COMMENT_TYPES:
if t[1].lower() == name.lower():
return t[0]
return "1"
| {
"content_hash": "2d7cc7585ebf110a954d4ad513d5c74f",
"timestamp": "",
"source": "github",
"line_count": 354,
"max_line_length": 106,
"avg_line_length": 35.090395480225986,
"alnum_prop": 0.5378360972468201,
"repo_name": "CarlosGabaldon/zenopsis",
"id": "35656abf2b1b7e5d755289bbd91a38189a027842",
"size": "12422",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "synopsis/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "85945"
},
{
"name": "Python",
"bytes": "68596"
}
],
"symlink_target": ""
} |
from setuptools import setup
with open("README.rst") as f:
long_description = f.read()
version = "0.0.1"
setup(
name="lunar",
version=version,
packages=["lunar"],
author="jasonlyu",
author_email="[email protected]",
description="lunar is a WSGI based webframework in pure Python, without any third-party dependency.",
classifiers=[
"Environment :: Web Environment",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
"Topic :: Software Development :: Libraries :: Python Modules",
],
long_description=long_description,
)
| {
"content_hash": "3d57ade3c2508ccf093954571ac8c24f",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 105,
"avg_line_length": 30.125,
"alnum_prop": 0.6445366528354081,
"repo_name": "jasonlvhit/lunar",
"id": "15b0ffca935f94cc34d96048d77affbafab8d84c",
"size": "723",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "110"
},
{
"name": "HTML",
"bytes": "1243"
},
{
"name": "Python",
"bytes": "108332"
},
{
"name": "Shell",
"bytes": "98"
}
],
"symlink_target": ""
} |
import numpy as np
import pandas as pd
from scipy.stats.distributions import chi2, norm
from statsmodels.graphics import utils
def _calc_survfunc_right(time, status, weights=None, compress=True,
retall=True):
"""
Calculate the survival function and its standard error for a single
group.
"""
# Convert the unique times to ranks (0, 1, 2, ...)
utime, rtime = np.unique(time, return_inverse=True)
# Number of deaths at each unique time.
if weights is None:
d = np.bincount(rtime, weights=status)
else:
d = np.bincount(rtime, weights=status*weights)
# Size of risk set just prior to each event time.
if weights is None:
n = np.bincount(rtime)
else:
n = np.bincount(rtime, weights=weights)
n = np.cumsum(n[::-1])[::-1]
    # Only retain times where an event occurred.
if compress:
ii = np.flatnonzero(d > 0)
d = d[ii]
n = n[ii]
utime = utime[ii]
# The survival function probabilities.
sp = 1 - d / n.astype(np.float64)
sp = np.log(sp)
sp = np.cumsum(sp)
sp = np.exp(sp)
if not retall:
return sp, utime, rtime, n, d
# Standard errors
if weights is None:
# Greenwood's formula
se = d / (n * (n - d)).astype(np.float64)
se = np.cumsum(se)
se = np.sqrt(se)
se *= sp
else:
# Tsiatis' (1981) formula
se = d / (n * n).astype(np.float64)
se = np.cumsum(se)
se = np.sqrt(se)
return sp, se, utime, rtime, n, d
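# Illustrative summary of the estimators implemented above (d_i = events at
# time t_i, n_i = number at risk just before t_i):
#     Kaplan-Meier:  S(t) = prod_{t_i <= t} (1 - d_i / n_i)
#     Greenwood:     Var(S(t)) ~ S(t)**2 * sum_{t_i <= t} d_i / (n_i * (n_i - d_i))
#     Tsiatis (weighted case): the variance sum uses d_i / n_i**2 instead.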
def _calc_incidence_right(time, status, weights=None):
"""
Calculate the cumulative incidence function and its standard error.
"""
# Calculate the all-cause survival function.
status0 = (status >= 1).astype(np.float64)
sp, utime, rtime, n, d = _calc_survfunc_right(time, status0, weights,
compress=False, retall=False)
ngrp = status.max()
# Number of cause-specific deaths at each unique time.
d = []
for k in range(ngrp):
status0 = (status == k + 1).astype(np.float64)
if weights is None:
d0 = np.bincount(rtime, weights=status0, minlength=len(utime))
else:
d0 = np.bincount(rtime, weights=status0*weights,
minlength=len(utime))
d.append(d0)
# The cumulative incidence function probabilities.
ip = []
sp0 = np.r_[1, sp[:-1]] / n
for k in range(ngrp):
ip0 = np.cumsum(sp0 * d[k])
ip.append(ip0)
# The standard error of the cumulative incidence function.
if weights is not None:
return ip, None, utime
se = []
da = sum(d)
for k in range(ngrp):
ra = da / (n * (n - da))
v = ip[k]**2 * np.cumsum(ra)
v -= 2 * ip[k] * np.cumsum(ip[k] * ra)
v += np.cumsum(ip[k]**2 * ra)
ra = (n - d[k]) * d[k] / n
v += np.cumsum(sp0**2 * ra)
ra = sp0 * d[k] / n
v -= 2 * ip[k] * np.cumsum(ra)
v += 2 * np.cumsum(ip[k] * ra)
se.append(np.sqrt(v))
return ip, se, utime
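# Illustrative summary of the quantity computed above: with S(t_{i-1}) the
# all-cause survival just before t_i, d_{ki} the cause-k events at t_i and
# n_i the number at risk, the cumulative incidence of cause k is
#     I_k(t) = sum_{t_i <= t} S(t_{i-1}) * d_{ki} / n_i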
def _checkargs(time, status, freq_weights):
if len(time) != len(status):
raise ValueError("time and status must have the same length")
if freq_weights is not None and (len(freq_weights) != len(time)):
raise ValueError("weights, time and status must have the same length")
class CumIncidenceRight(object):
"""
Estimation and inference for a cumulative incidence function.
If J = 1, 2, ... indicates the event type, the cumulative
incidence function for cause j is:
I(t, j) = P(T <= t and J=j)
Only right censoring is supported. If frequency weights are provided,
the point estimate is returned without a standard error.
Parameters
----------
time : array-like
An array of times (censoring times or event times)
status : array-like
        If status >= 1, it indicates which event occurred at time t. If
status = 0, the subject was censored at time t.
title : string
Optional title used for plots and summary output.
freq_weights : array-like
Optional frequency weights
Attributes
----------
times : array-like
The distinct times at which the incidence rates are estimated
cinc : list of arrays
cinc[k-1] contains the estimated cumulative incidence rates
for outcome k=1,2,...
cinc_se : list of arrays
The standard errors for the values in `cinc`.
References
----------
The Stata stcompet procedure:
http://www.stata-journal.com/sjpdf.html?articlenum=st0059
Dinse, G. E. and M. G. Larson. 1986. A note on semi-Markov models
for partially censored data. Biometrika 73: 379-386.
Marubini, E. and M. G. Valsecchi. 1995. Analysing Survival Data
from Clinical Trials and Observational Studies. Chichester, UK:
John Wiley & Sons.
"""
def __init__(self, time, status, title=None, freq_weights=None):
_checkargs(time, status, freq_weights)
time = self.time = np.asarray(time)
status = self.status = np.asarray(status)
if freq_weights is not None:
freq_weights = self.freq_weights = np.asarray(freq_weights)
x = _calc_incidence_right(time, status, freq_weights)
self.cinc = x[0]
self.cinc_se = x[1]
self.times = x[2]
self.title = "" if not title else title
class SurvfuncRight(object):
"""
Estimation and inference for a survival function.
The survival function S(t) = P(T > t) is the probability that an
event time T is greater than t.
This class currently only supports right censoring.
Parameters
----------
time : array-like
An array of times (censoring times or event times)
status : array-like
Status at the event time, status==1 is the 'event'
(e.g. death, failure), meaning that the event
occurs at the given value in `time`; status==0
indicates that censoring has occured, meaning that
the event occurs after the given value in `time`.
title : string
Optional title used for plots and summary output.
freq_weights : array-like
Optional frequency weights
Attributes
----------
surv_prob : array-like
The estimated value of the survivor function at each time
point in `surv_times`.
surv_prob_se : array-like
The standard errors for the values in `surv_prob`.
surv_times : array-like
The points where the survival function changes.
n_risk : array-like
The number of subjects at risk just before each time value in
`surv_times`.
n_events : array-like
The number of events (e.g. deaths) that occur at each point
in `surv_times`.
"""
def __init__(self, time, status, title=None, freq_weights=None):
_checkargs(time, status, freq_weights)
time = self.time = np.asarray(time)
status = self.status = np.asarray(status)
if freq_weights is not None:
freq_weights = self.freq_weights = np.asarray(freq_weights)
x = _calc_survfunc_right(time, status, freq_weights)
self.surv_prob = x[0]
self.surv_prob_se = x[1]
self.surv_times = x[2]
self.n_risk = x[4]
self.n_events = x[5]
self.title = "" if not title else title
def plot(self, ax=None):
"""
Plot the survival function.
Examples
--------
Change the line color:
>>> import statsmodels.api as sm
>>> data = sm.datasets.get_rdataset("flchain", "survival").data
>>> df = data.loc[data.sex == "F", :]
>>> sf = sm.SurvfuncRight(df["futime"], df["death"])
>>> fig = sf.plot()
>>> ax = fig.get_axes()[0]
>>> li = ax.get_lines()
>>> li[0].set_color('purple')
>>> li[1].set_color('purple')
Don't show the censoring points:
>>> fig = sf.plot()
>>> ax = fig.get_axes()[0]
>>> li = ax.get_lines()
>>> li[1].set_visible(False)
"""
return plot_survfunc(self, ax)
def quantile(self, p):
"""
Estimated quantile of a survival distribution.
Parameters
----------
p : float
The probability point at which the quantile
is determined.
Returns the estimated quantile.
"""
# SAS uses a strict inequality here.
ii = np.flatnonzero(self.surv_prob < 1 - p)
if len(ii) == 0:
return np.nan
return self.surv_times[ii[0]]
def quantile_ci(self, p, alpha=0.05, method='cloglog'):
"""
Returns a confidence interval for a survival quantile.
Parameters
----------
p : float
The probability point for which a confidence interval is
determined.
alpha : float
The confidence interval has nominal coverage probability
1 - `alpha`.
method : string
            Function to use for g-transformation; must be one of
            'cloglog' (default), 'linear', 'log', 'logit' or 'asinsqrt'.
Returns
-------
lb : float
The lower confidence limit.
ub : float
The upper confidence limit.
Notes
-----
The confidence interval is obtained by inverting Z-tests. The
limits of the confidence interval will always be observed
event times.
References
----------
The method is based on the approach used in SAS, documented here:
http://support.sas.com/documentation/cdl/en/statug/68162/HTML/default/viewer.htm#statug_lifetest_details03.htm
"""
tr = norm.ppf(1 - alpha / 2)
method = method.lower()
if method == "cloglog":
g = lambda x: np.log(-np.log(x))
gprime = lambda x: -1 / (x * np.log(x))
elif method == "linear":
g = lambda x: x
gprime = lambda x: 1
elif method == "log":
g = lambda x: np.log(x)
gprime = lambda x: 1 / x
elif method == "logit":
g = lambda x: np.log(x / (1 - x))
gprime = lambda x: 1 / (x * (1 - x))
elif method == "asinsqrt":
g = lambda x: np.arcsin(np.sqrt(x))
gprime = lambda x: 1 / (2 * np.sqrt(x) * np.sqrt(1 - x))
else:
raise ValueError("unknown method")
r = g(self.surv_prob) - g(1 - p)
r /= (gprime(self.surv_prob) * self.surv_prob_se)
ii = np.flatnonzero(np.abs(r) <= tr)
if len(ii) == 0:
return np.nan, np.nan
lb = self.surv_times[ii[0]]
if ii[-1] == len(self.surv_times) - 1:
ub = np.inf
else:
ub = self.surv_times[ii[-1] + 1]
return lb, ub
def summary(self):
"""
Return a summary of the estimated survival function.
        The summary is a dataframe containing the unique event times,
estimated survival function values, and related quantities.
"""
df = pd.DataFrame(index=self.surv_times)
df.index.name = "Time"
df["Surv prob"] = self.surv_prob
df["Surv prob SE"] = self.surv_prob_se
df["num at risk"] = self.n_risk
df["num events"] = self.n_events
return df
def simultaneous_cb(self, alpha=0.05, method="hw", transform="log"):
"""
Returns a simultaneous confidence band for the survival function.
Parameters
----------
alpha : float
`1 - alpha` is the desired simultaneous coverage
probability for the confidence region. Currently alpha
must be set to 0.05, giving 95% simultaneous intervals.
method : string
The method used to produce the simultaneous confidence
band. Only the Hall-Wellner (hw) method is currently
implemented.
transform : string
            The transform used to produce the interval (note that the returned
interval is on the survival probability scale regardless
of which transform is used). Only `log` and `arcsin` are
implemented.
Returns
-------
lcb : array-like
The lower confidence limits corresponding to the points
in `surv_times`.
ucb : array-like
The upper confidence limits corresponding to the points
in `surv_times`.
"""
method = method.lower()
if method != "hw":
msg = "only the Hall-Wellner (hw) method is implemented"
raise ValueError(msg)
if alpha != 0.05:
raise ValueError("alpha must be set to 0.05")
transform = transform.lower()
s2 = self.surv_prob_se**2 / self.surv_prob**2
nn = self.n_risk
if transform == "log":
denom = np.sqrt(nn) * np.log(self.surv_prob)
theta = 1.3581 * (1 + nn * s2) / denom
theta = np.exp(theta)
lcb = self.surv_prob**(1/theta)
ucb = self.surv_prob**theta
elif transform == "arcsin":
k = 1.3581
k *= (1 + nn * s2) / (2 * np.sqrt(nn))
k *= np.sqrt(self.surv_prob / (1 - self.surv_prob))
f = np.arcsin(np.sqrt(self.surv_prob))
v = np.clip(f - k, 0, np.inf)
lcb = np.sin(v)**2
v = np.clip(f + k, -np.inf, np.pi/2)
ucb = np.sin(v)**2
else:
raise ValueError("Unknown transform")
return lcb, ucb
def survdiff(time, status, group, weight_type=None, strata=None, **kwargs):
"""
Test for the equality of two survival distributions.
    Parameters
    ----------
time : array-like
The event or censoring times.
status : array-like
The censoring status variable, status=1 indicates that the
        event occurred, status=0 indicates that the observation was
censored.
group : array-like
Indicators of the two groups
weight_type : string
The following weight types are implemented:
None (default) : logrank test
fh : Fleming-Harrington, weights by S^(fh_p),
requires exponent fh_p to be provided as keyword
argument; the weights are derived from S defined at
the previous event time, and the first weight is
always 1.
gb : Gehan-Breslow, weights by the number at risk
tw : Tarone-Ware, weights by the square root of the number
at risk
strata : array-like
Optional stratum indicators for a stratified test
Returns
--------
chisq : The chi-square (1 degree of freedom) distributed test
statistic value
pvalue : The p-value for the chi^2 test
"""
# TODO: extend to handle more than two groups
time = np.asarray(time)
status = np.asarray(status)
group = np.asarray(group)
gr = np.unique(group)
if len(gr) != 2:
raise ValueError("logrank only supports two groups")
if strata is None:
obs, var = _survdiff(time, status, group, weight_type, gr,
**kwargs)
else:
strata = np.asarray(strata)
stu = np.unique(strata)
obs, var = 0., 0.
for st in stu:
# could be more efficient?
ii = (strata == st)
obs1, var1 = _survdiff(time[ii], status[ii], group[ii],
weight_type, gr, **kwargs)
obs += obs1
var += var1
zstat = obs / np.sqrt(var)
# The chi^2 test statistic and p-value.
chisq = zstat**2
pvalue = 1 - chi2.cdf(chisq, 1)
return chisq, pvalue
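# Illustrative note on the statistic computed above: with O and E the observed
# and expected event counts in the first group (summed over event times) and V
# the corresponding hypergeometric variance, survdiff forms
#     Z = (O - E) / sqrt(V),    chisq = Z**2  ~  chi2(1) under the null.
# The optional weights simply reweight each event time's contribution to
# O - E and V.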
def _survdiff(time, status, group, weight_type, gr, **kwargs):
# logrank test for one stratum
ii = (group == gr[0])
time1 = time[ii]
status1 = status[ii]
ii = (group == gr[1])
time2 = time[ii]
status2 = status[ii]
# Get the unique times.
utimes = np.unique(time)
status1 = status1.astype(np.bool)
status2 = status2.astype(np.bool)
# The positions of the observed event times in each group, in the
# overall list of unique times.
ix1 = np.searchsorted(utimes, time1[status1])
ix2 = np.searchsorted(utimes, time2[status2])
# Number of events observed at each time point, per group and
# overall.
obs1 = np.bincount(ix1, minlength=len(utimes))
obs2 = np.bincount(ix2, minlength=len(utimes))
obs = obs1 + obs2
# Risk set size at each time point, per group and overall.
nvec = []
for time0 in time1, time2:
ix = np.searchsorted(utimes, time0)
n = np.bincount(ix, minlength=len(utimes))
n = np.cumsum(n)
n = np.roll(n, 1)
n[0] = 0
n = len(time0) - n
nvec.append(n)
n1, n2 = tuple(nvec)
n = n1 + n2
# The variance of event counts in the first group.
r = n1 / n.astype(np.float64)
var = obs * r * (1 - r) * (n - obs) / (n - 1)
# The expected number of events in the first group.
exp1 = obs * r
weights = None
if weight_type is not None:
weight_type = weight_type.lower()
if weight_type == "gb":
weights = n
elif weight_type == "tw":
weights = np.sqrt(n)
elif weight_type == "fh":
if "fh_p" not in kwargs:
msg = "weight_type type 'fh' requires specification of fh_p"
raise ValueError(msg)
fh_p = kwargs["fh_p"]
# Calculate the survivor function directly to avoid the
# overhead of creating a SurvfuncRight object
sp = 1 - obs / n.astype(np.float64)
sp = np.log(sp)
sp = np.cumsum(sp)
sp = np.exp(sp)
weights = sp**fh_p
weights = np.roll(weights, 1)
weights[0] = 1
else:
raise ValueError("weight_type not implemented")
# The Z-scale test statistic (compare to normal reference
# distribution).
ix = np.flatnonzero(n > 1)
if weights is None:
obs = np.sum(obs1[ix] - exp1[ix])
var = np.sum(var[ix])
else:
obs = np.dot(weights[ix], obs1[ix] - exp1[ix])
var = np.dot(weights[ix]**2, var[ix])
return obs, var
def plot_survfunc(survfuncs, ax=None):
"""
Plot one or more survivor functions.
Parameters
----------
survfuncs : object or array-like
A single SurvfuncRight object, or a list or SurvfuncRight
objects that are plotted together.
Returns
-------
A figure instance on which the plot was drawn.
Examples
--------
Add a legend:
>>> import statsmodels.api as sm
>>> from statsmodels.duration.survfunc import plot_survfunc
>>> data = sm.datasets.get_rdataset("flchain", "survival").data
>>> df = data.loc[data.sex == "F", :]
>>> sf0 = sm.SurvfuncRight(df["futime"], df["death"])
>>> sf1 = sm.SurvfuncRight(3.0 * df["futime"], df["death"])
>>> fig = plot_survfunc([sf0, sf1])
>>> ax = fig.get_axes()[0]
>>> ax.set_position([0.1, 0.1, 0.64, 0.8])
>>> ha, lb = ax.get_legend_handles_labels()
>>> leg = fig.legend((ha[0], ha[1]), (lb[0], lb[1]), 'center right')
Change the line colors:
>>> fig = plot_survfunc([sf0, sf1])
>>> ax = fig.get_axes()[0]
>>> ax.set_position([0.1, 0.1, 0.64, 0.8])
>>> ha, lb = ax.get_legend_handles_labels()
>>> ha[0].set_color('purple')
>>> ha[1].set_color('orange')
"""
fig, ax = utils.create_mpl_ax(ax)
# If we have only a single survival function to plot, put it into
# a list.
try:
assert(type(survfuncs[0]) is SurvfuncRight)
except:
survfuncs = [survfuncs]
for gx, sf in enumerate(survfuncs):
# The estimated survival function does not include a point at
# time 0, include it here for plotting.
surv_times = np.concatenate(([0], sf.surv_times))
surv_prob = np.concatenate(([1], sf.surv_prob))
# If the final times are censoring times they are not included
# in the survival function so we add them here
mxt = max(sf.time)
if mxt > surv_times[-1]:
surv_times = np.concatenate((surv_times, [mxt]))
surv_prob = np.concatenate((surv_prob, [surv_prob[-1]]))
label = getattr(sf, "title", "Group %d" % (gx + 1))
li, = ax.step(surv_times, surv_prob, '-', label=label, lw=2,
where='post')
# Plot the censored points.
ii = np.flatnonzero(np.logical_not(sf.status))
ti = sf.time[ii]
jj = np.searchsorted(surv_times, ti) - 1
sp = surv_prob[jj]
ax.plot(ti, sp, '+', ms=12, color=li.get_color(),
label=label + " points")
ax.set_ylim(0, 1.01)
return fig
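if __name__ == "__main__":
    # Illustrative usage sketch only; the synthetic times, status codes and
    # group labels below are made up for demonstration and are not part of
    # statsmodels.
    times = np.r_[2, 3, 3, 5, 6, 7, 8, 9, 11, 12, 13, 15]
    status = np.r_[1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1]
    group = np.r_[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
    sf = SurvfuncRight(times, status, title="toy data")
    print(sf.summary())
    print("Estimated median survival time: %s" % sf.quantile(0.5))
    chisq, pvalue = survdiff(times, status, group)
    print("Logrank test: chisq=%.3f, p-value=%.3f" % (chisq, pvalue))
    # Cumulative incidence with two competing event types (coded 1 and 2).
    status2 = np.r_[1, 0, 2, 1, 0, 2, 1, 0, 1, 2, 0, 1]
    ci = CumIncidenceRight(times, status2, title="toy competing risks")
    print("Cumulative incidence of cause 1 at the last event time: %.3f"
          % ci.cinc[0][-1])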
| {
"content_hash": "b4ba3a5303fa153f58914aa649c6d7f9",
"timestamp": "",
"source": "github",
"line_count": 673,
"max_line_length": 120,
"avg_line_length": 31.227340267459137,
"alnum_prop": 0.5614293871336125,
"repo_name": "yl565/statsmodels",
"id": "abe0a84425292fedd864afd3040deb771a8391f6",
"size": "21016",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "statsmodels/duration/survfunc.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AGS Script",
"bytes": "457842"
},
{
"name": "Assembly",
"bytes": "10035"
},
{
"name": "Batchfile",
"bytes": "351"
},
{
"name": "C",
"bytes": "12088"
},
{
"name": "HTML",
"bytes": "148470"
},
{
"name": "Matlab",
"bytes": "2609"
},
{
"name": "Python",
"bytes": "9670281"
},
{
"name": "R",
"bytes": "55204"
},
{
"name": "Stata",
"bytes": "54989"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
from cryptography import utils
from cryptography.exceptions import UnsupportedAlgorithm, _Reasons
from cryptography.hazmat.primitives import hashes
@utils.register_interface(hashes.HashContext)
class _HashContext(object):
def __init__(self, backend, algorithm, ctx=None):
self._algorithm = algorithm
self._backend = backend
if ctx is None:
ctx = self._backend._lib.Cryptography_EVP_MD_CTX_new()
ctx = self._backend._ffi.gc(
ctx, self._backend._lib.Cryptography_EVP_MD_CTX_free
)
evp_md = self._backend._lib.EVP_get_digestbyname(
algorithm.name.encode("ascii"))
if evp_md == self._backend._ffi.NULL:
raise UnsupportedAlgorithm(
"{0} is not a supported hash on this backend.".format(
algorithm.name),
_Reasons.UNSUPPORTED_HASH
)
res = self._backend._lib.EVP_DigestInit_ex(ctx, evp_md,
self._backend._ffi.NULL)
self._backend.openssl_assert(res != 0)
self._ctx = ctx
algorithm = utils.read_only_property("_algorithm")
def copy(self):
copied_ctx = self._backend._lib.Cryptography_EVP_MD_CTX_new()
copied_ctx = self._backend._ffi.gc(
copied_ctx, self._backend._lib.Cryptography_EVP_MD_CTX_free
)
res = self._backend._lib.EVP_MD_CTX_copy_ex(copied_ctx, self._ctx)
self._backend.openssl_assert(res != 0)
return _HashContext(self._backend, self.algorithm, ctx=copied_ctx)
def update(self, data):
res = self._backend._lib.EVP_DigestUpdate(self._ctx, data, len(data))
self._backend.openssl_assert(res != 0)
def finalize(self):
buf = self._backend._ffi.new("unsigned char[]",
self._backend._lib.EVP_MAX_MD_SIZE)
outlen = self._backend._ffi.new("unsigned int *")
res = self._backend._lib.EVP_DigestFinal_ex(self._ctx, buf, outlen)
self._backend.openssl_assert(res != 0)
self._backend.openssl_assert(outlen[0] == self.algorithm.digest_size)
return self._backend._ffi.buffer(buf)[:outlen[0]]
| {
"content_hash": "4f7c9548ac9ab28f8cee9f059d56e434",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 79,
"avg_line_length": 40.92982456140351,
"alnum_prop": 0.5885126446635234,
"repo_name": "zlsun/XX-Net",
"id": "2c8fce1a43c5442c895303d7e060b83c0a5aa515",
"size": "2514",
"binary": false,
"copies": "21",
"ref": "refs/heads/master",
"path": "code/default/python27/1.0/lib/darwin/cryptography/hazmat/backends/openssl/hashes.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3884"
},
{
"name": "C",
"bytes": "53301"
},
{
"name": "CSS",
"bytes": "86883"
},
{
"name": "HTML",
"bytes": "188948"
},
{
"name": "JavaScript",
"bytes": "6274"
},
{
"name": "Python",
"bytes": "15347559"
},
{
"name": "Shell",
"bytes": "7812"
},
{
"name": "Visual Basic",
"bytes": "1700"
}
],
"symlink_target": ""
} |
"""
Tests of neo.io.asciispiketrainio
"""
# needed for python 3 compatibility
from __future__ import absolute_import, division
import unittest
from neo.io import AsciiSpikeTrainIO
from neo.test.iotest.common_io_test import BaseTestIO
class TestAsciiSpikeTrainIO(BaseTestIO, unittest.TestCase, ):
ioclass = AsciiSpikeTrainIO
files_to_download = ['File_ascii_spiketrain_1.txt']
files_to_test = files_to_download
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "8557b558f109b89019a7a7f1b86ef4b5",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 61,
"avg_line_length": 22.666666666666668,
"alnum_prop": 0.7331932773109243,
"repo_name": "rgerkin/python-neo",
"id": "b796ca607e1d88da80e94b3e37010c6c54f1ec54",
"size": "500",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neo/test/iotest/test_asciispiketrainio.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "2486594"
}
],
"symlink_target": ""
} |
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Culture.abvdcode'
db.delete_column('cultures', 'abvdcode')
def backwards(self, orm):
# Adding field 'Culture.abvdcode'
db.add_column('cultures', 'abvdcode',
self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
keep_default=False)
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'core.culture': {
'Meta': {'ordering': "['culture']", 'object_name': 'Culture', 'db_table': "'cultures'"},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'culture': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'editor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'fact': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['core.Language']", 'symmetrical': 'False', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128'})
},
u'core.language': {
'Meta': {'ordering': "['language']", 'unique_together': "(('isocode', 'language'),)", 'object_name': 'Language', 'db_table': "'languages'"},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'classification': ('django.db.models.fields.TextField', [], {}),
'editor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isocode': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '3', 'db_index': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
u'core.section': {
'Meta': {'ordering': "['id']", 'object_name': 'Section', 'db_table': "'sections'"},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'editor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'section': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'})
},
u'core.source': {
'Meta': {'ordering': "['author', 'year']", 'unique_together': "(['author', 'year'],)", 'object_name': 'Source', 'db_table': "'sources'", 'index_together': "[['author', 'year']]"},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'bibtex': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'editor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reference': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '1000'}),
'year': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['core'] | {
"content_hash": "d8f63e7c1e82631c292170816dae67fd",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 195,
"avg_line_length": 73.86138613861387,
"alnum_prop": 0.5459785522788204,
"repo_name": "shh-dlce/pulotu",
"id": "b428739fa08df42392b920904cafbf6b6b6895bf",
"size": "7484",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "website/apps/core/migrations/0013_auto__del_field_culture_abvdcode.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "56056"
},
{
"name": "HTML",
"bytes": "87074"
},
{
"name": "JavaScript",
"bytes": "348481"
},
{
"name": "Python",
"bytes": "1438334"
}
],
"symlink_target": ""
} |
import code
##
# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# For those usages not covered by the Apache License, Version 2.0 please
# contact with: [email protected]
##
'''
Implement the logic for obtaining compute node information as a
Resource Availability Descriptor (RAD)
'''
__author__="Pablo Montes"
#TODO: remove warnings, remove unused things
from definitionsClass import definitionsClass
from auxiliary_functions import get_ssh_connection
import libvirt
from xml.etree import ElementTree
import paramiko
import re
import yaml
def getCredentials(creds, data):
"""Used as a backup for libvirt.openAuth in order to provide password that came with data,
not used by the moment
"""
print "RADclass:getCredentials", creds, data
for cred in creds:
print cred[1] + ": ",
if cred[0] == libvirt.VIR_CRED_AUTHNAME:
cred[4] = data
elif cred[0] == libvirt.VIR_CRED_PASSPHRASE:
cred[4] = data
else:
return -1
return 0
class RADclass():
def __init__(self):
self.name = None
self.machine = None
self.user = None
self.password = None
self.nodes = dict() #Dictionary of nodes. Keys are the node id, values are Node() elements
self.nr_processors = None #Integer. Number of processors in the system
self.processor_family = None #If all nodes have the same value equal them, otherwise keep as None
self.processor_manufacturer = None #If all nodes have the same value equal them, otherwise keep as None
self.processor_version = None #If all nodes have the same value equal them, otherwise keep as None
self.processor_features = None #If all nodes have the same value equal them, otherwise keep as None
self.memory_type = None #If all nodes have the same value equal them, otherwise keep as None
self.memory_freq = None #If all nodes have the same value equal them, otherwise keep as None
self.memory_nr_channels = None #If all nodes have the same value equal them, otherwise keep as None
self.memory_size = None #Integer. Sum of the memory in all nodes
self.memory_hugepage_sz = None
self.hypervisor = Hypervisor() #Hypervisor information
self.os = OpSys() #Operating system information
        self.ports_list = list()   #List containing all network ports in the node. This is used to avoid having the same port defined multiple times in the system
def obtain_RAD(self, user, password, machine):
"""This function obtains the RAD information from the remote server.
It uses both a ssh and a libvirt connection.
        It is desirable in future versions to get rid of the ssh connection, but currently
libvirt does not provide all the needed information.
Returns (True, Warning) in case of success and (False, <error>) in case of error"""
warning_text=""
try:
#Get virsh and ssh connection
(return_status, code) = get_ssh_connection(machine, user, password)
if not return_status:
print 'RADclass.obtain_RAD() error:', code
return (return_status, code)
ssh_conn = code
self.connection_IP = machine
#print "libvirt open pre"
virsh_conn=libvirt.open("qemu+ssh://"+user+'@'+machine+"/system")
#virsh_conn=libvirt.openAuth("qemu+ssh://"+user+'@'+machine+"/system",
# [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_PASSPHRASE, libvirt.VIR_CRED_USERNAME], getCredentials, password],
# 0)
#print "libvirt open after"
# #Set connection infomation
# (return_status, code) = self.set_connection_info(machine, user, password)
# if not return_status:
# return (return_status, 'Error in '+machine+': '+code)
#Set server name
machine_name = get_hostname(virsh_conn)
(return_status, code) = self.set_name(machine_name)
if not return_status:
return (return_status, 'Error at self.set_name in '+machine+': '+code)
warning_text += code
#Get the server processors information
processors = dict()
(return_status, code) = get_processor_information(ssh_conn, virsh_conn, processors)
if not return_status:
return (return_status, 'Error at get_processor_information in '+machine+': '+code)
warning_text += code
#Get the server memory information
memory_nodes = dict()
(return_status, code) = get_memory_information(ssh_conn, virsh_conn, memory_nodes)
if not return_status:
return (return_status, 'Error at get_memory_information in '+machine+': '+code)
warning_text += code
#Get nics information
nic_topology = dict()
# (return_status, code) = get_nic_information_old(ssh_conn, nic_topology)
(return_status, code) = get_nic_information(ssh_conn, virsh_conn, nic_topology)
if not return_status:
                return (return_status, 'Error at get_nic_information in '+machine+': '+code)
warning_text += code
#Pack each processor, memory node and nics in a node element
#and add the node to the RAD element
for socket_id, processor in processors.iteritems():
node = Node()
if not socket_id in nic_topology:
nic_topology[socket_id] = list()
(return_status, code) = node.set(processor, memory_nodes[socket_id], nic_topology[socket_id])
# else:
# (return_status, code) = node.set(processor, memory_nodes[socket_id])
if not return_status:
return (return_status, 'Error at node.set in '+machine+': '+code)
warning_text += code
(return_status, code) = self.insert_node(node)
if not return_status:
return (return_status, 'Error at self.insert_node in '+machine+': '+code)
if code not in warning_text:
warning_text += code
#Fill os data
os = OpSys()
(return_status, code) = get_os_information(ssh_conn, os)
if not return_status:
return (return_status, 'Error at get_os_information in '+machine+': '+code)
warning_text += code
(return_status, code) = self.set_os(os)
if not return_status:
return (return_status, 'Error at self.set_os in '+machine+': '+code)
warning_text += code
#Fill hypervisor data
hypervisor = Hypervisor()
(return_status, code) = get_hypervisor_information(virsh_conn, hypervisor)
if not return_status:
return (return_status, 'Error at get_hypervisor_information in '+machine+': '+code)
warning_text += code
(return_status, code) = self.set_hypervisor(hypervisor)
if not return_status:
return (return_status, 'Error at self.set_hypervisor in '+machine+': '+code)
warning_text += code
ssh_conn.close()
return (True, warning_text)
except libvirt.libvirtError, e:
text = e.get_error_message()
print 'RADclass.obtain_RAD() exception:', text
return (False, text)
except paramiko.ssh_exception.SSHException, e:
text = e.args[0]
print "obtain_RAD ssh Exception:", text
return False, text
def set_name(self,name):
"""Sets the machine name.
Returns (True,Warning) in case of success and ('False',<error description>) in case of error"""
if not isinstance(name,str):
return (False, 'The variable \'name\' must be text')
self.name = name
return (True, "")
def set_connection_info(self, machine, user, password):
"""Sets the connection information.
Returns (True,Warning) in case of success and ('False',<error description>) in case of error"""
if not isinstance(machine,str):
return (False, 'The variable \'machine\' must be text')
if not isinstance(user,str):
return (False, 'The variable \'user\' must be text')
# if not isinstance(password,str):
# return (False, 'The variable \'password\' must be text')
(self.machine, self.user, self.password) = (machine, user, password)
return (True, "")
def insert_node(self,node):
"""Inserts a new node and updates class variables.
Returns (True,Warning) in case of success and ('False',<error description>) in case of error"""
if not isinstance(node,Node):
return (False, 'The variable \'node\' must be a Node element')
if node.id_ in self.nodes:
return (False, 'The node is already present in the nodes list.')
#Check if network ports have not been inserted previously as part of another node
for port_key in node.ports_list:
if port_key in self.ports_list:
return (False, 'Network port '+port_key+' defined multiple times in the system')
self.ports_list.append(port_key)
#Insert the new node
self.nodes[node.id_] = node
#update variables
self.update_variables()
return (True, "")
def update_variables(self):
"""Updates class variables.
Returns (True,Warning) in case of success and ('False',<error description>) in case of error"""
warning_text=""
#The number of processors and nodes is the same
self.nr_processors = len(self.nodes)
#If all processors are the same get the values. Otherwise keep them as none
prev_processor_family = prev_processor_manufacturer = prev_processor_version = prev_processor_features = None
different_processor_family = different_processor_manufacturer = different_processor_version = different_processor_features = False
for node in self.nodes.itervalues():
(self.processor_family, self.processor_manufacturer, self.processor_version, self.processor_features) = node.get_processor_info()
if prev_processor_family != None and self.processor_family != prev_processor_family:
different_processor_family = True
if prev_processor_manufacturer != None and self.processor_manufacturer != prev_processor_manufacturer:
different_processor_manufacturer = True
if prev_processor_version != None and self.processor_version != prev_processor_version:
different_processor_version = True
if prev_processor_features != None and self.processor_features != prev_processor_features:
different_processor_features = True
(prev_processor_family, prev_processor_manufacturer, prev_processor_version, prev_processor_features) = (self.processor_family, self.processor_manufacturer, self.processor_version, self.processor_features)
if different_processor_family:
self.processor_family = None
if different_processor_features:
self.processor_features = None
if different_processor_manufacturer:
self.processor_manufacturer = None
if different_processor_version:
self.processor_version = None
#If all memory nodes are the same get the values. Otherwise keep them as none
#Sum the total memory
self.memory_size = 0
different_memory_freq = different_memory_nr_channels = different_memory_type = different_memory_hugepage_sz = False
prev_memory_freq = prev_memory_nr_channels = prev_memory_type = prev_memory_hugepage_sz = None
for node in self.nodes.itervalues():
(self.memory_freq, self.memory_nr_channels, self.memory_type, memory_size, self.memory_hugepage_sz) = node.get_memory_info()
self.memory_size += memory_size
if prev_memory_freq != None and self.memory_freq != prev_memory_freq:
different_memory_freq = True
if prev_memory_nr_channels != None and self.memory_nr_channels != prev_memory_nr_channels:
different_memory_nr_channels = True
if prev_memory_type != None and self.memory_type != prev_memory_type:
different_memory_type = True
if prev_memory_hugepage_sz != None and self.memory_hugepage_sz != prev_memory_hugepage_sz:
different_memory_hugepage_sz = True
(prev_memory_freq, prev_memory_nr_channels, prev_memory_type, prev_memory_hugepage_sz) = (self.memory_freq, self.memory_nr_channels, self.memory_type, self.memory_hugepage_sz)
if different_memory_freq:
self.memory_freq = None
if different_memory_nr_channels:
self.memory_nr_channels = None
if different_memory_type:
self.memory_type = None
if different_memory_hugepage_sz:
warning_text += 'Detected different hugepages size in different sockets\n'
return (True, warning_text)
def set_hypervisor(self,hypervisor):
"""Sets the hypervisor.
Returns (True,Warning) in case of success and ('False',<error description>) in case of error"""
if not isinstance(hypervisor,Hypervisor):
return (False, 'The variable \'hypervisor\' must be of class Hypervisor')
self.hypervisor.assign(hypervisor)
return (True, "")
def set_os(self,os):
"""Sets the operating system.
Returns (True,Warning) in case of success and ('False',<error description>) in case of error"""
if not isinstance(os,OpSys):
return (False, 'The variable \'os\' must be of class OpSys')
self.os.assign(os)
return (True, "")
def to_text(self):
text= 'name: '+self.name+'\n'
text+= 'processor:\n'
text+= ' nr_processors: '+str(self.nr_processors)+'\n'
text+= ' family: '+self.processor_family+'\n'
text+= ' manufacturer: '+self.processor_manufacturer+'\n'
text+= ' version: '+self.processor_version+'\n'
text+= ' features: '+str(self.processor_features)+'\n'
text+= 'memory:\n'
text+= ' type: '+self.memory_type+'\n'
text+= ' freq: '+str(self.memory_freq)+'\n'
text+= ' nr_channels: '+str(self.memory_nr_channels)+'\n'
text+= ' size: '+str(self.memory_size)+'\n'
text+= 'hypervisor:\n'
text+= self.hypervisor.to_text()
text+= 'os:\n'
text+= self.os.to_text()
text+= 'resource topology:\n'
text+= ' nr_nodes: '+ str(len(self.nodes))+'\n'
text+= ' nodes:\n'
for node_k, node_v in self.nodes.iteritems():
text+= ' node'+str(node_k)+':\n'
text+= node_v.to_text()
return text
def to_yaml(self):
return yaml.load(self.to_text())
class Node():
def __init__(self):
self.id_ = None #Integer. Node id. Unique in the system
self.processor = ProcessorNode() #Information about the processor in the node
self.memory = MemoryNode() #Information about the memory in the node
self.nic_list = list() #List of Nic() containing information about the nics associated to the node
        self.ports_list = list()   #List containing all network ports in the node. This is used to avoid having the same port defined multiple times in the system
def get_processor_info(self):
"""Gets the processor information. Returns (processor_family, processor_manufacturer, processor_version, processor_features)"""
return self.processor.get_info()
def get_memory_info(self):
"""Gets the memory information. Returns (memory_freq, memory_nr_channels, memory_type, memory_size)"""
return self.memory.get_info()
# def set(self, *args):
# """Sets the node information. Returns (True,Warning) in case of success and ('False',<error description>) in case of error"""
# if len(args)==2:
# processor = args[0]
# memory = args[1]
# nics = False
# elif len(args)==3:
# processor = args[0]
# memory = args[1]
# nic_list = args[2]
# nics = True
# else:
# return (False, 'Wrong number of elements calling Node().set()')
def set(self, processor, memory, nic_list):
(status, return_code) = self.processor.assign(processor)
if not status:
return (status, return_code)
self.id_ = processor.id_
(status, return_code) = self.memory.assign(memory)
if not status:
return (status, return_code)
# if nics:
for nic in nic_list:
if not isinstance(nic,Nic):
return (False, 'The nics must be of type Nic')
self.nic_list.append(nic)
for port_key in nic.ports.iterkeys():
if port_key in self.ports_list:
                    return (False, 'Network port '+port_key+' defined multiple times in the same node')
self.ports_list.append(port_key)
return (True,"")
def assign(self, node):
"""Sets the node information.
Returns (True,Warning) in case of success and ('False',<error description>) in case of error"""
warning_text=""
processor = node.processor
memory = node.memory
nic_list = node.nic_list
(status, return_code) = self.processor.assign(processor)
if not status:
return (status, return_code)
self.id_ = processor.id_
(status, return_code) = self.memory.assign(memory)
if not status:
return (status, return_code)
        warning_text += return_code
for nic in nic_list:
if not isinstance(nic,Nic):
return (False, 'The nics must be of type Nic')
self.nic_list.append(nic)
for port_key in nic.ports.iterkeys():
if port_key in self.ports_list:
                    return (False, 'Network port '+port_key+' defined multiple times in the same node')
self.ports_list.append(port_key)
return (True,warning_text)
def to_text(self):
text= ' id: '+str(self.id_)+'\n'
text+= ' cpu:\n'
text += self.processor.to_text()
text+= ' memory:\n'
text += self.memory.to_text()
if len(self.nic_list) > 0:
text+= ' nics:\n'
nic_index = 0
for nic in self.nic_list:
text+= ' nic '+str(nic_index)+':\n'
text += nic.to_text()
nic_index += 1
return text
class ProcessorNode():
#Definition of the possible values of processor variables
possible_features = definitionsClass.processor_possible_features
possible_manufacturers = definitionsClass.processor_possible_manufacturers
possible_families = definitionsClass.processor_possible_families
possible_versions = definitionsClass.processor_possible_versions
def __init__(self):
self.id_ = None #Integer. Numeric identifier of the socket
self.family = None #Text. Family name of the processor
self.manufacturer = None #Text. Manufacturer of the processor
self.version = None #Text. Model version of the processor
self.features = list() #list. List of features offered by the processor
self.cores = list() #list. List of cores in the processor. In case of hyperthreading the coupled cores are expressed as [a,b]
self.eligible_cores = list()#list. List of cores that can be used
#self.decicated_cores
#self.shared_cores -> this should also contain information to know if cores are being used
def assign(self, processor):
"""Sets the processor information.
Returns (True,Warning) in case of success and ('False',<error description>) in case of error"""
if not isinstance(processor,ProcessorNode):
return (False, 'The variable \'processor\' must be of class ProcessorNode')
self.id_ = processor.id_
self.family = processor.family
self.manufacturer = processor.manufacturer
self.version = processor.version
self.features = processor.features
self.cores = processor.cores
self.eligible_cores = processor.eligible_cores
return (True, "")
def set(self, id_, family, manufacturer, version, features, cores):
"""Sets the processor information.
Returns (True,Warning) in case of success and ('False',<error description>) in case of error"""
warning_text = ""
if not isinstance(id_,int):
return (False, 'The processor id_ must be of type int')
if not isinstance(family,str):
return (False, 'The processor family must be of type str')
if not isinstance(manufacturer,str):
return (False, 'The processor manufacturer must be of type str')
if not isinstance(version,str):
return (False, 'The processor version must be of type str')
if not isinstance(features,list):
return (False, 'The processor features must be of type list')
if not isinstance(cores,list):
return (False, 'The processor cores must be of type list')
(self.id_, self.family, self.manufacturer, self.version) = (id_, family, manufacturer, version)
if not manufacturer in self.possible_manufacturers:
warning_text += "processor manufacturer '%s' not among: %s\n" %(manufacturer, str(self.possible_manufacturers))
if not family in self.possible_families:
warning_text += "family '%s' not among: %s\n" % (family, str(self.possible_families))
# if not version in self.possible_versions:
# warning_text += 'The version %s is not one of these: %s\n' % (version, str(self.possible_versions))
for feature in features:
if not feature in self.possible_features:
warning_text += "processor feature '%s' not among: %s\n" % (feature, str(self.possible_versions))
self.features.append(feature)
#If hyperthreading is active cores must be coupled in the form of [[a,b],[c,d],...]
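        # (hypothetical example: a 4-core hyperthreaded socket could be passed as
        #  cores=[[0,8],[1,9],[2,10],[3,11]], pairing each core with its sibling thread)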
if 'ht' in self.features:
for iterator in sorted(cores):
if not isinstance(iterator,list) or len(iterator) != 2 or not isinstance(iterator[0],int) or not isinstance(iterator[1],int):
return (False, 'The cores list for an hyperthreaded processor must be coupled in the form of [[a,b],[c,d],...] where a,b,c,d are of type int')
self.cores.append(iterator)
#If hyperthreading is not active the cores are a single list in the form of [a,b,c,d,...]
else:
for iterator in sorted(cores):
if not isinstance(iterator,int):
return (False, 'The cores list for a non hyperthreaded processor must be in the form of [a,b,c,d,...] where a,b,c,d are of type int')
self.cores.append(iterator)
self.set_eligible_cores()
return (True,warning_text)
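    # Hypothetical usage sketch (values are illustrative only):
    #   p = ProcessorNode()
    #   p.set(0, 'Xeon', 'Intel', 'E5-2620 v2', ['64b', 'iommu'], [0, 1, 2, 3, 4, 5])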
def set_eligible_cores(self):
"""Set the default eligible cores, this is all cores non used by the host operating system"""
not_first = False
for iterator in self.cores:
if not_first:
self.eligible_cores.append(iterator)
else:
not_first = True
return
def get_info(self):
"""Returns processor parameters (self.family, self.manufacturer, self.version, self.features)"""
return (self.family, self.manufacturer, self.version, self.features)
def to_text(self):
text= ' id: '+str(self.id_)+'\n'
text+= ' family: '+self.family+'\n'
text+= ' manufacturer: '+self.manufacturer+'\n'
text+= ' version: '+self.version+'\n'
text+= ' features: '+str(self.features)+'\n'
text+= ' cores: '+str(self.cores)+'\n'
text+= ' eligible_cores: '+str(self.eligible_cores)+'\n'
return text
class MemoryNode():
def __init__(self):
self.modules = list() #List of MemoryModule(). List of all modules installed in the node
self.nr_channels = None #Integer. Number of modules installed in the node
self.node_size = None #Integer. Total size in KiB of memory installed in the node
self.eligible_memory = None #Integer. Size in KiB of eligible memory in the node
self.hugepage_sz = None #Integer. Size in KiB of hugepages
self.hugepage_nr = None #Integer. Number of hugepages allocated in the module
self.eligible_hugepage_nr = None #Integer. Number of eligible hugepages in the node
self.type_ = None #Text. Type of memory modules. If modules have a different value keep it as None
self.freq = None #Integer. Frequency of the modules in MHz. If modules have a different value keep it as None
self.module_size = None #Integer. Size of the modules in KiB. If modules have a different value keep it as None
self.form_factor = None #Text. Form factor of the modules. If modules have a different value keep it as None
def assign(self, memory_node):
return self.set(memory_node.modules, memory_node.hugepage_sz, memory_node.hugepage_nr)
def set(self, modules, hugepage_sz, hugepage_nr):
"""Set the memory node information. hugepage_sz must be expressed in KiB.
Returns (True,Warning) in case of success and ('False',<error description>) in case of error"""
if not isinstance(modules, list):
return (False, 'The modules must be a list of elements of class MemoryModule')
if not isinstance(hugepage_sz,int):
return (False, 'The hugepage_sz variable must be an int expressing the size in KiB')
if not isinstance(hugepage_nr,int):
return (False, 'The hugepage_nr variable must be of type int')
(self.hugepage_sz, self.hugepage_nr) = (hugepage_sz, hugepage_nr)
self.node_size = self.nr_channels = 0
different_type = different_freq = different_module_size = different_form_factor = False
prev_type = prev_freq = prev_module_size = prev_form_factor = None
for iterator in modules:
if not isinstance(iterator,MemoryModule):
return (False, 'The modules must be a list of elements of class MemoryModule')
self.modules.append(iterator)
(self.type_, self.freq, self.module_size, self.form_factor) = (iterator.type_, iterator.freq, iterator.size, iterator.form_factor)
self.node_size += self.module_size
self.nr_channels += 1
if prev_type != None and prev_type != self.type_:
different_type = True
if prev_freq != None and prev_freq != self.freq:
different_freq = True
if prev_module_size != None and prev_module_size != self.module_size:
different_module_size = True
if prev_form_factor != None and prev_form_factor != self.form_factor:
different_form_factor = True
(prev_type, prev_freq, prev_module_size, prev_form_factor) = (self.type_, self.freq, self.module_size, self.form_factor)
if different_type:
self.type_ = None
if different_freq:
self.freq = None
if different_module_size:
self.module_size = None
if different_form_factor:
self.form_factor = None
(return_value, error_code) = self.set_eligible_memory()
if not return_value:
return (return_value, error_code)
return (True, "")
def set_eligible_memory(self):
"""Sets the default eligible_memory and eligible_hugepage_nr. This is all memory but 2GiB and all hugepages"""
self.eligible_memory = self.node_size - 2*1024*1024
if self.eligible_memory < 0:
return (False, "There is less than 2GiB of memory in the module")
self.eligible_hugepage_nr = self.hugepage_nr
return (True,"")
def get_info(self):
"""Return memory information (self.freq, self.nr_channels, self.type_, self.node_size)"""
return (self.freq, self.nr_channels, self.type_, self.node_size, self.hugepage_sz)
def to_text(self):
text= ' node_size: '+str(self.node_size)+'\n'
text+= ' nr_channels: '+str(self.nr_channels)+'\n'
text+= ' eligible_memory: '+str(self.eligible_memory)+'\n'
text+= ' hugepage_sz: '+str(self.hugepage_sz)+'\n'
text+= ' hugepage_nr: '+str(self.hugepage_nr)+'\n'
text+= ' eligible_hugepage_nr: '+str(self.eligible_hugepage_nr)+'\n'
text+= ' type: '+self.type_+'\n'
text+= ' freq: '+str(self.freq)+'\n'
text+= ' module_size: '+str(self.module_size)+'\n'
text+= ' form_factor: '+self.form_factor+'\n'
text+= ' modules details:\n'
for module in self.modules:
text += module.to_text()
return text
class MemoryModule():
#Definition of the possible values of module variables
possible_types = definitionsClass.memory_possible_types
possible_form_factors = definitionsClass.memory_possible_form_factors
def __init__(self):
self.locator = None #Text. Name of the memory module
self.type_ = None #Text. Type of memory module
self.freq = None #Integer. Frequency of the module in MHz
self.size = None #Integer. Size of the module in KiB
self.form_factor = None #Text. Form factor of the module
def set(self, locator, type_, freq, size, form_factor):
"""Sets the memory module information.
Frequency must be expressed in MHz and size in KiB.
Returns (True,Warning) in case of success and ('False',<error description>) in case of error"""
warning_text=""
if not isinstance(locator, str):
return (False, "The type of the variable locator must be str")
if not isinstance(type_, str):
return (False, "The type of the variable type_ must be str")
if not isinstance(form_factor, str):
return (False, "The type of the variable form_factor must be str")
if not isinstance(freq, int):
return (False, "The type of the variable freq must be int")
if not isinstance(size, int):
return (False, "The type of the variable size must be int")
if not form_factor in self.possible_form_factors:
warning_text += "memory form_factor '%s' not among: %s\n" %(form_factor, str(self.possible_form_factors))
if not type_ in self.possible_types:
warning_text += "memory type '%s' not among: %s\n" %(type_, str(self.possible_types))
(self.locator, self.type_, self.freq, self.size, self.form_factor) = (locator, type_, freq, size, form_factor)
return (True, warning_text)
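    # Hypothetical usage sketch (illustrative values; freq in MHz, size in KiB):
    #   m = MemoryModule()
    #   m.set('DIMM_A1', 'DDR3', 1600, 8*1024*1024, 'DIMM')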
def to_text(self):
text= ' '+self.locator+':\n'
text+= ' type: '+self.type_+'\n'
text+= ' freq: '+str(self.freq)+'\n'
text+= ' size: '+str(self.size)+'\n'
text+= ' form factor: '+self.form_factor+'\n'
return text
class Nic():
def __init__(self):
self.model = None #Text. Model of the nic
self.ports = dict() #Dictionary of ports. Keys are the port name, value are Port() elements
def set_model(self, model):
"""Sets the model of the nic. Returns (True,Warning) in case of success and ('False',<error description>) in case of error"""
if not isinstance(model,str):
return (False, 'The \'model\' must be of type str')
self.model = model
return (True, "")
def add_port(self, port):
"""Adds a port to the nic. Returns (True,Warning) in case of success and ('False',<error description>) in case of error"""
if not isinstance(port,Port):
return (False, 'The \'port\' must be of class Port')
# port_id = str(port.pci_device_id[0])+':'+str(port.pci_device_id[1])+':'+str(port.pci_device_id[2])+'.'+str(port.pci_device_id[3])
#CHANGED
# port_id = port.name
port_id = port.pci_device_id
#CHANGED END
if port_id in self.ports:
return (False, 'The \'port\' '+port.pci_device_id+' is duplicated in the nic')
# return (False, 'The \'port\' is duplicated in the nic')
self.ports[port_id] = port
return (True, "")
def to_text(self):
text= ' model: '+ str(self.model)+'\n'
text+= ' ports: '+'\n'
for key,port in self.ports.iteritems():
text+= ' "'+key+'":'+'\n'
text += port.to_text()
return text
class Port():
def __init__(self):
self.name = None #Text. Port name
self.virtual = None #Boolean. States if the port is a virtual function
self.enabled = None #Boolean. States if the port is enabled
self.eligible = None #Boolean. States if the port is eligible
self.speed = None #Integer. Indicates the speed in Mbps
self.available_bw = None #Integer. BW in Mbps that is available.
self.mac = None #list. Indicates the mac address of the port as a list in format ['XX','XX','XX','XX','XX','XX']
self.pci_device_id_split = None #list. Indicates the pci address of the port as a list in format ['XXXX','XX','XX','X']
self.pci_device_id = None
self.PF_pci_device_id = None
# def set(self, name, virtual, enabled, speed, mac, pci_device_id, pci_device_id_split):
# """Sets the port information. The variable speed indicates the speed in Mbps. Returns (True,Warning) in case of success and ('False',<error description>) in case of error"""
# if not isinstance(name,str):
# return (False, 'The variable \'name\' must be of type str')
# if not isinstance(virtual,bool):
# return (False, 'The variable \'virtual\' must be of type bool')
# if not isinstance(enabled,bool):
# return (False, 'The variable \'enabled\' must be of type bool')
# if not isinstance(enabled,bool):
# return (speed, 'The variable \'speed\' must be of type int')
# if not isinstance(mac, list) and not isinstance(mac,NoneType):
# return (False, 'The variable \'enabled\' must be of type list indicating the mac address in format [\'XXXX\',\'XX\',\'XX\',\'X\'] or NoneType')
# if not isinstance(pci_device_id_split, list) or len(pci_device_id_split) != 4:
# return (False, 'The variable \'pci_device_id_split\' must be of type list, indicating the pci address in format [\'XX\',\'XX\',\'XX\',\'XX\',\'XX\',\'XX\']')
#
# expected_len = [4,2,2,1]
# index = 0
# for iterator in pci_device_id_split:
# if not isinstance(iterator,str) or not iterator.isdigit() or len(iterator) != expected_len[index]:
# return (False, 'The variable \'pci_device_id_split\' must be of type list, indicating the pci address in format [\'XX\',\'XX\',\'XX\',\'XX\',\'XX\',\'XX\']')
# index += 1
#
# if not isinstance(mac,NoneType):
# for iterator in mac:
# if not isinstance(iterator,str) or not iterator.isalnum() or len(iterator) != 2:
# return (False, 'The variable \'enabled\' must be of type list indicating the mac address in format [\'XXXX\',\'XX\',\'XX\',\'X\'] or NoneType')
#
# #By default only virtual ports are eligible
# # (self.name, self.virtual, self.enabled, self.eligible, self.available_bw, self.speed, self.mac, self.pci_device_id, self.pci_device_id_split) = (name, virtual, enabled, virtual, speed, speed, mac, pci_device_id, pci_device_id_split)
# (self.name, self.virtual, self.enabled, self.eligible, self.available_bw, self.mac, self.pci_device_id, self.pci_device_id_split) = (name, virtual, enabled, virtual, speed, mac, pci_device_id, pci_device_id_split)
def to_text(self):
text= ' pci: "'+ str(self.pci_device_id)+'"\n'
text+= ' virtual: '+ str(self.virtual)+'\n'
if self.virtual:
text+= ' PF_pci_id: "'+self.PF_pci_device_id+'"\n'
text+= ' eligible: '+ str(self.eligible)+'\n'
text+= ' enabled: '+str(self.enabled)+'\n'
text+= ' speed: '+ str(self.speed)+'\n'
text+= ' available bw: '+ str(self.available_bw)+'\n'
text+= ' mac: '+ str(self.mac)+'\n'
text+= ' source_name: '+ str(self.name)+'\n'
return text
class Hypervisor():
#Definition of the possible values of hypervisor variables
possible_types = definitionsClass.hypervisor_possible_types
possible_domain_types = definitionsClass.hypervisor_possible_domain_types
def __init__(self):
self.type_ = None #Text. Hypervisor type_
self.version = None #int. Hypervisor version
self.lib_version = None #int. Libvirt version used to compile hypervisor
self.domains = list() #list. List of all the available domains
def set(self, hypervisor, version, lib_version, domains):
warning_text=""
if not isinstance(hypervisor,str):
return (False, 'The variable type_ must be of type str')
if not isinstance(version,int):
return (False, 'The variable version must be of type int')
if not isinstance(lib_version,int):
return (False, 'The library version must be of type int')
if not isinstance(domains,list):
return (False, 'Domains must be a list of the possible domains as str')
if not hypervisor in self.possible_types:
warning_text += "Hyperpivor '%s' not among: %s\n" % (hypervisor, str(self.possible_types))
valid_domain_found = False
for domain in domains:
if not isinstance(domain,str):
return (False, 'Domains must be a list of the possible domains as str')
if domain in self.possible_domain_types:
valid_domain_found = True
self.domains.append(domain)
if not valid_domain_found:
warning_text += 'No valid domain found among: %s\n' % str(self.possible_domain_types)
(self.version, self.lib_version, self.type_) = (version, lib_version, hypervisor)
return (True, warning_text)
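    # Hypothetical usage sketch (illustrative values):
    #   h = Hypervisor()
    #   h.set('QEMU', 2000000, 1002002, ['kvm', 'qemu'])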
def assign(self, hypervisor):
(self.version, self.lib_version, self.type_) = (hypervisor.version, hypervisor.lib_version, hypervisor.type_)
for domain in hypervisor.domains:
self.domains.append(domain)
return
def to_text(self):
text= ' type: '+self.type_+'\n'
text+= ' version: '+str(self.version)+'\n'
text+= ' libvirt version: '+ str(self.lib_version)+'\n'
text+= ' domains: '+str(self.domains)+'\n'
return text
class OpSys():
#Definition of the possible values of os variables
possible_id = definitionsClass.os_possible_id
possible_types = definitionsClass.os_possible_types
possible_architectures = definitionsClass.os_possible_architectures
def __init__(self):
        self.id_ = None                 #Text. Identifier of the OS. Formed by <Distributor ID>-<Release>-<Codename>. In linux this can be obtained using lsb_release -a
self.type_ = None #Text. Type of operating system
self.bit_architecture = None #Integer. Architecture
def set(self, id_, type_, bit_architecture):
warning_text=""
if not isinstance(type_,str):
return (False, 'The variable type_ must be of type str')
if not isinstance(id_,str):
return (False, 'The variable id_ must be of type str')
if not isinstance(bit_architecture,str):
return (False, 'The variable bit_architecture must be of type str')
if not type_ in self.possible_types:
warning_text += "os type '%s' not among: %s\n" %(type_, str(self.possible_types))
if not id_ in self.possible_id:
warning_text += "os release '%s' not among: %s\n" %(id_, str(self.possible_id))
if not bit_architecture in self.possible_architectures:
warning_text += "os bit_architecture '%s' not among: %s\n" % (bit_architecture, str(self.possible_architectures))
(self.id_, self.type_, self.bit_architecture) = (id_, type_, bit_architecture)
return (True, warning_text)
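    # Hypothetical usage sketch (illustrative values, as gathered by get_os_information):
    #   o = OpSys()
    #   o.set('Ubuntu 14.04.5 LTS', 'GNU/Linux', 'x86_64')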
def assign(self,os):
(self.id_, self.type_, self.bit_architecture) = (os.id_, os.type_, os.bit_architecture)
return
def to_text(self):
text= ' id: '+self.id_+'\n'
text+= ' type: '+self.type_+'\n'
text+= ' bit_architecture: '+self.bit_architecture+'\n'
return text
def get_hostname(virsh_conn):
return virsh_conn.getHostname().rstrip('\n')
def get_hugepage_size(ssh_conn):
command = 'sudo hugeadm --page-sizes'
# command = 'hugeadm --page-sizes-all'
(_, stdout, stderr) = ssh_conn.exec_command(command)
error = stderr.read()
if len(error)>0:
raise paramiko.ssh_exception.SSHException(command +' : '+ error)
mem=stdout.read()
if mem=="":
return 0
return int(mem)
def get_hugepage_nr(ssh_conn,hugepage_sz, node_id):
command = 'cat /sys/devices/system/node/node'+str(node_id)+'/hugepages/hugepages-'+str(hugepage_sz/1024)+'kB/nr_hugepages'
(_, stdout, _) = ssh_conn.exec_command(command)
#print command,
#text = stdout.read()
#print "'"+text+"'"
#return int(text)
try:
value=int(stdout.read())
except:
value=0
return value
def get_memory_information(ssh_conn, virsh_conn, memory_nodes):
warning_text=""
tree=ElementTree.fromstring(virsh_conn.getSysinfo(0))
memory_dict = dict()
for target in tree.findall("memory_device"):
locator_f = size_f = freq_f = type_f = formfactor_f = False
module_form_factor = ""
for entry in target.findall("entry"):
if entry.get("name") == 'size':
size_f = True
size_split = entry.text.split(' ')
if size_split[1] == 'MB':
module_size = int(size_split[0]) * 1024 * 1024
elif size_split[1] == 'GB':
module_size = int(size_split[0]) * 1024 * 1024 * 1024
elif size_split[1] == 'KB':
module_size = int(size_split[0]) * 1024
else:
module_size = int(size_split[0])
elif entry.get("name") == 'speed':
freq_f = True
freq_split = entry.text.split(' ')
if freq_split[1] == 'MHz':
module_freq = int(freq_split[0]) * 1024 * 1024
elif freq_split[1] == 'GHz':
module_freq = int(freq_split[0]) * 1024 * 1024 * 1024
elif freq_split[1] == 'KHz':
module_freq = int(freq_split[0]) * 1024
elif entry.get("name") == 'type':
type_f = True
module_type = entry.text
elif entry.get("name") == 'form_factor':
formfactor_f = True
module_form_factor = entry.text
elif entry.get("name") == 'locator' and not locator_f:
# other case, it is obtained by bank_locator that we give priority to
locator = entry.text
pos = locator.find(module_form_factor)
if module_form_factor == locator[0:len(module_form_factor) ]:
pos = len(module_form_factor) +1
else:
pos = 0
if locator[pos] in "ABCDEFGH":
locator_f = True
node_id = ord(locator[pos])-ord('A')
#print entry.text, node_id
elif entry.get("name") == 'bank_locator':
locator = entry.text
pos = locator.find("NODE ")
if pos >= 0 and len(locator)>pos+5:
if locator[pos+5] in ("01234567"): #len("NODE ") is 5
node_id = int(locator[pos+5])
locator_f = True
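                    # e.g. a bank_locator of "NODE 1" assigns this module to NUMA node 1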
#When all module fields have been found add a new module to the list
if locator_f and size_f and freq_f and type_f and formfactor_f:
#If the memory node has not yet been created create it
if node_id not in memory_dict:
memory_dict[node_id] = []
#Add a new module to the memory node
module = MemoryModule()
(return_status, code) = module.set(locator, module_type, module_freq, module_size, module_form_factor)
if not return_status:
return (return_status, code)
memory_dict[node_id].append(module)
if code not in warning_text:
warning_text += code
#Fill memory nodes
#Hugepage size is constant for all nodes
hugepage_sz = get_hugepage_size(ssh_conn)
for node_id, modules in memory_dict.iteritems():
memory_node = MemoryNode()
memory_node.set(modules, hugepage_sz, get_hugepage_nr(ssh_conn,hugepage_sz, node_id))
memory_nodes[node_id] = memory_node
return (True, warning_text)
def get_cpu_topology_ht(ssh_conn, topology):
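    # Parses /proc/cpuinfo and fills topology[socket_id] with [a, b] pairs of
    # logical cpu ids that share the same physical core (hyperthread siblings).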
command = 'cat /proc/cpuinfo'
(_, stdout, stderr) = ssh_conn.exec_command(command)
error = stderr.read()
if len(error)>0:
raise paramiko.ssh_exception.SSHException(command +' : '+ error)
sockets = []
cores = []
core_map = {}
core_details = []
core_lines = {}
for line in stdout.readlines():
if len(line.strip()) != 0:
name, value = line.split(":", 1)
core_lines[name.strip()] = value.strip()
else:
core_details.append(core_lines)
core_lines = {}
for core in core_details:
for field in ["processor", "core id", "physical id"]:
if field not in core:
return(False,'Error getting '+field+' value from /proc/cpuinfo')
core[field] = int(core[field])
if core["core id"] not in cores:
cores.append(core["core id"])
if core["physical id"] not in sockets:
sockets.append(core["physical id"])
key = (core["physical id"], core["core id"])
if key not in core_map:
core_map[key] = []
core_map[key].append(core["processor"])
for s in sockets:
hyperthreaded_cores = list()
for c in cores:
hyperthreaded_cores.append(core_map[(s,c)])
topology[s] = hyperthreaded_cores
return (True, "")
def get_processor_information(ssh_conn, vish_conn, processors):
warning_text=""
#Processor features are the same for all processors
#TODO (at least using virsh capabilities)nr_numa_nodes
capabilities = list()
tree=ElementTree.fromstring(vish_conn.getCapabilities())
for target in tree.findall("host/cpu/feature"):
if target.get("name") == 'pdpe1gb':
capabilities.append('lps')
elif target.get("name") == 'dca':
capabilities.append('dioc')
elif target.get("name") == 'vmx' or target.get("name") == 'svm':
capabilities.append('hwsv')
elif target.get("name") == 'ht':
capabilities.append('ht')
target = tree.find("host/cpu/arch")
if target.text == 'x86_64' or target.text == 'amd64':
capabilities.append('64b')
command = 'cat /proc/cpuinfo | grep flags'
(_, stdout, stderr) = ssh_conn.exec_command(command)
error = stderr.read()
if len(error)>0:
raise paramiko.ssh_exception.SSHException(command +' : '+ error)
line = stdout.readline()
if 'ept' in line or 'npt' in line:
capabilities.append('tlbps')
#Find out if IOMMU is enabled
command = 'dmesg |grep -e Intel-IOMMU'
(_, stdout, stderr) = ssh_conn.exec_command(command)
error = stderr.read()
if len(error)>0:
raise paramiko.ssh_exception.SSHException(command +' : '+ error)
if 'enabled' in stdout.read():
capabilities.append('iommu')
#Equivalent for AMD
command = 'dmesg |grep -e AMD-Vi'
(_, stdout, stderr) = ssh_conn.exec_command(command)
error = stderr.read()
if len(error)>0:
raise paramiko.ssh_exception.SSHException(command +' : '+ error)
if len(stdout.read()) > 0:
capabilities.append('iommu')
#-----------------------------------------------------------
topology = dict()
#In case hyperthreading is active it is necessary to determine cpu topology using /proc/cpuinfo
if 'ht' in capabilities:
(return_status, code) = get_cpu_topology_ht(ssh_conn, topology)
if not return_status:
return (return_status, code)
warning_text += code
#Otherwise it is possible to do it using virsh capabilities
else:
for target in tree.findall("host/topology/cells/cell"):
socket_id = int(target.get("id"))
topology[socket_id] = list()
for cpu in target.findall("cpus/cpu"):
topology[socket_id].append(int(cpu.get("id")))
#-----------------------------------------------------------
#Create a dictionary with the information of all processors
#p_fam = p_man = p_ver = None
tree=ElementTree.fromstring(vish_conn.getSysinfo(0))
#print vish_conn.getSysinfo(0)
#return (False, 'forces error for debuging')
not_populated=False
socket_id = -1 #in case we can not determine the socket_id we assume incremental order, starting by 0
for target in tree.findall("processor"):
count = 0
socket_id += 1
#Get processor id, family, manufacturer and version
for entry in target.findall("entry"):
if entry.get("name") == "status":
if entry.text[0:11] == "Unpopulated":
not_populated=True
elif entry.get("name") == 'socket_destination':
socket_text = entry.text
if socket_text.startswith('CPU'):
socket_text = socket_text.strip('CPU')
socket_text = socket_text.strip() #removes trailing spaces
if socket_text.isdigit() and int(socket_text)<9 and int(socket_text)>0:
socket_id = int(socket_text) - 1
elif entry.get("name") == 'family':
family = entry.text
count += 1
elif entry.get("name") == 'manufacturer':
manufacturer = entry.text
count += 1
elif entry.get("name") == 'version':
version = entry.text.strip()
count += 1
if count != 3:
return (False, 'Error. Not all expected fields could be found in processor')
#Create and fill processor structure
if not_populated:
            continue    #skip this entry: some machines report more processor sockets than are actually populated
processor = ProcessorNode()
(return_status, code) = processor.set(socket_id, family, manufacturer, version, capabilities, topology[socket_id])
if not return_status:
return (return_status, code)
if code not in warning_text:
warning_text += code
#Add processor to the processors dictionary
processors[socket_id] = processor
return (True, warning_text)
def get_nic_information(ssh_conn, virsh_conn, nic_topology):
warning_text=""
#Get list of net devices
net_devices = virsh_conn.listDevices('net',0)
print virsh_conn.listDevices('net',0)
for device in net_devices:
try:
#Get the XML descriptor of the device:
net_XML = ElementTree.fromstring(virsh_conn.nodeDeviceLookupByName(device).XMLDesc(0))
#print "net_XML:" , net_XML
#obtain the parent
parent = net_XML.find('parent')
if parent == None:
print 'No parent was found in XML for device '+device
#Error. continue?-------------------------------------------------------------
continue
if parent.text == 'computer':
continue
if not parent.text.startswith('pci_'):
print device + ' parent is neither computer nor pci'
#Error. continue?-------------------------------------------------------------
continue
interface = net_XML.find('capability/interface').text
mac = net_XML.find('capability/address').text
#Get the pci XML
pci_XML = ElementTree.fromstring(virsh_conn.nodeDeviceLookupByName(parent.text).XMLDesc(0))
#print pci_XML
#Get pci
name = pci_XML.find('name').text.split('_')
pci = name[1]+':'+name[2]+':'+name[3]+'.'+name[4]
#If slot == 0 it is a PF, otherwise it is a VF
capability = pci_XML.find('capability')
if capability.get('type') != 'pci':
                print device + ' capability is not of type pci in '+parent.text
#Error. continue?-------------------------------------------------------------
continue
slot = capability.find('slot').text
bus = capability.find('bus').text
node_id = None
numa_ = capability.find('numa')
if numa_ != None:
                node_id = numa_.get('node')
            if node_id != None: node_id = int(node_id)
if slot == None or bus == None:
                print device + ' bus and slot not detected in '+parent.text
#Error. continue?-------------------------------------------------------------
continue
if slot != '0':
# print ElementTree.tostring(pci_XML)
virtual = True
capability_pf = capability.find('capability')
if capability_pf.get('type') != 'phys_function':
print 'physical_function not found in VF '+parent.text
#Error. continue?-------------------------------------------------------------
continue
PF_pci = capability_pf.find('address').attrib
PF_pci_text = PF_pci['domain'].split('x')[1]+':'+PF_pci['bus'].split('x')[1]+':'+PF_pci['slot'].split('x')[1]+'.'+PF_pci['function'].split('x')[1]
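                # resulting format, e.g. "0000:05:00.0" (domain:bus:slot.function of the parent PF)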
else:
virtual = False
#Obtain node for the port
if node_id == None:
node_id = int(bus)>>6
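                # fallback heuristic: when libvirt does not report the NUMA node,
                # derive it from the upper bits of the PCI bus number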
#print "node_id:", node_id
#Only for non virtual interfaces: Obtain speed and if link is detected (this must be done using ethtool)
if not virtual:
command = 'sudo ethtool '+interface+' | grep -e Speed -e "Link detected"'
(_, stdout, stderr) = ssh_conn.exec_command(command)
error = stderr.read()
if len(error) >0:
print 'Error running '+command+'\n'+error
#Error. continue?-------------------------------------------------------------
continue
for line in stdout.readlines():
line = line.strip().rstrip('\n').split(': ')
if line[0] == 'Speed':
if line[1].endswith('Mb/s'):
speed = int(line[1].split('M')[0])*int(1e6)
elif line[1].endswith('Gb/s'):
speed = int(line[1].split('G')[0])*int(1e9)
elif line[1].endswith('Kb/s'):
speed = int(line[1].split('K')[0])*int(1e3)
else:
#the interface is listed but won't be used
speed = 0
elif line[0] == 'Link detected':
if line[1] == 'yes':
enabled = True
else:
enabled = False
else:
                        print 'Unexpected output of command '+command+':'
print line
#Error. continue?-------------------------------------------------------------
continue
if not node_id in nic_topology:
nic_topology[node_id] = list()
#With this implementation we make the RAD with only one nic per node and this nic has all ports, TODO: change this by including parent information of PF
nic_topology[node_id].append(Nic())
#Load the appropriate nic
nic = nic_topology[node_id][0]
#Create a new port and fill it
port = Port()
port.name = interface
port.virtual = virtual
port.speed = speed
if virtual:
port.available_bw = 0
port.PF_pci_device_id = PF_pci_text
else:
port.available_bw = speed
if speed == 0:
port.enabled = False
else:
port.enabled = enabled
port.eligible = virtual #Only virtual ports are eligible
port.mac = mac
port.pci_device_id = pci
port.pci_device_id_split = name[1:]
#Save the port information
nic.add_port(port)
except Exception,e:
print 'Error: '+str(e)
    #set on virtual ports whether they are enabled (a VF is enabled only if its PF is enabled)
for nic in nic_topology.itervalues():
for port in nic[0].ports.itervalues():
# print port.pci_device_id
if port.virtual:
enabled = nic[0].ports.get(port.PF_pci_device_id)
if enabled == None:
return(False, 'The PF '+port.PF_pci_device_id+' (VF '+port.pci_device_id+') is not present in ports dict')
#Only if the PF is enabled the VF can be enabled
if nic[0].ports[port.PF_pci_device_id].enabled:
port.enabled = True
else:
port.enabled = False
return (True, warning_text)
def get_nic_information_old(ssh_conn, nic_topology):
command = 'lstopo-no-graphics --of xml'
(_, stdout, stderr) = ssh_conn.exec_command(command)
error = stderr.read()
if len(error)>0:
raise paramiko.ssh_exception.SSHException(command +' : '+ error)
tree=ElementTree.fromstring(stdout.read())
for target in tree.findall("object/object"):
#Find numa nodes
if target.get("type") != "NUMANode":
continue
node_id = int(target.get("os_index"))
nic_topology[node_id] = list()
#find nics in numa node
for entry in target.findall("object/object"):
if entry.get("type") != 'Bridge':
continue
nic_name = entry.get("name")
model = None
nic = Nic()
#find ports in nic
for pcidev in entry.findall("object"):
if pcidev.get("type") != 'PCIDev':
continue
enabled = speed = mac = pci_busid = None
port = Port()
model = pcidev.get("name")
virtual = False
if 'Virtual' in model:
virtual = True
pci_busid = pcidev.get("pci_busid")
for osdev in pcidev.findall("object"):
name = osdev.get("name")
for info in osdev.findall("info"):
if info.get("name") != 'Address':
continue
mac = info.get("value")
#get the port speed and status
command = 'sudo ethtool '+name
(_, stdout, stderr) = ssh_conn.exec_command(command)
error = stderr.read()
if len(error)>0:
return (False, 'Error obtaining '+name+' information: '+error)
ethtool = stdout.read()
if '10000baseT/Full' in ethtool:
speed = 10e9
elif '1000baseT/Full' in ethtool:
speed = 1e9
elif '100baseT/Full' in ethtool:
speed = 100e6
elif '10baseT/Full' in ethtool:
speed = 10e6
else:
return (False, 'Speed not detected in '+name)
enabled = False
if 'Link detected: yes' in ethtool:
enabled = True
if speed != None and mac != None and pci_busid != None:
mac = mac.split(':')
pci_busid_split = re.split(':|\.', pci_busid)
#Fill the port information
port.set(name, virtual, enabled, speed, mac, pci_busid, pci_busid_split)
nic.add_port(port)
if len(nic.ports) > 0:
#Fill the nic model
if model != None:
nic.set_model(model)
else:
nic.set_model(nic_name)
#Add it to the topology
nic_topology[node_id].append(nic)
return (True, "")
def get_os_information(ssh_conn, os):
warning_text=""
# command = 'lsb_release -a'
# (stdin, stdout, stderr) = ssh_conn.exec_command(command)
# cont = 0
# for line in stdout.readlines():
# line_split = re.split('\t| *', line.rstrip('\n'))
# if line_split[0] == 'Distributor' and line_split[1] == 'ID:':
# distributor = line_split[2]
# cont += 1
# elif line_split[0] == 'Release:':
# release = line_split[1]
# cont += 1
# elif line_split[0] == 'Codename:':
# codename = line_split[1]
# cont += 1
# if cont != 3:
# return (False, 'It was not possible to obtain the OS id')
# id_ = distributor+'-'+release+'-'+codename
command = 'cat /etc/redhat-release'
(_, stdout, _) = ssh_conn.exec_command(command)
id_text= stdout.read()
if len(id_text)==0:
#try with Ubuntu
command = 'lsb_release -d -s'
(_, stdout, _) = ssh_conn.exec_command(command)
id_text= stdout.read()
if len(id_text)==0:
raise paramiko.ssh_exception.SSHException("Can not determinte release neither with 'lsb_release' nor with 'cat /etc/redhat-release'")
id_ = id_text.rstrip('\n')
command = 'uname -o'
(_, stdout, stderr) = ssh_conn.exec_command(command)
error = stderr.read()
if len(error)>0:
raise paramiko.ssh_exception.SSHException(command +' : '+ error)
type_ = stdout.read().rstrip('\n')
command = 'uname -i'
(_, stdout, stderr) = ssh_conn.exec_command(command)
error = stderr.read()
if len(error)>0:
raise paramiko.ssh_exception.SSHException(command +' : '+ error)
bit_architecture = stdout.read().rstrip('\n')
(return_status, code) = os.set(id_, type_, bit_architecture)
if not return_status:
return (return_status, code)
warning_text += code
return (True, warning_text)
def get_hypervisor_information(virsh_conn, hypervisor):
type_ = virsh_conn.getType().rstrip('\n')
version = virsh_conn.getVersion()
lib_version = virsh_conn.getLibVersion()
domains = list()
tree=ElementTree.fromstring(virsh_conn.getCapabilities())
for target in tree.findall("guest"):
os_type = target.find("os_type").text
#We only allow full virtualization
if os_type != 'hvm':
continue
wordsize = int(target.find('arch/wordsize').text)
if wordsize == 64:
for domain in target.findall("arch/domain"):
domains.append(domain.get("type"))
(return_status, code) = hypervisor.set(type_, version, lib_version, domains)
if not return_status:
return (return_status, code)
return (True, code)
class RADavailableResourcesClass(RADclass):
def __init__(self, resources):
"""Copy resources from the RADclass (server resources not taking into account resources used by VMs"""
#New
        self.reserved = dict()              #Dictionary of reserved resources for a server. Keys are VNFC names and values are RADreservedResources
        self.cores_consumption = None       #Dictionary of cpu consumption. Keys are cpu ids and values are their usage in percent
self.machine = resources.machine
self.user = resources.user
self.password = resources.password
self.name = resources.name
self.nr_processors = resources.nr_processors
self.processor_family = resources.processor_family
self.processor_manufacturer = resources.processor_manufacturer
self.processor_version = resources.processor_version
self.processor_features = resources.processor_features
self.memory_type = resources.memory_type
self.memory_freq = resources.memory_freq
self.memory_nr_channels = resources.memory_nr_channels
self.memory_size = resources.memory_size
self.memory_hugepage_sz = resources.memory_hugepage_sz
self.hypervisor = Hypervisor()
self.hypervisor.assign(resources.hypervisor)
self.os = OpSys()
self.os.assign(resources.os)
self.nodes = dict()
for node_k, node_v in resources.nodes.iteritems():
self.nodes[node_k] = Node()
self.nodes[node_k].assign(node_v)
return
def _get_cores_consumption_warnings(self):
"""Returns list of warning strings in case warnings are generated.
In case no warnings are generated the return value will be an empty list"""
warnings = list()
#Get the cores consumption
(return_status, code) = get_ssh_connection(self.machine, self.user, self.password)
if not return_status:
return (return_status, code)
ssh_conn = code
command = 'mpstat -P ALL 1 1 | grep Average | egrep -v CPU\|all'
(_, stdout, stderr) = ssh_conn.exec_command(command)
error = stderr.read()
if len(error) > 0:
return (False, error)
self.cores_consumption = dict()
for line in stdout.readlines():
cpu_usage_split = re.split('\t| *', line.rstrip('\n'))
usage = 100 *(1 - float(cpu_usage_split[10]))
if usage > 0:
self.cores_consumption[int(cpu_usage_split[1])] = usage
ssh_conn.close()
#Check if any core marked as available in the nodes has cpu_usage > 0
for _, node_v in self.nodes.iteritems():
cores = node_v.processor.eligible_cores
for cpu in cores:
                if isinstance(cpu, list):
for core in cpu:
if core in self.cores_consumption:
warnings.append('Warning: Core '+str(core)+' is supposed to be idle but it is consuming '+str(self.cores_consumption[core])+'%')
else:
if cpu in self.cores_consumption:
                        warnings.append('Warning: Core '+str(cpu)+' is supposed to be idle but it is consuming '+str(self.cores_consumption[cpu])+'%')
return warnings
def reserved_to_text(self):
text = str()
for VNFC_name, VNFC_reserved in self.reserved.iteritems():
text += ' VNFC: '+str(VNFC_name)+'\n'
text += VNFC_reserved.to_text()
return text
def obtain_usage(self):
resp = dict()
#Iterate through nodes to get cores, eligible cores, memory and physical ports (save ports usage for next section)
nodes = dict()
ports_usage = dict()
hugepage_size = dict()
for node_k, node_v in self.nodes.iteritems():
node = dict()
ports_usage[node_k] = dict()
eligible_cores = list()
for pair in node_v.processor.eligible_cores:
if isinstance(pair, list):
for element in pair:
eligible_cores.append(element)
else:
eligible_cores.append(pair)
node['cpus'] = {'cores':node_v.processor.cores,'eligible_cores':eligible_cores}
node['memory'] = {'size':str(node_v.memory.node_size/(1024*1024*1024))+'GB','eligible':str(node_v.memory.eligible_memory/(1024*1024*1024))+'GB'}
hugepage_size[node_k] = node_v.memory.hugepage_sz
ports = dict()
for nic in node_v.nic_list:
for port in nic.ports.itervalues():
if port.enabled and not port.virtual:
ports[port.name] = {'speed':str(port.speed/1000000000)+'G'}
# print '*************** ',port.name,'speed',port.speed
ports_usage[node_k][port.name] = 100 - int(100*float(port.available_bw)/float(port.speed))
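                        # occupation in percent = 100 - (available_bw / speed) * 100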
node['ports'] = ports
nodes[node_k] = node
resp['RAD'] = nodes
#Iterate through reserved section to get used cores, used memory and port usage
cores = dict()
memory = dict()
#reserved_cores = list
for node_k in self.nodes.iterkeys():
if not node_k in cores:
cores[node_k] = list()
memory[node_k] = 0
for _, reserved in self.reserved.iteritems():
if node_k in reserved.node_reserved_resources:
node_v = reserved.node_reserved_resources[node_k]
cores[node_k].extend(node_v.reserved_cores)
memory[node_k] += node_v.reserved_hugepage_nr * hugepage_size[node_k]
occupation = dict()
for node_k in self.nodes.iterkeys():
ports = dict()
for name, usage in ports_usage[node_k].iteritems():
ports[name] = {'occupied':str(usage)+'%'}
# print '****************cores',cores
# print '****************memory',memory
occupation[node_k] = {'cores':cores[node_k],'memory':str(memory[node_k]/(1024*1024*1024))+'GB','ports':ports}
resp['occupation'] = occupation
return resp
class RADreservedResources():
def __init__(self):
self.node_reserved_resources = dict() #dict. keys are the RAD nodes id, values are NodeReservedResources
self.mgmt_interface_pci = None #pci in the VNF for the management interface
self.image = None #Path in remote machine of the VNFC image
def update(self,reserved):
self.image = reserved.image
self.mgmt_interface_pci = reserved.mgmt_interface_pci
for k,v in reserved.node_reserved_resources.iteritems():
if k in self.node_reserved_resources.keys():
return (False, 'Duplicated node entry '+str(k)+' in reserved resources')
self.node_reserved_resources[k]=v
return (True, "")
def to_text(self):
text = ' image: '+str(self.image)+'\n'
for node_id, node_reserved in self.node_reserved_resources.iteritems():
text += ' Node ID: '+str(node_id)+'\n'
text += node_reserved.to_text()
return text
class NodeReservedResources():
def __init__(self):
# reserved_shared_cores = None #list. List of all cores that the VNFC needs in shared mode #TODO Not used
# reserved_memory = None #Integer. Amount of KiB needed by the VNFC #TODO. Not used since hugepages are used
self.reserved_cores = list() #list. List of all cores that the VNFC uses
self.reserved_hugepage_nr = 0 #Integer. Number of hugepages needed by the VNFC
self.reserved_ports = dict() #dict. The key is the physical port pci and the value the VNFC port description
self.vlan_tags = dict()
self.cpu_pinning = None
def to_text(self):
text = ' cores: '+str(self.reserved_cores)+'\n'
text += ' cpu_pinning: '+str(self.cpu_pinning)+'\n'
text += ' hugepages_nr: '+str(self.reserved_hugepage_nr)+'\n'
for port_pci, port_description in self.reserved_ports.iteritems():
text += ' port: '+str(port_pci)+'\n'
text += port_description.to_text()
return text
# def update(self,reserved):
# self.reserved_cores = list(reserved.reserved_cores)
# self.reserved_hugepage_nr = reserved.reserved_hugepage_nr
# self.reserved_ports = dict(reserved.reserved_ports)
# self.cpu_pinning = list(reserved.cpu_pinning)
| {
"content_hash": "ba6e90e533b895522e195f5014cc4c0b",
"timestamp": "",
"source": "github",
"line_count": 1620,
"max_line_length": 244,
"avg_line_length": 47.42345679012346,
"alnum_prop": 0.5566344727045531,
"repo_name": "me-ankur/openmano",
"id": "7d0ccac99606f0a546335d2c7043110845fad00a",
"size": "76852",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "openvim/utils/RADclass.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "10986"
},
{
"name": "HTML",
"bytes": "523"
},
{
"name": "JavaScript",
"bytes": "307304"
},
{
"name": "PHP",
"bytes": "69095"
},
{
"name": "PLpgSQL",
"bytes": "41348"
},
{
"name": "Python",
"bytes": "859995"
},
{
"name": "Shell",
"bytes": "187480"
}
],
"symlink_target": ""
} |
import time
from weakref import ref
class Engines(object):
"""
Static collector for engines to register against.
"""
def __init__(self):
self._engines = {}
def register(self, engine):
self._engines[engine.__name__] = engine
self._engines[engine.name] = engine
def __getitem__(self, key):
return self._engines[key]
def __contains__(self, key):
return self._engines.__contains__(key)
Engines = Engines()
class CacheEngineType(type):
"""
Cache Engine Metaclass that registers new engines against the cache
for named selection and use.
"""
def __init__(cls, name, bases, attrs):
super(CacheEngineType, cls).__init__(name, bases, attrs)
if name != 'CacheEngine':
# skip base class
Engines.register(cls)
class CacheEngine(object):
__metaclass__ = CacheEngineType
name = 'unspecified'
def __init__(self, parent):
self.parent = ref(parent)
def configure(self):
raise RuntimeError
def get(self, date):
raise RuntimeError
def put(self, key, value, lifetime):
raise RuntimeError
def expire(self, key):
raise RuntimeError
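# A minimal (hypothetical) engine sketch; simply subclassing CacheEngine is enough
# for CacheEngineType to register it under both its class name and its 'name' attribute:
#   class NullEngine(CacheEngine):
#       name = 'null'
#       def configure(self): pass
#       def get(self, date): return []
#       def put(self, key, value, lifetime): pass
#       def expire(self, key): pass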
class CacheObject(object):
"""
Cache object class, containing one stored record.
"""
def __init__(self, key, data, lifetime=0, creation=None):
self.key = key
self.data = data
self.lifetime = lifetime
self.creation = creation if creation is not None else time.time()
def __len__(self):
return len(self.data)
@property
def expired(self):
return self.remaining == 0
@property
def remaining(self):
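        # seconds until this object expires; clamped to 0 once creation + lifetime has passed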
return max((self.creation + self.lifetime) - time.time(), 0)
| {
"content_hash": "3b884c3e0de35570440bcafe0e5b0458",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 73,
"avg_line_length": 23.626666666666665,
"alnum_prop": 0.6032731376975169,
"repo_name": "wagnerrp/pytmdb3",
"id": "11019551cbf3ce8d5c84c169a1ff4ec321808023",
"size": "2004",
"binary": false,
"copies": "34",
"ref": "refs/heads/master",
"path": "tmdb3/cache_engine.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "111996"
}
],
"symlink_target": ""
} |
"""
Input file writer for SES3D 4.0.
:copyright:
Lion Krischer ([email protected]), 2013
:license:
GNU General Public License, Version 3
(http://www.gnu.org/copyleft/gpl.html)
"""
EARTH_RADIUS = 6371 * 1000
import numpy as np
import obspy
import os
from wfs_input_generator import rotations
# Define the required configuration items. The key is always the name of the
# configuration item and the value is a tuple. The first item in the tuple is
# the function or type that it will be converted to and the second is the
# documentation.
REQUIRED_CONFIGURATION = {
"output_folder": (str, "The output directory"),
"number_of_time_steps": (int, "The number of time steps"),
"time_increment_in_s": (float, "The time increment in seconds"),
"mesh_min_latitude": (float, "The minimum latitude of the mesh"),
"mesh_max_latitude": (float, "The maximum latitude of the mesh"),
"mesh_min_longitude": (float, "The minimum longitude of the mesh"),
"mesh_max_longitude": (float, "The maximum longitude of the mesh"),
"mesh_min_depth_in_km": (float, "The minimum depth of the mesh in km"),
"mesh_max_depth_in_km": (float, "The maximum depth of the mesh in km"),
"nx_global": (int, "Number of elements in theta directions. Please refer "
"to the SES3D manual for a more extensive description"),
"ny_global": (int, "Number of elements in phi directions. Please refer "
"to the SES3D manual for a more extensive description"),
"nz_global": (int, "Number of elements in r directions. Please refer "
"to the SES3D manual for a more extensive description"),
"px": (int, "Number of processors in theta direction"),
"py": (int, "Number of processors in phi direction"),
"pz": (int, "Number of processors in r direction"),
"source_time_function": (np.array, "The source time function.")
}
# The default configuration item. Contains everything that can sensibly be set
# to some default value. The syntax is very similar to the
# REQUIRED_CONFIGURATION except that the tuple now has three items, the first
# one being the actual default value.
DEFAULT_CONFIGURATION = {
"event_tag": ("1", str, "The name of the event. Should be numeric for "
"now."),
"is_dissipative": (True, bool, "Dissipative simulation or not"),
"output_displacement": (False, bool, "Output the displacement field"),
"displacement_snapshot_sampling": (
10000, int, "Sampling rate of output displacement field"),
"lagrange_polynomial_degree": (
4, int, "Degree of the Lagrange Polynomials"),
"simulation_type": (
"normal simulation", str, "The type of simulation to "
"perform. One of 'normal simulation', 'adjoint forward', "
"'adjoint backward'"),
"adjoint_forward_sampling_rate": (
15, int, "The sampling rate of the adjoint forward field for an "
"adjoint simulation run"),
"adjoint_forward_wavefield_output_folder": (
"", str, "The output folder of the adjoint forward field if "
"requested. If empty, it will be set to a subfolder of the the output "
"directory."),
"rotation_angle_in_degree": (
0.0, float, "A possible rotation of the mesh. All data will be "
"rotated in the opposite way. Useful for simulation close to the "
"equator."),
"rotation_axis": (
[0.0, 0.0, 1.0], lambda x: map(float, x),
"The rotation angle given as [x, y, z] in correspondance with the "
"SES3D coordinate system."),
"Q_model_relaxation_times": (
[1.7308, 14.3961, 22.9973],
lambda x: map(float, x),
"The relaxations times for the different Q model mechanisms"),
"Q_model_weights_of_relaxation_mechanisms": (
[2.5100, 2.4354, 0.0879],
lambda x: map(float, x),
"The weights for relaxations mechanisms for the Q model mechanisms")
}
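# A minimal configuration sketch (hypothetical values); the ``config`` object handed
# to write() is expected to expose the keys above as attributes, e.g.:
#   config.output_folder = "OUTPUT"
#   config.number_of_time_steps = 4000
#   config.time_increment_in_s = 0.13
#   config.nx_global, config.ny_global, config.nz_global = 66, 108, 28
#   config.px, config.py, config.pz = 3, 4, 4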
def write(config, events, stations):
"""
Writes input files for SES3D version 4.0.
Can only simulate one event at a time. If more events are present, an error
will be raised.
"""
# ========================================================================
# preliminaries
# ========================================================================
if not config.adjoint_forward_wavefield_output_folder:
config.adjoint_forward_wavefield_output_folder = \
os.path.join(config.output_folder, "ADJOINT_FORWARD_FIELD")
output_files = {}
# The data needs to be rotated in the opposite direction.
if config.rotation_angle_in_degree:
config.rotation_angle_in_degree *= -1.0
# Map and assert the simulation type.
sim_map = {"normal simulation": 0, "adjoint forward": 1,
"adjoint reverse": 2}
if config.simulation_type not in sim_map:
msg = "simulation_type needs to be on of %s." % \
", ".join(sim_map.keys())
raise ValueError(msg)
simulation_type = sim_map[config.simulation_type]
# Only exactly one event is acceptable.
if len(events) != 1:
msg = "Exactly one event is required for SES3D 4.0."
raise ValueError(msg)
event = events[0]
# ========================================================================
# setup file
# =========================================================================
# Assemble the mesh to have everything in one place
mesh = obspy.core.AttribDict()
mesh.min_latitude = config.mesh_min_latitude
mesh.max_latitude = config.mesh_max_latitude
mesh.min_longitude = config.mesh_min_longitude
mesh.max_longitude = config.mesh_max_longitude
mesh.min_depth_in_km = config.mesh_min_depth_in_km
mesh.max_depth_in_km = config.mesh_max_depth_in_km
# Rotate coordinates and moment tensor if requested.
if config.rotation_angle_in_degree:
lat, lng = rotations.rotate_lat_lon(
event["latitude"],
event["longitude"], config.rotation_axis,
config.rotation_angle_in_degree)
m_rr, m_tt, m_pp, m_rt, m_rp, m_tp = rotations.rotate_moment_tensor(
event["m_rr"], event["m_tt"], event["m_pp"], event["m_rt"],
event["m_rp"], event["m_tp"], event["latitude"],
event["longitude"], config.rotation_axis,
config.rotation_angle_in_degree)
else:
lat, lng = (event["latitude"], event["longitude"])
m_rr, m_tt, m_pp, m_rt, m_rp, m_tp = (
event["m_rr"], event["m_tt"], event["m_pp"], event["m_rt"],
event["m_rp"], event["m_tp"])
# Check if the event still lies within bounds. Otherwise the whole
# simulation does not make much sense.
if _is_in_bounds(lat, lng, mesh) is False:
msg = "Event is not in the domain!"
raise ValueError(msg)
setup_file_template = (
"MODEL ==============================================================="
"================================================================="
"=====\n"
"{theta_min:<44.6f}! theta_min (colatitude) in degrees\n"
"{theta_max:<44.6f}! theta_max (colatitude) in degrees\n"
"{phi_min:<44.6f}! phi_min (longitude) in degrees\n"
"{phi_max:<44.6f}! phi_max (longitude) in degrees\n"
"{z_min:<44.6f}! z_min (radius) in m\n"
"{z_max:<44.6f}! z_max (radius) in m\n"
"{is_diss:<44d}! is_diss\n"
"{model_type:<44d}! model_type\n"
"COMPUTATIONAL SETUP (PARALLELISATION) ==============================="
"================================================================="
"=====\n"
"{nx_global:<44d}! nx_global, "
"(nx_global+px = global # elements in theta direction)\n"
"{ny_global:<44d}! ny_global, "
"(ny_global+py = global # elements in phi direction)\n"
"{nz_global:<44d}! nz_global, "
"(nz_global+pz = global # of elements in r direction)\n"
"{lpd:<44d}! lpd, LAGRANGE polynomial degree\n"
"{px:<44d}! px, processors in theta direction\n"
"{py:<44d}! py, processors in phi direction\n"
"{pz:<44d}! pz, processors in r direction\n"
"ADJOINT PARAMETERS =================================================="
"================================================================="
"=====\n"
"{adjoint_flag:<44d}! adjoint_flag (0=normal simulation, "
"1=adjoint forward, 2=adjoint reverse)\n"
"{samp_ad:<44d}! samp_ad, sampling rate of forward field\n"
"{adjoint_wavefield_folder}")
setup_file = setup_file_template.format(
# Colatitude! Swaps min and max.
theta_min=rotations.lat2colat(float(mesh.max_latitude)),
theta_max=rotations.lat2colat(float(mesh.min_latitude)),
phi_min=float(mesh.min_longitude),
phi_max=float(mesh.max_longitude),
# Min/max radius and depth are inverse to each other.
z_min=EARTH_RADIUS - (float(mesh.max_depth_in_km) * 1000.0),
z_max=EARTH_RADIUS - (float(mesh.min_depth_in_km) * 1000.0),
is_diss=1 if config.is_dissipative else 0,
model_type=1,
lpd=int(config.lagrange_polynomial_degree),
# Computation setup.
nx_global=config.nx_global,
ny_global=config.ny_global,
nz_global=config.nz_global,
px=config.px,
py=config.py,
pz=config.pz,
adjoint_flag=simulation_type,
samp_ad=config.adjoint_forward_sampling_rate,
adjoint_wavefield_folder=config
.adjoint_forward_wavefield_output_folder)
output_files["setup"] = setup_file
# =========================================================================
# event file
# =========================================================================
event_template = (
"SIMULATION PARAMETERS ==============================================="
"===================================\n"
"{nt:<44d}! nt, number of time steps\n"
"{dt:<44.6f}! dt in sec, time increment\n"
"SOURCE =============================================================="
"===================================\n"
"{xxs:<44.6f}! xxs, theta-coord. center of source in degrees\n"
"{yys:<44.6f}! yys, phi-coord. center of source in degrees\n"
"{zzs:<44.6f}! zzs, source depth in (m)\n"
"{srctype:<44d}! srctype, 1:f_x, 2:f_y, 3:f_z, 10:M_ij\n"
"{m_tt:<44.6e}! M_theta_theta\n"
"{m_pp:<44.6e}! M_phi_phi\n"
"{m_rr:<44.6e}! M_r_r\n"
"{m_tp:<44.6e}! M_theta_phi\n"
"{m_tr:<44.6e}! M_theta_r\n"
"{m_pr:<44.6e}! M_phi_r\n"
"OUTPUT DIRECTORY ===================================================="
"==================================\n"
"{output_folder}\n"
"OUTPUT FLAGS ========================================================"
"==================================\n"
"{ssamp:<44d}! ssamp, snapshot sampling\n"
"{output_displacement:<44d}! output_displacement, output displacement "
"field (1=yes,0=no)")
event_file = event_template.format(
nt=int(config.number_of_time_steps),
dt=float(config.time_increment_in_s),
# Colatitude!
xxs=rotations.lat2colat(float(lat)),
yys=float(lng),
zzs=float(event["depth_in_km"] * 1000.0),
srctype=10,
m_tt=float(m_tt),
m_pp=float(m_pp),
m_rr=float(m_rr),
m_tp=float(m_tp),
m_tr=float(m_rt),
m_pr=float(m_rp),
output_folder=config.output_folder,
ssamp=int(config.displacement_snapshot_sampling),
output_displacement=1 if config.output_displacement else 0)
# Put it in the collected dictionary.
fn = "event_%s" % config.event_tag
output_files[fn] = event_file
# =========================================================================
# event_list
# =========================================================================
# Make the event_list. Currently, only one event is used
output_files["event_list"] = "{0:<44d}! n_events = number of events\n{1}"\
.format(1, config.event_tag)
# =========================================================================
# recfile
# =========================================================================
recfile_parts = []
for station in stations:
# Also rotate each station if desired.
if config.rotation_angle_in_degree:
lat, lng = rotations.rotate_lat_lon(
station["latitude"], station["longitude"],
config.rotation_axis, config.rotation_angle_in_degree)
else:
lat, lng = (station["latitude"], station["longitude"])
# Check if the stations still lies within bounds of the mesh.
if not _is_in_bounds(lat, lng, mesh):
msg = "Stations %s is not in the domain. Will be skipped." % \
station["id"]
print msg
continue
depth = -1.0 * (station["elevation_in_m"] -
station["local_depth_in_m"])
if depth < 0:
depth = 0.0
recfile_parts.append("{network:_<2s}.{station:_<5s}.___".format(
network=station["id"].split(".")[0],
station=station["id"].split(".")[1]))
recfile_parts.append(
"{colatitude:.6f} {longitude:.6f} {depth:.1f}"
.format(colatitude=rotations.lat2colat(float(lat)),
longitude=float(lng), depth=float(depth)))
recfile_parts.insert(0, "%i" % (len(recfile_parts) // 2))
# Put it in the collected dictionary
fn = "recfile_" + config.event_tag
output_files[fn] = "\n".join(recfile_parts)
# =========================================================================
# relaxation parameters
# =========================================================================
# Write the relaxation file.
relax_file = (
"RELAXATION TIMES [s] =====================\n"
"{relax_times}\n"
"WEIGHTS OF RELAXATION MECHANISMS =========\n"
"{relax_weights}").format(
relax_times="\n".join(["%.6f" % _i for _i in
config.Q_model_relaxation_times]),
relax_weights="\n".join([
"%.6f" % _i for _i in
config.Q_model_weights_of_relaxation_mechanisms]))
output_files["relax"] = relax_file
# =========================================================================
# source-time function
# =========================================================================
# Also write the source time function.
output_files["stf"] = "\n".join(["%e" % _i for
_i in config.source_time_function])
# =========================================================================
# finalize
# =========================================================================
# Make sure all output files have an empty new line at the end.
for key in output_files.iterkeys():
output_files[key] += "\n\n"
return output_files
def _is_in_bounds(lat, lng, mesh):
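    """Return True if the given latitude/longitude lies within the mesh extent, False otherwise."""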
if (mesh.min_latitude <= lat <= mesh.max_latitude) and \
(mesh.min_longitude <= lng <= mesh.max_longitude):
return True
return False
| {
"content_hash": "42b74ccdca76a83de7dc62050d26feac",
"timestamp": "",
"source": "github",
"line_count": 357,
"max_line_length": 79,
"avg_line_length": 43.182072829131656,
"alnum_prop": 0.5212765957446809,
"repo_name": "rafiqsaleh/VERCE",
"id": "510746e30d928bd4d0bf18714e1cd1e642703fe1",
"size": "15462",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "verce-hpc-pe/src/wfs_input_generator/backends/write_ses3d_4_0.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3310"
},
{
"name": "CSS",
"bytes": "205682"
},
{
"name": "HTML",
"bytes": "282663"
},
{
"name": "Java",
"bytes": "716231"
},
{
"name": "JavaScript",
"bytes": "10335428"
},
{
"name": "Makefile",
"bytes": "2325"
},
{
"name": "Python",
"bytes": "3426039"
},
{
"name": "Shell",
"bytes": "121346"
},
{
"name": "TeX",
"bytes": "278086"
}
],
"symlink_target": ""
} |
"""Add is_draft status to queries and dashboards
Revision ID: 65fc9ede4746
Revises:
Create Date: 2016-12-07 18:08:13.395586
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
from sqlalchemy.exc import ProgrammingError
revision = '65fc9ede4746'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
try:
op.add_column('queries', sa.Column('is_draft', sa.Boolean, default=True, index=True))
op.add_column('dashboards', sa.Column('is_draft', sa.Boolean, default=True, index=True))
op.execute("UPDATE queries SET is_draft = (name = 'New Query')")
op.execute("UPDATE dashboards SET is_draft = false")
except ProgrammingError as e:
# The columns might exist if you ran the old migrations.
if 'column "is_draft" of relation "queries" already exists' in e.message:
print "Can't run this migration as you already have is_draft columns, please run:"
print "./manage.py db stamp {} # you might need to alter the command to match your environment.".format(revision)
exit()
def downgrade():
op.drop_column('queries', 'is_draft')
op.drop_column('dashboards', 'is_draft')
| {
"content_hash": "fa4555d11b008a88d22291201d54a5c5",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 125,
"avg_line_length": 34.138888888888886,
"alnum_prop": 0.6834825061025224,
"repo_name": "luozhanxin/redash-docker",
"id": "91398a1574ddaea6162b96ce2e6e4e17673688e2",
"size": "1229",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "migrations/versions/65fc9ede4746_add_is_draft_status_to_queries_and_.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "225811"
},
{
"name": "HTML",
"bytes": "119914"
},
{
"name": "JavaScript",
"bytes": "251613"
},
{
"name": "Makefile",
"bytes": "825"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "631646"
},
{
"name": "Shell",
"bytes": "41870"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
class CompanyConfig(AppConfig):
name = 'company'
verbose_name = 'Company'
| {
"content_hash": "6e6a87c4c9866eedeb10b02f123ae226",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 33,
"avg_line_length": 19.666666666666668,
"alnum_prop": 0.7203389830508474,
"repo_name": "roks0n/nomadboard",
"id": "7cd18217481c256a1c6621c1c95e4029ebaff436",
"size": "118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nomadboard/nomadboard/company/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "79788"
},
{
"name": "HTML",
"bytes": "3798"
},
{
"name": "JavaScript",
"bytes": "90864"
},
{
"name": "Makefile",
"bytes": "1517"
},
{
"name": "Python",
"bytes": "32746"
},
{
"name": "Shell",
"bytes": "231"
}
],
"symlink_target": ""
} |
import os
import json
import datetime
from django import forms
from django.conf import settings
from django.db import transaction
from django.db import models
from apps.game.models import Game
from apps.team.models import Team
from django.contrib.auth.models import User
import utils.files.logics as fileLogics
import copy
| {
"content_hash": "7373e14a1f924ee48a19aa18b7ec238a",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 45,
"avg_line_length": 23.571428571428573,
"alnum_prop": 0.8212121212121212,
"repo_name": "zWingz/webbasketball",
"id": "a65b559e2134f7f370eb1834ed83f061fd3aaad2",
"size": "354",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/admin/logics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "50498"
},
{
"name": "HTML",
"bytes": "170566"
},
{
"name": "JavaScript",
"bytes": "30741"
},
{
"name": "Python",
"bytes": "106971"
}
],
"symlink_target": ""
} |
import sys
import time
import numpy as np
import pytest
import ray
from ray.test_utils import get_other_nodes
import ray.ray_constants as ray_constants
@pytest.mark.skip(reason="No reconstruction for objects placed in plasma yet")
@pytest.mark.parametrize(
"ray_start_cluster",
[{
# Force at least one task per node.
"num_cpus": 1,
"num_nodes": 4,
"object_store_memory": 1000 * 1024 * 1024,
"_system_config": {
# Raylet codepath is not stable with a shorter timeout.
"num_heartbeats_timeout": 10,
"object_manager_pull_timeout_ms": 1000,
"object_manager_push_timeout_ms": 1000,
},
}],
indirect=True)
def test_object_reconstruction(ray_start_cluster):
cluster = ray_start_cluster
# Submit tasks with dependencies in plasma.
@ray.remote
def large_value():
# Sleep for a bit to force tasks onto different nodes.
time.sleep(0.1)
return np.zeros(10 * 1024 * 1024)
@ray.remote
def g(x):
return
# Kill the component on all nodes except the head node as the tasks
# execute. Do this in a loop while submitting tasks between each
# component failure.
time.sleep(0.1)
worker_nodes = get_other_nodes(cluster)
assert len(worker_nodes) > 0
component_type = ray_constants.PROCESS_TYPE_RAYLET
for node in worker_nodes:
process = node.all_processes[component_type][0].process
# Submit a round of tasks with many dependencies.
num_tasks = len(worker_nodes)
xs = [large_value.remote() for _ in range(num_tasks)]
# Wait for the tasks to complete, then evict the objects from the local
# node.
for x in xs:
ray.get(x)
ray.internal.free([x], local_only=True)
# Kill a component on one of the nodes.
process.terminate()
time.sleep(1)
process.kill()
process.wait()
        assert process.poll() is not None
# Make sure that we can still get the objects after the
# executing tasks died.
print("F", xs)
xs = [g.remote(x) for x in xs]
print("G", xs)
ray.get(xs)
@pytest.mark.parametrize(
"ray_start_cluster", [{
"num_cpus": 4,
"num_nodes": 3,
"do_init": True
}],
indirect=True)
def test_actor_creation_node_failure(ray_start_cluster):
# TODO(swang): Refactor test_raylet_failed, etc to reuse the below code.
cluster = ray_start_cluster
@ray.remote
class Child:
def __init__(self, death_probability):
self.death_probability = death_probability
def ping(self):
# Exit process with some probability.
exit_chance = np.random.rand()
if exit_chance < self.death_probability:
sys.exit(-1)
num_children = 25
# Children actors will die about half the time.
death_probability = 0.5
children = [Child.remote(death_probability) for _ in range(num_children)]
while len(cluster.list_all_nodes()) > 1:
for j in range(2):
# Submit some tasks on the actors. About half of the actors will
# fail.
children_out = [child.ping.remote() for child in children]
# Wait a while for all the tasks to complete. This should trigger
# reconstruction for any actor creation tasks that were forwarded
# to nodes that then failed.
ready, _ = ray.wait(
children_out, num_returns=len(children_out), timeout=5 * 60.0)
assert len(ready) == len(children_out)
# Replace any actors that died.
for i, out in enumerate(children_out):
try:
ray.get(out)
except ray.exceptions.RayActorError:
children[i] = Child.remote(death_probability)
# Remove a node. Any actor creation tasks that were forwarded to this
# node must be resubmitted.
cluster.remove_node(get_other_nodes(cluster, True)[-1])
def test_driver_lives_sequential(ray_start_regular):
ray.worker._global_node.kill_raylet()
ray.worker._global_node.kill_plasma_store()
ray.worker._global_node.kill_log_monitor()
ray.worker._global_node.kill_monitor()
ray.worker._global_node.kill_gcs_server()
# If the driver can reach the tearDown method, then it is still alive.
def test_driver_lives_parallel(ray_start_regular):
all_processes = ray.worker._global_node.all_processes
process_infos = (all_processes[ray_constants.PROCESS_TYPE_PLASMA_STORE] +
all_processes[ray_constants.PROCESS_TYPE_GCS_SERVER] +
all_processes[ray_constants.PROCESS_TYPE_RAYLET] +
all_processes[ray_constants.PROCESS_TYPE_LOG_MONITOR] +
all_processes[ray_constants.PROCESS_TYPE_MONITOR])
assert len(process_infos) == 5
# Kill all the components in parallel.
for process_info in process_infos:
process_info.process.terminate()
time.sleep(0.1)
for process_info in process_infos:
process_info.process.kill()
for process_info in process_infos:
process_info.process.wait()
# If the driver can reach the tearDown method, then it is still alive.
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
| {
"content_hash": "2606b4fe56130adf9f6f78095ee18376",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 79,
"avg_line_length": 33.7888198757764,
"alnum_prop": 0.616360294117647,
"repo_name": "richardliaw/ray",
"id": "3dc65be557c14ed7fba2e533c493d9151475d12e",
"size": "5440",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ray/tests/test_multinode_failures_2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "62178"
},
{
"name": "C++",
"bytes": "4258483"
},
{
"name": "CSS",
"bytes": "8025"
},
{
"name": "Dockerfile",
"bytes": "6292"
},
{
"name": "HTML",
"bytes": "30414"
},
{
"name": "Java",
"bytes": "1263157"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "Python",
"bytes": "7515224"
},
{
"name": "Shell",
"bytes": "117425"
},
{
"name": "Starlark",
"bytes": "200955"
},
{
"name": "TypeScript",
"bytes": "149068"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('activities', '0002_auto_20160527_0932'),
]
operations = [
migrations.RemoveField(
model_name='projectactivity',
name='allowed_submissions',
),
]
| {
"content_hash": "7dfaa86ea69ba202a1a21ed7e3a6affe",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 50,
"avg_line_length": 20.294117647058822,
"alnum_prop": 0.6115942028985507,
"repo_name": "zurfyx/simple",
"id": "a862679a4ec6f71e781fe440a0c8c3af61634b21",
"size": "417",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simple/projects/activities/migrations/0003_remove_projectactivity_allowed_submissions.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11344"
},
{
"name": "HTML",
"bytes": "95034"
},
{
"name": "JavaScript",
"bytes": "2521"
},
{
"name": "Python",
"bytes": "137848"
}
],
"symlink_target": ""
} |
"""
Created on Tue May 17 01:01:31 2016
@author: Ghareeb
"""
from PythonStarterApp import app
import os
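# Use the port and host provided by the Cloud Foundry environment (VCAP_* variables)
# when available; otherwise fall back to local defaults.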
if 'VCAP_APP_PORT' in os.environ:
appPort = os.environ['VCAP_APP_PORT']
else:
appPort = 5000
if 'VCAP_APP_HOST' in os.environ:
appHost = os.environ['VCAP_APP_HOST']
else:
appHost = '0.0.0.0'
if __name__ == '__main__':
app.run(host=appHost,port=appPort, debug=False, threaded=True) | {
"content_hash": "f566f43886aa68120cc3974d6b1a866c",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 66,
"avg_line_length": 19.666666666666668,
"alnum_prop": 0.6513317191283293,
"repo_name": "YDnepr/practicalcourse",
"id": "f7e1754dee410196b1a84683110ad00db87d8e77",
"size": "437",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "welcome.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1082"
},
{
"name": "HTML",
"bytes": "5064"
},
{
"name": "Python",
"bytes": "18039"
}
],
"symlink_target": ""
} |
import operator
import numpy as np
def l1_min_c(X, y, loss='l2', fit_intercept=True, intercept_scaling=1.0):
"""
Return the maximum value for C that yields a model with coefficients
and intercept set to zero for l1 penalized classifiers,
such as LinearSVC with penalty='l1' and linear_model.LogisticRegression
with penalty='l1'.
This value is valid if class_weight parameter in fit() is not set.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
        Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target vector relative to X
loss : {'l2', 'log'}, default to 'l2'
Specifies the loss function.
With 'l2' it is the l2 loss (a.k.a. squared hinge loss).
With 'log' it is the loss of logistic regression models.
fit_intercept : bool, default: True
Specifies if the intercept should be fitted by the model.
        It must match the fit() method parameter.
intercept_scaling : float, default: 1
when fit_intercept is True, instance vector x becomes
[x, intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
It must match the fit() method parameter.
Returns
-------
l1_min_c: float
minimum value for C
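    Examples
    --------
    Illustrative sketch (toy data made up for this example):
    >>> import numpy as np
    >>> X = np.array([[0., 1.], [1., 0.], [1., 1.]])
    >>> y = np.array([0, 1, 1])
    >>> c = l1_min_c(X, y, loss='l2')  # any C <= c yields all-zero coefficients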
"""
import scipy.sparse as sp
if loss not in ('l2', 'log'):
raise ValueError('loss type not in ("l2", "log")')
y = np.asanyarray(y)
if sp.issparse(X):
X = sp.csc_matrix(X)
hstack = sp.hstack
dot = operator.mul
else:
X = np.asanyarray(X)
hstack = np.hstack
dot = np.dot
if fit_intercept:
bias = intercept_scaling * np.ones((np.size(y), 1))
X = hstack((X, bias))
classes = np.unique(y)
n_classes = np.size(classes)
if n_classes <= 2:
c = classes[0]
y = y.reshape((1, -1))
_y = np.empty(y.shape)
_y[y == c] = 1
_y[y != c] = -1
else:
_y = np.empty((n_classes, np.size(y)))
for i, c in enumerate(classes):
_y[i, y == c] = 1
_y[i, y != c] = -1
den = np.max(np.abs(dot(_y, X)))
if den == 0.0:
raise ValueError('Ill-posed l1_min_c calculation')
if loss == 'l2':
return 0.5 / den
else: # loss == 'log':
return 2.0 / den
| {
"content_hash": "6b236713c68e63838f58f95a0372545f",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 75,
"avg_line_length": 30,
"alnum_prop": 0.5781746031746032,
"repo_name": "ominux/scikit-learn",
"id": "b5fb30ef03fe3c0bed97adb1934dc01f9d136bc3",
"size": "2520",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sklearn/svm/bounds.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "455969"
},
{
"name": "C++",
"bytes": "240380"
},
{
"name": "Makefile",
"bytes": "1411"
},
{
"name": "Python",
"bytes": "2064853"
},
{
"name": "Shell",
"bytes": "486"
}
],
"symlink_target": ""
} |
import re
import pickle
import copy
import traceback
from collections import OrderedDict
from OpenGL.GL import *
from OpenGL.GL.shaders import *
from OpenGL.GL.shaders import glDeleteShader
import numpy as np
from PyEngine3D.Common import logger
from PyEngine3D.Utilities import Attributes, Logger
from PyEngine3D.App import CoreManager
from PyEngine3D.OpenGLContext import OpenGLContext, CreateUniformDataFromString
from .UniformBuffer import CreateUniformBuffer, UniformTextureBase
class Material:
def __init__(self, material_name, material_datas={}):
self.valid = False
logger.info("Load %s material." % material_name)
# for save
self.material_datas = material_datas
shader_codes = material_datas.get('shader_codes')
binary_format = material_datas.get('binary_format')
binary_data = material_datas.get('binary_data')
uniforms = material_datas.get('uniforms', [])
uniform_datas = material_datas.get('uniform_datas', {})
self.material_component_names = [x[1] for x in material_datas.get('material_components', [])]
self.macros = material_datas.get('macros', OrderedDict())
self.is_translucent = True if 0 < self.macros.get('TRANSPARENT_MATERIAL', 0) else False
self.name = material_name
self.shader_name = material_datas.get('shader_name', '')
self.program = -1
self.uniform_buffers = dict() # OrderedDict() # Declaration order is important.
self.Attributes = Attributes()
if binary_format is not None and binary_data is not None:
self.compile_from_binary(binary_format, binary_data)
self.valid = self.check_validate() and self.check_linked()
if not self.valid:
logger.error("%s material has been failed to compile from binary" % self.name)
self.compile_message = ""
if not self.valid:
self.compile_from_source(shader_codes)
self.valid = self.check_validate() and self.check_linked()
if not self.valid:
logger.error("%s material has been failed to compile from source" % self.name)
if self.valid:
self.create_uniform_buffers(uniforms, uniform_datas)
def get_save_data(self):
uniform_datas = {}
for uniform_name in self.uniform_buffers:
default_value = self.uniform_buffers[uniform_name].get_default_value()
if default_value is not None:
if hasattr(default_value, 'name'):
uniform_datas[uniform_name] = default_value.name
else:
uniform_datas[uniform_name] = default_value
self.material_datas['uniform_datas'] = uniform_datas
return self.material_datas
def get_attribute(self):
self.Attributes.set_attribute('name', self.name)
self.Attributes.set_attribute('shader_name', self.shader_name)
for key in self.macros:
self.Attributes.set_attribute(key, self.macros[key])
for uniform_buffer in self.uniform_buffers.values():
self.Attributes.set_attribute(uniform_buffer.name, uniform_buffer.get_default_value())
return self.Attributes
def set_attribute(self, attribute_name, attribute_value, parent_info, attribute_index):
if attribute_name in self.macros and self.macros[attribute_name] != attribute_value:
new_macros = copy.deepcopy(self.macros)
new_macros[attribute_name] = attribute_value
# if macro was changed then create a new material.
CoreManager.instance().resource_manager.get_material(self.shader_name, new_macros)
elif attribute_name in self.uniform_buffers:
uniform_buffer = self.uniform_buffers[attribute_name]
default_value = CreateUniformDataFromString(uniform_buffer.uniform_type, attribute_value)
uniform_buffer.set_default_value(default_value)
def delete(self):
OpenGLContext.use_program(0)
glDeleteProgram(self.program)
logger.info("Deleted %s material." % self.name)
def use_program(self):
OpenGLContext.use_program(self.program)
def save_to_binary(self):
size = GLint()
glGetProgramiv(self.program, GL_PROGRAM_BINARY_LENGTH, size)
# very important - check data dtype np.ubyte
binary_data = np.zeros(size.value, dtype=np.ubyte)
binary_size = GLint()
binary_format = GLenum()
glGetProgramBinary(self.program, size.value, binary_size, binary_format, binary_data)
binary_data = pickle.dumps(binary_data)
return binary_format, binary_data
def compile_from_binary(self, binary_format, binary_data):
binary_data = pickle.loads(binary_data)
self.program = glCreateProgram()
glProgramParameteri(self.program, GL_PROGRAM_BINARY_RETRIEVABLE_HINT, GL_TRUE)
glProgramBinary(self.program, binary_format.value, binary_data, len(binary_data))
def compile_from_source(self, shader_codes: dict):
shaders = []
for shader_type in shader_codes:
shader = self.compile(shader_type, shader_codes[shader_type])
if shader is not None:
logger.info("Compile %s %s." % (self.name, shader_type))
shaders.append(shader)
self.program = glCreateProgram()
# glProgramParameteri(self.program, GL_PROGRAM_SEPARABLE, GL_TRUE)
glProgramParameteri(self.program, GL_PROGRAM_BINARY_RETRIEVABLE_HINT, GL_TRUE)
for shader in shaders:
glAttachShader(self.program, shader)
glLinkProgram(self.program)
for shader in shaders:
glDetachShader(self.program, shader)
glDeleteShader(shader)
def create_uniform_buffers(self, uniforms, default_uniform_datas={}):
# create uniform buffers from source code
active_texture_index = 0
for uniform_type, uniform_name in uniforms:
# self.uniform_datas
default_data = CreateUniformDataFromString(uniform_type, default_uniform_datas.get(uniform_name))
uniform_buffer = CreateUniformBuffer(self.program, uniform_type, uniform_name, default_data=default_data)
if uniform_buffer is not None:
# Important : set texture binding index
if issubclass(uniform_buffer.__class__, UniformTextureBase):
uniform_buffer.set_texture_index(active_texture_index)
active_texture_index += 1
self.uniform_buffers[uniform_name] = uniform_buffer
else:
logger.warn("%s material has no %s uniform variable. It may have been optimized by the compiler..)" % (self.name, uniform_name))
return True
def compile(self, shaderType, shader_code):
"""
:param shaderType: GL_VERTEX_SHADER, GL_FRAGMENT_SHADER
:param shader_code: string
"""
if shader_code == "" or shader_code is None:
return None
try:
# Compile shaders
shader = glCreateShader(shaderType)
glShaderSource(shader, shader_code)
glCompileShader(shader)
compile_status = glGetShaderiv(shader, GL_COMPILE_STATUS)
if compile_status != 1:
infoLogs = glGetShaderInfoLog(shader)
if infoLogs:
if type(infoLogs) == bytes:
infoLogs = infoLogs.decode("utf-8")
infoLogs = ("GL_COMPILE_STATUS : %d\n" % compile_status) + infoLogs
shader_code_lines = shader_code.split('\n')
infoLogs = infoLogs.split('\n')
for i, infoLog in enumerate(infoLogs):
                        error_line = re.match(r'\d\((\d+)\) : error', infoLog)
if error_line is not None:
# show prev 3 lines
error_line = int(error_line.groups()[0]) - 1
for num in range(max(0, error_line - 3), error_line):
infoLogs[i] += "\n\t %s" % (shader_code_lines[num])
# show last line
infoLogs[i] += "\n\t--> %s" % (shader_code_lines[error_line])
infoLogs = "\n".join(infoLogs)
self.compile_message = "\n".join([self.compile_message, infoLogs])
logger.error("%s %s shader compile error.\n%s" % (self.name, shaderType.name, infoLogs))
else:
# complete
logger.log(Logger.MINOR_INFO, "Complete %s %s compile." % (self.name, shaderType.name))
return shader
except BaseException:
logger.error(traceback.format_exc())
return None
def check_validate(self):
if self.program >= 0:
glValidateProgram(self.program)
validation = glGetProgramiv(self.program, GL_VALIDATE_STATUS)
if validation == GL_TRUE:
return True
else:
logger.warn("Validation failure (%s): %s" % (validation, glGetProgramInfoLog(self.program)))
else:
logger.warn("Validation failure : %s" % self.name)
# always return True
return True
def check_linked(self):
if self.program >= 0:
link_status = glGetProgramiv(self.program, GL_LINK_STATUS)
if link_status == GL_TRUE:
return True
else:
logger.error("Link failure (%s): %s" % (link_status, glGetProgramInfoLog(self.program)))
else:
logger.error("Link failure : %s" % self.name)
return False
| {
"content_hash": "dde018f7e7289676a1bc10a16d1a7a96",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 144,
"avg_line_length": 42.93421052631579,
"alnum_prop": 0.6086423536622739,
"repo_name": "ubuntunux/GuineaPig",
"id": "4e15d252c897513e7c98d17a7185cc3c96b40d97",
"size": "9789",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PyEngine3D/OpenGLContext/Material.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "GLSL",
"bytes": "6355"
},
{
"name": "Python",
"bytes": "216821"
}
],
"symlink_target": ""
} |
horovod = "/home/wozniak/proj/horovod"
execfile(horovod+"/examples/keras_mnist.py")
| {
"content_hash": "1de5af91e12d0b259be772e5387987b8",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 44,
"avg_line_length": 42,
"alnum_prop": 0.7619047619047619,
"repo_name": "ECP-CANDLE/Supervisor",
"id": "cddb5c20dadf4fba259eecb09de4a97ccb480f58",
"size": "84",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scratch/horovod/horovod-1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "592"
},
{
"name": "C",
"bytes": "27472"
},
{
"name": "C++",
"bytes": "7545"
},
{
"name": "Jupyter Notebook",
"bytes": "246450"
},
{
"name": "M4",
"bytes": "3815"
},
{
"name": "Makefile",
"bytes": "8870"
},
{
"name": "Python",
"bytes": "362007"
},
{
"name": "R",
"bytes": "155474"
},
{
"name": "SWIG",
"bytes": "788"
},
{
"name": "Shell",
"bytes": "310198"
},
{
"name": "Swift",
"bytes": "124184"
},
{
"name": "TSQL",
"bytes": "2772"
},
{
"name": "Tcl",
"bytes": "2223"
}
],
"symlink_target": ""
} |
import tkinter
import math
import logging
import os
import logika_igre
import clovek
import racunalnik
import alfabeta
###########################################################################
#                                CONSTANTS                                #
###########################################################################
STRANICA_SESTKOTNIKA = 20
# visina trikotnikov v sestkotniku
VISINA_TRIKOTNIKA = 3 ** (0.5) * (0.5) * STRANICA_SESTKOTNIKA
PRAZNO = logika_igre.PRAZNO
NI_KONEC = logika_igre.NI_KONEC
NEODLOCENO = logika_igre.NEODLOCENO
# possible colour combinations of the playing fields
# when adding new ones, the izpis_igralca function and the choices
# in the barva_menu menu must be extended as well
kombinacije_barv = [('red', 'blue'), ('red', 'green'), ('blue', 'green')]
# used in the Text widget
END = 'end'
###########################################################################
# GUI #
###########################################################################
class Gui():
def __init__(self, master):
        # BOARD (canvas)
self.plosca = tkinter.Canvas(master)
self.plosca.grid(row=1, column=0)
        # subscribe to the left mouse click
self.plosca.bind("<Button-1>", self.plosca_klik)
        # MESSAGE AREA
self.napis = tkinter.StringVar(master, value='')
tkinter.Label(master, textvariable=self.napis).grid(row=0, column=0)
        # STORING DATA ABOUT THE FIELDS
        # Keys are ids, values are coordinates.
self.id_koord = {}
        # Keys are coordinates (i, j), values are ids
self.koord_id = {}
        # START A NEW GAME
self.igra = None
        self.igralec_1 = None # Object that plays IGRALEC_1 (set at the start of the game)
        self.igralec_2 = None # Object that plays IGRALEC_2 (set at the start of the game)
        # first configure the game settings
        # the game starts with the red-blue colour combination (choice 0)
        self.barva = tkinter.IntVar(value=0)
        # on a board of size 15x15
        self.velikost_matrike = tkinter.IntVar(value=15)
        # in human-computer mode (choice 1)
        self.nacin_igre = tkinter.IntVar(value=1)
        # start the game
self.zacni_igro()
        # CLOSING THE WINDOW
        # If the user closes the window, self.zapri_okno should be called
master.protocol("WM_DELETE_WINDOW", lambda: self.zapri_okno(master))
        # MAIN MENU
glavni_menu = tkinter.Menu(master)
master.config(menu=glavni_menu)
        # SUBMENUS
igra_menu = tkinter.Menu(glavni_menu, tearoff=0)
glavni_menu.add_cascade(label="Igra", menu=igra_menu)
nacini_menu = tkinter.Menu(glavni_menu, tearoff=0)
glavni_menu.add_cascade(label="Nastavitve igre", menu=nacini_menu)
velikost_menu = tkinter.Menu(glavni_menu, tearoff=0)
glavni_menu.add_cascade(label="Velikost polja", menu=velikost_menu)
barva_menu = tkinter.Menu(glavni_menu, tearoff=0)
glavni_menu.add_cascade(label="Barva", menu=barva_menu)
pomoc_menu = tkinter.Menu(glavni_menu, tearoff=0)
glavni_menu.add_cascade(label="Pomoč", menu=pomoc_menu)
        # SUBMENU CHOICES
igra_menu.add_command(label="Nova igra", command=lambda: self.zacni_igro())
nacini_menu.add_radiobutton(label="Človek - Človek", variable=self.nacin_igre, value=0, command=lambda: self.zacni_igro())
nacini_menu.add_radiobutton(label="Človek - Računalnik", variable=self.nacin_igre, value=1, command=lambda: self.zacni_igro())
nacini_menu.add_radiobutton(label="Računalnik - Človek", variable=self.nacin_igre, value=2, command=lambda: self.zacni_igro())
nacini_menu.add_radiobutton(label="Računalnik - Računalnik", variable=self.nacin_igre, value=3, command=lambda: self.zacni_igro())
velikost_menu.add_radiobutton(label="10x10", variable=self.velikost_matrike, value=10, command=lambda: self.zacni_igro())
velikost_menu.add_radiobutton(label="15x15", variable=self.velikost_matrike, value=15, command=lambda: self.zacni_igro())
velikost_menu.add_radiobutton(label="20x20", variable=self.velikost_matrike, value=20, command=lambda: self.zacni_igro())
barva_menu.add_radiobutton(label="rdeča-modra", variable=self.barva, value=0, command=lambda: self.zacni_igro())
barva_menu.add_radiobutton(label="rdeča-zelena", variable=self.barva, value=1, command=lambda: self.zacni_igro())
barva_menu.add_radiobutton(label="modra-zelena", variable=self.barva, value=2, command=lambda: self.zacni_igro())
pomoc_menu.add_command(label="Navodila", command=lambda: self.odpri_navodila())
##################################
    #              GAME              #
##################################
def zacni_igro(self):
        '''Resets the game state to the start of a game'''
        # Stop all threads that are currently thinking and clear the board
self.prekini_igralce()
self.plosca.delete('all')
        # set the colour
        self.nastavi_barvo_igralnih_polj()
        # set the size
        self.nastavi_velikost_igralnega_polja()
        # store the players
        self.nastavi_nacin_igre()
        # create a new game
        self.igra = logika_igre.Igra()
        # set up what is visible to the user
        self.napis.set('Na potezi je {0}.'.format(self.izpis_igralca(logika_igre.drugi)))
        self.napolni_igralno_polje()
        # player 2 moves first, because the first field is already coloured
        # with player 1's colour
self.igra.na_potezi = logika_igre.drugi
self.igralec_2.igraj()
def prekini_igralce(self):
"""Sporoči igralcem, da morajo nehati razmišljati."""
if self.igralec_1: self.igralec_1.prekini()
if self.igralec_2: self.igralec_2.prekini()
def povleci_potezo(self, i, j):
        '''tells the game logic to make the move,
        and the logic then takes care of validity'''
barva = self.igra.na_potezi
        # execute the move in the game logic
        poteza = self.igra.izvedi_potezo(i, j)
        # the move was not valid, do nothing
        if poteza == None:
            pass
        # the move was valid
        else:
            # colour the field
            id = self.koord_id[(i, j)]
            self.plosca.itemconfig(id, fill=barva)
            # continue the game
            (zmagovalec, zmagovalna_polja) = poteza
            if zmagovalec == NI_KONEC:
                # call the next player
if self.igra.na_potezi == logika_igre.prvi:
self.igralec_1.igraj()
self.napis.set('Na potezi je {0}.'.format(self.izpis_igralca(logika_igre.prvi)))
else:
self.igralec_2.igraj()
self.napis.set('Na potezi je {0}.'.format(self.izpis_igralca(logika_igre.drugi)))
else:
self.konec_igre(zmagovalec, zmagovalna_polja)
self.prekini_igralce()
self.igra.na_potezi = None
###########################################
    #          GAME SETTINGS         #
###########################################
def nastavi_velikost_igralnega_polja(self):
        '''sets the size of the playing field'''
        velikost_matrike = self.velikost_matrike.get()
        # set the size in logika_igre
        logika_igre.velikost_matrike = velikost_matrike
        # compute the width and the height
sirina = VISINA_TRIKOTNIKA * 2 * velikost_matrike + STRANICA_SESTKOTNIKA + 1
visina = 1.5 * STRANICA_SESTKOTNIKA * velikost_matrike + 0.5 * STRANICA_SESTKOTNIKA + 1
self.plosca.config(width=sirina, height=visina)
def nastavi_barvo_igralnih_polj(self):
        '''sets the colour of the playing fields'''
kombinacija = self.barva.get()
logika_igre.prvi = kombinacije_barv[kombinacija][0]
logika_igre.drugi = kombinacije_barv[kombinacija][1]
def nastavi_nacin_igre(self):
        '''sets the players'''
nacini_igre = [(clovek.Clovek(self), clovek.Clovek(self)),
(clovek.Clovek(self), racunalnik.Racunalnik(self, alfabeta.Alfabeta(alfabeta.globina))),
(racunalnik.Racunalnik(self, alfabeta.Alfabeta(alfabeta.globina)), clovek.Clovek(self)),
(racunalnik.Racunalnik(self, alfabeta.Alfabeta(alfabeta.globina)), racunalnik.Racunalnik(self, alfabeta.Alfabeta(alfabeta.globina)))]
nacin = self.nacin_igre.get()
self.igralec_1 = nacini_igre[nacin][0]
self.igralec_2 = nacini_igre[nacin][1]
###########################################
    #         OTHER FUNCTIONS        #
###########################################
def plosca_klik(self, event):
        '''determines the coordinates of the click and calls the appropriate player'''
m = event.x
n = event.y
id = self.plosca.find_closest(m, n)[0]
(i, j) = self.id_koord[id]
if self.igra.na_potezi == logika_igre.prvi:
self.igralec_1.klik(i, j)
elif self.igra.na_potezi == logika_igre.drugi:
self.igralec_2.klik(i, j)
else:
pass
def narisi_sestkotnik(self, x, y):
        '''draws a hexagon and returns its id'''
a = STRANICA_SESTKOTNIKA
v = VISINA_TRIKOTNIKA
t = [x, y + a * 0.5,
x + v, y,
x + 2 * v,y + (0.5) * a,
x + 2 * v, y + 1.5 * a,
x + v, y + 2 * a,
x, y + 1.5 * a]
id = self.plosca.create_polygon(*t, fill=PRAZNO, outline='black')
return id
def napolni_igralno_polje(self):
        '''draws the playing field made up of hexagons'''
a = STRANICA_SESTKOTNIKA
v = VISINA_TRIKOTNIKA
velikost_matrike = logika_igre.velikost_matrike
        for i in range(velikost_matrike): # row
            # check even/odd parity to determine the offset of the first hexagon
            if i % 2 == 0: # odd rows (because we start counting at 0)
                zacetni_x = 2
                for j in range(velikost_matrike): # column
x = zacetni_x + j * 2 * v
y = i * 1.5 * a + 2
id = self.narisi_sestkotnik(x, y)
self.id_koord[id] = (i, j)
self.koord_id[(i,j)] = id
            else: # even rows
                zacetni_x = v + 2
                for j in range(velikost_matrike): # column
x = zacetni_x + j * 2 * v
y = i * 1.5 * a + 2
id = self.narisi_sestkotnik(x, y)
self.id_koord[id] = (i, j)
self.koord_id[(i, j)] = id
        # colour the first field
self.pobarvaj_prvo_polje()
def pobarvaj_prvo_polje(self):
        '''colours the first field with player 1's colour and records the change in the game logic'''
i = logika_igre.velikost_matrike // 2
j = i
barva = logika_igre.prvi
sredina = self.koord_id[(i,j)]
self.plosca.itemconfig(sredina, fill=barva)
self.igra.zabelezi_spremembo_barve(i, j, barva)
self.igra.zadnja_poteza = (i, j)
self.igra.stevilo_pobarvanih_polj += 1
def izpis_igralca(self, igralec):
        '''correctly inflects the player's name, for display to the user'''
if igralec == 'red':
return 'rdeči'
elif igralec == 'blue':
return 'modri'
elif igralec == 'green':
return 'zeleni'
def konec_igre(self, zmagovalec, zmagovalna_polja):
        '''applies the appropriate changes in the window'''
        # the game is over, we have a winner
if zmagovalec in [logika_igre.prvi, logika_igre.drugi]:
self.napis.set('Zmagal je {0}.'.format(self.izpis_igralca(zmagovalec)))
for (i, j) in zmagovalna_polja:
                # make the winning fields bold
id = self.koord_id[(i, j)]
self.plosca.itemconfig(id, width=3)
        # the game is over, the result is a draw
else:
self.napis.set('Igra je neodločena.')
def zapri_okno(self, master):
        '''This method is called when the user closes the application.'''
self.prekini_igralce()
        # Actually close the window.
master.destroy()
###########################################
    #            USER HELP           #
###########################################
def odpri_navodila(self):
        '''opens a window with the game instructions'''
pomoc_igra = tkinter.Toplevel()
pomoc_igra.title("Pravila in navodila")
pomoc_igra.resizable(width=False, height=False)
navodila1 = tkinter.Text(pomoc_igra, width=65, height=3)
navodila1.grid(row=0, column=0)
navodila1.insert(END, 'Pozdravljeni! \n \n')
navodila1.insert(END, 'V igri SIX morate za zmago tvoriti enega od naslednjih vzorcev:')
navodila1.config(state='disabled')
vzorci = tkinter.PhotoImage(file=os.path.join('navodila','vzorci.gif'))
slika1 = tkinter.Label(pomoc_igra, image = vzorci)
slika1.image = vzorci
slika1.grid(row=1, column=0)
navodila2 = tkinter.Text(pomoc_igra, width=65, height=6)
navodila2.grid(row=2, column=0)
navodila2.insert(END, '')
navodila2.insert(END, 'Polje, ki ga želite izbrati, mora imeti vsaj enega že pobarvanega'
'soseda, sicer poteza ni veljavna. \n')
navodila2.insert(END, 'Nad igralnim poljem se nahaja vrstica stanja, kjer vidite \n' 'trenutno stanje igre. \n')
navodila2.insert(END, 'V primeru zmage se v vrstici stanja izpiše zmagovalec, \n' 'zmagovalni vzorec pa se poudari.')
navodila2.config(state='disabled')
if __name__.endswith('__main__'):
root = tkinter.Tk()
root.title("SIX")
root.resizable(width=False, height=False)
#logging.basicConfig(level=logging.DEBUG)
aplikacija = Gui(root)
root.iconbitmap(os.path.join('ikona','matica.ico'))
root.mainloop()
| {
"content_hash": "fefceb4b254a1302a2a02e60648815f1",
"timestamp": "",
"source": "github",
"line_count": 350,
"max_line_length": 149,
"avg_line_length": 40.894285714285715,
"alnum_prop": 0.5639628309928038,
"repo_name": "PircK/six",
"id": "ad14d249e9a39f9995c800b6f357caf7b5cd4e52",
"size": "14357",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sliks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37324"
}
],
"symlink_target": ""
} |
from setuptools import setup
setup(
name = "bibulous",
py_modules = ['bibulous', 'bibulous_test', 'bibulous_authorextract'],
version = "1.3.2",
description = "BibTeX replacement and enhancement",
author = "Nathan Hagen",
author_email = "[email protected]",
url = "https://github.com/nzhagen/bibulous",
download_url = "https://github.com/nzhagen/bibulous/blob/master/bibulous.py",
license = "MIT",
keywords = ["bibtex", "bibliography", "parser", "tex", "latex"],
classifiers = [
"Programming Language :: Python :: 2.7",
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Text Processing :: Markup :: LaTeX",
"Topic :: Text Processing",
],
long_description = """\
A drop-in replacement for BibTeX based on string templates.
-----------------------------------------------------------
Bibulous provides a flexible way of accomplishing the same tasks as BibTeX, and going
beyond it in capability. Some of its advantages include:
- An integrated BibTeX database file (.bib file) parser.
- Fully internationalized: Bibulous can use bibliography databases and bibliography style
files written in any language.
- Simple and powerful customization: style templates are an ideal way of visualizing and
manipulating bibliography styles. There is no need to learn BibTeX's arcane stack-based
language in order to build or customize a bibliography style.
- Multilingual capability: templates are largely language agnostic, so that multilingual
bibliographies can be achieved almost effortlessly.
- Users can build glossaries, lists of symbols, and lists of acronyms using the same
infrastructure as used for bibliographies.
- Sorting of citations is fully localized and has no difficulty in dealing with strings
that contain Unicode, LaTeX-markup characters for non-English languages, or even
mathematics markup.
This version requires Python 2.7 or later.
"""
)
| {
"content_hash": "63f7fcd07f29bfd31144739e49482e08",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 91,
"avg_line_length": 46.355555555555554,
"alnum_prop": 0.6922339405560882,
"repo_name": "ghareth/bibulous",
"id": "f5984ef2d132faa20bb6f6e9b5d0541cef6c10ca",
"size": "2141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "257058"
},
{
"name": "TeX",
"bytes": "8034667"
}
],
"symlink_target": ""
} |
import pytest
from bayes_opt.bayesian_optimization import Queue
def test_add():
queue = Queue()
assert len(queue) == 0
assert queue.empty
queue.add(1)
assert len(queue) == 1
queue.add(1)
assert len(queue) == 2
queue.add(2)
assert len(queue) == 3
def test_queue():
queue = Queue()
with pytest.raises(StopIteration):
next(queue)
queue.add(1)
queue.add(2)
queue.add(3)
assert len(queue) == 3
assert not queue.empty
assert next(queue) == 1
assert len(queue) == 2
assert next(queue) == 2
assert next(queue) == 3
assert len(queue) == 0
if __name__ == '__main__':
r"""
CommandLine:
        python tests/test_queue.py
"""
pytest.main([__file__])
| {
"content_hash": "750c3eb65c259be25a15a1fcfd98e50f",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 49,
"avg_line_length": 15.63265306122449,
"alnum_prop": 0.5718015665796344,
"repo_name": "fmfn/BayesianOptimization",
"id": "0aba7b747bb261b395869a6b5e92640d28739f29",
"size": "766",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_queue.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "84494"
}
],
"symlink_target": ""
} |
from django import forms
from models import *
class ChoiceField(forms.ChoiceField):
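    """ChoiceField variant that can prepend an empty choice labelled empty_label to its choices."""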
def __init__(self, choices=(), empty_label=None, required=True, widget=None, label=None,
initial=None, help_text=None, *args, **kwargs):
        # prepend an empty choice if an empty_label was given (typically only for non-required fields)
if empty_label is not None:
choices = tuple([(u'', empty_label)] + list(choices))
super(ChoiceField, self).__init__(choices=choices, required=required, widget=widget, label=label,
initial=initial, help_text=help_text, *args, **kwargs)
class GraphSafesForm(forms.Form):
manufacturers = ChoiceField( choices=((opt['name'], opt['name']) for opt in
Manufacturer.objects.filter(safeprofile__isnull=False, name__isnull=False).values('name').distinct()), required=True, empty_label="Select a manufacturer" )
stores = ChoiceField( choices=((opt['name'], opt['name']) for opt in
Store.objects.filter(safeprofile__isnull=False, name__isnull=False).values('name').distinct()), required=True, empty_label="Select a store" )
categories = ChoiceField( choices=((opt['name'], opt['name']) for opt in
SafeCategory.objects.filter(name__isnull=False).values('name').distinct()), required=True, empty_label="Select a category" )
volumes = ChoiceField( choices=((opt['volume'], opt['volume']) for opt in
SafeProfile.objects.filter(volume__isnull=False).values('volume').distinct()), required=True, empty_label="Select a volume" )
weights = ChoiceField( choices=((opt['weight'], opt['weight']) for opt in
SafeProfile.objects.filter(weight__isnull=False).values('weight').distinct()), required=True, empty_label="Select a weight" )
lock_ratings = ChoiceField( choices=((opt['rating'], opt['rating']) for opt in
LockRating.objects.filter(rating__isnull=False).values('rating').distinct()), required=True, empty_label="Select a lock rating" )
bolt_diameters = ChoiceField( choices=((opt['bolt_diameter'], opt['bolt_diameter']) for opt in
SafeProfile.objects.filter(bolt_diameter__isnull=False).values('bolt_diameter').distinct()), required=True, empty_label="Select a bolt diameter" )
door_thicknesses = ChoiceField( choices=((opt['door_thickness'], opt['door_thickness']) for opt in
SafeProfile.objects.filter(door_thickness__isnull=False).values('door_thickness').distinct()), required=True, empty_label="Select a door thickness" )
safe_ratings = forms.MultipleChoiceField( choices=((opt['rating'], opt['rating']) for opt in
SafeRating.objects.filter(rating__isnull=False).filter(safeprofile__isnull=False).values('rating').distinct()))
features = forms.MultipleChoiceField( choices=((opt['name'], opt['name']) for opt in
SafeFeature.objects.filter(name__isnull=False).values('name').distinct()), required=True)
class GraphSafeComponentForm(forms.Form):
manufacturers = ChoiceField( choices=((opt['name'], opt['name']) for opt in
Manufacturer.objects.filter(safecomponentprofile__isnull=False, name__isnull=False).values('name').distinct()), required=True, empty_label="Select a manufacturer" )
stores = ChoiceField( choices=((opt['name'], opt['name']) for opt in
Store.objects.filter(safecomponentprofile__isnull=False, name__isnull=False).values('name').distinct()), required=True, empty_label="Select a store" )
categories = ChoiceField( choices=((opt['name'], opt['name']) for opt in
SafeCategory.objects.filter(name__isnull=False).values('name').distinct()), required=True, empty_label="Select a category" )
| {
"content_hash": "fbc6f6c5ecb6604595761dabe1259de3",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 184,
"avg_line_length": 58.676923076923075,
"alnum_prop": 0.6604614577871002,
"repo_name": "inkasjasonk/rs",
"id": "7f461c15163dea98a7794afc7eccd0e0d32236b0",
"size": "3814",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "research/base/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "190435"
},
{
"name": "Python",
"bytes": "43798"
},
{
"name": "Ruby",
"bytes": "1462"
},
{
"name": "Shell",
"bytes": "2438"
}
],
"symlink_target": ""
} |
import numpy as np
import scipy.stats
from scipy.interpolate import interp1d, UnivariateSpline
NFILES = 5
FILENAME = "omw_" # prefix for files followed by filenumber
class Prior_class(object):
'''Prior class'''
def __init__(self,priorname,hyperparams):
'''Input:
        priorname - prior keyword, one of ["uniform", "normal", "gamma", "nonstandard"]
        hyperparams - hyperparameters of the prior, e.g. [min, max] for "uniform",
        [mu, sigma] for "normal", [shape, rate] for "gamma", [column number] for "nonstandard"
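        Example (illustrative):
            >>> p = Prior_class("uniform", [0.0, 1.0])
            >>> sample = p.prior()                   # draw a sample from U(0, 1)
            >>> density = p.return_priorprob(sample) # evaluate the prior pdf at the sample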
'''
self.priorname=priorname
self.hyperparams = hyperparams
if self.priorname == "nonstandard": #first hyperparam = column from file to be read, specify filename and number of files above
'''useful for sampling from non-standard discrete pdf e.g. Planck/WMAP chain'''
self.read_data(hyperparams[0])
self.inv_transform_spline()
self.pdf_spline()
def read_data(self,colnum):
'''Only for "nonstandard". Method to read discrete pdf for parameter from file
Input: colnum: column number to be read from file
'''
self.param=[]
for i in range(1,NFILES):
d = np.loadtxt(FILENAME+str(i)+".txt")
for j in range(len(d[:,colnum])):
self.param.append(d[:,colnum][j])
def inv_transform_spline(self):
'''Only for "nonstandard". Method to create inverse spline to discrete cumulative distribution function
to allow drawing random variables.
Warning: user should check that spline faithfully matches actual cdf.
'''
srt_param=np.sort(self.param)
cdf = np.array(range(len(self.param)))/float(len(self.param))
#create a spline
self.spline2_cdf = UnivariateSpline(cdf,srt_param,k=5)
def pdf_spline(self):
'''Only for "nonstandard". Method creates a spline to the normalised PDF for discrete parameter values.
Warning: user should check that spline faithfully matches actual pdf.
'''
hist,nbins = np.histogram(self.param,normed=True,bins=200)
self.spline2_pdf = interp1d(nbins[1:],hist)
def return_priorprob(self,value):
'''Input:
value - random variable
Returns:
probability of rv given the prior dist
'''
if self.priorname =="gamma":
x = 1./self.hyperparams[1]
return scipy.stats.gamma.pdf(value, self.hyperparams[0],scale=x)
elif self.priorname =="normal":
return scipy.stats.norm.pdf(value, loc = self.hyperparams[0],scale=self.hyperparams[1])
elif self.priorname =="uniform":
width = self.hyperparams[1] - self.hyperparams[0]
return scipy.stats.uniform.pdf(value, loc = self.hyperparams[0],scale=width)
elif self.priorname == "nonstandard":
return self.spline2_pdf(value)
def prior(self):
'''
Returns a random variable from the prior distribution
'''
np.random.seed()
if self.priorname =="gamma":
k=self.hyperparams[0]
scale = 1./self.hyperparams[1]
return float(np.random.gamma(k,scale))
elif self.priorname =="normal":
return float(np.random.normal(self.hyperparams[0],self.hyperparams[1],size=1))
elif self.priorname =="uniform":
return float(np.random.uniform(low=self.hyperparams[0],high=self.hyperparams[1],size=1))
elif self.priorname == "nonstandard":
uni_rvs = np.random.uniform()
return float(self.spline2_cdf(uni_rvs))
| {
"content_hash": "fb1c88f52c9cafb31f30e623baf57ef2",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 143,
"avg_line_length": 49.44047619047619,
"alnum_prop": 0.5294967493378281,
"repo_name": "EliseJ/astroABC",
"id": "a0cdc22d63c8168222c07305e7842bca240718f5",
"size": "4153",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "astroabc/priors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "66042"
}
],
"symlink_target": ""
} |
from rest_framework import serializers
from permabots.models import TelegramUser, TelegramChat, TelegramMessage, TelegramUpdate, TelegramCallbackQuery
from datetime import datetime
import time
class UserSerializer(serializers.HyperlinkedModelSerializer):
id = serializers.IntegerField()
class Meta:
model = TelegramUser
fields = ('id', 'first_name', 'last_name', 'username')
class ChatSerializer(serializers.HyperlinkedModelSerializer):
id = serializers.IntegerField()
class Meta:
model = TelegramChat
fields = ('id', 'type', 'title', 'username', 'first_name', 'last_name')
class TimestampField(serializers.Field):
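    """Serializes datetime values as Unix timestamps (seconds since the epoch)."""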
def to_internal_value(self, data):
return datetime.fromtimestamp(data)
def to_representation(self, value):
return int(time.mktime(value.timetuple()))
class MessageSerializer(serializers.HyperlinkedModelSerializer):
message_id = serializers.IntegerField()
# reserved word field 'from' changed dynamically
from_ = UserSerializer(many=False, source="from_user")
chat = ChatSerializer(many=False)
date = TimestampField()
def __init__(self, *args, **kwargs):
super(MessageSerializer, self).__init__(*args, **kwargs)
self.fields['from'] = self.fields['from_']
del self.fields['from_']
class Meta:
model = TelegramMessage
fields = ('message_id', 'from_', 'date', 'chat', 'text')
validators = []
class CallbackQuerySerializer(serializers.HyperlinkedModelSerializer):
from_ = UserSerializer(many=False, source="from_user")
message = MessageSerializer(many=False, required=False)
id = serializers.CharField(source="callback_id")
def __init__(self, *args, **kwargs):
super(CallbackQuerySerializer, self).__init__(*args, **kwargs)
self.fields['from'] = self.fields['from_']
del self.fields['from_']
class Meta:
model = TelegramCallbackQuery
fields = ('id', 'message', 'from_', 'data')
validators = []
class UpdateSerializer(serializers.HyperlinkedModelSerializer):
update_id = serializers.IntegerField()
message = MessageSerializer(many=False, required=False)
callback_query = CallbackQuerySerializer(many=False, required=False)
class Meta:
model = TelegramUpdate
fields = ('update_id', 'message', 'callback_query')
validators = []
class UserAPISerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = TelegramUser
fields = ('first_name', 'last_name', 'username') | {
"content_hash": "b5d1b40f3ae71c0198c1eed9eda20574",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 111,
"avg_line_length": 34.8421052631579,
"alnum_prop": 0.6623867069486404,
"repo_name": "jlmadurga/permabots",
"id": "970a5a46fafcae5bbc6c304e8016cbed6548e44f",
"size": "2648",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "permabots/serializers/telegram_api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1236"
},
{
"name": "Python",
"bytes": "496583"
}
],
"symlink_target": ""
} |
import datetime as dt
from operator import attrgetter
from random import Random
from .base import TohuBaseGenerator, SeedGenerator
from .logging import logger
from .primitive_generators import as_tohu_generator, Constant, Date, Timestamp as TimestampPrimitive
from .spawn_mapping import SpawnMapping
from .utils import TohuDateError, TohuTimestampError, ensure_is_date_object, make_timestamp_formatter
__all__ = ['Apply', 'Cumsum', 'GetAttribute', 'Integer', 'Lookup', 'MultiCumsum', 'SelectMultiple', 'SelectOne', 'Tee', 'Timestamp']
class DerivedGenerator(TohuBaseGenerator):
"""
Base class for all derived generators
"""
def reset_input_generators(self, seed):
"""
Helper method which explicitly resets all input generators
        of the derived generator. This should only ever be called
for testing or debugging.
"""
seed_generator = SeedGenerator().reset(seed=seed)
for gen in self.input_generators:
gen.reset(next(seed_generator))
try:
# In case `gen` is itself a derived generator,
# recursively reset its own input generators.
gen.reset_input_generators(next(seed_generator))
except AttributeError:
pass
class Apply(DerivedGenerator):
"""
    Generator which applies a callable to elements produced by a set of input generators.
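    Illustrative sketch (the generator names are made up for this example):
    Apply(lambda x, y: x + y, g1, g2) yields next(g1) + next(g2) each time
    the generator is advanced.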
"""
def __init__(self, callable, *arg_gens, max_value=None, **kwarg_gens):
super().__init__()
self.callable = callable
self.arg_gens_orig = arg_gens
self.kwarg_gens_orig = kwarg_gens
self.max_value = max_value
self.arg_gens = [g.clone() for g in self.arg_gens_orig]
self.kwarg_gens = {name: g.clone() for name, g in self.kwarg_gens_orig.items()}
self.input_generators = [g for g in self.arg_gens_orig] + [g for g in self.kwarg_gens_orig.values()]
self.constituent_generators = [g for g in self.arg_gens] + [g for g in self.kwarg_gens.values()]
for gen in self.constituent_generators:
gen.owner = self
def __next__(self):
args = [next(g) for g in self.arg_gens]
kwargs = {name: next(g) for name, g in self.kwarg_gens.items()}
return self.callable(*args, **kwargs)
def reset(self, seed):
super().reset(seed)
try:
self.callable.reset(next(self.seed_generator))
except AttributeError:
logger.debug(
f"Failed to reset callable in generator {self}. Assuming that "
"it does not contain any random generators that need resetting."
)
return self
def spawn(self, spawn_mapping=None):
spawn_mapping = spawn_mapping or SpawnMapping()
new_arg_gens_orig = [spawn_mapping[g] for g in self.arg_gens_orig]
        new_kwarg_gens_orig = {name: spawn_mapping[g] for name, g in self.kwarg_gens_orig.items()}
new_obj = Apply(self.callable, *new_arg_gens_orig, max_value=self.max_value, **new_kwarg_gens_orig)
new_obj._set_random_state_from(self)
return new_obj
def _set_random_state_from(self, other):
for g_self, g_other in zip(self.constituent_generators, other.constituent_generators):
g_self._set_random_state_from(g_other)
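# Illustrative sketch (added, not part of the original module): composing two
# Integer generators with Apply. The helper name and seed values are hypothetical,
# and the example assumes (as in tohu's clone design) that resetting the input
# generators also resets the clones held internally by `total`.
def _example_apply_usage():
    x = Integer(1, 6)
    y = Integer(1, 6)
    total = Apply(lambda a, b: a + b, x, y)
    total.reset(seed=99999)
    total.reset_input_generators(seed=99999)  # explicitly reset x and y for a reproducible draw
    return next(total)  # an int between 2 and 12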
class GetAttribute(Apply):
def __init__(self, g, name):
self.g = as_tohu_generator(g) # no need to clone here because this happens in the superclass
self.name = as_tohu_generator(name)
def func(value, name):
# TODO: this is not very efficient if `name` is a constant
# string because we're re-creating the attrgetter every time.
# However, since `name` can in principle be a generator itself
# we need to keep the generality.
f = attrgetter(name)
try:
return f(value)
except AttributeError:
try:
return [f(x) for x in value]
except TypeError:
raise AttributeError(f"Could not extract attribute '{name}' from {value}")
except AttributeError:
raise AttributeError(f"Could not extract attribute '{name}' from items in sequence: {value}")
super().__init__(func, self.g, self.name)
def spawn(self, spawn_mapping=None):
spawn_mapping = spawn_mapping or SpawnMapping()
new_obj = GetAttribute(spawn_mapping[self.g], spawn_mapping[self.name])
new_obj._set_random_state_from(self)
return new_obj
def reset(self, seed):
super().reset(seed)
self.g.reset(seed)
return self
class Lookup(Apply):
"""
Generator which performs a lookup of elements produced by another generator.
"""
def __init__(self, key, mapping):
self.key = as_tohu_generator(key)
self.mapping = as_tohu_generator(mapping)
def f_lookup(key, mapping):
return mapping[key]
super().__init__(f_lookup, self.key, self.mapping)
def spawn(self, spawn_mapping=None):
spawn_mapping = spawn_mapping or SpawnMapping()
new_obj = Lookup(spawn_mapping[self.key], spawn_mapping[self.mapping])
new_obj._set_random_state_from(self)
return new_obj
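# Illustrative sketch (added): Lookup translates keys drawn from one generator
# via a constant mapping; the names and seed below are hypothetical examples.
def _example_lookup_usage():
    keys = SelectOne(['a', 'b', 'c'])
    looked_up = Lookup(keys, {'a': 1, 'b': 2, 'c': 3})
    keys.reset(seed=11111)  # resetting the original propagates to the clone inside `looked_up`
    return next(looked_up)  # 1, 2 or 3, depending on the key drawn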
class Integer(Apply):
def __init__(self, low, high):
self.low_gen = as_tohu_generator(low)
self.high_gen = as_tohu_generator(high)
self.randgen = Random()
super().__init__(self.randgen.randint, self.low_gen, self.high_gen)
self.max_value = self.high_gen.max_value
def reset(self, seed):
super().reset(seed)
self.randgen.seed(next(self.seed_generator))
return self
def spawn(self, spawn_mapping=None):
spawn_mapping = spawn_mapping or SpawnMapping()
new_obj = Integer(spawn_mapping[self.low_gen], spawn_mapping[self.high_gen])
new_obj._set_random_state_from(self)
return new_obj
def _set_random_state_from(self, other):
super()._set_random_state_from(other)
self.randgen.setstate(other.randgen.getstate())
class SelectOne(Apply):
"""
Generator which selects a single element from each sequence produced by another generator.
"""
def __init__(self, values, p=None):
self.values_gen = as_tohu_generator(values)
self.p_gen = as_tohu_generator(p)
self.randgen = Random()
def func(values, p):
return self.randgen.choices(values, weights=p)[0]
super().__init__(func, self.values_gen, self.p_gen)
def reset(self, seed):
super().reset(seed)
self.randgen.seed(seed)
return self
def spawn(self, spawn_mapping=None):
spawn_mapping = spawn_mapping or SpawnMapping()
new_obj = SelectOne(spawn_mapping[self.values_gen], p=spawn_mapping[self.p_gen])
new_obj._set_random_state_from(self)
return new_obj
def _set_random_state_from(self, other):
super()._set_random_state_from(other)
self.randgen.setstate(other.randgen.getstate())
def _spot_check_that_elements_produced_by_this_generator_have_attribute(self, name):
"""
        Helper function to spot-check that the items produced by this generator have the attribute `name`.
"""
g_tmp = self.values_gen.spawn()
sample_element = next(g_tmp)[0]
try:
getattr(sample_element, name)
except AttributeError:
raise AttributeError(f"Items produced by {self} do not have the attribute '{name}'")
def __getattr__(self, name):
self._spot_check_that_elements_produced_by_this_generator_have_attribute(name)
return GetAttribute(self, name)
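# Illustrative sketch (added): SelectOne picks a single element per draw from a
# constant list of values; the variable name and seed are hypothetical.
def _example_select_one_usage():
    colour = SelectOne(['red', 'green', 'blue'])
    colour.reset(seed=12345)
    return [next(colour) for _ in range(3)]  # three (possibly repeated) colours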
class SelectMultiple(Apply):
"""
Generator which selects multiple elements (without replacement)
from each sequence produced by another generator.
"""
def __init__(self, values, num):
self.values_gen = as_tohu_generator(values)
self.num_gen = as_tohu_generator(num)
self.randgen = Random()
func = self.randgen.sample
super().__init__(func, self.values_gen, k=self.num_gen)
def reset(self, seed):
super().reset(seed)
self.randgen.seed(seed)
return self
def spawn(self, spawn_mapping=None):
spawn_mapping = spawn_mapping or SpawnMapping()
new_obj = SelectMultiple(spawn_mapping[self.values_gen], spawn_mapping[self.num_gen])
new_obj._set_random_state_from(self)
return new_obj
def _set_random_state_from(self, other):
super()._set_random_state_from(other)
self.randgen.setstate(other.randgen.getstate())
def size(self):
def get_size(x):
return len(x)
g = Apply(get_size, self, max_value=self.num_gen.max_value)
return g
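# Illustrative sketch (added): SelectMultiple draws `num` distinct elements per
# call (it delegates to random.sample); the names below are hypothetical.
def _example_select_multiple_usage():
    tags = SelectMultiple(['a', 'b', 'c', 'd'], num=2)
    tags.reset(seed=12345)
    return next(tags)  # a list of two distinct elements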
class Tee(Apply):
def __init__(self, g, num):
self.g_orig = g
self.num_gen = as_tohu_generator(num)
self.value_gens = [g.spawn() for _ in range(self.num_gen.max_value)]
if self.num_gen.max_value > 1000:
raise NotImplementedError(
"This Tee generator is intended to be used to produce small-ish output tuples. "
"The current implementation is not ideal for potentially large tuples, which"
"which is why we only allow sizes up to 1000 elements at the moment."
)
def make_tuple(num, *values):
return tuple(values[:num])
super().__init__(make_tuple, self.num_gen, *self.value_gens)
def reset(self, seed):
super().reset(seed)
# We need to explicitly reset the value generators because they
# are not technically input generators to this derived generator
# so they can't be externally reset.
for g in self.value_gens:
g.reset(next(self.seed_generator))
return self
def convert_to_date_object(date):
if isinstance(date, Constant):
return convert_to_date_object(date.value)
elif isinstance(date, Date) and date.start == date.end:
return date.start
else:
try:
return ensure_is_date_object(date)
except TohuDateError:
raise TohuTimestampError(f"Argument 'date' must represent some kind of constant date object. Got: {date}")
def get_start_generator(start, date):
if date is not None:
date = convert_to_date_object(date)
if start is None:
start_value = dt.datetime(date.year, date.month, date.day)
start_gen = Constant(start_value)
elif isinstance(start, str):
start_value = dt.datetime.strptime(start, "%Y-%m-%d %H:%M:%S")
start_gen = Constant(start_value)
elif isinstance(start, dt.datetime):
return Constant(start)
elif isinstance(start, Constant):
return get_start_generator(start.value, date)
elif isinstance(start, TimestampPrimitive):
# Create a new generator to strip any string formatting information in case it exists
start_without_formatting = TimestampPrimitive(start=start.start, end=start.end)
start.register_clone(start_without_formatting)
start_without_formatting.register_parent(start)
return start_without_formatting
else:
raise NotImplementedError()
return start_gen
def get_end_generator(end, date):
if date is not None:
date = convert_to_date_object(date)
if end is None:
end_value = dt.datetime(date.year, date.month, date.day, 23, 59, 59)
end_gen = Constant(end_value)
elif isinstance(end, str):
end_value = dt.datetime.strptime(end, "%Y-%m-%d %H:%M:%S")
end_gen = Constant(end_value)
elif isinstance(end, dt.datetime):
return Constant(end)
elif isinstance(end, Constant):
        return get_end_generator(end.value, date)
elif isinstance(end, TimestampPrimitive):
# Create a new generator to strip any string formatting information in case it exists
end_without_formatting = TimestampPrimitive(start=end.start, end=end.end)
end.register_clone(end_without_formatting)
end_without_formatting.register_parent(end)
return end_without_formatting
else:
raise NotImplementedError()
return end_gen
def get_start_end_end_generator(start, end, date):
start_gen = get_start_generator(start, date)
end_gen = get_end_generator(end, date)
return start_gen, end_gen
def check_valid_inputs(start_gen, end_gen, date):
if date is not None:
date = convert_to_date_object(date)
if date is not None:
if isinstance(start_gen, TimestampPrimitive):
if not (start_gen.start.date() == date and start_gen.end.date() == date):
raise TohuTimestampError(
"If the 'date' argument is given, all possible 'start' timestamp values must lie on that given date."
)
if isinstance(end_gen, TimestampPrimitive):
if not (end_gen.start.date() == date and end_gen.end.date() == date):
raise TohuTimestampError(
"If the 'date' argument is given, all possible 'end' timestamp values must lie on that given date."
)
start_end_error_msg = (
"Latest possible value of 'start' generator must not be after "
"earliest possible value of 'end' generator."
)
if isinstance(start_gen, TimestampPrimitive) and isinstance(end_gen, TimestampPrimitive):
if start_gen.end > end_gen.start:
raise TohuTimestampError(start_end_error_msg)
elif isinstance(start_gen, TimestampPrimitive) and isinstance(end_gen, Constant):
if start_gen.end > end_gen.value:
raise TohuTimestampError(start_end_error_msg)
elif isinstance(start_gen, Constant) and isinstance(end_gen, TimestampPrimitive):
if start_gen.value > end_gen.start:
raise TohuTimestampError(start_end_error_msg)
elif isinstance(start_gen, Constant) and isinstance(end_gen, Constant):
        if start_gen.value > end_gen.value:
            raise TohuTimestampError(f"Start value must be before end value. Got: start={start_gen.value}, end={end_gen.value}")
class Timestamp(Apply):
def __init__(self, *, start=None, end=None, date=None, fmt=None, uppercase=None):
if start is None and end is None and date is None:
raise TohuTimestampError("Not all input arguments can be None.")
if start is not None and end is not None and date is not None:
raise TohuTimestampError("Arguments 'start', 'end', 'date' cannot all be provided.")
self.start_gen, self.end_gen = get_start_end_end_generator(start, end, date)
check_valid_inputs(self.start_gen, self.end_gen, date)
self.offset_randgen = Random()
def func(start, end):
interval = (end - start).total_seconds()
try:
offset = self.offset_randgen.randint(0, interval)
except ValueError:
raise TohuTimestampError(f"Start generator produced timestamp later than end generator: start={start}, end={end}")
ts = (start + dt.timedelta(seconds=offset))
return ts
super().__init__(func, self.start_gen, self.end_gen)
self.max_value = self.end_gen.max_value
self.fmt = fmt
self.uppercase = uppercase
self._maybe_format_timestamp = make_timestamp_formatter(self.fmt, self.uppercase)
def __next__(self):
ts = super().__next__()
return self._maybe_format_timestamp(ts)
def reset(self, seed):
super().reset(seed)
self.offset_randgen.seed(next(self.seed_generator))
return self
def spawn(self, spawn_mapping=None):
spawn_mapping = spawn_mapping or SpawnMapping()
new_obj = Timestamp(start=spawn_mapping[self.start_gen], end=spawn_mapping[self.end_gen], fmt=self.fmt, uppercase=self.uppercase)
new_obj._set_random_state_from(self)
return new_obj
def _set_random_state_from(self, other):
super()._set_random_state_from(other)
self.offset_randgen.setstate(other.offset_randgen.getstate())
def strftime(self, fmt='%Y-%m-%d %H:%M:%S', uppercase=False):
g = Timestamp(start=self.start_gen, end=self.end_gen, fmt=fmt, uppercase=uppercase)
self.register_clone(g)
g.register_parent(self)
return g
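# Illustrative sketch (added): a Timestamp constrained to a single day. This
# assumes ensure_is_date_object() accepts a 'YYYY-MM-DD' string, which is an
# assumption about that helper rather than something shown above.
def _example_timestamp_usage():
    ts = Timestamp(date='2018-03-01')
    ts.reset(seed=12345)
    return next(ts)  # a datetime somewhere on 2018-03-01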
class Cumsum(DerivedGenerator):
def __init__(self, g, *, start_with_zero=False):
super().__init__()
self.g_orig = g
self.g_internal = g.clone()
self.g_internal.owner = self
self.start_with_zero = start_with_zero
self.input_generators = [self.g_orig]
self.constituent_generators = [self.g_internal]
self.reset()
def __next__(self):
retval = self.value
self.value += next(self.g_internal)
return retval
def reset(self, seed=None):
super().reset(seed)
if self.start_with_zero:
self.value = 0
else:
# Note: for this to work correctly the input generator `g`
# needs to be reset _before_ this one.
self.value = next(self.g_internal)
return self
def spawn(self, spawn_mapping=None):
spawn_mapping = spawn_mapping or SpawnMapping()
new_obj = Cumsum(spawn_mapping[self.g_orig], start_with_zero=self.start_with_zero)
new_obj._set_random_state_from(self)
return new_obj
def _set_random_state_from(self, other):
super()._set_random_state_from(other)
self.value = other.value
self.g_internal._set_random_state_from(other.g_internal)
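# Illustrative sketch (added): Cumsum keeps a running total of the values drawn
# from its input generator; the reset order follows the comment in reset() above,
# and the names and seed are hypothetical.
def _example_cumsum_usage():
    steps = Integer(1, 3)
    running_total = Cumsum(steps, start_with_zero=True)
    steps.reset(seed=12345)          # reset the input generator first ...
    running_total.reset(seed=12345)  # ... then the derived generator
    return [next(running_total) for _ in range(4)]  # non-decreasing values starting at 0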
class MultiCumsum(DerivedGenerator):
"""
TODO: document me!
"""
def __init__(self, g, attr_name, g_amount):
"""
TODO: document me!
"""
super().__init__()
self.g_amount_orig = as_tohu_generator(g_amount)
        self.g_amount_internal = self.g_amount_orig.clone()
self.g_amount_internal.owner = self
self.g_orig = g
self.g_internal = g.clone()
self.g_internal.owner = self
self.attr_name = attr_name
self.input_generators = [self.g_amount_orig, self.g_orig]
self.constituent_generators = [self.g_amount_internal, self.g_internal]
self.cur_values = {}
def __next__(self):
# TODO: more meaningful variable names!
x = next(self.g_internal)
try:
cur_val = self.cur_values[x]
except KeyError:
cur_val = getattr(x, self.attr_name)
self.cur_values[x] = cur_val
self.cur_values[x] += next(self.g_amount_internal)
return cur_val
def reset(self, seed=None):
super().reset(seed)
self.cur_values = {}
return self
def spawn(self, spawn_mapping=None):
spawn_mapping = spawn_mapping or SpawnMapping()
new_obj = MultiCumsum(spawn_mapping[self.g_orig], self.attr_name, spawn_mapping[self.g_amount_orig])
new_obj._set_random_state_from(self)
return new_obj
def _set_random_state_from(self, other):
self.g_amount_internal._set_random_state_from(other.g_amount_internal)
self.g_internal._set_random_state_from(other.g_internal)
self.cur_values = other.cur_values.copy() ### XXX TODO: can we simply copy these over in all cases?! | {
"content_hash": "73a438d5e1cb4e348329f45fe138d8f4",
"timestamp": "",
"source": "github",
"line_count": 539,
"max_line_length": 137,
"avg_line_length": 36.137291280148425,
"alnum_prop": 0.6247047951535065,
"repo_name": "maxalbert/tohu",
"id": "208c0d93d46c8b6596283d30cdd31a0fd80db364",
"size": "19479",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tohu/v6/derived_generators.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "244324"
},
{
"name": "Makefile",
"bytes": "461"
},
{
"name": "Python",
"bytes": "568361"
}
],
"symlink_target": ""
} |
import socket as socketlib
from _udt import *
import _udt
class socket(_udt.socket):
def connect(self, addr):
conn_addr = self._get_addr(addr)
return _udt.socket.connect(self, conn_addr)
def bind(self, addr):
bind_addr = self._get_addr(addr)
return _udt.socket.bind(self, bind_addr)
def _get_addr(self, (host, port)):
family, socktype, proto, name, addr = socketlib.getaddrinfo(
host,
port,
self.family,
0,
self.proto,
0
)[0]
return addr
class epoll(_udt.epoll):
def __init__(self):
_udt.epoll.__init__(self)
self._released = False
def release(self):
if not self._released:
_udt.epoll.release(self)
def add_usock(self, s, events):
# according to the docs, adding flags is not supported
rv = _udt.epoll.add_usock(self, s, events)
return rv
def add_ssock(self, s, events):
rv = _udt.epoll.add_ssock(self, s, events)
return rv
def remove_usock(self, s, events):
rv = _udt.epoll.remove_usock(self, s, events)
return rv
def remove_ssock(self, s, events):
rv = _udt.epoll.remove_ssock(self, s, events)
return rv
| {
"content_hash": "078832d59938330a352458bb3b0b0c7e",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 68,
"avg_line_length": 26.551020408163264,
"alnum_prop": 0.5541890853189854,
"repo_name": "beano/udt_py",
"id": "e6dff33b0395afeb4c167f25a3644b9a9fe270cc",
"size": "1357",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "udt.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "35679"
},
{
"name": "Python",
"bytes": "10536"
}
],
"symlink_target": ""
} |
import sys
import logging
import argparse
import getpass
from drf_client.connection import Api as RestApi, DEFAULT_HEADERS
from drf_client.exceptions import HttpClientError
LOG = logging.getLogger(__name__)
class BaseMain(object):
parser = None
args = None
api = None
options = {
'DOMAIN': None,
'API_PREFIX': 'api/v1',
'TOKEN_TYPE': 'jwt',
'TOKEN_FORMAT': 'JWT {token}',
'USERNAME_KEY': 'username',
'LOGIN': 'auth/login/',
'LOGOUT': 'auth/logout/',
'USE_DASHES': False,
}
logging_level = logging.INFO
def __init__(self):
"""
Initialize Logging configuration
Initialize argument parsing
Process any extra arguments
        Hard codes two required arguments: --user and --server
Additional arguments can be configured by overwriting the add_extra_args() method
Logging configuration can be changed by overwritting the config_logging() method
"""
self.parser = argparse.ArgumentParser(description=__doc__)
self.parser.add_argument(
'-u', '--user', dest='username', type=str, required=True,
help='Username used for login'
)
self.parser.add_argument(
'--server', dest='server', type=str, required=True,
help='Server Domain Name to use'
)
self.add_extra_args()
self.args = self.parser.parse_args()
self.config_logging()
def _critical_exit(self, msg):
LOG.error(msg)
sys.exit(1)
def main(self):
"""
Main function to call to initiate execution.
1. Get domain name and use to instantiate Api object
2. Call before_login to allow for work before logging in
        3. Log into the server
        4. Call after_login to do actual work with server data
"""
self.domain = self.get_domain()
self.api = RestApi(self.get_options())
self.before_login()
ok = self.login()
if ok:
self.after_login()
# Following functions can be overwritten if needed
# ================================================
def get_options(self):
options = self.options
options['DOMAIN'] = self.domain
return options
def config_logging(self):
"""
Overwrite to change the way the logging package is configured
:return: Nothing
"""
logging.basicConfig(level=self.logging_level,
format='[%(asctime)-15s] %(levelname)-6s %(message)s',
datefmt='%d/%b/%Y %H:%M:%S')
def add_extra_args(self):
"""
Overwrite to change the way extra arguments are added to the args parser
:return: Nothing
"""
pass
def get_domain(self) -> str:
"""
        Figure out the server domain URL based on the --server arg
"""
if 'https://' not in self.args.server:
return f'https://{self.args.server}'
return self.args.server
def login(self) -> bool:
"""
Get password from user and login
"""
password = getpass.getpass()
ok = self.api.login(username=self.args.username, password=password)
if ok:
LOG.info('Welcome {0}'.format(self.args.username))
return ok
def before_login(self):
"""
Overwrite to do work after parsing, but before logging in to the server
This is a good place to do additional custom argument checks
:return: Nothing
"""
pass
def after_login(self):
"""
This function MUST be overwritten to do actual work after logging into the Server
:return: Nothing
"""
LOG.warning('No actual work done')
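# Hedged sketch (added): a minimal subclass wiring up the extension points
# described above. The class name and the extra '--limit' argument are
# hypothetical; real API calls would go in after_login().
class ExampleScript(BaseMain):
    def add_extra_args(self):
        self.parser.add_argument('--limit', dest='limit', type=int, default=10,
                                 help='Hypothetical extra argument')
    def after_login(self):
        LOG.info('Logged in to %s, limit=%s', self.domain, self.args.limit)
        # ... use self.api here to fetch or post data ...
if __name__ == '__main__':
    ExampleScript().main()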
| {
"content_hash": "5bb94fb2300afdb8e69b7cbf9de69579",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 89,
"avg_line_length": 30.38095238095238,
"alnum_prop": 0.5715778474399164,
"repo_name": "dkarchmer/django-rest-framework-client",
"id": "bc9af33e00b604d54a13cad007e7788d2e2e6a12",
"size": "3828",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "drf_client/helpers/base_main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26778"
}
],
"symlink_target": ""
} |
from rip.schema.base_field import FieldTypes
from rip.schema.integer_field import IntegerField
class IdField(IntegerField):
def __init__(self, entity_attribute='id'):
super(IdField, self).__init__(field_type=FieldTypes.READONLY,
required=False,
nullable=False,
entity_attribute=entity_attribute
)
| {
"content_hash": "4478c92d7b0759aea3f4ca44d7a8a17e",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 71,
"avg_line_length": 42.09090909090909,
"alnum_prop": 0.509719222462203,
"repo_name": "Aplopio/rip",
"id": "0cebc72982f2adcd30d21c2d241e20a3d76e19fb",
"size": "463",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rip/schema/id_field.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1455"
},
{
"name": "Python",
"bytes": "409017"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class ExponentformatValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="exponentformat", parent_name="carpet.baxis", **kwargs
):
super(ExponentformatValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", ["none", "e", "E", "power", "SI", "B"]),
**kwargs
)
| {
"content_hash": "841514c363f45573b3f107841dd05076",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 80,
"avg_line_length": 37.8,
"alnum_prop": 0.5873015873015873,
"repo_name": "plotly/python-api",
"id": "48096e5970f9bb6efb36f727f9ca27822d939c7f",
"size": "567",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/carpet/baxis/_exponentformat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import base64
import codecs
import datetime
import distutils.util
from email import message_from_file
import hashlib
import imp
import json
import logging
import os
import posixpath
import re
import shutil
import sys
import tempfile
import zipfile
from . import __version__, DistlibException
from .compat import sysconfig, ZipFile, fsdecode, text_type, filter
from .database import InstalledDistribution
from .metadata import Metadata, METADATA_FILENAME
from .util import (FileOperator, convert_path, CSVReader, CSVWriter, Cache,
cached_property, get_cache_base, read_exports, tempdir)
from .version import NormalizedVersion, UnsupportedVersionError
logger = logging.getLogger(__name__)
cache = None # created when needed
if hasattr(sys, 'pypy_version_info'):
IMP_PREFIX = 'pp'
elif sys.platform.startswith('java'):
IMP_PREFIX = 'jy'
elif sys.platform == 'cli':
IMP_PREFIX = 'ip'
else:
IMP_PREFIX = 'cp'
VER_SUFFIX = sysconfig.get_config_var('py_version_nodot')
if not VER_SUFFIX: # pragma: no cover
VER_SUFFIX = '%s%s' % sys.version_info[:2]
PYVER = 'py' + VER_SUFFIX
IMPVER = IMP_PREFIX + VER_SUFFIX
ARCH = distutils.util.get_platform().replace('-', '_').replace('.', '_')
ABI = sysconfig.get_config_var('SOABI')
if ABI and ABI.startswith('cpython-'):
ABI = ABI.replace('cpython-', 'cp')
else:
def _derive_abi():
parts = ['cp', VER_SUFFIX]
if sysconfig.get_config_var('Py_DEBUG'):
parts.append('d')
if sysconfig.get_config_var('WITH_PYMALLOC'):
parts.append('m')
if sysconfig.get_config_var('Py_UNICODE_SIZE') == 4:
parts.append('u')
return ''.join(parts)
ABI = _derive_abi()
del _derive_abi
FILENAME_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?
-(?P<py>\w+\d+(\.\w+\d+)*)
-(?P<bi>\w+)
-(?P<ar>\w+)
\.whl$
''', re.IGNORECASE | re.VERBOSE)
NAME_VERSION_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?$
''', re.IGNORECASE | re.VERBOSE)
SHEBANG_RE = re.compile(br'\s*#![^\r\n]*')
if os.sep == '/':
to_posix = lambda o: o
else:
to_posix = lambda o: o.replace(os.sep, '/')
class Mounter(object):
def __init__(self):
self.impure_wheels = {}
self.libs = {}
def add(self, pathname, extensions):
self.impure_wheels[pathname] = extensions
self.libs.update(extensions)
def remove(self, pathname):
extensions = self.impure_wheels.pop(pathname)
for k, v in extensions:
if k in self.libs:
del self.libs[k]
def find_module(self, fullname, path=None):
if fullname in self.libs:
result = self
else:
result = None
return result
def load_module(self, fullname):
if fullname in sys.modules:
result = sys.modules[fullname]
else:
if fullname not in self.libs:
raise ImportError('unable to find extension for %s' % fullname)
result = imp.load_dynamic(fullname, self.libs[fullname])
result.__loader__ = self
parts = fullname.rsplit('.', 1)
if len(parts) > 1:
result.__package__ = parts[0]
return result
_hook = Mounter()
class Wheel(object):
"""
Class to build and install from Wheel files (PEP 427).
"""
wheel_version = (1, 1)
hash_kind = 'sha256'
def __init__(self, filename=None, sign=False, verify=False):
"""
Initialise an instance using a (valid) filename.
"""
self.sign = sign
self.should_verify = verify
self.buildver = ''
self.pyver = [PYVER]
self.abi = ['none']
self.arch = ['any']
self.dirname = os.getcwd()
if filename is None:
self.name = 'dummy'
self.version = '0.1'
self._filename = self.filename
else:
m = NAME_VERSION_RE.match(filename)
if m:
info = m.groupdict('')
self.name = info['nm']
# Reinstate the local version separator
self.version = info['vn'].replace('_', '-')
self.buildver = info['bn']
self._filename = self.filename
else:
dirname, filename = os.path.split(filename)
m = FILENAME_RE.match(filename)
if not m:
raise DistlibException('Invalid name or '
'filename: %r' % filename)
if dirname:
self.dirname = os.path.abspath(dirname)
self._filename = filename
info = m.groupdict('')
self.name = info['nm']
self.version = info['vn']
self.buildver = info['bn']
self.pyver = info['py'].split('.')
self.abi = info['bi'].split('.')
self.arch = info['ar'].split('.')
@property
def filename(self):
"""
Build and return a filename from the various components.
"""
if self.buildver:
buildver = '-' + self.buildver
else:
buildver = ''
pyver = '.'.join(self.pyver)
abi = '.'.join(self.abi)
arch = '.'.join(self.arch)
# replace - with _ as a local version separator
version = self.version.replace('-', '_')
return '%s-%s%s-%s-%s-%s.whl' % (self.name, version, buildver,
pyver, abi, arch)
@property
def exists(self):
path = os.path.join(self.dirname, self.filename)
return os.path.isfile(path)
@property
def tags(self):
for pyver in self.pyver:
for abi in self.abi:
for arch in self.arch:
yield pyver, abi, arch
@cached_property
def metadata(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
wrapper = codecs.getreader('utf-8')
with ZipFile(pathname, 'r') as zf:
wheel_metadata = self.get_wheel_metadata(zf)
wv = wheel_metadata['Wheel-Version'].split('.', 1)
file_version = tuple([int(i) for i in wv])
if file_version < (1, 1):
fn = 'METADATA'
else:
fn = METADATA_FILENAME
try:
metadata_filename = posixpath.join(info_dir, fn)
with zf.open(metadata_filename) as bf:
wf = wrapper(bf)
result = Metadata(fileobj=wf)
except KeyError:
raise ValueError('Invalid wheel, because %s is '
'missing' % fn)
return result
def get_wheel_metadata(self, zf):
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
metadata_filename = posixpath.join(info_dir, 'WHEEL')
with zf.open(metadata_filename) as bf:
wf = codecs.getreader('utf-8')(bf)
message = message_from_file(wf)
return dict(message)
@cached_property
def info(self):
pathname = os.path.join(self.dirname, self.filename)
with ZipFile(pathname, 'r') as zf:
result = self.get_wheel_metadata(zf)
return result
def process_shebang(self, data):
m = SHEBANG_RE.match(data)
if m:
data = b'#!python' + data[m.end():]
else:
cr = data.find(b'\r')
lf = data.find(b'\n')
if cr < 0 or cr > lf:
term = b'\n'
else:
if data[cr:cr + 2] == b'\r\n':
term = b'\r\n'
else:
term = b'\r'
data = b'#!python' + term + data
return data
def get_hash(self, data, hash_kind=None):
if hash_kind is None:
hash_kind = self.hash_kind
try:
hasher = getattr(hashlib, hash_kind)
except AttributeError:
raise DistlibException('Unsupported hash algorithm: %r' % hash_kind)
result = hasher(data).digest()
result = base64.urlsafe_b64encode(result).rstrip(b'=').decode('ascii')
return hash_kind, result
def write_record(self, records, record_path, base):
with CSVWriter(record_path) as writer:
for row in records:
writer.writerow(row)
p = to_posix(os.path.relpath(record_path, base))
writer.writerow((p, '', ''))
def write_records(self, info, libdir, archive_paths):
records = []
distinfo, info_dir = info
hasher = getattr(hashlib, self.hash_kind)
for ap, p in archive_paths:
with open(p, 'rb') as f:
data = f.read()
digest = '%s=%s' % self.get_hash(data)
size = os.path.getsize(p)
records.append((ap, digest, size))
p = os.path.join(distinfo, 'RECORD')
self.write_record(records, p, libdir)
ap = to_posix(os.path.join(info_dir, 'RECORD'))
archive_paths.append((ap, p))
def build_zip(self, pathname, archive_paths):
with ZipFile(pathname, 'w', zipfile.ZIP_DEFLATED) as zf:
for ap, p in archive_paths:
logger.debug('Wrote %s to %s in wheel', p, ap)
zf.write(p, ap)
def build(self, paths, tags=None, wheel_version=None):
"""
Build a wheel from files in specified paths, and use any specified tags
when determining the name of the wheel.
"""
if tags is None:
tags = {}
libkey = list(filter(lambda o: o in paths, ('purelib', 'platlib')))[0]
if libkey == 'platlib':
is_pure = 'false'
default_pyver = [IMPVER]
default_abi = [ABI]
default_arch = [ARCH]
else:
is_pure = 'true'
default_pyver = [PYVER]
default_abi = ['none']
default_arch = ['any']
self.pyver = tags.get('pyver', default_pyver)
self.abi = tags.get('abi', default_abi)
self.arch = tags.get('arch', default_arch)
libdir = paths[libkey]
name_ver = '%s-%s' % (self.name, self.version)
data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
archive_paths = []
# First, stuff which is not in site-packages
for key in ('data', 'headers', 'scripts'):
if key not in paths:
continue
path = paths[key]
if os.path.isdir(path):
for root, dirs, files in os.walk(path):
for fn in files:
p = fsdecode(os.path.join(root, fn))
rp = os.path.relpath(p, path)
ap = to_posix(os.path.join(data_dir, key, rp))
archive_paths.append((ap, p))
if key == 'scripts' and not p.endswith('.exe'):
with open(p, 'rb') as f:
data = f.read()
data = self.process_shebang(data)
with open(p, 'wb') as f:
f.write(data)
# Now, stuff which is in site-packages, other than the
# distinfo stuff.
path = libdir
distinfo = None
for root, dirs, files in os.walk(path):
if root == path:
# At the top level only, save distinfo for later
# and skip it for now
for i, dn in enumerate(dirs):
dn = fsdecode(dn)
if dn.endswith('.dist-info'):
distinfo = os.path.join(root, dn)
del dirs[i]
break
assert distinfo, '.dist-info directory expected, not found'
for fn in files:
# comment out next suite to leave .pyc files in
if fsdecode(fn).endswith(('.pyc', '.pyo')):
continue
p = os.path.join(root, fn)
rp = to_posix(os.path.relpath(p, path))
archive_paths.append((rp, p))
# Now distinfo. Assumed to be flat, i.e. os.listdir is enough.
files = os.listdir(distinfo)
for fn in files:
if fn not in ('RECORD', 'INSTALLER', 'SHARED', 'WHEEL'):
p = fsdecode(os.path.join(distinfo, fn))
ap = to_posix(os.path.join(info_dir, fn))
archive_paths.append((ap, p))
wheel_metadata = [
'Wheel-Version: %d.%d' % (wheel_version or self.wheel_version),
'Generator: distlib %s' % __version__,
'Root-Is-Purelib: %s' % is_pure,
]
for pyver, abi, arch in self.tags:
wheel_metadata.append('Tag: %s-%s-%s' % (pyver, abi, arch))
p = os.path.join(distinfo, 'WHEEL')
with open(p, 'w') as f:
f.write('\n'.join(wheel_metadata))
ap = to_posix(os.path.join(info_dir, 'WHEEL'))
archive_paths.append((ap, p))
# Now, at last, RECORD.
# Paths in here are archive paths - nothing else makes sense.
self.write_records((distinfo, info_dir), libdir, archive_paths)
# Now, ready to build the zip file
pathname = os.path.join(self.dirname, self.filename)
self.build_zip(pathname, archive_paths)
return pathname
def install(self, paths, maker, **kwargs):
"""
Install a wheel to the specified paths. If kwarg ``warner`` is
specified, it should be a callable, which will be called with two
tuples indicating the wheel version of this software and the wheel
version in the file, if there is a discrepancy in the versions.
        This can be used to issue any warnings or raise any exceptions.
If kwarg ``lib_only`` is True, only the purelib/platlib files are
installed, and the headers, scripts, data and dist-info metadata are
not written.
The return value is a :class:`InstalledDistribution` instance unless
``options.lib_only`` is True, in which case the return value is ``None``.
"""
dry_run = maker.dry_run
warner = kwargs.get('warner')
lib_only = kwargs.get('lib_only', False)
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
record_name = posixpath.join(info_dir, 'RECORD')
wrapper = codecs.getreader('utf-8')
with ZipFile(pathname, 'r') as zf:
with zf.open(wheel_metadata_name) as bwf:
wf = wrapper(bwf)
message = message_from_file(wf)
wv = message['Wheel-Version'].split('.', 1)
file_version = tuple([int(i) for i in wv])
if (file_version != self.wheel_version) and warner:
warner(self.wheel_version, file_version)
if message['Root-Is-Purelib'] == 'true':
libdir = paths['purelib']
else:
libdir = paths['platlib']
records = {}
with zf.open(record_name) as bf:
with CSVReader(stream=bf) as reader:
for row in reader:
p = row[0]
records[p] = row
data_pfx = posixpath.join(data_dir, '')
info_pfx = posixpath.join(info_dir, '')
script_pfx = posixpath.join(data_dir, 'scripts', '')
# make a new instance rather than a copy of maker's,
# as we mutate it
fileop = FileOperator(dry_run=dry_run)
fileop.record = True # so we can rollback if needed
bc = not sys.dont_write_bytecode # Double negatives. Lovely!
outfiles = [] # for RECORD writing
# for script copying/shebang processing
workdir = tempfile.mkdtemp()
# set target dir later
# we default add_launchers to False, as the
# Python Launcher should be used instead
maker.source_dir = workdir
maker.target_dir = None
try:
for zinfo in zf.infolist():
arcname = zinfo.filename
if isinstance(arcname, text_type):
u_arcname = arcname
else:
u_arcname = arcname.decode('utf-8')
# The signature file won't be in RECORD,
                # and we don't currently do anything with it
if u_arcname.endswith('/RECORD.jws'):
continue
row = records[u_arcname]
if row[2] and str(zinfo.file_size) != row[2]:
raise DistlibException('size mismatch for '
'%s' % u_arcname)
if row[1]:
kind, value = row[1].split('=', 1)
with zf.open(arcname) as bf:
data = bf.read()
_, digest = self.get_hash(data, kind)
if digest != value:
raise DistlibException('digest mismatch for '
'%s' % arcname)
if lib_only and u_arcname.startswith((info_pfx, data_pfx)):
logger.debug('lib_only: skipping %s', u_arcname)
continue
is_script = (u_arcname.startswith(script_pfx)
and not u_arcname.endswith('.exe'))
if u_arcname.startswith(data_pfx):
_, where, rp = u_arcname.split('/', 2)
outfile = os.path.join(paths[where], convert_path(rp))
else:
# meant for site-packages.
if u_arcname in (wheel_metadata_name, record_name):
continue
outfile = os.path.join(libdir, convert_path(u_arcname))
if not is_script:
with zf.open(arcname) as bf:
fileop.copy_stream(bf, outfile)
outfiles.append(outfile)
# Double check the digest of the written file
if not dry_run and row[1]:
with open(outfile, 'rb') as bf:
data = bf.read()
_, newdigest = self.get_hash(data, kind)
if newdigest != digest:
raise DistlibException('digest mismatch '
'on write for '
'%s' % outfile)
if bc and outfile.endswith('.py'):
try:
pyc = fileop.byte_compile(outfile)
outfiles.append(pyc)
except Exception:
# Don't give up if byte-compilation fails,
# but log it and perhaps warn the user
logger.warning('Byte-compilation failed',
exc_info=True)
else:
fn = os.path.basename(convert_path(arcname))
workname = os.path.join(workdir, fn)
with zf.open(arcname) as bf:
fileop.copy_stream(bf, workname)
dn, fn = os.path.split(outfile)
maker.target_dir = dn
filenames = maker.make(fn)
fileop.set_executable_mode(filenames)
outfiles.extend(filenames)
if lib_only:
logger.debug('lib_only: returning None')
dist = None
else:
# Generate scripts
# Try to get pydist.json so we can see if there are
# any commands to generate. If this fails (e.g. because
# of a legacy wheel), log a warning but don't give up.
commands = None
file_version = self.info['Wheel-Version']
if file_version == '1.0':
# Use legacy info
ep = posixpath.join(info_dir, 'entry_points.txt')
try:
with zf.open(ep) as bwf:
epdata = read_exports(bwf)
commands = {}
for key in ('console', 'gui'):
k = '%s_scripts' % key
if k in epdata:
commands['wrap_%s' % key] = d = {}
for v in epdata[k].values():
s = '%s:%s' % (v.prefix, v.suffix)
if v.flags:
s += ' %s' % v.flags
d[v.name] = s
except Exception:
logger.warning('Unable to read legacy script '
'metadata, so cannot generate '
'scripts')
else:
try:
with zf.open(metadata_name) as bwf:
wf = wrapper(bwf)
commands = json.load(wf).get('commands')
except Exception:
logger.warning('Unable to read JSON metadata, so '
'cannot generate scripts')
if commands:
console_scripts = commands.get('wrap_console', {})
gui_scripts = commands.get('wrap_gui', {})
if console_scripts or gui_scripts:
script_dir = paths.get('scripts', '')
if not os.path.isdir(script_dir):
raise ValueError('Valid script path not '
'specified')
maker.target_dir = script_dir
for k, v in console_scripts.items():
script = '%s = %s' % (k, v)
filenames = maker.make(script)
fileop.set_executable_mode(filenames)
if gui_scripts:
options = {'gui': True }
for k, v in gui_scripts.items():
script = '%s = %s' % (k, v)
filenames = maker.make(script, options)
fileop.set_executable_mode(filenames)
p = os.path.join(libdir, info_dir)
dist = InstalledDistribution(p)
# Write SHARED
paths = dict(paths) # don't change passed in dict
del paths['purelib']
del paths['platlib']
paths['lib'] = libdir
p = dist.write_shared_locations(paths, dry_run)
if p:
outfiles.append(p)
# Write RECORD
dist.write_installed_files(outfiles, paths['prefix'],
dry_run)
return dist
except Exception: # pragma: no cover
logger.exception('installation failed.')
fileop.rollback()
raise
finally:
shutil.rmtree(workdir)
def _get_dylib_cache(self):
global cache
if cache is None:
# Use native string to avoid issues on 2.x: see Python #20140.
base = os.path.join(get_cache_base(), str('dylib-cache'),
sys.version[:3])
cache = Cache(base)
return cache
def _get_extensions(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
arcname = posixpath.join(info_dir, 'EXTENSIONS')
wrapper = codecs.getreader('utf-8')
result = []
with ZipFile(pathname, 'r') as zf:
try:
with zf.open(arcname) as bf:
wf = wrapper(bf)
extensions = json.load(wf)
cache = self._get_dylib_cache()
prefix = cache.prefix_to_dir(pathname)
cache_base = os.path.join(cache.base, prefix)
if not os.path.isdir(cache_base):
os.makedirs(cache_base)
for name, relpath in extensions.items():
dest = os.path.join(cache_base, convert_path(relpath))
if not os.path.exists(dest):
extract = True
else:
file_time = os.stat(dest).st_mtime
file_time = datetime.datetime.fromtimestamp(file_time)
info = zf.getinfo(relpath)
wheel_time = datetime.datetime(*info.date_time)
extract = wheel_time > file_time
if extract:
zf.extract(relpath, cache_base)
result.append((name, dest))
except KeyError:
pass
return result
def is_compatible(self):
"""
Determine if a wheel is compatible with the running system.
"""
return is_compatible(self)
def is_mountable(self):
"""
Determine if a wheel is asserted as mountable by its metadata.
"""
return True # for now - metadata details TBD
def mount(self, append=False):
pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
if not self.is_compatible():
msg = 'Wheel %s not compatible with this Python.' % pathname
raise DistlibException(msg)
if not self.is_mountable():
msg = 'Wheel %s is marked as not mountable.' % pathname
raise DistlibException(msg)
if pathname in sys.path:
logger.debug('%s already in path', pathname)
else:
if append:
sys.path.append(pathname)
else:
sys.path.insert(0, pathname)
extensions = self._get_extensions()
if extensions:
if _hook not in sys.meta_path:
sys.meta_path.append(_hook)
_hook.add(pathname, extensions)
def unmount(self):
pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
if pathname not in sys.path:
logger.debug('%s not in path', pathname)
else:
sys.path.remove(pathname)
if pathname in _hook.impure_wheels:
_hook.remove(pathname)
if not _hook.impure_wheels:
if _hook in sys.meta_path:
sys.meta_path.remove(_hook)
def verify(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
record_name = posixpath.join(info_dir, 'RECORD')
wrapper = codecs.getreader('utf-8')
with ZipFile(pathname, 'r') as zf:
with zf.open(wheel_metadata_name) as bwf:
wf = wrapper(bwf)
message = message_from_file(wf)
wv = message['Wheel-Version'].split('.', 1)
file_version = tuple([int(i) for i in wv])
# TODO version verification
records = {}
with zf.open(record_name) as bf:
with CSVReader(stream=bf) as reader:
for row in reader:
p = row[0]
records[p] = row
for zinfo in zf.infolist():
arcname = zinfo.filename
if isinstance(arcname, text_type):
u_arcname = arcname
else:
u_arcname = arcname.decode('utf-8')
if '..' in u_arcname:
raise DistlibException('invalid entry in '
'wheel: %r' % u_arcname)
# The signature file won't be in RECORD,
                # and we don't currently do anything with it
if u_arcname.endswith('/RECORD.jws'):
continue
row = records[u_arcname]
if row[2] and str(zinfo.file_size) != row[2]:
raise DistlibException('size mismatch for '
'%s' % u_arcname)
if row[1]:
kind, value = row[1].split('=', 1)
with zf.open(arcname) as bf:
data = bf.read()
_, digest = self.get_hash(data, kind)
if digest != value:
raise DistlibException('digest mismatch for '
'%s' % arcname)
def update(self, modifier, dest_dir=None, **kwargs):
"""
Update the contents of a wheel in a generic way. The modifier should
be a callable which expects a dictionary argument: its keys are
archive-entry paths, and its values are absolute filesystem paths
        where the contents of the corresponding archive entries can be found. The
modifier is free to change the contents of the files pointed to, add
new entries and remove entries, before returning. This method will
extract the entire contents of the wheel to a temporary location, call
the modifier, and then use the passed (and possibly updated)
dictionary to write a new wheel. If ``dest_dir`` is specified, the new
wheel is written there -- otherwise, the original wheel is overwritten.
The modifier should return True if it updated the wheel, else False.
This method returns the same value the modifier returns.
"""
def get_version(path_map, info_dir):
version = path = None
key = '%s/%s' % (info_dir, METADATA_FILENAME)
if key not in path_map:
key = '%s/PKG-INFO' % info_dir
if key in path_map:
path = path_map[key]
version = Metadata(path=path).version
return version, path
def update_version(version, path):
updated = None
try:
v = NormalizedVersion(version)
i = version.find('-')
if i < 0:
updated = '%s-1' % version
else:
parts = [int(s) for s in version[i + 1:].split('.')]
parts[-1] += 1
updated = '%s-%s' % (version[:i],
'.'.join(str(i) for i in parts))
except UnsupportedVersionError:
logger.debug('Cannot update non-compliant (PEP-440) '
'version %r', version)
if updated:
md = Metadata(path=path)
md.version = updated
legacy = not path.endswith(METADATA_FILENAME)
md.write(path=path, legacy=legacy)
logger.debug('Version updated from %r to %r', version,
updated)
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
record_name = posixpath.join(info_dir, 'RECORD')
with tempdir() as workdir:
with ZipFile(pathname, 'r') as zf:
path_map = {}
for zinfo in zf.infolist():
arcname = zinfo.filename
if isinstance(arcname, text_type):
u_arcname = arcname
else:
u_arcname = arcname.decode('utf-8')
if u_arcname == record_name:
continue
if '..' in u_arcname:
raise DistlibException('invalid entry in '
'wheel: %r' % u_arcname)
zf.extract(zinfo, workdir)
path = os.path.join(workdir, convert_path(u_arcname))
path_map[u_arcname] = path
# Remember the version.
original_version, _ = get_version(path_map, info_dir)
# Files extracted. Call the modifier.
modified = modifier(path_map, **kwargs)
if modified:
# Something changed - need to build a new wheel.
current_version, path = get_version(path_map, info_dir)
if current_version and (current_version == original_version):
# Add or update local version to signify changes.
update_version(current_version, path)
# Decide where the new wheel goes.
if dest_dir is None:
fd, newpath = tempfile.mkstemp(suffix='.whl',
prefix='wheel-update-',
dir=workdir)
os.close(fd)
else:
if not os.path.isdir(dest_dir):
raise DistlibException('Not a directory: %r' % dest_dir)
newpath = os.path.join(dest_dir, self.filename)
archive_paths = list(path_map.items())
distinfo = os.path.join(workdir, info_dir)
info = distinfo, info_dir
self.write_records(info, workdir, archive_paths)
self.build_zip(newpath, archive_paths)
if dest_dir is None:
shutil.copyfile(newpath, pathname)
return modified
def compatible_tags():
"""
Return (pyver, abi, arch) tuples compatible with this Python.
"""
versions = [VER_SUFFIX]
major = VER_SUFFIX[0]
for minor in range(sys.version_info[1] - 1, - 1, -1):
versions.append(''.join([major, str(minor)]))
abis = []
for suffix, _, _ in imp.get_suffixes():
if suffix.startswith('.abi'):
abis.append(suffix.split('.', 2)[1])
abis.sort()
if ABI != 'none':
abis.insert(0, ABI)
abis.append('none')
result = []
arches = [ARCH]
if sys.platform == 'darwin':
        m = re.match(r'(\w+)_(\d+)_(\d+)_(\w+)$', ARCH)
if m:
name, major, minor, arch = m.groups()
minor = int(minor)
matches = [arch]
if arch in ('i386', 'ppc'):
matches.append('fat')
if arch in ('i386', 'ppc', 'x86_64'):
matches.append('fat3')
if arch in ('ppc64', 'x86_64'):
matches.append('fat64')
if arch in ('i386', 'x86_64'):
matches.append('intel')
if arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'):
matches.append('universal')
while minor >= 0:
for match in matches:
s = '%s_%s_%s_%s' % (name, major, minor, match)
if s != ARCH: # already there
arches.append(s)
minor -= 1
# Most specific - our Python version, ABI and arch
for abi in abis:
for arch in arches:
result.append((''.join((IMP_PREFIX, versions[0])), abi, arch))
# where no ABI / arch dependency, but IMP_PREFIX dependency
for i, version in enumerate(versions):
result.append((''.join((IMP_PREFIX, version)), 'none', 'any'))
if i == 0:
result.append((''.join((IMP_PREFIX, version[0])), 'none', 'any'))
# no IMP_PREFIX, ABI or arch dependency
for i, version in enumerate(versions):
result.append((''.join(('py', version)), 'none', 'any'))
if i == 0:
result.append((''.join(('py', version[0])), 'none', 'any'))
return set(result)
COMPATIBLE_TAGS = compatible_tags()
del compatible_tags
def is_compatible(wheel, tags=None):
if not isinstance(wheel, Wheel):
wheel = Wheel(wheel) # assume it's a filename
result = False
if tags is None:
tags = COMPATIBLE_TAGS
for ver, abi, arch in tags:
if ver in wheel.pyver and abi in wheel.abi and arch in wheel.arch:
result = True
break
return result
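# Hedged sketch (added): a small helper (hypothetical name) showing how the
# classes above are typically combined to inspect a wheel file.
def _example_wheel_inspection(path_to_whl):
    w = Wheel(path_to_whl)  # parse name/version/tags from the filename
    return {
        'name': w.name,
        'version': w.version,
        'exists': w.exists,
        'compatible': is_compatible(w),
    }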
| {
"content_hash": "5f4872ed7d001e87017142e29328cabb",
"timestamp": "",
"source": "github",
"line_count": 952,
"max_line_length": 82,
"avg_line_length": 40.00525210084034,
"alnum_prop": 0.4843376657476697,
"repo_name": "cjerdonek/pip",
"id": "5a161409c1e151827a97440de9d6fe12975be52d",
"size": "38268",
"binary": false,
"copies": "6",
"ref": "refs/heads/develop",
"path": "pip/_vendor/distlib/wheel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2081484"
},
{
"name": "Shell",
"bytes": "7929"
}
],
"symlink_target": ""
} |