repo_name (stringlengths 5-92) | path (stringlengths 4-232) | copies (stringclasses, 19 values) | size (stringlengths 4-7) | content (stringlengths 721-1.04M) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
DenXX/ufindit | querydifficulty/urls.py | 1 | 1740 | from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.conf import settings
from django.contrib import admin
from django.views.generic import TemplateView
from httpproxy.views import HttpProxy
from ufindit.views import RulesView
admin.autodiscover()
urlpatterns = patterns('',
# Query difficulty URL
url(r'(?P<task_id>[0-9]+)$',
'querydifficulty.views.submit_query_difficulty',
name='submit_query_difficulty'),
url(r'(?P<task_id>[0-9]+)/u$',
'querydifficulty.views.submit_url_problem', name='submit_url_problem'),
url(r'(?P<game_id>[0-9]+)/survey$', 'querydifficulty.views.submit_survey_view',
name='submit_survey_view'),
# TODO: Ugly, but works
url(r'^game/(?P<game_id>[0-9]+)/rules/1/$', RulesView.as_view(
template_name='rules1.html'), name='rules1'),
url(r'^game/(?P<game_id>[0-9]+)/rules/2/$', RulesView.as_view(
template_name='rules2.html'), name='rules2'),
url(r'^game/(?P<game_id>[0-9]+)/rules/3/$', RulesView.as_view(
template_name='rules3.html'), name='rules3'),
url(r'^game/(?P<game_id>[0-9]+)/rules/4/$', RulesView.as_view(
template_name='rules4.html'), name='rules4'),
url(r'^game/(?P<game_id>[0-9]+)/rules/5/$', RulesView.as_view(
template_name='rules5.html'), name='rules5'),
url(r'^game/(?P<game_id>[0-9]+)/rules/6/$', RulesView.as_view(
template_name='rules6.html'), name='rules6'),
# Analytics
url(r'^qud/$', 'querydifficulty.views.query_url_problems_view',
name='query_difficulty_admin'),
url(r'^qud/(?P<game_id>[0-9]+)/$', 'querydifficulty.views.query_url_problems_view',
name='query_difficulty_game_admin'),
)
| gpl-2.0 | 3,443,864,351,986,525,700 | 41.439024 | 87 | 0.641379 | false |
jbinfo/mocodo | main/entity-tests.py | 1 | 3340 | #!/usr/bin/env python
# encoding: utf-8
import unittest
from entity import *
class EntityTest(unittest.TestCase):
def testDefault(self):
entities = [
Entity("PARTICIPANT: numero, nom, adresse"),
Entity("PARTICIPANT:numero,nom,adresse"),
Entity(" PARTICIPANT: numero, nom, adresse "),
Entity("PARTICIPANT :numero ,nom ,adresse"),
Entity("PARTICIPANT: _numero, nom, adresse"),
]
for e in entities:
self.assertEqual(e.name,"PARTICIPANT")
self.assertEqual(e.cartouche,"PARTICIPANT")
self.assertEqual([a.label for a in e.attributes], ["numero","nom","adresse"])
self.assertEqual([a.getCategory() for a in e.attributes], ["strong","simple","simple"])
def testAttributeTypes(self):
e = Entity("PARTICIPANT: numero [type1], nom [type2] , adresse[type3]")
self.assertEqual([a.label for a in e.attributes], ["numero","nom","adresse"])
self.assertEqual([a.attributeType for a in e.attributes], ["type1","type2","type3"])
e = Entity("PARTICIPANT: numero [type a,b,c], nom [type2], adresse [type3]")
self.assertEqual([a.attributeType for a in e.attributes], ["type a,b,c","type2","type3"])
e = Entity("PARTICIPANT: numero [], nom, adresse [type3]")
self.assertEqual([a.attributeType for a in e.attributes], ["",None,"type3"])
e = Entity("PARTICIPANT: numero [, nom, adresse")
self.assertEqual([a.attributeType for a in e.attributes], [None,None,None])
def testNumberedEntity(self):
e = Entity("PARTICIPANT5: numero, nom, adresse")
self.assertEqual(e.name,"PARTICIPANT5")
self.assertEqual(e.cartouche,"PARTICIPANT")
def testBlank(self):
e = Entity("MOT-CLEF: mot-clef, ,")
self.assertEqual([a.label for a in e.attributes], ["mot-clef","",""])
self.assertEqual([a.getCategory() for a in e.attributes], ["strong","phantom","phantom"])
def testAllBlank(self):
e = Entity("BLANK: , ,")
self.assertEqual([a.label for a in e.attributes], ["","",""])
self.assertEqual([a.getCategory() for a in e.attributes], ["phantom","phantom","phantom"])
def testWeak(self):
e = Entity("LIVRE: -Num. exemplaire, État du livre, Date d'achat")
self.assertEqual([a.label for a in e.attributes], ["Num. exemplaire","État du livre", "Date d'achat"])
self.assertEqual([a.getCategory() for a in e.attributes], ["weak","simple","simple"])
def testWeakWithOtherMinus(self):
e = Entity("LIVRE: -Num.-exemplaire, État-du-livre, Date-d'achat")
self.assertEqual([a.label for a in e.attributes], ["Num.-exemplaire","État-du-livre", "Date-d'achat"])
self.assertEqual([a.getCategory() for a in e.attributes], ["weak","simple","simple"])
def testMultipleStrongIdentifier(self):
e = Entity("POSITION: _abscisse, _ordonnée")
self.assertEqual([a.label for a in e.attributes], ["abscisse","ordonnée"])
self.assertEqual([a.getCategory() for a in e.attributes], ["strong","strong"])
e = Entity("POSITION: abscisse, _ordonnée")
self.assertEqual([a.label for a in e.attributes], ["abscisse","ordonnée"])
self.assertEqual([a.getCategory() for a in e.attributes], ["strong","strong"])
def testMultipleWeakIdentifier(self):
e = Entity("POSITION: -abscisse, -ordonnée")
self.assertEqual([a.label for a in e.attributes], ["abscisse","ordonnée"])
self.assertEqual([a.getCategory() for a in e.attributes], ["weak","weak"])
if __name__ == '__main__':
unittest.main() | gpl-3.0 | 5,752,707,267,186,700,000 | 44.013514 | 104 | 0.685285 | false |
jldbc/pybaseball | pybaseball/datahelpers/postprocessing.py | 1 | 7149 | import re
from datetime import datetime
from typing import Any, List, Union, Optional
import attr
import numpy as np
import pandas as pd
null_regexes = [
re.compile(r'^\s*$'),
re.compile(r'^null$', re.RegexFlag.IGNORECASE)
]
date_formats = [
# Standard statcast format
(re.compile(r'^\d{4}-\d{1,2}-\d{1,2}$'), '%Y-%m-%d'),
# Just in case (https://github.com/jldbc/pybaseball/issues/104)
(re.compile(r'^\d{4}-\d{1,2}-\d{1,2}T\d{2}:\d{2}:\d{2}.\d{1,6}Z$'), '%Y-%m-%dT%H:%M:%S.%fZ'),
]
def try_parse_dataframe(
data: pd.DataFrame,
parse_numerics: bool = True,
null_replacement: Union[str, int, float, datetime] = np.nan,
known_percentages: Optional[List[str]] = None
) -> pd.DataFrame:
data_copy = data.copy()
if parse_numerics:
data_copy = coalesce_nulls(data_copy, null_replacement)
data_copy = data_copy.apply(
pd.to_numeric,
errors='ignore',
downcast='signed'
).convert_dtypes(convert_string=False)
string_columns = [
dtype_tuple[0] for dtype_tuple in data_copy.dtypes.items() if str(dtype_tuple[1]) in ["object", "string"]
]
for column in string_columns:
# Only check the first value of the column and test that;
# this is faster than blindly trying to convert entire columns
first_value_index = data_copy[column].first_valid_index()
if first_value_index is None:
# All nulls
continue
first_value = data_copy[column].loc[first_value_index]
if str(first_value).endswith('%') or column.endswith('%') or \
(known_percentages is not None and column in known_percentages):
data_copy[column] = data_copy[column].astype(str).str.replace("%", "").astype(float) / 100.0
else:
# Doing it this way because applying pd.to_datetime to the
# whole dataframe tries to gobble up ints/floats as timestamps
for date_regex, date_format in date_formats:
if isinstance(first_value, str) and date_regex.match(first_value):
data_copy[column] = data_copy[column].apply(pd.to_datetime, errors='ignore', format=date_format)
data_copy[column] = data_copy[column].convert_dtypes(convert_string=False)
break
return data_copy
# pylint: disable=too-many-return-statements
def try_parse(
value: Union[None, str, int, datetime, float],
column_name: str,
null_replacement: Union[str, int, float, datetime] = np.nan,
known_percentages: Optional[List[str]] = None
) -> Union[str, int, float, datetime]:
if value is None:
return null_replacement
if not isinstance(value, str):
return value
for regex in null_regexes:
if regex.match(value):
return null_replacement
# Is it a date?
for date_regex, date_format in date_formats:
if date_regex.match(value):
try:
return datetime.strptime(value, date_format)
except: # pylint: disable=bare-except
pass
# Is it a float or an int (including percentages)?
try:
percentage = (
value.endswith('%') or column_name.endswith('%') or \
(known_percentages is not None and column_name in known_percentages)
)
if percentage:
return try_parse_percentage(value)
if '.' in value:
return float(value)
return int(value)
except: # pylint: disable=bare-except
pass
return value
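# A hand-checked sketch of how try_parse behaves with its default arguments;
# the column names and values below are made up for illustration.
# >>> try_parse("null", "team")            # matches a null regex
# nan
# >>> try_parse("2019-05-01", "game_date")
# datetime.datetime(2019, 5, 1, 0, 0)
# >>> try_parse("12.5%", "BB%")            # trailing '%' is parsed as a fraction
# 0.125
# >>> try_parse("3", "HR")
# 3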
def try_parse_percentage(value: str) -> float:
return float(value.strip(' %')) / 100.0
def coalesce_nulls(data: pd.DataFrame, value: Union[str, int, float, datetime] = np.nan) -> pd.DataFrame:
# Fill missing values with NaN
for regex in null_regexes:
data.replace(regex.pattern, value, regex=True, inplace=True)
return data
def columns_except(data: pd.DataFrame, columns: List[str]) -> List[str]:
return list(np.setdiff1d(data.columns, columns))
def convert_numeric(data: pd.DataFrame, numeric_columns: List[str]) -> pd.DataFrame:
# data.loc[data[numeric_cols] == ''] = None
# data[numeric_cols] = data[numeric_cols].astype(float)
# Ideally we'd do it the pandas way ^, but it's barfing when some columns have no data
for col in numeric_columns:
data[col] = data[col].astype(float)
return data
def convert_percentages(data: pd.DataFrame, columns: List[str]) -> pd.DataFrame:
# convert percent strings to float values
for col in columns:
# Skip if column is all NA (happens for some of the more obscure stats + in older seasons)
if col in data.columns and data[col].count() > 0:
data[col] = data[col].str.strip(' %')
data[col] = data[col].astype(float) / 100.0
else:
# print(col)
pass
return data
def compute_pa(bat_df: pd.DataFrame) -> pd.Series:
"""
Computes PA, using AB, BB, HBP, SH, and SF. If any of those columns are null,
they're filled with 0.
:param bat_df:
:return:
"""
plate_appearances = bat_df.loc[:, "AB"].fillna(0)
for stat in ["BB", "HBP", "SH", "SF"]:
plate_appearances += bat_df.loc[:, stat].fillna(0)
return plate_appearances.astype(int)
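# A hand-checked sketch with made-up column values: a null SH is filled with 0,
# so PA = 4 + 1 + 0 + 0 + 1 = 6.
# >>> compute_pa(pd.DataFrame({"AB": [4], "BB": [1], "HBP": [0], "SH": [np.nan], "SF": [1]}))[0]
# 6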
def augment_lahman_batting(bat_df: pd.DataFrame) -> pd.DataFrame:
"""
augments the Lahman batting data frame, with PA, X1B (singles), and TB.
:param bat_df:
:return:
"""
plate_appearances = compute_pa(bat_df)
singles = (
bat_df.loc[:, "H"]
- bat_df.loc[:, "2B"]
- bat_df.loc[:, "3B"]
- bat_df.loc[:, "HR"]
)
total_bases = (
bat_df.loc[:, "HR"] * 4
+ bat_df.loc[:, "3B"] * 3
+ bat_df.loc[:, "2B"] * 2
+ singles
)
return bat_df.assign(
PA=plate_appearances.astype(int),
X1B=singles.astype(int),
TB=total_bases.astype(int)
).rename({"X1B": "1B"}, axis=1)
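# Worked example with made-up numbers: for a row with H=10, 2B=2, 3B=1, HR=1
# the derived columns are 1B = 10 - 2 - 1 - 1 = 6 singles and
# TB = 4*1 + 3*1 + 2*2 + 6 = 17 total bases.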
def augment_lahman_pitching(stats_df: pd.DataFrame) -> pd.DataFrame:
"""
augments the Lahman pitching data frame. currently a noop.
:param stats_df:
:return:
"""
return stats_df
def aggregate_by_season(stats_df: pd.DataFrame) -> pd.DataFrame:
return stats_df.groupby(["playerID", "yearID"]).sum().reset_index()
# pylint: disable=unused-argument
def check_is_zero_one(instance: Any, attribute: attr.Attribute, value: Union[int, float]) -> None:
if value not in [0, 1]:
raise ValueError(f"{attribute} must be either 0 or 1, not {value}")
# pylint: disable=unused-argument
def check_greater_zero(instance: Any, attribute: attr.Attribute, value: Union[int, float]) -> None:
if value <= 0:
raise ValueError(
f"{attribute} must be greater than zero, not {value}"
)
# pylint: disable=unused-argument
def check_between_zero_one(instance: Any, attribute: attr.Attribute, value: Union[int, float]) -> None:
if not 0 <= value <= 1:
raise ValueError(
f"{attribute} must be between zero and one, not {value}"
)
| mit | 1,013,509,468,966,282,900 | 31.202703 | 116 | 0.608896 | false |
DavidLanderosAlcala/Native-Debugging-Tools | NativeDebuggingTools/desktop/simpleperf/galaxy_profiler.py | 1 | 8172 | #
# MIT License
#
# Copyright (c) 2017 David Landeros [[email protected]]
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import ConfigParser
import socket
import time
import subprocess
import os.path
import time
import sys
import os
from shutil import copyfile
#
# do not touch these variables
#
ndt_path = ""
#
# utility functions
#
def enableProfiling():
ensureAdbIsReady()
command = "adb shell setprop security.perf_harden 0"
proc = subprocess.Popen(command, stdout=subprocess.PIPE)
proc.wait()
def ensureAdbIsReady():
command = "adb start-server"
subprocess.Popen(command, stdout=subprocess.PIPE).wait();
def adbPullFile(src, dst) :
ensureAdbIsReady()
command = "adb pull " + src + " " + dst
proc = subprocess.Popen(command)
proc.wait()
def adbPushFile(src, dst) :
ensureAdbIsReady()
command = "adb push " + src + " " + dst
proc = subprocess.Popen(command)
proc.wait()
def adbIsDeviceConnected() :
ensureAdbIsReady()
command = "adb devices"
proc = subprocess.Popen(command, stdout=subprocess.PIPE)
proc.wait()
str = proc.stdout.readline()
str = proc.stdout.readline()
if "device" in str:
return True
return False
def system(command):
proc = subprocess.Popen(command, stdout=subprocess.PIPE)
proc.wait()
def adbPidOf(packagename):
command = "adb shell ps | grep " + packagename
proc = subprocess.Popen(command, stdout=subprocess.PIPE)
proc.wait()
str = proc.stdout.readline()
if str == "":
return None
return filter(None, str.split(" "))[1]
def adbForward(host_port, device_port):
ensureAdbIsReady()
command = "adb forward tcp:" + str(host_port) + " tcp:" + str(device_port)
proc = subprocess.Popen(command, stdout=subprocess.PIPE)
proc.wait()
def adbDelete(file):
ensureAdbIsReady()
command = "adb shell rm " + file
proc = subprocess.Popen(command, stdout=subprocess.PIPE)
proc.wait()
def adbFileExists(filename):
command = "adb shell ls " + filename
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
proc.wait()
str = proc.stdout.readline()
if "No such file" in str or len(str) == 0:
return False
return True
def findNdtPath():
ndt_path = "";
if adbFileExists("/data/data/com.android.ndt/lib/gdbserver.so"):
ndt_path = "/data/data/com.android.ndt/lib/"
elif adbFileExists("/data/app/com.android.ndt-1/lib/gdbserver.so"):
ndt_path = "/data/app/com.android.ndt-1/lib/"
elif adbFileExists("/data/app/com.android.ndt-2/lib/gdbserver.so"):
ndt_path = "/data/app/com.android.ndt-2/lib/"
elif adbFileExists("/data/app/com.android.ndt-3/lib/gdbserver.so"):
ndt_path = "/data/app/com.android.ndt-3/lib/"
else:
ndt_path = "";
return ndt_path;
#
# The program starts here
#
config = ConfigParser.RawConfigParser()
config.read('galaxy_profiler.config')
print "\r\n [+] Current configuration:"
print " Package Name : " + config.get("application","package_name")
print " Symbols File : " + os.path.expandvars(config.get("application","shared_lib_with_symbols")) + "\r\n"
print " [+] Checking whether device is connected "
if adbIsDeviceConnected() == False:
print " Please connect the device and run the program to be profiled"
exit(1)
print " [+] Checking whether application is running "
pid = adbPidOf(config.get("application","package_name"))
if pid == None:
print " Application is not running"
exit(1)
print " [+] Checking whether symbols file exists "
aux = os.path.expandvars(config.get("application","shared_lib_with_symbols")).split("/")
sharedobject = aux[len(aux)-1]
if not os.path.exists(os.path.expandvars(config.get("application","shared_lib_with_symbols"))):
print "\r\n [ERROR] No such file: " + os.path.expandvars(config.get("application","shared_lib_with_symbols"))
print "          Did you edit galaxy_profiler.config?"
exit(1)
adbForward(3435,3435)
enableProfiling();
if adbFileExists("/sdcard/perf.data"):
print " [+] Deleting existing record"
adbDelete("/sdcard/perf.data")
if adbFileExists("/sdcard/profiling_finished"):
adbDelete("/sdcard/profiling_finished")
print " [+] Checking whether native debugging tools are installed"
ndt_path = findNdtPath()
if len(ndt_path) == 0:
print " Installing Native Debugging tools..."
system("adb install -r -d ../../device/native-debugging-tools.apk")
ndt_path = findNdtPath()
if len(ndt_path) == 0:
print " Installation failed"
sys.exit(0);
print " Installation found : " + ndt_path
s = None
try :
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("127.0.0.1", 3435))
data = s.recv(1024)
if "in-app-remote-shell" not in data:
sys.exit(0)
s.send( ndt_path + "libSimpleperf.so record -o /sdcard/perf.data " + config.get("application","record_options") + " -p {PID} --symfs . && echo ok > /sdcard/profiling_finished")
time.sleep(1)
s.close()
except:
print "\r\n Error: connection failed (127.0.0.1:3435)"
print " Does your application implement AndroidRemoteExec ?"
print " if it does, try again or try restarting the app/device"
exit(1)
print " [+] Profiling in progress ... "
if adbFileExists("/sdcard/profiling_finished"):
adbDelete("/sdcard/profiling_finished")
profilingEnded = False
while not profilingEnded:
if adbFileExists("/sdcard/profiling_finished"):
profilingEnded = True
if not profilingEnded:
time.sleep(1)
if adbFileExists("/sdcard/profiling_finished"):
adbDelete("/sdcard/profiling_finished")
print " [+] Profiling finished, Collecting data"
adbPullFile("/sdcard/perf.data", "perf.data")
# Determine installation folder name
dso1_path = ""
if(adbFileExists("/data/app/" + config.get("application","package_name") + "-1/lib/arm/")):
dso1_path = "/data/app/" + config.get("application","package_name") + "-1/lib/arm/"
if(adbFileExists("/data/app/" + config.get("application","package_name") + "-2/lib/arm/")):
dso1_path = "/data/app/" + config.get("application","package_name") + "-2/lib/arm/"
if(adbFileExists("/data/app/" + config.get("application","package_name") + "-3/lib/arm/")):
dso1_path = "/data/app/" + config.get("application","package_name") + "-3/lib/arm/"
# Create binary cache folder and copy the shared object
if not os.path.exists("./binary_cache" + dso1_path):
os.makedirs("./binary_cache" + dso1_path)
copyfile(os.path.expandvars(config.get("application", "shared_lib_with_symbols")), "./binary_cache" + dso1_path + sharedobject)
print " [+] Generating report..."
commands_file = open("generate-report.bat", "w")
commands_file.write("report.py -g --symfs ./binary_cache/ --dsos " + dso1_path + sharedobject + "\r\n")
commands_file.close()
system("generate-report.bat")
os.remove("generate-report.bat")
# libSimpleperf.so record -p 11484 -g -e cpu-cycles:u -f 3000 --duration 60 --dump-symbols -m 1024 --symfs .
# libSimpleperf.so record -p 11484 -g -e cpu-cycles:u -f 800 --duration 60 --dump-symbols --symfs . | mit | 6,311,438,534,926,842,000 | 34.228448 | 180 | 0.685879 | false |
freelawproject/recap-server | uploads/tests/test_document_manager.py | 1 | 3819 | import unittest
from datetime import datetime
from uploads.models import Document
from uploads.DocketXML import DocketXML
from uploads import DocumentManager
# TODO: This doesn't test the entirety of DocumentManager
class TestDocumentManager(unittest.TestCase):
def setUp(self):
self.doc_xml = DocketXML('nysd', '1234')
self.doc_xml.add_document('1', '2')
def tearDown(self):
Document.objects.all().delete()
def test_update_local_db_basic(self):
DocumentManager.update_local_db(self.doc_xml)
created_doc = Document.objects.all()[0]
self.assertEquals(1, Document.objects.count())
self.assertEquals('nysd', created_doc.court)
self.assertEquals(1234, created_doc.casenum)
self.assertEquals(1, created_doc.docnum)
self.assertEquals(2, created_doc.subdocnum)
def test_update_local_db_updates_existing(self):
d1 = Document(court='nysd', casenum='1234', docnum='1', subdocnum='2')
d1.save()
self.assertEquals(1, Document.objects.count())
doc_meta = self.doc_xml.get_document_metadict('1', '2')
doc_meta['pacer_doc_id'] = '12'
DocumentManager.update_local_db(self.doc_xml)
created_doc = Document.objects.all()[0]
self.assertEquals(1, Document.objects.count())
self.assertEquals('12', created_doc.docid)
def test_update_local_db_doesnt_overwrite_local(self):
d1 = Document(court='nysd', casenum='1234',
docnum='1', subdocnum='2',
docid='120')
d1.save()
self.assertEquals(1, Document.objects.count())
# This document doesn't have docid, but we shouldn't overwrite
DocumentManager.update_local_db(self.doc_xml)
created_doc = Document.objects.all()[0]
self.assertEquals(1, Document.objects.count())
self.assertEquals('120', created_doc.docid)
def test_update_local_db_translates_opt_fields_correctly(self):
i_dict = {'doc_num': '2', 'attachment_num': '3',
'pacer_doc_id': '789',
'pacer_de_seq_num': '20',
'pacer_dm_id': '12',
'upload_date': '2007-12-25',
'free_import': 1,
'sha1': 'hash'}
self.doc_xml.remove_document('1', '2')
self.doc_xml.add_document('2', '3', i_dict)
DocumentManager.update_local_db(self.doc_xml)
self.assertEquals(1, Document.objects.count())
created_doc = Document.objects.all()[0]
self.assertEquals(int(i_dict['doc_num']), created_doc.docnum)
self.assertEquals(int(i_dict['attachment_num']), created_doc.subdocnum)
self.assertEquals(i_dict['pacer_doc_id'], created_doc.docid)
self.assertEquals(int(i_dict['pacer_de_seq_num']), created_doc.de_seq_num)
self.assertEquals(int(i_dict['pacer_dm_id']), created_doc.dm_id)
self.assertEquals(i_dict['sha1'], created_doc.sha1)
expected_upload_date = datetime.strptime(i_dict['upload_date'],
"%Y-%m-%d")
self.assertEquals(expected_upload_date, created_doc.lastdate)
self.assertEquals(int(i_dict['free_import']), created_doc.free_import)
def test_update_local_db_ignore_available(self):
doc_meta = self.doc_xml.get_document_metadict('1', '2')
doc_meta['available'] = '1'
DocumentManager.update_local_db(self.doc_xml)
created_doc = Document.objects.all()[0]
self.assertEquals(0, created_doc.available)
DocumentManager.update_local_db(self.doc_xml, ignore_available=0)
created_doc = Document.objects.all()[0]
self.assertEquals(1, created_doc.available)
| gpl-3.0 | 2,137,582,592,725,417,700 | 41.433333 | 82 | 0.60906 | false |
ifsmirnov/jngen | build.py | 1 | 2593 | #!/usr/bin/python3
import re, os
HEADER_REGEX = re.compile('#include "(.*)"')
# This list may not contain every header directly, but each jngen header
# must be among the dependencies of some file listed here.
LIBRARY_HEADERS = [
"array.h",
"random.h",
"common.h",
"tree.h",
"graph.h",
"geometry.h",
"math_jngen.h",
"rnda.h",
"rnds.h",
"testcases.h",
"options.h",
"printers.h",
"repr.h",
"query_builder.h",
"drawer/drawer.h",
"suites/suites.h",
]
def posix_path_to_native(posix_path):
return os.path.join(*posix_path.split('/'))
def extract_header(line):
res = HEADER_REGEX.match(line)
if res:
return res.groups()[0]
def extract_direct_deps(posix_filename):
dir = os.path.dirname(posix_filename) # check explicitly on win
res = set()
with open(posix_path_to_native(posix_filename)) as fin:
for line in fin.readlines():
t = extract_header(line)
if t and not t.endswith("_inl.h"):
res.add(dir + '/' + t if dir else t)
return res
deps = {}
def extract_deps(posix_filename):
posix_filename = os.path.normpath(posix_filename)
if posix_filename in deps:
return deps[posix_filename]
deps[posix_filename] = set((posix_filename,))
for dep in extract_direct_deps(posix_filename):
deps[posix_filename].update(extract_deps(dep))
return deps[posix_filename]
def write_file(filename, stream):
dir = os.path.dirname(filename) # check explicitly on win
with open(posix_path_to_native(filename)) as fin:
for line in fin.readlines():
include_or_not = HEADER_REGEX.match(line)
if include_or_not:
if include_or_not.groups()[0].endswith("_inl.h"):
t = include_or_not.groups()[0]
write_file(dir + '/' + t if dir else t, stream)
elif '#pragma once' not in line:
stream.write(line)
headers = set()
for h in LIBRARY_HEADERS:
headers.update(extract_deps(h))
headers = ['header.h'] + sorted(headers)
deps['footer.h'] = set(headers + ['footer.h'])
headers += ['footer.h']
deps['header.h'] = set(('header.h',))
headers_in_order = []
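# The loop below is effectively a topological sort of the include graph:
# on each pass it emits any header whose dependency set has shrunk to just
# itself, then discards that header from every other dependency set, so each
# file written into jngen.h appears after all of the headers it includes.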
while headers:
for h in headers:
if len(deps[h]) == 1:
headers_in_order.append(h)
for other in deps:
deps[other].discard(h)
del deps[h]
headers.remove(h)
break
with open("jngen.h", "w") as fout:
for filename in headers_in_order:
write_file(filename, fout)
| mit | 101,050,475,562,257,710 | 24.421569 | 71 | 0.587736 | false |
d0ugal/discode-server | discode_server/db.py | 1 | 4273 | import collections
import datetime
import hashlib
import logging
from sanic import exceptions
import aiopg.sa
import sqlalchemy as sa
from discode_server.utils import baseconv
from discode_server.utils import highlight
log = logging.getLogger(__file__)
meta = sa.MetaData()
paste = sa.Table(
'pastes', meta,
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('contents', sa.Text(), nullable=False),
sa.Column('created_on', sa.DateTime, default=datetime.datetime.utcnow),
sa.Column('sha', sa.String(64), nullable=False),
sa.Column('lexer', sa.String(60), nullable=True),
sa.Column('lexer_guessed', sa.Boolean, default=False),
)
comment = sa.Table(
'comments', meta,
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('paste_id', sa.Integer,
sa.ForeignKey("pastes.id", ondelete="CASCADE"), nullable=False),
sa.Column('line', sa.Integer, nullable=False),
sa.Column('contents', sa.Text(), nullable=False),
sa.Column('created_on', sa.DateTime, default=datetime.datetime.utcnow),
)
class Paste:
def __init__(self, record, comments=None):
self._record = record
self.comments = collections.defaultdict(list)
if not comments:
return
for comment in comments:
self.comments[comment.line].append(comment.contents)
@property
def id(self):
return baseconv.base36.from_decimal(self._record.id)
@property
def decimal_id(self):
return self._record.id
@property
def contents(self):
return self._record.contents
@property
def lexer(self):
return self._record.lexer
@property
def created_on(self):
return self._record.created_on
class Comment:
def __init__(self, record):
self._record = record
@property
def id(self):
return self._record.id
@property
def contents(self):
return self._record.contents
@property
def line(self):
return self._record.line
class PasteNotFound(exceptions.NotFound):
pass
async def create_engine(db_config, loop):
return await aiopg.sa.create_engine(
**db_config,
loop=loop
)
async def get_paste(conn, paste_id):
query = sa.select([paste]).where(paste.c.id == paste_id)
result = await conn.execute(query)
p = await result.first()
comments = await get_comments(conn, paste_id)
if not p:
raise PasteNotFound("Paste Not Found")
return Paste(p, comments)
async def get_pastes(conn):
query = sa.select([paste.c.id, paste.c.created_on])
result = await conn.execute(query)
pastes = await result.fetchall()
if not pastes:
raise PasteNotFound("Paste Not Found")
return [Paste(r) for r in pastes]
async def delete_expired(conn):
try:
log.info("Deleteing expired pastes")
days = 30
utcnow = datetime.datetime.utcnow()
delete_after = utcnow - datetime.timedelta(days=days)
await conn.execute(paste.delete().where(
paste.c.created_on < delete_after))
except:
log.exception("Failed to delete expired pastes")
async def create_comment(conn, paste_id, line, contents):
result = await conn.execute(comment.insert().values(
paste_id=paste_id, line=line, contents=contents))
record = await result.fetchone()
await conn.execute(f"NOTIFY channel, %s", f"{paste_id},{line},{record.id}")
return record
async def get_comments(conn, paste_id):
query = sa.select([comment]).where(comment.c.paste_id == paste_id)
result = await conn.execute(query)
comments = await result.fetchall()
return [Comment(c) for c in comments]
async def create(conn, contents, lexer, created_on=None):
sha = hashlib.sha256(contents.encode('utf-8')).hexdigest()
lexer, detected = highlight.guess(contents, lexer)
values = {
'contents': contents,
'sha': sha,
'lexer': lexer,
'lexer_guessed': detected,
}
if created_on is not None:
values['created_on'] = created_on
result = await conn.execute(paste.insert().values(**values))
record = await result.fetchone()
if not record:
raise Exception("whelp")
return Paste(record)
| bsd-2-clause | 5,807,472,579,117,242,000 | 25.054878 | 79 | 0.646852 | false |
shoyer/xarray | xarray/core/indexing.py | 1 | 51766 | import enum
import functools
import operator
from collections import defaultdict
from contextlib import suppress
from datetime import timedelta
from typing import Any, Callable, Iterable, Sequence, Tuple, Union
import numpy as np
import pandas as pd
from . import duck_array_ops, nputils, utils
from .npcompat import DTypeLike
from .pycompat import dask_array_type, integer_types, sparse_array_type
from .utils import is_dict_like, maybe_cast_to_coords_dtype
def expanded_indexer(key, ndim):
"""Given a key for indexing an ndarray, return an equivalent key which is a
tuple with length equal to the number of dimensions.
The expansion is done by replacing all `Ellipsis` items with the right
number of full slices and then padding the key with full slices so that it
reaches the appropriate dimensionality.
"""
if not isinstance(key, tuple):
# numpy treats non-tuple keys equivalent to tuples of length 1
key = (key,)
new_key = []
# handling Ellipsis right is a little tricky, see:
# http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing
found_ellipsis = False
for k in key:
if k is Ellipsis:
if not found_ellipsis:
new_key.extend((ndim + 1 - len(key)) * [slice(None)])
found_ellipsis = True
else:
new_key.append(slice(None))
else:
new_key.append(k)
if len(new_key) > ndim:
raise IndexError("too many indices")
new_key.extend((ndim - len(new_key)) * [slice(None)])
return tuple(new_key)
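# A hand-checked sketch of how expanded_indexer pads a key out to ndim entries:
# >>> expanded_indexer((0, Ellipsis), 3)
# (0, slice(None, None, None), slice(None, None, None))
# >>> expanded_indexer(1, 2)
# (1, slice(None, None, None))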
def _expand_slice(slice_, size):
return np.arange(*slice_.indices(size))
def _sanitize_slice_element(x):
from .variable import Variable
from .dataarray import DataArray
if isinstance(x, (Variable, DataArray)):
x = x.values
if isinstance(x, np.ndarray):
if x.ndim != 0:
raise ValueError(
f"cannot use non-scalar arrays in a slice for xarray indexing: {x}"
)
x = x[()]
if isinstance(x, np.timedelta64):
# pandas does not support indexing with np.timedelta64 yet:
# https://github.com/pandas-dev/pandas/issues/20393
x = pd.Timedelta(x)
return x
def _asarray_tuplesafe(values):
"""
Convert values into a numpy array of at most 1-dimension, while preserving
tuples.
Adapted from pandas.core.common._asarray_tuplesafe
"""
if isinstance(values, tuple):
result = utils.to_0d_object_array(values)
else:
result = np.asarray(values)
if result.ndim == 2:
result = np.empty(len(values), dtype=object)
result[:] = values
return result
def _is_nested_tuple(possible_tuple):
return isinstance(possible_tuple, tuple) and any(
isinstance(value, (tuple, list, slice)) for value in possible_tuple
)
def get_indexer_nd(index, labels, method=None, tolerance=None):
"""Wrapper around :meth:`pandas.Index.get_indexer` supporting n-dimensional
labels
"""
flat_labels = np.ravel(labels)
flat_indexer = index.get_indexer(flat_labels, method=method, tolerance=tolerance)
indexer = flat_indexer.reshape(labels.shape)
return indexer
def convert_label_indexer(index, label, index_name="", method=None, tolerance=None):
"""Given a pandas.Index and labels (e.g., from __getitem__) for one
dimension, return an indexer suitable for indexing an ndarray along that
dimension. If `index` is a pandas.MultiIndex and depending on `label`,
return a new pandas.Index or pandas.MultiIndex (otherwise return None).
"""
new_index = None
if isinstance(label, slice):
if method is not None or tolerance is not None:
raise NotImplementedError(
"cannot use ``method`` argument if any indexers are " "slice objects"
)
indexer = index.slice_indexer(
_sanitize_slice_element(label.start),
_sanitize_slice_element(label.stop),
_sanitize_slice_element(label.step),
)
if not isinstance(indexer, slice):
# unlike pandas, in xarray we never want to silently convert a
# slice indexer into an array indexer
raise KeyError(
"cannot represent labeled-based slice indexer for dimension "
f"{index_name!r} with a slice over integer positions; the index is "
"unsorted or non-unique"
)
elif is_dict_like(label):
is_nested_vals = _is_nested_tuple(tuple(label.values()))
if not isinstance(index, pd.MultiIndex):
raise ValueError(
"cannot use a dict-like object for selection on "
"a dimension that does not have a MultiIndex"
)
elif len(label) == index.nlevels and not is_nested_vals:
indexer = index.get_loc(tuple(label[k] for k in index.names))
else:
for k, v in label.items():
# index should be an item (i.e. Hashable) not an array-like
if isinstance(v, Sequence) and not isinstance(v, str):
raise ValueError(
"Vectorized selection is not "
"available along level variable: " + k
)
indexer, new_index = index.get_loc_level(
tuple(label.values()), level=tuple(label.keys())
)
# GH2619. Raise a KeyError if nothing is chosen
if indexer.dtype.kind == "b" and indexer.sum() == 0:
raise KeyError(f"{label} not found")
elif isinstance(label, tuple) and isinstance(index, pd.MultiIndex):
if _is_nested_tuple(label):
indexer = index.get_locs(label)
elif len(label) == index.nlevels:
indexer = index.get_loc(label)
else:
indexer, new_index = index.get_loc_level(
label, level=list(range(len(label)))
)
else:
label = (
label
if getattr(label, "ndim", 1) > 1 # vectorized-indexing
else _asarray_tuplesafe(label)
)
if label.ndim == 0:
if isinstance(index, pd.MultiIndex):
indexer, new_index = index.get_loc_level(label.item(), level=0)
elif isinstance(index, pd.CategoricalIndex):
if method is not None:
raise ValueError(
"'method' is not a valid kwarg when indexing using a CategoricalIndex."
)
if tolerance is not None:
raise ValueError(
"'tolerance' is not a valid kwarg when indexing using a CategoricalIndex."
)
indexer = index.get_loc(label.item())
else:
indexer = index.get_loc(
label.item(), method=method, tolerance=tolerance
)
elif label.dtype.kind == "b":
indexer = label
else:
if isinstance(index, pd.MultiIndex) and label.ndim > 1:
raise ValueError(
"Vectorized selection is not available along "
"MultiIndex variable: " + index_name
)
indexer = get_indexer_nd(index, label, method, tolerance)
if np.any(indexer < 0):
raise KeyError(f"not all values found in index {index_name!r}")
return indexer, new_index
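# A hand-checked sketch with a made-up index: for a plain pandas.Index the
# label is simply located with Index.get_loc and no new index is returned.
# >>> convert_label_indexer(pd.Index([10, 20, 30]), 20)
# (1, None)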
def get_dim_indexers(data_obj, indexers):
"""Given a xarray data object and label based indexers, return a mapping
of label indexers with only dimension names as keys.
It groups multiple level indexers given on a multi-index dimension
into a single, dictionary indexer for that dimension (Raise a ValueError
if it is not possible).
"""
invalid = [
k
for k in indexers
if k not in data_obj.dims and k not in data_obj._level_coords
]
if invalid:
raise ValueError(f"dimensions or multi-index levels {invalid!r} do not exist")
level_indexers = defaultdict(dict)
dim_indexers = {}
for key, label in indexers.items():
(dim,) = data_obj[key].dims
if key != dim:
# assume here multi-index level indexer
level_indexers[dim][key] = label
else:
dim_indexers[key] = label
for dim, level_labels in level_indexers.items():
if dim_indexers.get(dim, False):
raise ValueError(
"cannot combine multi-index level indexers with an indexer for "
f"dimension {dim}"
)
dim_indexers[dim] = level_labels
return dim_indexers
def remap_label_indexers(data_obj, indexers, method=None, tolerance=None):
"""Given an xarray data object and label based indexers, return a mapping
of equivalent location based indexers. Also return a mapping of updated
pandas index objects (in case of multi-index level drop).
"""
if method is not None and not isinstance(method, str):
raise TypeError("``method`` must be a string")
pos_indexers = {}
new_indexes = {}
dim_indexers = get_dim_indexers(data_obj, indexers)
for dim, label in dim_indexers.items():
try:
index = data_obj.indexes[dim]
except KeyError:
# no index for this dimension: reuse the provided labels
if method is not None or tolerance is not None:
raise ValueError(
"cannot supply ``method`` or ``tolerance`` "
"when the indexed dimension does not have "
"an associated coordinate."
)
pos_indexers[dim] = label
else:
coords_dtype = data_obj.coords[dim].dtype
label = maybe_cast_to_coords_dtype(label, coords_dtype)
idxr, new_idx = convert_label_indexer(index, label, dim, method, tolerance)
pos_indexers[dim] = idxr
if new_idx is not None:
new_indexes[dim] = new_idx
return pos_indexers, new_indexes
def slice_slice(old_slice, applied_slice, size):
"""Given a slice and the size of the dimension to which it will be applied,
index it with another slice to return a new slice equivalent to applying
the slices sequentially
"""
step = (old_slice.step or 1) * (applied_slice.step or 1)
# For now, use the hack of turning old_slice into an ndarray to reconstruct
# the slice start and stop. This is not entirely ideal, but it is still
# definitely better than leaving the indexer as an array.
items = _expand_slice(old_slice, size)[applied_slice]
if len(items) > 0:
start = items[0]
stop = items[-1] + int(np.sign(step))
if stop < 0:
stop = None
else:
start = 0
stop = 0
return slice(start, stop, step)
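# A hand-checked sketch: applying slice(1, None) on top of slice(2, 10, 2)
# over a dimension of size 10 selects positions [4, 6, 8], which collapses
# into a single equivalent slice.
# >>> slice_slice(slice(2, 10, 2), slice(1, None), 10)
# slice(4, 9, 2)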
def _index_indexer_1d(old_indexer, applied_indexer, size):
assert isinstance(applied_indexer, integer_types + (slice, np.ndarray))
if isinstance(applied_indexer, slice) and applied_indexer == slice(None):
# shortcut for the usual case
return old_indexer
if isinstance(old_indexer, slice):
if isinstance(applied_indexer, slice):
indexer = slice_slice(old_indexer, applied_indexer, size)
else:
indexer = _expand_slice(old_indexer, size)[applied_indexer]
else:
indexer = old_indexer[applied_indexer]
return indexer
class ExplicitIndexer:
"""Base class for explicit indexer objects.
ExplicitIndexer objects wrap a tuple of values given by their ``tuple``
property. These tuples should always have length equal to the number of
dimensions on the indexed array.
Do not instantiate BaseIndexer objects directly: instead, use one of the
sub-classes BasicIndexer, OuterIndexer or VectorizedIndexer.
"""
__slots__ = ("_key",)
def __init__(self, key):
if type(self) is ExplicitIndexer:
raise TypeError("cannot instantiate base ExplicitIndexer objects")
self._key = tuple(key)
@property
def tuple(self):
return self._key
def __repr__(self):
return f"{type(self).__name__}({self.tuple})"
def as_integer_or_none(value):
return None if value is None else operator.index(value)
def as_integer_slice(value):
start = as_integer_or_none(value.start)
stop = as_integer_or_none(value.stop)
step = as_integer_or_none(value.step)
return slice(start, stop, step)
class BasicIndexer(ExplicitIndexer):
"""Tuple for basic indexing.
All elements should be int or slice objects. Indexing follows NumPy's
rules for basic indexing: each axis is independently sliced and axes
indexed with an integer are dropped from the result.
"""
__slots__ = ()
def __init__(self, key):
if not isinstance(key, tuple):
raise TypeError(f"key must be a tuple: {key!r}")
new_key = []
for k in key:
if isinstance(k, integer_types):
k = int(k)
elif isinstance(k, slice):
k = as_integer_slice(k)
else:
raise TypeError(
f"unexpected indexer type for {type(self).__name__}: {k!r}"
)
new_key.append(k)
super().__init__(new_key)
class OuterIndexer(ExplicitIndexer):
"""Tuple for outer/orthogonal indexing.
All elements should be int, slice or 1-dimensional np.ndarray objects with
an integer dtype. Indexing is applied independently along each axis, and
axes indexed with an integer are dropped from the result. This type of
indexing works like MATLAB/Fortran.
"""
__slots__ = ()
def __init__(self, key):
if not isinstance(key, tuple):
raise TypeError(f"key must be a tuple: {key!r}")
new_key = []
for k in key:
if isinstance(k, integer_types):
k = int(k)
elif isinstance(k, slice):
k = as_integer_slice(k)
elif isinstance(k, np.ndarray):
if not np.issubdtype(k.dtype, np.integer):
raise TypeError(
f"invalid indexer array, does not have integer dtype: {k!r}"
)
if k.ndim != 1:
raise TypeError(
f"invalid indexer array for {type(self).__name__}; must have "
f"exactly 1 dimension: {k!r}"
)
k = np.asarray(k, dtype=np.int64)
else:
raise TypeError(
f"unexpected indexer type for {type(self).__name__}: {k!r}"
)
new_key.append(k)
super().__init__(new_key)
class VectorizedIndexer(ExplicitIndexer):
"""Tuple for vectorized indexing.
All elements should be slice or N-dimensional np.ndarray objects with an
integer dtype and the same number of dimensions. Indexing follows proposed
rules for np.ndarray.vindex, which matches NumPy's advanced indexing rules
(including broadcasting) except sliced axes are always moved to the end:
https://github.com/numpy/numpy/pull/6256
"""
__slots__ = ()
def __init__(self, key):
if not isinstance(key, tuple):
raise TypeError(f"key must be a tuple: {key!r}")
new_key = []
ndim = None
for k in key:
if isinstance(k, slice):
k = as_integer_slice(k)
elif isinstance(k, np.ndarray):
if not np.issubdtype(k.dtype, np.integer):
raise TypeError(
f"invalid indexer array, does not have integer dtype: {k!r}"
)
if ndim is None:
ndim = k.ndim
elif ndim != k.ndim:
ndims = [k.ndim for k in key if isinstance(k, np.ndarray)]
raise ValueError(
"invalid indexer key: ndarray arguments "
f"have different numbers of dimensions: {ndims}"
)
k = np.asarray(k, dtype=np.int64)
else:
raise TypeError(
f"unexpected indexer type for {type(self).__name__}: {k!r}"
)
new_key.append(k)
super().__init__(new_key)
class ExplicitlyIndexed:
"""Mixin to mark support for Indexer subclasses in indexing.
"""
__slots__ = ()
class ExplicitlyIndexedNDArrayMixin(utils.NDArrayMixin, ExplicitlyIndexed):
__slots__ = ()
def __array__(self, dtype=None):
key = BasicIndexer((slice(None),) * self.ndim)
return np.asarray(self[key], dtype=dtype)
class ImplicitToExplicitIndexingAdapter(utils.NDArrayMixin):
"""Wrap an array, converting tuples into the indicated explicit indexer."""
__slots__ = ("array", "indexer_cls")
def __init__(self, array, indexer_cls=BasicIndexer):
self.array = as_indexable(array)
self.indexer_cls = indexer_cls
def __array__(self, dtype=None):
return np.asarray(self.array, dtype=dtype)
def __getitem__(self, key):
key = expanded_indexer(key, self.ndim)
result = self.array[self.indexer_cls(key)]
if isinstance(result, ExplicitlyIndexed):
return type(self)(result, self.indexer_cls)
else:
# Sometimes explicitly indexed arrays return NumPy arrays or
# scalars.
return result
class LazilyOuterIndexedArray(ExplicitlyIndexedNDArrayMixin):
"""Wrap an array to make basic and outer indexing lazy.
"""
__slots__ = ("array", "key")
def __init__(self, array, key=None):
"""
Parameters
----------
array : array_like
Array like object to index.
key : ExplicitIndexer, optional
Array indexer. If provided, it is assumed to already be in
canonical expanded form.
"""
if isinstance(array, type(self)) and key is None:
# unwrap
key = array.key
array = array.array
if key is None:
key = BasicIndexer((slice(None),) * array.ndim)
self.array = as_indexable(array)
self.key = key
def _updated_key(self, new_key):
iter_new_key = iter(expanded_indexer(new_key.tuple, self.ndim))
full_key = []
for size, k in zip(self.array.shape, self.key.tuple):
if isinstance(k, integer_types):
full_key.append(k)
else:
full_key.append(_index_indexer_1d(k, next(iter_new_key), size))
full_key = tuple(full_key)
if all(isinstance(k, integer_types + (slice,)) for k in full_key):
return BasicIndexer(full_key)
return OuterIndexer(full_key)
@property
def shape(self):
shape = []
for size, k in zip(self.array.shape, self.key.tuple):
if isinstance(k, slice):
shape.append(len(range(*k.indices(size))))
elif isinstance(k, np.ndarray):
shape.append(k.size)
return tuple(shape)
def __array__(self, dtype=None):
array = as_indexable(self.array)
return np.asarray(array[self.key], dtype=None)
def transpose(self, order):
return LazilyVectorizedIndexedArray(self.array, self.key).transpose(order)
def __getitem__(self, indexer):
if isinstance(indexer, VectorizedIndexer):
array = LazilyVectorizedIndexedArray(self.array, self.key)
return array[indexer]
return type(self)(self.array, self._updated_key(indexer))
def __setitem__(self, key, value):
if isinstance(key, VectorizedIndexer):
raise NotImplementedError(
"Lazy item assignment with the vectorized indexer is not yet "
"implemented. Load your data first by .load() or compute()."
)
full_key = self._updated_key(key)
self.array[full_key] = value
def __repr__(self):
return f"{type(self).__name__}(array={self.array!r}, key={self.key!r})"
class LazilyVectorizedIndexedArray(ExplicitlyIndexedNDArrayMixin):
"""Wrap an array to make vectorized indexing lazy.
"""
__slots__ = ("array", "key")
def __init__(self, array, key):
"""
Parameters
----------
array : array_like
Array like object to index.
key : VectorizedIndexer
"""
if isinstance(key, (BasicIndexer, OuterIndexer)):
self.key = _outer_to_vectorized_indexer(key, array.shape)
else:
self.key = _arrayize_vectorized_indexer(key, array.shape)
self.array = as_indexable(array)
@property
def shape(self):
return np.broadcast(*self.key.tuple).shape
def __array__(self, dtype=None):
return np.asarray(self.array[self.key], dtype=None)
def _updated_key(self, new_key):
return _combine_indexers(self.key, self.shape, new_key)
def __getitem__(self, indexer):
# If the indexed array becomes a scalar, return LazilyOuterIndexedArray
if all(isinstance(ind, integer_types) for ind in indexer.tuple):
key = BasicIndexer(tuple(k[indexer.tuple] for k in self.key.tuple))
return LazilyOuterIndexedArray(self.array, key)
return type(self)(self.array, self._updated_key(indexer))
def transpose(self, order):
key = VectorizedIndexer(tuple(k.transpose(order) for k in self.key.tuple))
return type(self)(self.array, key)
def __setitem__(self, key, value):
raise NotImplementedError(
"Lazy item assignment with the vectorized indexer is not yet "
"implemented. Load your data first by .load() or compute()."
)
def __repr__(self):
return f"{type(self).__name__}(array={self.array!r}, key={self.key!r})"
def _wrap_numpy_scalars(array):
"""Wrap NumPy scalars in 0d arrays."""
if np.isscalar(array):
return np.array(array)
else:
return array
class CopyOnWriteArray(ExplicitlyIndexedNDArrayMixin):
__slots__ = ("array", "_copied")
def __init__(self, array):
self.array = as_indexable(array)
self._copied = False
def _ensure_copied(self):
if not self._copied:
self.array = as_indexable(np.array(self.array))
self._copied = True
def __array__(self, dtype=None):
return np.asarray(self.array, dtype=dtype)
def __getitem__(self, key):
return type(self)(_wrap_numpy_scalars(self.array[key]))
def transpose(self, order):
return self.array.transpose(order)
def __setitem__(self, key, value):
self._ensure_copied()
self.array[key] = value
class MemoryCachedArray(ExplicitlyIndexedNDArrayMixin):
__slots__ = ("array",)
def __init__(self, array):
self.array = _wrap_numpy_scalars(as_indexable(array))
def _ensure_cached(self):
if not isinstance(self.array, NumpyIndexingAdapter):
self.array = NumpyIndexingAdapter(np.asarray(self.array))
def __array__(self, dtype=None):
self._ensure_cached()
return np.asarray(self.array, dtype=dtype)
def __getitem__(self, key):
return type(self)(_wrap_numpy_scalars(self.array[key]))
def transpose(self, order):
return self.array.transpose(order)
def __setitem__(self, key, value):
self.array[key] = value
def as_indexable(array):
"""
This function always returns a ExplicitlyIndexed subclass,
so that the vectorized indexing is always possible with the returned
object.
"""
if isinstance(array, ExplicitlyIndexed):
return array
if isinstance(array, np.ndarray):
return NumpyIndexingAdapter(array)
if isinstance(array, pd.Index):
return PandasIndexAdapter(array)
if isinstance(array, dask_array_type):
return DaskIndexingAdapter(array)
if hasattr(array, "__array_function__"):
return NdArrayLikeIndexingAdapter(array)
raise TypeError("Invalid array type: {}".format(type(array)))
def _outer_to_vectorized_indexer(key, shape):
"""Convert an OuterIndexer into an vectorized indexer.
Parameters
----------
key : Outer/Basic Indexer
An indexer to convert.
shape : tuple
Shape of the array subject to the indexing.
Returns
-------
VectorizedIndexer
Tuple suitable for use to index a NumPy array with vectorized indexing.
Each element is an array: broadcasting them together gives the shape
of the result.
"""
key = key.tuple
n_dim = len([k for k in key if not isinstance(k, integer_types)])
i_dim = 0
new_key = []
for k, size in zip(key, shape):
if isinstance(k, integer_types):
new_key.append(np.array(k).reshape((1,) * n_dim))
else: # np.ndarray or slice
if isinstance(k, slice):
k = np.arange(*k.indices(size))
assert k.dtype.kind in {"i", "u"}
shape = [(1,) * i_dim + (k.size,) + (1,) * (n_dim - i_dim - 1)]
new_key.append(k.reshape(*shape))
i_dim += 1
return VectorizedIndexer(tuple(new_key))
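# A hand-checked sketch with made-up values: an outer key mixing an array, a
# slice and an integer becomes a set of broadcastable index arrays (here
# broadcasting to shape (2, 4), with the integer axis kept as size 1).
# >>> key = OuterIndexer((np.array([0, 2]), slice(None), 1))
# >>> [k.shape for k in _outer_to_vectorized_indexer(key, (3, 4, 5)).tuple]
# [(2, 1), (1, 4), (1, 1)]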
def _outer_to_numpy_indexer(key, shape):
"""Convert an OuterIndexer into an indexer for NumPy.
Parameters
----------
key : Basic/OuterIndexer
An indexer to convert.
shape : tuple
Shape of the array subject to the indexing.
Returns
-------
tuple
Tuple suitable for use to index a NumPy array.
"""
if len([k for k in key.tuple if not isinstance(k, slice)]) <= 1:
# If there is only one vector and all others are slice,
# it can be safely used in mixed basic/advanced indexing.
# Boolean index should already be converted to integer array.
return key.tuple
else:
return _outer_to_vectorized_indexer(key, shape).tuple
def _combine_indexers(old_key, shape, new_key):
""" Combine two indexers.
Parameters
----------
old_key: ExplicitIndexer
The first indexer for the original array
shape: tuple of ints
Shape of the original array to be indexed by old_key
new_key:
The second indexer for indexing original[old_key]
"""
if not isinstance(old_key, VectorizedIndexer):
old_key = _outer_to_vectorized_indexer(old_key, shape)
if len(old_key.tuple) == 0:
return new_key
new_shape = np.broadcast(*old_key.tuple).shape
if isinstance(new_key, VectorizedIndexer):
new_key = _arrayize_vectorized_indexer(new_key, new_shape)
else:
new_key = _outer_to_vectorized_indexer(new_key, new_shape)
return VectorizedIndexer(
tuple(o[new_key.tuple] for o in np.broadcast_arrays(*old_key.tuple))
)
@enum.unique
class IndexingSupport(enum.Enum):
# for backends that support only basic indexer
BASIC = 0
# for backends that support basic / outer indexer
OUTER = 1
# for backends that support outer indexer including at most 1 vector.
OUTER_1VECTOR = 2
# for backends that support full vectorized indexer.
VECTORIZED = 3
def explicit_indexing_adapter(
key: ExplicitIndexer,
shape: Tuple[int, ...],
indexing_support: IndexingSupport,
raw_indexing_method: Callable,
) -> Any:
"""Support explicit indexing by delegating to a raw indexing method.
Outer and/or vectorized indexers are supported by indexing a second time
with a NumPy array.
Parameters
----------
key : ExplicitIndexer
Explicit indexing object.
shape : Tuple[int, ...]
Shape of the indexed array.
indexing_support : IndexingSupport enum
Form of indexing supported by raw_indexing_method.
raw_indexing_method: callable
Function (like ndarray.__getitem__) that when called with indexing key
in the form of a tuple returns an indexed array.
Returns
-------
Indexing result, in the form of a duck numpy-array.
"""
raw_key, numpy_indices = decompose_indexer(key, shape, indexing_support)
result = raw_indexing_method(raw_key.tuple)
if numpy_indices.tuple:
# index the loaded np.ndarray
result = NumpyIndexingAdapter(np.asarray(result))[numpy_indices]
return result
def decompose_indexer(
indexer: ExplicitIndexer, shape: Tuple[int, ...], indexing_support: IndexingSupport
) -> Tuple[ExplicitIndexer, ExplicitIndexer]:
if isinstance(indexer, VectorizedIndexer):
return _decompose_vectorized_indexer(indexer, shape, indexing_support)
if isinstance(indexer, (BasicIndexer, OuterIndexer)):
return _decompose_outer_indexer(indexer, shape, indexing_support)
raise TypeError(f"unexpected key type: {indexer}")
def _decompose_slice(key, size):
""" convert a slice to successive two slices. The first slice always has
a positive step.
"""
start, stop, step = key.indices(size)
if step > 0:
# If key already has a positive step, use it as is in the backend
return key, slice(None)
else:
# determine stop precisely for step > 1 case
# e.g. [98:2:-2] -> [98:3:-2]
stop = start + int((stop - start - 1) / step) * step + 1
start, stop = stop + 1, start + 1
return slice(start, stop, -step), slice(None, None, -1)
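# A hand-checked sketch: a negative-step slice is rewritten as a positive-step
# slice for the backend plus a reversing slice applied to the in-memory result.
# >>> _decompose_slice(slice(98, 2, -2), 100)
# (slice(4, 99, 2), slice(None, None, -1))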
def _decompose_vectorized_indexer(
indexer: VectorizedIndexer,
shape: Tuple[int, ...],
indexing_support: IndexingSupport,
) -> Tuple[ExplicitIndexer, ExplicitIndexer]:
"""
Decompose a vectorized indexer into two successive indexers: the first
is used to index the backend array, while the second indexes the loaded
in-memory np.ndarray.
Parameters
----------
indexer: VectorizedIndexer
indexing_support: one of IndexerSupport entries
Returns
-------
backend_indexer: OuterIndexer or BasicIndexer
np_indexers: an ExplicitIndexer (VectorizedIndexer / BasicIndexer)
Notes
-----
This function is used to realize vectorized indexing for backend arrays
that only support basic or outer indexing.
As an example, consider indexing a few elements from a backend array
with the vectorized indexer ([0, 3, 1], [2, 3, 2]).
Even if the backend array only supports outer indexing, it is more
efficient to load a subslice of the array than to load the entire array,
>>> backend_indexer = OuterIndexer([0, 1, 3], [2, 3])
>>> array = array[backend_indexer] # load subslice of the array
>>> np_indexer = VectorizedIndexer([0, 2, 1], [0, 1, 0])
>>> array[np_indexer] # vectorized indexing for on-memory np.ndarray.
"""
assert isinstance(indexer, VectorizedIndexer)
if indexing_support is IndexingSupport.VECTORIZED:
return indexer, BasicIndexer(())
backend_indexer_elems = []
np_indexer_elems = []
# convert negative indices
indexer_elems = [
np.where(k < 0, k + s, k) if isinstance(k, np.ndarray) else k
for k, s in zip(indexer.tuple, shape)
]
for k, s in zip(indexer_elems, shape):
if isinstance(k, slice):
# If it is a slice, then we will slice it as-is
# (but make its step positive) in the backend,
# and then use all of it (slice(None)) for the in-memory portion.
bk_slice, np_slice = _decompose_slice(k, s)
backend_indexer_elems.append(bk_slice)
np_indexer_elems.append(np_slice)
else:
# If it is a (multidimensional) np.ndarray, just pick up the used
# keys without duplication and store them as a 1d np.ndarray.
oind, vind = np.unique(k, return_inverse=True)
backend_indexer_elems.append(oind)
np_indexer_elems.append(vind.reshape(*k.shape))
backend_indexer = OuterIndexer(tuple(backend_indexer_elems))
np_indexer = VectorizedIndexer(tuple(np_indexer_elems))
if indexing_support is IndexingSupport.OUTER:
return backend_indexer, np_indexer
# If the backend does not support outer indexing,
# backend_indexer (OuterIndexer) is also decomposed.
backend_indexer1, np_indexer1 = _decompose_outer_indexer(
backend_indexer, shape, indexing_support
)
np_indexer = _combine_indexers(np_indexer1, shape, np_indexer)
return backend_indexer1, np_indexer
def _decompose_outer_indexer(
indexer: Union[BasicIndexer, OuterIndexer],
shape: Tuple[int, ...],
indexing_support: IndexingSupport,
) -> Tuple[ExplicitIndexer, ExplicitIndexer]:
"""
Decompose an outer indexer into two successive indexers: the first
is used to index the backend array, while the second indexes the loaded
in-memory np.ndarray.
Parameters
----------
indexer: OuterIndexer or BasicIndexer
indexing_support: One of the entries of IndexingSupport
Returns
-------
backend_indexer: OuterIndexer or BasicIndexer
np_indexers: an ExplicitIndexer (OuterIndexer / BasicIndexer)
Notes
-----
This function is used to realize vectorized indexing for backend arrays
that only support basic or outer indexing.
As an example, consider indexing a few elements from a backend array
with the orthogonal indexer ([0, 3, 1], [2, 3, 2]).
Even if the backend array only supports basic indexing, it is more
efficient to load a subslice of the array than to load the entire array,
>>> backend_indexer = BasicIndexer(slice(0, 3), slice(2, 3))
>>> array = array[backend_indexer] # load subslice of the array
>>> np_indexer = OuterIndexer([0, 2, 1], [0, 1, 0])
>>> array[np_indexer] # outer indexing for on-memory np.ndarray.
"""
if indexing_support == IndexingSupport.VECTORIZED:
return indexer, BasicIndexer(())
assert isinstance(indexer, (OuterIndexer, BasicIndexer))
backend_indexer = []
np_indexer = []
# make indexer positive
pos_indexer = []
for k, s in zip(indexer.tuple, shape):
if isinstance(k, np.ndarray):
pos_indexer.append(np.where(k < 0, k + s, k))
elif isinstance(k, integer_types) and k < 0:
pos_indexer.append(k + s)
else:
pos_indexer.append(k)
indexer_elems = pos_indexer
if indexing_support is IndexingSupport.OUTER_1VECTOR:
        # some backends, such as h5py, support only one vector in an indexer;
        # we pick the axis where keeping the vector key is most efficient
gains = [
(np.max(k) - np.min(k) + 1.0) / len(np.unique(k))
if isinstance(k, np.ndarray)
else 0
for k in indexer_elems
]
array_index = np.argmax(np.array(gains)) if len(gains) > 0 else None
for i, (k, s) in enumerate(zip(indexer_elems, shape)):
if isinstance(k, np.ndarray) and i != array_index:
                # The np.ndarray key is converted to a slice that covers all
                # of its entries.
backend_indexer.append(slice(np.min(k), np.max(k) + 1))
np_indexer.append(k - np.min(k))
elif isinstance(k, np.ndarray):
                # Remove duplicates and sort them in increasing order
pkey, ekey = np.unique(k, return_inverse=True)
backend_indexer.append(pkey)
np_indexer.append(ekey)
elif isinstance(k, integer_types):
backend_indexer.append(k)
else: # slice: convert positive step slice for backend
bk_slice, np_slice = _decompose_slice(k, s)
backend_indexer.append(bk_slice)
np_indexer.append(np_slice)
return (OuterIndexer(tuple(backend_indexer)), OuterIndexer(tuple(np_indexer)))
if indexing_support == IndexingSupport.OUTER:
for k, s in zip(indexer_elems, shape):
if isinstance(k, slice):
# slice: convert positive step slice for backend
bk_slice, np_slice = _decompose_slice(k, s)
backend_indexer.append(bk_slice)
np_indexer.append(np_slice)
elif isinstance(k, integer_types):
backend_indexer.append(k)
elif isinstance(k, np.ndarray) and (np.diff(k) >= 0).all():
backend_indexer.append(k)
np_indexer.append(slice(None))
else:
                # Remove duplicates and sort them in increasing order
oind, vind = np.unique(k, return_inverse=True)
backend_indexer.append(oind)
np_indexer.append(vind.reshape(*k.shape))
return (OuterIndexer(tuple(backend_indexer)), OuterIndexer(tuple(np_indexer)))
# basic indexer
assert indexing_support == IndexingSupport.BASIC
for k, s in zip(indexer_elems, shape):
if isinstance(k, np.ndarray):
            # The np.ndarray key is converted to a slice that covers all
            # of its entries.
backend_indexer.append(slice(np.min(k), np.max(k) + 1))
np_indexer.append(k - np.min(k))
elif isinstance(k, integer_types):
backend_indexer.append(k)
else: # slice: convert positive step slice for backend
bk_slice, np_slice = _decompose_slice(k, s)
backend_indexer.append(bk_slice)
np_indexer.append(np_slice)
return (BasicIndexer(tuple(backend_indexer)), OuterIndexer(tuple(np_indexer)))
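# Illustrative sketch added for exposition; it is not part of the original
# xarray module. It exercises _decompose_outer_indexer for a backend that only
# supports basic indexing, reusing this module's own classes; the array and
# index values below are made-up examples.
def _example_decompose_outer_for_basic_backend():
    arr = np.arange(50).reshape(10, 5)
    indexer = OuterIndexer((np.array([0, 3, 1]), np.array([2, 3])))
    backend_ix, np_ix = _decompose_outer_indexer(
        indexer, arr.shape, IndexingSupport.BASIC
    )
    # backend_ix only holds slices, so a basic-indexing-only store can load it
    loaded = arr[backend_ix.tuple]
    # the remaining outer indexing is applied to the in-memory numpy array
    result = NumpyIndexingAdapter(loaded)[np_ix]
    return np.array_equal(result, arr[np.ix_([0, 3, 1], [2, 3])])  # True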
def _arrayize_vectorized_indexer(indexer, shape):
""" Return an identical vindex but slices are replaced by arrays """
slices = [v for v in indexer.tuple if isinstance(v, slice)]
if len(slices) == 0:
return indexer
arrays = [v for v in indexer.tuple if isinstance(v, np.ndarray)]
n_dim = arrays[0].ndim if len(arrays) > 0 else 0
i_dim = 0
new_key = []
for v, size in zip(indexer.tuple, shape):
if isinstance(v, np.ndarray):
new_key.append(np.reshape(v, v.shape + (1,) * len(slices)))
else: # slice
shape = (1,) * (n_dim + i_dim) + (-1,) + (1,) * (len(slices) - i_dim - 1)
new_key.append(np.arange(*v.indices(size)).reshape(shape))
i_dim += 1
return VectorizedIndexer(tuple(new_key))
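# Illustrative sketch added for exposition (not part of the original module):
# slices inside a VectorizedIndexer become broadcastable arange arrays so that
# every key can broadcast against the others; the shapes are made-up examples.
def _example_arrayize_vectorized_indexer():
    indexer = VectorizedIndexer((np.array([0, 2, 1]), slice(None)))
    arrayized = _arrayize_vectorized_indexer(indexer, shape=(3, 4))
    first, second = arrayized.tuple
    return first.shape, second.shape  # (3, 1) and (1, 4): broadcast to (3, 4)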
def _dask_array_with_chunks_hint(array, chunks):
"""Create a dask array using the chunks hint for dimensions of size > 1."""
import dask.array as da
if len(chunks) < array.ndim:
raise ValueError("not enough chunks in hint")
new_chunks = []
for chunk, size in zip(chunks, array.shape):
new_chunks.append(chunk if size > 1 else (1,))
return da.from_array(array, new_chunks)
def _logical_any(args):
return functools.reduce(operator.or_, args)
def _masked_result_drop_slice(key, data=None):
key = (k for k in key if not isinstance(k, slice))
chunks_hint = getattr(data, "chunks", None)
new_keys = []
for k in key:
if isinstance(k, np.ndarray):
if isinstance(data, dask_array_type):
new_keys.append(_dask_array_with_chunks_hint(k, chunks_hint))
elif isinstance(data, sparse_array_type):
import sparse
new_keys.append(sparse.COO.from_numpy(k))
else:
new_keys.append(k)
else:
new_keys.append(k)
mask = _logical_any(k == -1 for k in new_keys)
return mask
def create_mask(indexer, shape, data=None):
"""Create a mask for indexing with a fill-value.
Parameters
----------
indexer : ExplicitIndexer
        Indexer whose integer or ndarray entries use -1 to indicate locations
        in the result that should be masked.
shape : tuple
Shape of the array being indexed.
data : optional
        Data for which mask is being created. If data is a dask array, its chunks
are used as a hint for chunks on the resulting mask. If data is a sparse
array, the returned mask is also a sparse array.
Returns
-------
mask : bool, np.ndarray, SparseArray or dask.array.Array with dtype=bool
Same type as data. Has the same shape as the indexing result.
"""
if isinstance(indexer, OuterIndexer):
key = _outer_to_vectorized_indexer(indexer, shape).tuple
assert not any(isinstance(k, slice) for k in key)
mask = _masked_result_drop_slice(key, data)
elif isinstance(indexer, VectorizedIndexer):
key = indexer.tuple
base_mask = _masked_result_drop_slice(key, data)
slice_shape = tuple(
np.arange(*k.indices(size)).size
for k, size in zip(key, shape)
if isinstance(k, slice)
)
expanded_mask = base_mask[(Ellipsis,) + (np.newaxis,) * len(slice_shape)]
mask = duck_array_ops.broadcast_to(expanded_mask, base_mask.shape + slice_shape)
elif isinstance(indexer, BasicIndexer):
mask = any(k == -1 for k in indexer.tuple)
else:
raise TypeError("unexpected key type: {}".format(type(indexer)))
return mask
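# Illustrative sketch added for exposition (not part of the original module):
# a -1 entry in an indexer marks a fill-value location, and create_mask flags
# exactly those positions; the indexer and shape are made-up examples.
def _example_create_mask():
    indexer = OuterIndexer((np.array([0, -1, 2]),))
    return create_mask(indexer, (5,))  # array([False, True, False])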
def _posify_mask_subindexer(index):
"""Convert masked indices in a flat array to the nearest unmasked index.
Parameters
----------
index : np.ndarray
One dimensional ndarray with dtype=int.
Returns
-------
np.ndarray
One dimensional ndarray with all values equal to -1 replaced by an
adjacent non-masked element.
"""
masked = index == -1
unmasked_locs = np.flatnonzero(~masked)
if not unmasked_locs.size:
        # there are no unmasked values to copy from, so fall back to zeros
return np.zeros_like(index)
masked_locs = np.flatnonzero(masked)
prev_value = np.maximum(0, np.searchsorted(unmasked_locs, masked_locs) - 1)
new_index = index.copy()
new_index[masked_locs] = index[unmasked_locs[prev_value]]
return new_index
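# Illustrative sketch added for exposition (not part of the original module):
# each masked (-1) entry is replaced by the nearest unmasked value to its left
# (or by the first unmasked value when there is none to the left).
def _example_posify_mask_subindexer():
    index = np.array([0, -1, 3, -1, 5])
    return _posify_mask_subindexer(index)  # array([0, 0, 3, 3, 5])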
def posify_mask_indexer(indexer):
"""Convert masked values (-1) in an indexer to nearest unmasked values.
This routine is useful for dask, where it can be much faster to index
adjacent points than arbitrary points from the end of an array.
Parameters
----------
indexer : ExplicitIndexer
Input indexer.
Returns
-------
ExplicitIndexer
Same type of input, with all values in ndarray keys equal to -1
replaced by an adjacent non-masked element.
"""
key = tuple(
_posify_mask_subindexer(k.ravel()).reshape(k.shape)
if isinstance(k, np.ndarray)
else k
for k in indexer.tuple
)
return type(indexer)(key)
def is_fancy_indexer(indexer: Any) -> bool:
"""Return False if indexer is a int, slice, a 1-dimensional list, or a 0 or
1-dimensional ndarray; in all other cases return True
"""
if isinstance(indexer, (int, slice)):
return False
if isinstance(indexer, np.ndarray):
return indexer.ndim > 1
if isinstance(indexer, list):
return bool(indexer) and not isinstance(indexer[0], int)
return True
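# Illustrative examples added for exposition (not part of the original module)
# of what counts as a "fancy" indexer for the helper above; values are arbitrary.
def _example_is_fancy_indexer():
    assert not is_fancy_indexer(3)                       # plain integer
    assert not is_fancy_indexer(slice(None, 10, 2))      # slice
    assert not is_fancy_indexer([1, 2, 3])               # flat list of ints
    assert is_fancy_indexer([[1, 2], [3, 4]])            # nested list
    assert is_fancy_indexer(np.arange(4).reshape(2, 2))  # 2-d ndarray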
class NumpyIndexingAdapter(ExplicitlyIndexedNDArrayMixin):
"""Wrap a NumPy array to use explicit indexing."""
__slots__ = ("array",)
def __init__(self, array):
        # NumpyIndexingAdapter is only allowed to store a bare np.ndarray
if not isinstance(array, np.ndarray):
raise TypeError(
"NumpyIndexingAdapter only wraps np.ndarray. "
"Trying to wrap {}".format(type(array))
)
self.array = array
def _indexing_array_and_key(self, key):
if isinstance(key, OuterIndexer):
array = self.array
key = _outer_to_numpy_indexer(key, self.array.shape)
elif isinstance(key, VectorizedIndexer):
array = nputils.NumpyVIndexAdapter(self.array)
key = key.tuple
elif isinstance(key, BasicIndexer):
array = self.array
# We want 0d slices rather than scalars. This is achieved by
# appending an ellipsis (see
# https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#detailed-notes).
key = key.tuple + (Ellipsis,)
else:
raise TypeError("unexpected key type: {}".format(type(key)))
return array, key
def transpose(self, order):
return self.array.transpose(order)
def __getitem__(self, key):
array, key = self._indexing_array_and_key(key)
return array[key]
def __setitem__(self, key, value):
array, key = self._indexing_array_and_key(key)
try:
array[key] = value
except ValueError:
# More informative exception if read-only view
if not array.flags.writeable and not array.flags.owndata:
raise ValueError(
"Assignment destination is a view. "
"Do you want to .copy() array first?"
)
else:
raise
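# Illustrative sketch added for exposition (not part of the original module):
# the same wrapped array answers outer and vectorized (pointwise) keys
# differently; the array and keys are made-up examples.
def _example_numpy_indexing_adapter():
    adapter = NumpyIndexingAdapter(np.arange(12).reshape(3, 4))
    outer = adapter[OuterIndexer((np.array([0, 2]), np.array([1, 3])))]
    pointwise = adapter[VectorizedIndexer((np.array([0, 2]), np.array([1, 3])))]
    return outer.shape, pointwise.shape  # (2, 2) and (2,)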
class NdArrayLikeIndexingAdapter(NumpyIndexingAdapter):
__slots__ = ("array",)
def __init__(self, array):
if not hasattr(array, "__array_function__"):
raise TypeError(
"NdArrayLikeIndexingAdapter must wrap an object that "
"implements the __array_function__ protocol"
)
self.array = array
class DaskIndexingAdapter(ExplicitlyIndexedNDArrayMixin):
"""Wrap a dask array to support explicit indexing."""
__slots__ = ("array",)
def __init__(self, array):
""" This adapter is created in Variable.__getitem__ in
Variable._broadcast_indexes.
"""
self.array = array
def __getitem__(self, key):
if not isinstance(key, VectorizedIndexer):
# if possible, short-circuit when keys are effectively slice(None)
# This preserves dask name and passes lazy array equivalence checks
# (see duck_array_ops.lazy_array_equiv)
rewritten_indexer = False
new_indexer = []
for idim, k in enumerate(key.tuple):
if isinstance(k, Iterable) and duck_array_ops.array_equiv(
k, np.arange(self.array.shape[idim])
):
new_indexer.append(slice(None))
rewritten_indexer = True
else:
new_indexer.append(k)
if rewritten_indexer:
key = type(key)(tuple(new_indexer))
if isinstance(key, BasicIndexer):
return self.array[key.tuple]
elif isinstance(key, VectorizedIndexer):
return self.array.vindex[key.tuple]
else:
assert isinstance(key, OuterIndexer)
key = key.tuple
try:
return self.array[key]
except NotImplementedError:
# manual orthogonal indexing.
# TODO: port this upstream into dask in a saner way.
value = self.array
for axis, subkey in reversed(list(enumerate(key))):
value = value[(slice(None),) * axis + (subkey,)]
return value
def __setitem__(self, key, value):
raise TypeError(
"this variable's data is stored in a dask array, "
"which does not support item assignment. To "
"assign to this variable, you must first load it "
"into memory explicitly using the .load() "
"method or accessing its .values attribute."
)
def transpose(self, order):
return self.array.transpose(order)
class PandasIndexAdapter(ExplicitlyIndexedNDArrayMixin):
"""Wrap a pandas.Index to preserve dtypes and handle explicit indexing.
"""
__slots__ = ("array", "_dtype")
def __init__(self, array: Any, dtype: DTypeLike = None):
self.array = utils.safe_cast_to_index(array)
if dtype is None:
if isinstance(array, pd.PeriodIndex):
dtype = np.dtype("O")
elif hasattr(array, "categories"):
# category isn't a real numpy dtype
dtype = array.categories.dtype
elif not utils.is_valid_numpy_dtype(array.dtype):
dtype = np.dtype("O")
else:
dtype = array.dtype
else:
dtype = np.dtype(dtype)
self._dtype = dtype
@property
def dtype(self) -> np.dtype:
return self._dtype
def __array__(self, dtype: DTypeLike = None) -> np.ndarray:
if dtype is None:
dtype = self.dtype
array = self.array
if isinstance(array, pd.PeriodIndex):
with suppress(AttributeError):
# this might not be public API
array = array.astype("object")
return np.asarray(array.values, dtype=dtype)
@property
def shape(self) -> Tuple[int]:
return (len(self.array),)
def __getitem__(
self, indexer
) -> Union[NumpyIndexingAdapter, np.ndarray, np.datetime64, np.timedelta64]:
key = indexer.tuple
if isinstance(key, tuple) and len(key) == 1:
# unpack key so it can index a pandas.Index object (pandas.Index
# objects don't like tuples)
(key,) = key
if getattr(key, "ndim", 0) > 1: # Return np-array if multidimensional
return NumpyIndexingAdapter(self.array.values)[indexer]
result = self.array[key]
if isinstance(result, pd.Index):
result = PandasIndexAdapter(result, dtype=self.dtype)
else:
# result is a scalar
if result is pd.NaT:
# work around the impossibility of casting NaT with asarray
# note: it probably would be better in general to return
                # pd.Timestamp rather than np.datetime64, but this is easier
# (for now)
result = np.datetime64("NaT", "ns")
elif isinstance(result, timedelta):
result = np.timedelta64(getattr(result, "value", result), "ns")
elif isinstance(result, pd.Timestamp):
# Work around for GH: pydata/xarray#1932 and numpy/numpy#10668
# numpy fails to convert pd.Timestamp to np.datetime64[ns]
result = np.asarray(result.to_datetime64())
elif self.dtype != object:
result = np.asarray(result, dtype=self.dtype)
# as for numpy.ndarray indexing, we always want the result to be
# a NumPy array.
result = utils.to_0d_array(result)
return result
def transpose(self, order) -> pd.Index:
        return self.array  # self.array should always be one-dimensional
def __repr__(self) -> str:
return "{}(array={!r}, dtype={!r})".format(
type(self).__name__, self.array, self.dtype
)
def copy(self, deep: bool = True) -> "PandasIndexAdapter":
# Not the same as just writing `self.array.copy(deep=deep)`, as
# shallow copies of the underlying numpy.ndarrays become deep ones
# upon pickling
# >>> len(pickle.dumps((self.array, self.array)))
# 4000281
# >>> len(pickle.dumps((self.array, self.array.copy(deep=False))))
# 8000341
array = self.array.copy(deep=True) if deep else self.array
return PandasIndexAdapter(array, self._dtype)
| apache-2.0 | -2,998,736,085,517,629,000 | 34.431896 | 98 | 0.60333 | false |
liugangabc/ccs_web | tcpclient.py | 1 | 2137 | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
from tornado import ioloop, httpclient, gen
from tornado.gen import Task
import pdb, time, logging
import tornado.ioloop
import tornado.iostream
import socket
#Init logging
def init_logging():
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
sh = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s -%(module)s:%(filename)s-L%(lineno)d-%(levelname)s: %(message)s')
sh.setFormatter(formatter)
logger.addHandler(sh)
logging.info("Current log level is : %s", logging.getLevelName(logger.getEffectiveLevel()))
class TCPClient(object):
def __init__(self, host, port, io_loop=None):
self.host = host
self.port = port
self.io_loop = io_loop
self.shutdown = False
self.stream = None
self.sock_fd = None
self.EOF = b' END'
def get_stream(self):
self.sock_fd = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
self.stream = tornado.iostream.IOStream(self.sock_fd)
self.stream.set_close_callback(self.on_close)
def connect(self):
self.get_stream()
self.stream.connect((self.host, self.port), self.send_message)
def on_receive(self, data):
logging.info("Received: %s", data)
self.stream.close()
def on_close(self):
if self.shutdown:
self.io_loop.stop()
def send_message(self):
logging.info("Send message....")
self.stream.write(b"Hello Server!" + self.EOF)
self.stream.read_until(self.EOF, self.on_receive)
logging.info("After send....")
def set_shutdown(self):
self.shutdown = True
def main():
init_logging()
io_loop = tornado.ioloop.IOLoop.instance()
c1 = TCPClient("127.0.0.1", 8001, io_loop)
c2 = TCPClient("127.0.0.1", 8001, io_loop)
c1.connect()
c2.connect()
c2.set_shutdown()
logging.info("**********************start ioloop******************")
io_loop.start()
if __name__ == "__main__":
try:
main()
except Exception, ex:
print "Ocurred Exception: %s" % str(ex)
quit() | apache-2.0 | -9,188,828,094,435,973,000 | 27.506667 | 110 | 0.613009 | false |
atodorov/pykickstart | tests/commands/timezone.py | 1 | 6651 | #
# Chris Lumens <[email protected]>
#
# Copyright 2009 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
import unittest
from tests.baseclass import CommandTest
from pykickstart.errors import KickstartParseError
from pykickstart.commands.timezone import FC3_Timezone, F18_Timezone
class Timezone_TestCase(unittest.TestCase):
def runTest(self):
cmd = F18_Timezone()
self.assertEqual(cmd.__str__(), '')
class FC3_TestCase(CommandTest):
command = "timezone"
def runTest(self):
# assert defaults
self.assertFalse(FC3_Timezone().isUtc)
self.assertFalse(F18_Timezone().nontp)
# pass
self.assert_parse("timezone Eastern", "timezone Eastern\n")
# On FC6 and later, we write out --isUtc regardless of what the input was.
if self.__class__.__name__ == "FC3_TestCase":
self.assert_parse("timezone --utc Eastern", "timezone --utc Eastern\n")
else:
self.assert_parse("timezone --utc Eastern", "timezone --isUtc Eastern\n")
# fail
self.assert_parse_error("timezone")
self.assert_parse_error("timezone Eastern Central")
self.assert_parse_error("timezone --blah Eastern")
self.assert_parse_error("timezone --utc")
self.assert_parse_error("timezone --bogus-option")
# extra test coverage
cmd = self.handler().commands[self.command]
cmd.timezone = None
self.assertEqual(cmd.__str__(), "")
class FC6_TestCase(FC3_TestCase):
def runTest(self):
FC3_TestCase.runTest(self)
# pass
self.assert_parse("timezone --isUtc Eastern", "timezone --isUtc Eastern\n")
# fail
self.assert_parse_error("timezone --isUtc")
class F18_TestCase(FC6_TestCase):
def runTest(self):
# pass
self.assert_parse("timezone --utc Europe/Prague")
self.assert_parse("timezone --isUtc Europe/Prague\n")
self.assert_parse("timezone --isUtc Eastern", "timezone Eastern --isUtc\n")
self.assert_parse("timezone Europe/Prague")
self.assert_parse("timezone Europe/Prague --nontp",
"timezone Europe/Prague --nontp\n")
self.assert_parse("timezone Europe/Prague "
"--ntpservers=ntp.cesnet.cz,tik.nic.cz")
self.assert_parse("timezone Europe/Prague --ntpservers=ntp.cesnet.cz",
"timezone Europe/Prague --ntpservers=ntp.cesnet.cz\n")
# fail
self.assert_parse_error("timezone")
self.assert_parse_error("timezone Eastern Central")
self.assert_parse_error("timezone --blah Eastern")
self.assert_parse_error("timezone --utc")
self.assert_parse_error("timezone --isUtc")
self.assert_parse_error("timezone Europe/Prague --nontp "
"--ntpservers=ntp.cesnet.cz")
self.assert_parse_error("timezone Europe/Prague --ntpservers="
"ntp.cesnet.cz, tik.nic.cz")
class F23_TestCase(F18_TestCase):
def runTest(self):
# should keep multiple instances of the same URL
self.assert_parse("timezone --utc Europe/Prague --ntpservers=ntp.cesnet.cz,0.fedora.pool.ntp.org," +
"0.fedora.pool.ntp.org,0.fedora.pool.ntp.org,0.fedora.pool.ntp.org",
"timezone Europe/Prague --isUtc --ntpservers=ntp.cesnet.cz,0.fedora.pool.ntp.org," +
"0.fedora.pool.ntp.org,0.fedora.pool.ntp.org,0.fedora.pool.ntp.org\n")
self.assert_parse("timezone --utc Europe/Sofia --ntpservers=,0.fedora.pool.ntp.org,")
# fail
self.assert_parse_error("timezone Europe/Sofia --nontp --ntpservers=0.fedora.pool.ntp.org,1.fedora.pool.ntp.org")
class RHEL7_TestCase(F18_TestCase):
def runTest(self):
        # since the RHEL7 version of the command, timezone can be used
        # without a timezone specification
self.assert_parse("timezone --utc")
self.assert_parse("timezone Europe/Sofia")
self.assert_parse("timezone --isUtc")
self.assert_parse("timezone --ntpservers=ntp.cesnet.cz")
self.assert_parse("timezone --ntpservers=ntp.cesnet.cz,tik.nic.cz")
# unknown argument
self.assert_parse_error("timezone --blah")
# more than two timezone specs
self.assert_parse_error("timezone foo bar", exception=KickstartParseError)
self.assert_parse_error("timezone --utc foo bar", exception=KickstartParseError)
# just "timezone" without any arguments is also wrong as it really dosn't make sense
self.assert_parse_error("timezone")
# fail
self.assert_parse_error("timezone Europe/Sofia --nontp --ntpservers=0.fedora.pool.ntp.org,1.fedora.pool.ntp.org")
class F25_TestCase(F23_TestCase):
def runTest(self):
        # since the RHEL7 version of the command, timezone can be used
        # without a timezone specification
self.assert_parse("timezone --utc")
self.assert_parse("timezone --isUtc")
self.assert_parse("timezone --ntpservers=ntp.cesnet.cz")
self.assert_parse("timezone --ntpservers=ntp.cesnet.cz,tik.nic.cz")
# unknown argument
self.assert_parse_error("timezone --blah")
# more than two timezone specs
self.assert_parse_error("timezone foo bar", exception=KickstartParseError)
self.assert_parse_error("timezone --utc foo bar", exception=KickstartParseError)
# just "timezone" without any arguments is also wrong as it really dosn't make sense
self.assert_parse_error("timezone")
# fail
self.assert_parse_error("timezone Europe/Sofia --nontp --ntpservers=0.fedora.pool.ntp.org,1.fedora.pool.ntp.org")
if __name__ == "__main__":
unittest.main()
| gpl-2.0 | -2,127,721,929,382,395,400 | 43.046358 | 121 | 0.658097 | false |
jerkos/cobrapy | cobra/test/flux_analysis.py | 1 | 18121 | from unittest import TestCase, TestLoader, TextTestRunner, skipIf
from warnings import warn
import sys
from os.path import join
from json import load
from six import iteritems
try:
import numpy
except:
numpy = None
if __name__ == "__main__":
sys.path.insert(0, "../..")
from cobra.test import create_test_model, data_directory
from cobra import Model, Reaction, Metabolite
from cobra.manipulation import initialize_growth_medium
from cobra.solvers import solver_dict, get_solver_name
from cobra.manipulation import modify, delete
from cobra.flux_analysis import *
sys.path.pop(0)
else:
from . import create_test_model, data_directory
from .. import Model, Reaction, Metabolite
from ..manipulation import initialize_growth_medium
from ..solvers import solver_dict, get_solver_name
from ..manipulation import modify, delete
from ..flux_analysis import *
class TestCobraFluxAnalysis(TestCase):
"""Test the simulation functions in cobra.flux_analysis"""
def setUp(self):
pass
def test_pFBA(self):
model = create_test_model("textbook")
for solver in solver_dict:
optimize_minimal_flux(model, solver=solver)
abs_x = [abs(i) for i in model.solution.x]
self.assertEqual(model.solution.status, "optimal")
self.assertAlmostEqual(model.solution.f, 0.8739, places=3)
self.assertAlmostEqual(sum(abs_x), 518.4221, places=3)
def test_modify_reversible(self):
model1 = create_test_model("textbook")
model1.optimize()
model2 = create_test_model("textbook")
modify.convert_to_irreversible(model2)
model2.optimize()
self.assertAlmostEqual(model1.solution.f, model2.solution.f, places=3)
modify.revert_to_reversible(model2)
model2.optimize()
self.assertAlmostEqual(model1.solution.f, model2.solution.f, places=3)
# Ensure revert_to_reversible is robust to solutions generated both
# before and after reversibility conversion, or not solved at all.
model3 = create_test_model("textbook")
model3.optimize()
modify.convert_to_irreversible(model3)
modify.revert_to_reversible(model3)
self.assertAlmostEqual(model1.solution.f, model3.solution.f, places=3)
model4 = create_test_model("textbook")
modify.convert_to_irreversible(model4)
modify.revert_to_reversible(model4)
def test_escape_ids(self):
model = create_test_model('textbook')
model.reactions.PGI.gene_reaction_rule = "a.b or c"
self.assertIn("a.b", model.genes)
modify.escape_ID(model)
self.assertNotIn("a.b", model.genes)
def test_gene_knockout_computation(self):
cobra_model = create_test_model()
# helper functions for running tests
delete_model_genes = delete.delete_model_genes
find_gene_knockout_reactions = delete.find_gene_knockout_reactions
def find_gene_knockout_reactions_fast(cobra_model, gene_list):
compiled_rules = delete.get_compiled_gene_reaction_rules(
cobra_model)
return find_gene_knockout_reactions(
cobra_model, gene_list,
compiled_gene_reaction_rules=compiled_rules)
def get_removed(m):
return {x.id for x in m._trimmed_reactions}
def test_computation(m, gene_ids, expected_reaction_ids):
genes = [m.genes.get_by_id(i) for i in gene_ids]
expected_reactions = {m.reactions.get_by_id(i)
for i in expected_reaction_ids}
removed1 = set(find_gene_knockout_reactions(m, genes))
removed2 = set(find_gene_knockout_reactions_fast(m, genes))
self.assertEqual(removed1, expected_reactions)
self.assertEqual(removed2, expected_reactions)
delete.delete_model_genes(m, gene_ids, cumulative_deletions=False)
self.assertEqual(get_removed(m), expected_reaction_ids)
delete.undelete_model_genes(m)
gene_list = ['STM1067', 'STM0227']
dependent_reactions = {'3HAD121', '3HAD160', '3HAD80', '3HAD140',
'3HAD180', '3HAD100', '3HAD181', '3HAD120',
'3HAD60', '3HAD141', '3HAD161', 'T2DECAI',
'3HAD40'}
test_computation(cobra_model, gene_list, dependent_reactions)
test_computation(cobra_model, ['STM4221'], {'PGI'})
test_computation(cobra_model, ['STM1746.S'], {'4PEPTabcpp'})
# test cumulative behavior
delete_model_genes(cobra_model, gene_list[:1])
delete_model_genes(cobra_model, gene_list[1:],
cumulative_deletions=True)
delete_model_genes(cobra_model, ["STM4221"],
cumulative_deletions=True)
dependent_reactions.add('PGI')
self.assertEqual(get_removed(cobra_model), dependent_reactions)
# non-cumulative following cumulative
delete_model_genes(cobra_model, ["STM4221"],
cumulative_deletions=False)
self.assertEqual(get_removed(cobra_model), {'PGI'})
# make sure on reset that the bounds are correct
reset_bound = cobra_model.reactions.get_by_id("T2DECAI").upper_bound
self.assertEqual(reset_bound, 1000.)
# test computation when gene name is a subset of another
test_model = Model()
test_reaction_1 = Reaction("test1")
test_reaction_1.gene_reaction_rule = "eggs or (spam and eggspam)"
test_model.add_reaction(test_reaction_1)
test_computation(test_model, ["eggs"], set())
test_computation(test_model, ["eggs", "spam"], {'test1'})
# test computation with nested boolean expression
test_reaction_1.gene_reaction_rule = \
"g1 and g2 and (g3 or g4 or (g5 and g6))"
test_computation(test_model, ["g3"], set())
test_computation(test_model, ["g1"], {'test1'})
test_computation(test_model, ["g5"], set())
test_computation(test_model, ["g3", "g4", "g5"], {'test1'})
# test computation when gene names are python expressions
test_reaction_1.gene_reaction_rule = "g1 and (for or in)"
test_computation(test_model, ["for", "in"], {'test1'})
test_computation(test_model, ["for"], set())
test_reaction_1.gene_reaction_rule = "g1 and g2 and g2.conjugate"
test_computation(test_model, ["g2"], {"test1"})
test_computation(test_model, ["g2.conjugate"], {"test1"})
test_reaction_1.gene_reaction_rule = "g1 and (try:' or 'except:1)"
test_computation(test_model, ["try:'"], set())
test_computation(test_model, ["try:'", "'except:1"], {"test1"})
def test_single_gene_deletion(self):
cobra_model = create_test_model("textbook")
# expected knockouts for textbook model
growth_dict = {"fba": {"b0008": 0.87, "b0114": 0.80, "b0116": 0.78,
"b2276": 0.21, "b1779": 0.00},
"moma": {"b0008": 0.87, "b0114": 0.71, "b0116": 0.56,
"b2276": 0.11, "b1779": 0.00},
}
# MOMA requires cplex or gurobi
try:
get_solver_name(qp=True)
except:
growth_dict.pop('moma')
for method, expected in growth_dict.items():
rates, statuses = single_gene_deletion(cobra_model,
gene_list=expected.keys(),
method=method)
for gene, expected_value in iteritems(expected):
self.assertEqual(statuses[gene], 'optimal')
self.assertAlmostEqual(rates[gene], expected_value, places=2)
def test_single_reaction_deletion(self):
cobra_model = create_test_model("textbook")
expected_results = {'FBA': 0.70404, 'FBP': 0.87392, 'CS': 0,
'FUM': 0.81430, 'GAPD': 0, 'GLUDy': 0.85139}
results, status = single_reaction_deletion(
cobra_model, reaction_list=expected_results.keys())
self.assertEqual(len(results), 6)
self.assertEqual(len(status), 6)
for status_value in status.values():
self.assertEqual(status_value, "optimal")
for reaction, value in results.items():
self.assertAlmostEqual(value, expected_results[reaction], 5)
def compare_matrices(self, matrix1, matrix2, places=3):
nrows = len(matrix1)
ncols = len(matrix1[0])
self.assertEqual(nrows, len(matrix2))
self.assertEqual(ncols, len(matrix2[0]))
for i in range(nrows):
for j in range(ncols):
self.assertAlmostEqual(matrix1[i][j], matrix2[i][j],
places=places)
@skipIf(numpy is None, "double deletions require numpy")
def test_double_gene_deletion(self):
cobra_model = create_test_model("textbook")
genes = ["b0726", "b4025", "b0724", "b0720",
"b2935", "b2935", "b1276", "b1241"]
growth_list = [
[0.858, 0.857, 0.814, 0.000, 0.858, 0.858, 0.858, 0.858],
[0.857, 0.863, 0.739, 0.000, 0.863, 0.863, 0.863, 0.863],
[0.814, 0.739, 0.814, 0.000, 0.814, 0.814, 0.814, 0.814],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.858, 0.863, 0.814, 0.000, 0.874, 0.874, 0.874, 0.874],
[0.858, 0.863, 0.814, 0.000, 0.874, 0.874, 0.874, 0.874],
[0.858, 0.863, 0.814, 0.000, 0.874, 0.874, 0.874, 0.874],
[0.858, 0.863, 0.814, 0.000, 0.874, 0.874, 0.874, 0.874]]
solution = double_gene_deletion(cobra_model, gene_list1=genes)
self.assertEqual(solution["x"], genes)
self.assertEqual(solution["y"], genes)
self.compare_matrices(growth_list, solution["data"])
# test when lists differ slightly
solution = double_gene_deletion(cobra_model, gene_list1=genes[:-1],
gene_list2=genes,
number_of_processes=1)
self.assertEqual(solution["x"], genes[:-1])
self.assertEqual(solution["y"], genes)
self.compare_matrices(growth_list[:-1], solution["data"])
@skipIf(numpy is None, "double deletions require numpy")
def test_double_reaction_deletion(self):
cobra_model = create_test_model("textbook")
reactions = ['FBA', 'ATPS4r', 'ENO', 'FRUpts2']
growth_list = [[0.704, 0.135, 0.000, 0.704],
[0.135, 0.374, 0.000, 0.374],
[0.000, 0.000, 0.000, 0.000],
[0.704, 0.374, 0.000, 0.874]]
solution = double_reaction_deletion(cobra_model,
reaction_list1=reactions,
number_of_processes=1)
self.assertEqual(solution["x"], reactions)
self.assertEqual(solution["y"], reactions)
self.compare_matrices(growth_list, solution["data"])
def test_flux_variability(self):
with open(join(data_directory, "textbook_fva.json"), "r") as infile:
fva_results = load(infile)
infeasible_model = create_test_model("textbook")
infeasible_model.reactions.get_by_id("EX_glc__D_e").lower_bound = 0
for solver in solver_dict:
# esolver is really slow
if solver == "esolver":
continue
cobra_model = create_test_model("textbook")
fva_out = flux_variability_analysis(
cobra_model, solver=solver,
reaction_list=cobra_model.reactions[1::3])
for name, result in iteritems(fva_out):
for k, v in iteritems(result):
self.assertAlmostEqual(fva_results[name][k], v, places=5)
# ensure that an infeasible model does not run FVA
self.assertRaises(ValueError, flux_variability_analysis,
infeasible_model, solver=solver)
def test_find_blocked_reactions(self):
m = create_test_model("textbook")
result = find_blocked_reactions(m, m.reactions[40:46])
self.assertEqual(result, ['FRUpts2'])
result = find_blocked_reactions(m, m.reactions[42:48])
self.assertEqual(set(result), {'FUMt2_2', 'FRUpts2'})
result = find_blocked_reactions(m, m.reactions[30:50],
open_exchanges=True)
self.assertEqual(result, [])
def test_loopless(self):
try:
solver = get_solver_name(mip=True)
except:
self.skip("no MILP solver found")
test_model = Model()
test_model.add_metabolites(Metabolite("A"))
test_model.add_metabolites(Metabolite("B"))
test_model.add_metabolites(Metabolite("C"))
EX_A = Reaction("EX_A")
EX_A.add_metabolites({test_model.metabolites.A: 1})
DM_C = Reaction("DM_C")
DM_C.add_metabolites({test_model.metabolites.C: -1})
v1 = Reaction("v1")
v1.add_metabolites({test_model.metabolites.A: -1,
test_model.metabolites.B: 1})
v2 = Reaction("v2")
v2.add_metabolites({test_model.metabolites.B: -1,
test_model.metabolites.C: 1})
v3 = Reaction("v3")
v3.add_metabolites({test_model.metabolites.C: -1,
test_model.metabolites.A: 1})
DM_C.objective_coefficient = 1
test_model.add_reactions([EX_A, DM_C, v1, v2, v3])
feasible_sol = construct_loopless_model(test_model).optimize()
v3.lower_bound = 1
infeasible_sol = construct_loopless_model(test_model).optimize()
self.assertEqual(feasible_sol.status, "optimal")
self.assertEqual(infeasible_sol.status, "infeasible")
def test_gapfilling(self):
try:
solver = get_solver_name(mip=True)
except:
self.skip("no MILP solver found")
m = Model()
m.add_metabolites(map(Metabolite, ["a", "b", "c"]))
r = Reaction("EX_A")
m.add_reaction(r)
r.add_metabolites({m.metabolites.a: 1})
r = Reaction("r1")
m.add_reaction(r)
r.add_metabolites({m.metabolites.b: -1, m.metabolites.c: 1})
r = Reaction("DM_C")
m.add_reaction(r)
r.add_metabolites({m.metabolites.c: -1})
r.objective_coefficient = 1
U = Model()
r = Reaction("a2b")
U.add_reaction(r)
r.build_reaction_from_string("a --> b", verbose=False)
r = Reaction("a2d")
U.add_reaction(r)
r.build_reaction_from_string("a --> d", verbose=False)
result = gapfilling.growMatch(m, U)[0]
self.assertEqual(len(result), 1)
self.assertEqual(result[0].id, "a2b")
# 2 rounds with exchange reactions
result = gapfilling.growMatch(m, None, ex_rxns=True, iterations=2)
self.assertEqual(len(result), 2)
self.assertEqual(len(result[0]), 1)
self.assertEqual(len(result[1]), 1)
self.assertEqual({i[0].id for i in result},
{"SMILEY_EX_b", "SMILEY_EX_c"})
def test_remove_genes(self):
m = Model("test")
m.add_reactions([Reaction("r" + str(i + 1)) for i in range(8)])
self.assertEqual(len(m.reactions), 8)
rxns = m.reactions
rxns.r1.gene_reaction_rule = "(a and b) or (c and a)"
rxns.r2.gene_reaction_rule = "(a and b and d and e)"
rxns.r3.gene_reaction_rule = "(a and b) or (b and c)"
rxns.r4.gene_reaction_rule = "(f and b) or (b and c)"
rxns.r5.gene_reaction_rule = "x"
rxns.r6.gene_reaction_rule = "y"
rxns.r7.gene_reaction_rule = "x or z"
rxns.r8.gene_reaction_rule = ""
self.assertIn("a", m.genes)
self.assertIn("x", m.genes)
delete.remove_genes(m, ["a"], remove_reactions=False)
self.assertNotIn("a", m.genes)
self.assertIn("x", m.genes)
self.assertEqual(rxns.r1.gene_reaction_rule, "")
self.assertEqual(rxns.r2.gene_reaction_rule, "")
self.assertEqual(rxns.r3.gene_reaction_rule, "b and c")
self.assertEqual(rxns.r4.gene_reaction_rule, "(f and b) or (b and c)")
self.assertEqual(rxns.r5.gene_reaction_rule, "x")
self.assertEqual(rxns.r6.gene_reaction_rule, "y")
self.assertEqual(rxns.r7.genes, {m.genes.x, m.genes.z})
self.assertEqual(rxns.r8.gene_reaction_rule, "")
delete.remove_genes(m, ["x"], remove_reactions=True)
self.assertEqual(len(m.reactions), 7)
self.assertNotIn("r5", m.reactions)
self.assertNotIn("x", m.genes)
self.assertEqual(rxns.r1.gene_reaction_rule, "")
self.assertEqual(rxns.r2.gene_reaction_rule, "")
self.assertEqual(rxns.r3.gene_reaction_rule, "b and c")
self.assertEqual(rxns.r4.gene_reaction_rule, "(f and b) or (b and c)")
self.assertEqual(rxns.r6.gene_reaction_rule, "y")
self.assertEqual(rxns.r7.gene_reaction_rule, "z")
self.assertEqual(rxns.r7.genes, {m.genes.z})
self.assertEqual(rxns.r8.gene_reaction_rule, "")
@skipIf(numpy is None, "double deletions require numpy")
def test_phenotype_phase_plane(self):
model = create_test_model("textbook")
data = calculate_phenotype_phase_plane(
model, "EX_glc__D_e", "EX_o2_e",
reaction1_npoints=20, reaction2_npoints=20)
self.assertEqual(data.growth_rates.shape, (20, 20))
self.assertAlmostEqual(data.growth_rates.max(), 1.20898, places=4)
self.assertAlmostEqual(abs(data.growth_rates[0, :]).max(), 0, places=4)
# make a test suite to run all of the tests
loader = TestLoader()
suite = loader.loadTestsFromModule(sys.modules[__name__])
def test_all():
TextTestRunner(verbosity=2).run(suite)
if __name__ == "__main__":
test_all()
| lgpl-2.1 | -3,225,913,691,558,580,000 | 43.965261 | 79 | 0.586447 | false |
scorpilix/Golemtest | apps/rendering/resources/imgcompare.py | 1 | 3797 | import logging
import math
from apps.rendering.resources.imgrepr import (EXRImgRepr, ImgRepr, load_img,
PILImgRepr)
logger = logging.getLogger("apps.rendering")
PSNR_ACCEPTABLE_MIN = 30
def check_size(file_, res_x, res_y):
img = load_img(file_)
if img is None:
return False
return img.get_size() == (res_x, res_y)
def calculate_psnr(mse, max_=255):
if mse <= 0 or max_ <= 0:
raise ValueError("MSE & MAX_ must be higher than 0")
return 20 * math.log10(max_) - 10 * math.log10(mse)
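# Illustrative sketch (added for exposition, not part of the original module):
# how the MSE/PSNR helpers combine; the MSE value below is a made-up assumption.
def _example_psnr_threshold():
    mse = 100.0                           # assumed per-channel mean squared error
    psnr = calculate_psnr(mse, max_=255)  # 20*log10(255) - 10*log10(100) ~= 28.1 dB
    return psnr >= PSNR_ACCEPTABLE_MIN    # False: below the 30 dB acceptance cut-off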
def calculate_mse(img1, img2, start1=(0, 0), start2=(0, 0), box=None):
mse = 0
if not isinstance(img1, ImgRepr) or not isinstance(img2, ImgRepr):
raise TypeError("img1 and img2 must be ImgRepr")
if box is None:
(res_x, res_y) = img1.get_size()
else:
(res_x, res_y) = box
for i in range(0, res_x):
for j in range(0, res_y):
[r1, g1, b1] = img1.get_pixel((start1[0] + i, start1[1] + j))
[r2, g2, b2] = img2.get_pixel((start2[0] + i, start2[1] + j))
mse += (r1 - r2) * (r1 - r2) + \
(g1 - g2) * (g1 - g2) + \
(b1 - b2) * (b1 - b2)
if res_x <= 0 or res_y <= 0:
raise ValueError("Image or box resolution must be greater than 0")
mse /= res_x * res_y * 3
return mse
def compare_imgs(img1, img2, max_col=255, start1=(0, 0),
start2=(0, 0), box=None):
mse = calculate_mse(img1, img2, start1, start2, box)
logger.debug("MSE = {}".format(mse))
if mse == 0:
return True
psnr = calculate_psnr(mse, max_col)
logger.debug("PSNR = {}".format(psnr))
return psnr >= PSNR_ACCEPTABLE_MIN
def compare_pil_imgs(file1, file2):
try:
img1 = PILImgRepr()
img1.load_from_file(file1)
img2 = PILImgRepr()
img2.load_from_file(file2)
return compare_imgs(img1, img2)
except Exception as err:
logger.info("Can't compare images {}, {}: {}".format(file1, file2,
err))
return False
def compare_exr_imgs(file1, file2):
try:
img1 = EXRImgRepr()
img1.load_from_file(file1)
img2 = EXRImgRepr()
img2.load_from_file(file2)
return compare_imgs(img1, img2, 1)
except Exception as err:
logger.info("Can't compare images {}, {}: {}".format(file1, file2,
err))
return False
def advance_verify_img(file_, res_x, res_y, start_box, box_size, compare_file,
cmp_start_box):
try:
img = load_img(file_)
cmp_img = load_img(compare_file)
if img is None or cmp_img is None:
return False
if img.get_size() != (res_x, res_y):
return False
def _box_too_small(box):
return box[0] <= 0 or box[1] <= 0
def _box_too_big(box):
return box[0] > res_x or box[1] > res_y
if _box_too_small(box_size) or _box_too_big(box_size):
logger.error("Wrong box size for advanced verification " \
"{}".format(box_size))
if isinstance(img, PILImgRepr) and isinstance(cmp_img, PILImgRepr):
return compare_imgs(img, cmp_img, start1=start_box,
start2=cmp_start_box, box=box_size)
else:
return compare_imgs(img, cmp_img, max_col=1, start1=start_box,
start2=cmp_start_box, box=box_size)
except Exception:
logger.exception("Cannot verify images {} and {}".format(file_,
compare_file))
return False
| gpl-3.0 | -3,704,521,870,128,538,000 | 32.307018 | 79 | 0.523045 | false |
our-city-app/oca-backend | src/oca/rest.py | 1 | 12230 | # coding: utf-8
"""
Our City App
Our City App internal apis # noqa: E501
The version of the OpenAPI document: 0.0.1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import io
import json
import logging
import re
import ssl
import certifi
# python 2 and python 3 compatibility library
import six
from six.moves.urllib.parse import urlencode
import urllib3
from oca.exceptions import ApiException, ApiValueError
logger = logging.getLogger(__name__)
class RESTResponse(io.IOBase):
def __init__(self, resp):
self.urllib3_response = resp
self.status = resp.status
self.reason = resp.reason
self.data = resp.data
def getheaders(self):
"""Returns a dictionary of the response headers."""
return self.urllib3_response.getheaders()
def getheader(self, name, default=None):
"""Returns a given response header."""
return self.urllib3_response.getheader(name, default)
class RESTClientObject(object):
def __init__(self, configuration, pools_size=4, maxsize=None):
# urllib3.PoolManager will pass all kw parameters to connectionpool
# https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/poolmanager.py#L75 # noqa: E501
# https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/connectionpool.py#L680 # noqa: E501
# maxsize is the number of requests to host that are allowed in parallel # noqa: E501
# Custom SSL certificates and client certificates: http://urllib3.readthedocs.io/en/latest/advanced-usage.html # noqa: E501
# cert_reqs
if configuration.verify_ssl:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
# ca_certs
if configuration.ssl_ca_cert:
ca_certs = configuration.ssl_ca_cert
else:
            # if no certificate file is set, use Mozilla's root certificates.
ca_certs = certifi.where()
addition_pool_args = {}
if configuration.assert_hostname is not None:
addition_pool_args['assert_hostname'] = configuration.assert_hostname # noqa: E501
if configuration.retries is not None:
addition_pool_args['retries'] = configuration.retries
if maxsize is None:
if configuration.connection_pool_maxsize is not None:
maxsize = configuration.connection_pool_maxsize
else:
maxsize = 4
# https pool manager
if configuration.proxy:
self.pool_manager = urllib3.ProxyManager(
num_pools=pools_size,
maxsize=maxsize,
cert_reqs=cert_reqs,
ca_certs=ca_certs,
cert_file=configuration.cert_file,
key_file=configuration.key_file,
proxy_url=configuration.proxy,
proxy_headers=configuration.proxy_headers,
**addition_pool_args
)
else:
self.pool_manager = urllib3.PoolManager(
num_pools=pools_size,
maxsize=maxsize,
cert_reqs=cert_reqs,
ca_certs=ca_certs,
cert_file=configuration.cert_file,
key_file=configuration.key_file,
**addition_pool_args
)
def request(self, method, url, query_params=None, headers=None,
body=None, post_params=None, _preload_content=True,
_request_timeout=None):
"""Perform requests.
:param method: http request method
:param url: http request url
:param query_params: query parameters in the url
:param headers: http request headers
:param body: request json body, for `application/json`
:param post_params: request post parameters,
`application/x-www-form-urlencoded`
and `multipart/form-data`
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
"""
method = method.upper()
assert method in ['GET', 'HEAD', 'DELETE', 'POST', 'PUT',
'PATCH', 'OPTIONS']
if post_params and body:
raise ApiValueError(
"body parameter cannot be used with post_params parameter."
)
post_params = post_params or {}
headers = headers or {}
timeout = None
if _request_timeout:
if isinstance(_request_timeout, (int, ) if six.PY3 else (int, long)): # noqa: E501,F821
timeout = urllib3.Timeout(total=_request_timeout)
elif (isinstance(_request_timeout, tuple) and
len(_request_timeout) == 2):
timeout = urllib3.Timeout(
connect=_request_timeout[0], read=_request_timeout[1])
if 'Content-Type' not in headers:
headers['Content-Type'] = 'application/json'
try:
# For `POST`, `PUT`, `PATCH`, `OPTIONS`, `DELETE`
if method in ['POST', 'PUT', 'PATCH', 'OPTIONS', 'DELETE']:
if query_params:
url += '?' + urlencode(query_params)
if re.search('json', headers['Content-Type'], re.IGNORECASE):
request_body = None
if body is not None:
request_body = json.dumps(body)
r = self.pool_manager.request(
method, url,
body=request_body,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
elif headers['Content-Type'] == 'application/x-www-form-urlencoded': # noqa: E501
r = self.pool_manager.request(
method, url,
fields=post_params,
encode_multipart=False,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
elif headers['Content-Type'] == 'multipart/form-data':
                    # must del headers['Content-Type'], or the correct
                    # Content-Type generated by urllib3 will be
                    # overwritten.
del headers['Content-Type']
r = self.pool_manager.request(
method, url,
fields=post_params,
encode_multipart=True,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
# Pass a `string` parameter directly in the body to support
# other content types than Json when `body` argument is
# provided in serialized form
elif isinstance(body, str) or isinstance(body, bytes):
request_body = body
r = self.pool_manager.request(
method, url,
body=request_body,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
else:
# Cannot generate the request from given parameters
msg = """Cannot prepare a request message for provided
arguments. Please check that your arguments match
declared content type."""
raise ApiException(status=0, reason=msg)
# For `GET`, `HEAD`
else:
r = self.pool_manager.request(method, url,
fields=query_params,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
except urllib3.exceptions.SSLError as e:
msg = "{0}\n{1}".format(type(e).__name__, str(e))
raise ApiException(status=0, reason=msg)
if _preload_content:
r = RESTResponse(r)
# log response body
logger.debug("response body: %s", r.data)
if not 200 <= r.status <= 299:
raise ApiException(http_resp=r)
return r
def GET(self, url, headers=None, query_params=None, _preload_content=True,
_request_timeout=None):
return self.request("GET", url,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
query_params=query_params)
def HEAD(self, url, headers=None, query_params=None, _preload_content=True,
_request_timeout=None):
return self.request("HEAD", url,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
query_params=query_params)
def OPTIONS(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("OPTIONS", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def DELETE(self, url, headers=None, query_params=None, body=None,
_preload_content=True, _request_timeout=None):
return self.request("DELETE", url,
headers=headers,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def POST(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("POST", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def PUT(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("PUT", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def PATCH(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("PATCH", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
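# Illustrative usage sketch (added for exposition; not part of the generated
# client). Normally the generated ApiClient drives this class; oca.Configuration
# is assumed here to be the package's configuration type, and the URL is made up.
#
#   import oca
#   client = RESTClientObject(oca.Configuration())
#   resp = client.GET("https://example.com/api/health", _request_timeout=(3.05, 27))
#   print(resp.status, resp.data)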
| apache-2.0 | -7,978,392,025,791,435,000 | 41.027491 | 134 | 0.521096 | false |
cloudedbats/cloudedbats_wurb | cloudedbats_wurb/wurb_raspberry_pi/control_by_gpio.py | 1 | 4508 | #!/usr/bin/python3
# -*- coding:utf-8 -*-
# Project: http://cloudedbats.org
# Copyright (c) 2016-2018 Arnold Andreasson
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
import time
import logging
import threading
# Check if GPIO is available.
gpio_available = True
try: import RPi.GPIO as GPIO
except: gpio_available = False
class ControlByGpio(object):
""" Use GPIO for control when running without a graphical user interface. """
def __init__(self, callback_function=None):
""" """
self._callback_function = callback_function
self._logger = logging.getLogger('CloudedBatsWURB')
# Recording control.
self.rec_on_state = False
self.rec_off_state = False
self.rec_on_count = 0
self.rec_off_count = 0
# GPIO
if not gpio_available:
self._logger.error('GPIO control: RaspberryPi-GPIO not available.')
return
#
self._gpio_pin_rec_on = 37 # '#37 (GPIO 26)'
self._gpio_pin_rec_off = 38 # '#38 (GPIO 20)'
self._setup_gpio()
#
self._active = True
self._start_gpio_check()
def stop(self):
""" """
self._active = False
def is_gpio_rec_on(self):
""" """
return self.rec_on_state
def is_gpio_rec_off(self):
""" """
return self.rec_off_state
def is_gpio_rec_auto(self):
""" """
return (self.rec_on_state == False) and (self.rec_off_state == False)
def _fire_event(self, event):
""" Event for the state machine. """
if self._callback_function:
self._callback_function(event)
def _setup_gpio(self):
""" """
GPIO.setmode(GPIO.BOARD) # Use pin numbers (1-40).
        # Use the built-in pull-up resistors.
GPIO.setup(self._gpio_pin_rec_on, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(self._gpio_pin_rec_off, GPIO.IN, pull_up_down=GPIO.PUD_UP)
def _start_gpio_check(self):
""" """
# Check GPIO activity in a separate thread.
self._check_gpio_thread = threading.Thread(target = self._check_gpio, args = [])
self._check_gpio_thread.start()
def _check_gpio(self):
""" """
old_rec_on_state = self.rec_on_state
old_rec_off_state = self.rec_off_state
while self._active:
time.sleep(0.1)
try:
# Check if recording on is active.
if GPIO.input(self._gpio_pin_rec_on):
# High = inactive.
self.rec_on_count = 0
self.rec_on_state = False
else:
# Low = active.
if self.rec_on_count >= 5: # After 0.5 sec.
self.rec_on_state = True
else:
self.rec_on_count += 1
# Check if recording off is active.
if GPIO.input(self._gpio_pin_rec_off):
# High = inactive.
self.rec_off_count = 0
self.rec_off_state = False
else:
# Low = active.
if self.rec_off_count >= 5: # After 0.5 sec.
self.rec_off_state = True
else:
self.rec_off_count += 1
# Fire event.
if (old_rec_on_state != self.rec_on_state) or \
(old_rec_off_state != self.rec_off_state):
if self.rec_on_state:
# Rec on active.
self._fire_event('gpio_rec_on')
self._logger.debug('GPIO control: Fire event: gpio_rec_on.')
elif self.rec_off_state:
# Rec off active.
self._fire_event('gpio_rec_off')
self._logger.debug('GPIO control: Fire event: gpio_rec_off.')
else:
# Both inactive = Auto.
self._fire_event('gpio_rec_auto')
self._logger.debug('GPIO control: Fire event: gpio_rec_auto.')
#
old_rec_on_state = self.rec_on_state
old_rec_off_state = self.rec_off_state
except:
pass
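# Illustrative usage sketch (added for exposition; the callback wiring below is
# an assumption, not part of the original module):
#
#     def _on_gpio_event(event):
#         # event is one of 'gpio_rec_on', 'gpio_rec_off' or 'gpio_rec_auto'
#         print('GPIO control event: ' + event)
#
#     gpio_control = ControlByGpio(callback_function=_on_gpio_event)
#     ...  # the polling thread watches pins 37 and 38 while the recorder runs
#     gpio_control.stop()  # stops the polling loop on shutdown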
| mit | -5,799,279,537,577,791,000 | 34.496063 | 88 | 0.482919 | false |
dnsserver/datahub | src/functional_tests/test_login_auth.py | 1 | 2354 | from .base import FunctionalTest
from core.db.manager import DataHubManager
class LoginTest(FunctionalTest):
def test_sign_in_bad_user(self):
# Justin has not created an account, but he tries to sign in anyway
self.sign_in_manually()
justin_url = self.browser.current_url
self.assertNotRegexpMatches(justin_url, self.username)
def test_register_user_manually_sign_in_and_delete(self):
self.browser.get(self.server_url)
self.browser.set_window_size(1024, 768)
# Justin clicks "Sign Up"
self.browser.find_element_by_id('id_sign_up')
# Justin registers a new account
self.sign_up_manually()
# The URL bar now now shows Justin's username
justin_url = self.browser.current_url
self.assertRegexpMatches(justin_url, self.username)
# Justin clicks on the menu item with his name
self.browser.find_element_by_id('id_user_menu').click()
# Justin signs out
self.browser.find_element_by_id('id_sign_out').click()
# The URL bar now shows logout
justin_url = self.browser.current_url
self.assertRegexpMatches(justin_url, 'logout')
# Justin is able to sign back in
self.sign_in_manually()
justin_url = self.browser.current_url
self.assertRegexpMatches(justin_url, self.username)
# DataHub deletes his user and database, somewhat vindictively
DataHubManager.remove_user_and_database(self.username)
# Justin doesn't like DataHub
# Justin goes to the settings page
# self.browser.find_element_by_id('id_settings').click()
# Justin deletes his account
# self.delete_account()
# Justin is now logged out
# justin_url = self.browser.current_url
# self.assertRegexpMatches(justin_url, 'logout')
# Justin cannot sign back in
# self.sign_in_manually()
# justin_url = self.browser.current_url
# self.assertNotRegex(justin_url, self.username)
# def test_justin_hacks_the_planet(self):
# pass
# Justin is hacking the planet
## Justin sneakily registers his username again
# His data does not reappear
# Justin has messed with the best. He does like the rest.
# def delete_account(self):
# pass
| mit | -3,764,447,796,731,937,300 | 31.694444 | 75 | 0.647833 | false |
jabez007/Archons_Oracle | Oracle/Oracle.py | 1 | 5883 | #!/usr/bin/env python3
# https://theneuralperspective.com/2016/10/04/05-recurrent-neural-networks-rnn-part-1-basic-rnn-char-rnn/
# https://machinelearningmastery.com/text-generation-lstm-recurrent-neural-networks-python-keras/
# Larger LSTM Network to Generate Text for Last Hope LARP
import sys
import os
import numpy
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LSTM
from keras.callbacks import ModelCheckpoint
from keras.utils import np_utils
SEQ_LENGTH = 29
STEP = 1
HIDDEN_LAYER = 512
BATCH_SIZE = 128
_dataX_ = list()
_int_to_char_ = dict()
_char_to_int_ = dict()
def train():
"""
    Below are 10 ideas that you could experiment with to further improve the model:
-- Predict fewer than 1,000 characters as output for a given seed.
-- Remove all punctuation from the source text, and therefore from the models’ vocabulary.
-- Try a one hot encoded for the input sequences.
* Train the model on padded sentences rather than random sequences of characters.
* Increase the number of training epochs to 100 or many hundreds.
-- Add dropout to the visible input layer and consider tuning the dropout percentage.
-- Tune the batch size, try a batch size of 1 as a (very slow) baseline and larger sizes from there.
-- Add more memory units to the layers and/or more layers.
-- Experiment with scale factors (temperature) when interpreting the prediction probabilities.
* Change the LSTM layers to be “stateful” to maintain state across batches.
"""
raw_text = load_data()
X, y = format_data(raw_text)
model = build_model(X, y)
# define the checkpoint
filepath="weights-{epoch:02d}-{loss:.4f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=2, save_best_only=True, mode='min') # verbose = 2. This gives you one output per epoch.
callbacks_list = [checkpoint]
# fit the model
model.fit(X, y, epochs=50, batch_size=BATCH_SIZE, callbacks=callbacks_list) # Tune the batch size
def sample(seed = "", length = 280): # on November 7, 2017, the limit was doubled to 280
raw_text = load_data()
X, y = format_data(raw_text)
model = build_model(X, y)
# pick a random seed
if seed == "":
start = numpy.random.randint(0, len(_dataX_)-1)
pattern = _dataX_[start]
else:
pattern = [ _char_to_int_[char] if char in _char_to_int_.keys() else _char_to_int_[" "]
for char in (seed.lower().strip() + " ").rjust(SEQ_LENGTH)[-SEQ_LENGTH:] ]
print("Seed:")
print("\"" + ''.join([_int_to_char_[value] for value in pattern]) + "\"")
# generate characters
generated_text = ""
n_vocab = len(_int_to_char_)
    for i in range(length): # length defaults to Twitter's 280-character limit
x = numpy.zeros((1, SEQ_LENGTH, n_vocab))
for tt, char in enumerate(pattern):
x[0, tt, char] = 1.
prediction = model.predict(x, verbose=0)[0]
#index = numpy.argmax(prediction)
index = numpy.random.choice(range(n_vocab), 1, p=prediction[SEQ_LENGTH-1])[0]
result = _int_to_char_[index]
sys.stdout.write(result)
sys.stdout.flush()
generated_text += result
pattern.append(index)
pattern = pattern[1:len(pattern)]
print("\nDone.")
return generated_text
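# Illustrative sketch (added for exposition, not part of the original script):
# the "temperature" idea listed in train()'s docstring, usable as a drop-in
# replacement for the numpy.random.choice call in sample(); the helper name is
# an invention of this note, not the project's. `prediction` is assumed to be a
# 1-d probability vector over the vocabulary.
def sample_with_temperature(prediction, temperature=1.0):
    # temperature < 1.0 sharpens the distribution, > 1.0 flattens it
    scaled = numpy.log(numpy.asarray(prediction, dtype=numpy.float64) + 1e-8) / temperature
    probs = numpy.exp(scaled) / numpy.sum(numpy.exp(scaled))
    return numpy.random.choice(len(probs), p=probs)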
def load_data(filename = "lasthope.txt"):
# load text and covert to lowercase
raw_text = open(filename, encoding="utf8").read()
raw_text = raw_text.lower()
return raw_text
def format_data(raw_text):
global _int_to_char_, _char_to_int_
# create mapping of unique chars to integers
chars = sorted(list(set(raw_text)))
_char_to_int_ = dict((c, i) for i, c in enumerate(chars))
_int_to_char_ = dict((i, c) for i, c in enumerate(chars))
# summarize the loaded data
n_chars = len(raw_text)
n_vocab = len(chars)
print("Total Characters: " + str(n_chars))
print("Total Vocab: " + str(n_vocab))
# prepare the dataset of input to output pairs encoded as integers
dataY = []
for i in range(0, n_chars - SEQ_LENGTH, STEP):
seq_in = raw_text[i: i+SEQ_LENGTH]
seq_out = raw_text[i+1: i+1+SEQ_LENGTH]
_dataX_.append([_char_to_int_[char] for char in seq_in])
dataY.append([_char_to_int_[char] for char in seq_out])
n_patterns = len(_dataX_)
print("Total Patterns: " + str(n_patterns))
# One-hot encode X and y
X = numpy.zeros((n_patterns, SEQ_LENGTH, n_vocab), dtype=numpy.bool)
for i, seq in enumerate(_dataX_):
for t, char in enumerate(seq):
X[i, t, char] = 1
y = numpy.zeros((n_patterns, SEQ_LENGTH, n_vocab), dtype=numpy.bool)
for i, seq in enumerate(dataY):
for t, char in enumerate(seq):
y[i, t, char] = 1
return X, y
def build_model(X, y):
# define the LSTM model
model = Sequential()
model.add(LSTM(HIDDEN_LAYER, input_shape=(X.shape[1], X.shape[2]), return_sequences=True))
model.add(Dropout(0.3))
model.add(LSTM(HIDDEN_LAYER, return_sequences=True))
model.add(LSTM(HIDDEN_LAYER, return_sequences=True))
model.add(Dense(y.shape[2], activation='softmax'))
# load previous network weights
loss = 10
filename = ""
for f in os.listdir():
if f.endswith('.hdf5'):
if float(f.split('.')[0].split('-')[2]) < loss:
filename = f
if filename != "":
print("checkpoint file: " + filename)
model.load_weights(filename)
model.compile(loss='categorical_crossentropy', optimizer='adam')
return model
# # # #
if __name__ == "__main__":
train()
| mit | -5,439,092,132,026,910,000 | 38.442953 | 159 | 0.622767 | false |
hydroffice/hyo_soundspeed | hyo2/soundspeed/formats/writers/abstract.py | 1 | 2150 | from abc import ABCMeta, abstractmethod # , abstractproperty
import os
import logging
logger = logging.getLogger(__name__)
from hyo2.soundspeed.base.files import FileManager
from hyo2.soundspeed.formats.abstract import AbstractFormat
class AbstractWriter(AbstractFormat, metaclass=ABCMeta):
""" Abstract data writer """
def __repr__(self):
return "<%s:writer:%s:%s>" % (self.name, self.version, ",".join(self._ext))
def __init__(self):
super(AbstractWriter, self).__init__()
self.fod = None
@abstractmethod
def write(self, ssp, data_path, data_file=None, project=''):
pass
@abstractmethod
def _write_header(self):
pass
@abstractmethod
def _write_body(self):
pass
def finalize(self):
if self.fod:
if not self.fod.io.closed:
self.fod.io.close()
class AbstractTextWriter(AbstractWriter, metaclass=ABCMeta):
""" Abstract text data writer """
def __init__(self):
super(AbstractTextWriter, self).__init__()
def _write(self, data_path, data_file, encoding='utf8', append=False, binary=False):
"""Helper function to write the raw file"""
# data_path = os.path.join(data_path, self.name.lower()) # commented to avoid the creation of sub-folders
if not os.path.exists(data_path):
os.makedirs(data_path)
if data_file:
if len(data_file.split('.')) == 1:
data_file += (".%s" % (list(self.ext)[0],))
file_path = os.path.join(data_path, data_file)
else:
if self.ssp.cur.meta.original_path:
data_file = "%s.%s" % (os.path.basename(self.ssp.cur.meta.original_path), list(self.ext)[0])
else:
data_file = 'output.%s' % (list(self.ext)[0],)
file_path = os.path.join(data_path, data_file)
logger.info("output file: %s" % file_path)
if append:
mode = 'a'
else:
mode = 'w'
if binary:
mode = '%sb' % mode
self.fod = FileManager(file_path, mode=mode, encoding=encoding)
| lgpl-2.1 | -3,848,063,316,767,106,600 | 27.289474 | 114 | 0.58 | false |
google/orchestra | orchestra/google/marketing_platform/utils/schema/erf/Creative.py | 1 | 2231 | ###########################################################################
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
Creative_Schema = [
{ "name":"common_data",
"type":"RECORD",
"mode":"NULLABLE",
"fields":[
{ "name":"id",
"type":"INTEGER",
"mode":"NULLABLE",
},
{ "name":"name",
"type":"STRING",
"mode":"NULLABLE",
},
{ "name":"active",
"type":"BOOLEAN",
"mode":"NULLABLE",
},
{ "name":"integration_code",
"type":"STRING",
"mode":"NULLABLE",
},
]
},
{ "name":"advertiser_id",
"type":"INTEGER",
"mode":"NULLABLE",
},
{ "name":"dcm_placement_id",
"type":"INTEGER",
"mode":"NULLABLE",
},
{ "name":"width_pixels",
"type":"INTEGER",
"mode":"NULLABLE",
},
{ "name":"height_pixels",
"type":"INTEGER",
"mode":"NULLABLE",
},
{ "name":"approval_status",
"type":"RECORD",
"mode":"REPEATED",
"fields":[
{ "name":"auditor",
"type":"INTEGER",
"mode":"NULLABLE",
},
{ "name":"status",
"type":"INTEGER",
"mode":"NULLABLE",
},
{ "name":"feedback",
"type":"STRING",
"mode":"NULLABLE",
},
{ "name":"sync_time",
"type":"INTEGER",
"mode":"NULLABLE",
},
{ "name":"external_id",
"type":"INTEGER",
"mode":"NULLABLE",
},
]
},
{ "name":"expanding_direction",
"type":"INTEGER",
"mode":"NULLABLE",
},
{ "name":"creative_type",
"type":"INTEGER",
"mode":"NULLABLE",
},
] | apache-2.0 | -204,701,574,196,349,100 | 23.26087 | 75 | 0.487674 | false |
Fluent-networks/floranet | floranet/web/rest/gateway.py | 1 | 7710 | import ipaddress
from flask_restful import Resource, reqparse, abort, inputs, fields, marshal
from flask_login import login_required
from twisted.internet.defer import inlineCallbacks, returnValue
from crochet import wait_for, TimeoutError
from floranet.models.gateway import Gateway
from floranet.log import log
# Crochet timeout. If the code block does not complete within this time,
# a TimeoutError exception is raised.
from __init__ import TIMEOUT
class GatewayResource(Resource):
"""Gateway resource base class.
Attributes:
restapi (RestApi): Flask Restful API object
server (NetServer): FloraNet network server object
fields (dict): Dictionary of attributes to be returned to a REST request
parser (RequestParser): Flask RESTful request parser
args (dict): Parsed request argument
"""
def __init__(self, **kwargs):
self.restapi = kwargs['restapi']
self.server = kwargs['server']
self.fields = {
'host': fields.String,
'eui': fields.Integer,
'name': fields.String,
'enabled': fields.Boolean,
'power': fields.Integer,
'created': fields.DateTime(dt_format='iso8601'),
'updated': fields.DateTime(dt_format='iso8601')
}
self.parser = reqparse.RequestParser(bundle_errors=True)
self.parser.add_argument('host', type=str)
self.parser.add_argument('eui', type=int)
self.parser.add_argument('name', type=str)
self.parser.add_argument('enabled', type=inputs.boolean)
self.parser.add_argument('power', type=int)
self.args = self.parser.parse_args()
class RestGateway(GatewayResource):
"""RestGateway Resource class.
Manages RESTAPI GET and PUT transactions for gateways.
"""
def __init__(self, **kwargs):
super(RestGateway, self).__init__(**kwargs)
@login_required
@wait_for(timeout=TIMEOUT)
@inlineCallbacks
def get(self, host):
"""Method to handle gateway GET requests"""
try:
g = yield Gateway.find(where=['host = ?', host], limit=1)
# Return a 404 if not found.
if g is None:
abort(404, message={'error': "Gateway {} doesn't exist.".format(host)})
returnValue(marshal(g, self.fields))
except TimeoutError:
log.error("REST API timeout retrieving gateway {host}",
host=host)
@login_required
@wait_for(timeout=TIMEOUT)
@inlineCallbacks
def put(self, host):
"""Method to handle gateway PUT requests
Args:
host (str): Gateway host address
"""
try:
gateway = yield Gateway.find(where=['host = ?', host], limit=1)
# Return a 404 if not found.
if gateway is None:
abort(404, message={'error': "Gateway {} doesn't exist".format(host)})
kwargs = {}
for a,v in self.args.items():
if v is not None and v != getattr(gateway, a):
kwargs[a] = v
setattr(gateway, a, v)
(valid, message) = yield gateway.valid()
if not valid:
abort(400, message=message)
# Update the gateway and server with the new attributes
if kwargs:
gateway.update(**kwargs)
self.server.lora.updateGateway(host, gateway)
returnValue(({}, 200))
except TimeoutError:
log.error("REST API timeout retrieving gateway {host}",
host=host)
@login_required
@wait_for(timeout=TIMEOUT)
@inlineCallbacks
def delete(self, host):
"""Method to handle gateway DELETE requests
Args:
host (str): Gateway host
"""
try:
g = yield Gateway.find(where=['host = ?', host], limit=1)
# Return a 404 if not found.
if g is None:
abort(404, message={'error': "Gateway {} doesn't exist.".format(host)})
deleted = yield g.delete()
self.server.lora.deleteGateway(g)
returnValue(({}, 200))
except TimeoutError:
log.error("REST API timeout retrieving gateway {host}",
host=host)
class RestGateways(GatewayResource):
""" RestGateways Resource class.
Manages REST API GET and POST transactions for reading multiple gateways,
and creating gateways.
"""
def __init__(self, **kwargs):
super(RestGateways, self).__init__(**kwargs)
@login_required
@wait_for(timeout=TIMEOUT)
@inlineCallbacks
def get(self):
"""Method to get all gateways"""
try:
gateways = yield Gateway.all()
if gateways is None:
returnValue({})
data = {}
for i,g in enumerate(gateways):
data[i] = marshal(g, self.fields)
returnValue(data)
except TimeoutError:
# Exception returns 500 to client
log.error("REST API timeout retrieving all gateways")
@login_required
@wait_for(timeout=TIMEOUT)
@inlineCallbacks
def post(self):
"""Method to create a gateway"""
host = self.args['host']
name = self.args['name']
eui = self.args['eui']
enabled = self.args['enabled']
power = self.args['power']
message = {}
# Check for required args
required = {'host', 'name', 'eui', 'enabled', 'power'}
for r in required:
if self.args[r] is None:
message[r] = "Missing the {} parameter.".format(r)
if message:
abort(400, message=message)
# Ensure we have a valid address
try:
ipaddress.ip_address(host)
except (ipaddress.AddressValueError, ValueError):
message = {'error': "Invalid IP address {} ".format(host)}
abort(400, message=message)
# Ensure we have a valid EUI
if not isinstance(eui, (int, long)):
message = {'error': "Invalid gateway EUI {} ".format(eui)}
abort(400, message=message)
# Check this gateway does not currently exist
exists = yield Gateway.exists(where=['host = ?', host])
if exists:
message = {'error': "Gateway address {} ".format(host) + \
"currently exists."}
abort(400, message=message)
# Check the EUI does not currently exist
exists = yield Gateway.exists(where=['eui = ?', eui])
if exists:
message = {'error': "Gateway EUI {} ".format(eui) + \
"currently exists."}
abort(400, message=message)
# Create and validate
gateway = Gateway(host=host, eui=eui, name=name, enabled=enabled, power=power)
(valid, message) = gateway.valid()
if not valid:
abort(400, message=message)
try:
g = yield gateway.save()
if g is None:
abort(500, message={'error': "Error saving the gateway."})
# Add the new gateway to the server.
self.server.lora.addGateway(g)
location = self.restapi.api.prefix + '/gateway/' + str(host)
returnValue(({}, 201, {'Location': location}))
except TimeoutError:
# Exception returns 500 to client
log.error("REST API timeout for gateway POST request")
| mit | 563,634,068,682,700,800 | 34.045455 | 87 | 0.557717 | false |
firelab/viirs_ba | misc_utils/run_fom.py | 1 | 1752 | #
# Script to run all of the figure of merit code for a single run back to back.
#
import viirs_fom as vf
import sys
def all_fom(database_name, workers=12) :
# I over U FOM
vf.calc_all_ioveru_fom('{}_schema_info.csv'.format(database_name),
'gt', 'burnmask13', workers=workers)
# Zones
vf.do_all_zonetbl_runs('.','gt','burnmask13',
zone_tbl='fixed_zone_counts',
workers=workers,
mask_tbl='bobafet13')
# 2013 events
vf.do_all_zonetbl_runs('.','gt','burnmask13',
zonedef_tbl='calevents_2013',
zone_tbl='fixed_events_2013_counts',
zone_col='fireid',
year=2013,
workers=workers,
spatial_filter=True,
mask_tbl='bobafet13')
# 2014 events
vf.do_all_zonetbl_runs('.','gt','burnmask14',
zonedef_tbl='calevents_2014',
zone_tbl='fixed_events_2014_counts',
zone_col='fireid',
year=2014,
workers=workers,
spatial_filter=True,
mask_tbl='bobafet14')
#
if __name__ == "__main__" :
if len(sys.argv) != 2 :
print "Usage: {0} database_name".format(sys.argv[0])
print "Run this from the base directory of a batch-of-runs, and"
print "provide the database name associated with the entire batch."
sys.exit()
all_fom(sys.argv[1])
| cc0-1.0 | -7,760,519,851,290,668,000 | 36.933333 | 78 | 0.450913 | false |
gsastry/human-rl | train/roadrunner_b1_feature_files.py | 1 | 1284 | import os
import sys
from build_feature_files import build_feature_files
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "humanrl"))
if __name__ == "__main__":
import argparse
from humanrl import frame
from humanrl.classifier_tf import (SavedCatastropheClassifierTensorflow,
DataLoader, BlockerLabeller)
parser = argparse.ArgumentParser()
parser.add_argument('--logdir', type=str, default="")
common_hparams = dict(use_action=True, image_shape=[210, 160, 3])
base_directory = "logs/RoadRunner"
new_directory = "labels/RoadRunner/b1/0"
num_episodes = 100
negative_example_keep_prob = 0.1
if not os.path.exists(new_directory):
print("Writing feature files")
classifier = SavedCatastropheClassifierTensorflow(
"models/roadrunner/c1/classifier/final.ckpt")
data_loader = DataLoader(
hparams=classifier.classifier.hparams, labeller=BlockerLabeller(classifier))
label_counts = build_feature_files(base_directory, new_directory, data_loader, num_episodes,
negative_example_keep_prob)
print(label_counts)
paths = frame.feature_file_paths("labels/RoadRunner/b1/0")
| mit | 4,743,128,848,245,503,000 | 36.764706 | 100 | 0.650312 | false |
Transkribus/TWI-edit | views.py | 1 | 16936 | #imports of python modules
import json
import sys
import re
import random
from xml.etree import ElementTree
#Imports of django modules
from django.http import HttpResponse
from django.http import JsonResponse
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.utils import translation
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _
from django.template.loader import render_to_string
from django.utils.html import escape
#Imports pf <del>read</del> utils modules
from apps.utils.services import *
from apps.utils.utils import crop
import settings
import apps.edit.settings
from apps.navigation import navigation
#Imports from app (library)
#import library.settings
#import library.navigation# TODO Fix this import!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#from library.forms import RegisterForm, IngestMetsUrlForm, MetsFileForm
#from profiler import profile #profile is a decorator, but things get circular if I include it in decorators.py so...
@login_required
#def proofread(request, collId, docId, page=None, transcriptId=None):# TODO Decide whether to select which transcript to work with unless it should always be the newest?
def proofread(request, collId, docId, page, transcriptId=None):# TODO Decide whether to select which transcript to work with unless it should always be the newest?
t = request.user.tsdata.t
#RM default to page 1
# if page is None :
# page = 1
current_transcript = t.current_transcript(request, collId, docId, page)
if isinstance(current_transcript,HttpResponse):
return apps.utils.views.error_view(request,current_transcript)
transcript = t.transcript(request, current_transcript.get("tsId"), current_transcript.get("url"))
if isinstance(transcript,HttpResponse):
return apps.utils.views.error_view(request,transcript)
transcriptId = str(transcript.get("tsId"))
if request.method == 'POST':# This is by JQuery...
content = json.loads(request.POST.get('content'))
transcript_xml = t.transcript_xml(request, transcriptId, current_transcript.get("url"))
if isinstance(transcript_xml,HttpResponse):
return apps.utils.views.error_view(request,transcript_xml)
transcript_root = ElementTree.fromstring(transcript_xml)
# TODO Decide what to do about regionId... It's not necessary....
for text_region in transcript_root.iter('{http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15}TextRegion'):# We have to have the namespace...
regionTextEquiv = ""
for line in text_region.iter('{http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15}TextLine'):
modified_text = content.get(line.get("id")) # Only lines which have changed are submitted...
if None == modified_text:
modified_text = line.find('{http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15}TextEquiv').find('{http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15}Unicode').text
else:
line.find('{http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15}TextEquiv').find('{http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15}Unicode').text = modified_text
regionTextEquiv += modified_text +"\r\n"
text_region.find('{http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15}TextEquiv').find('{http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15}Unicode').text = regionTextEquiv
t.save_transcript(request, ElementTree.tostring(transcript_root), collId, docId, page, transcriptId)
current_transcript = t.current_transcript(request, collId, docId, page)# We want the updated transcript now.
if isinstance(current_transcript,HttpResponse):
return apps.utils.views.error_view(request,current_transcript)
return HttpResponse(str(_("Transcript saved!")), content_type="text/plain")
else:
regions=transcript.get("PcGts").get("Page").get("TextRegion");
if isinstance(regions, dict):
regions = [regions]
lineList = []
if regions:
for x in regions:
lines = x.get("TextLine")
if isinstance(lines, dict):
lineList.extend([lines])
else: # Assume that lines is a list of lines
for line in lines:
lineList.extend([line])
# TODO Use "readingorder"?
if lineList:
for line in lineList:
line['crop'] = crop(line.get("Coords").get("@points"))#,True)
line['id'] = line.get("@id")
line['Unicode'] = line.get('TextEquiv').get('Unicode')
#RM need to test whether this has been successful
document = t.document(request, collId, docId, -1)
if isinstance(document,HttpResponse):
return apps.utils.views.error_view(request,document)
return render(request, 'edit/proofread.html', {
'imageUrl': document.get('pageList').get('pages')[int(page) - 1].get("url"),
'lines': lineList
})
@login_required
def correct(request, collId, docId, page=None, transcriptId=None):# TODO Decide whether to select which transcript to work with unless it should always be the newest?
#def correct(request, collId, docId, page, transcriptId=None):# TODO Decide whether to select which transcript to work with unless it should always be the newest?
t = request.user.tsdata.t
#RM default to page 1
if page is None :
page = 1
#Use this to get the role of the current user untils such time as it is available from t.collection
role = apps.utils.utils.get_role(request,collId)
if 'edit' in request.path and not (role == 'Editor' or role == 'Owner' or role == 'Admin' or role == 'CrowdTranscriber' or role == 'Transcriber'):
t_log('Redirect user due to insufficient role access. [from: %s to: %s]' % (request.get_full_path(), request.get_full_path().replace('edit', 'view')))
return HttpResponseRedirect(request.get_full_path().replace('edit', 'view'))
current_transcript = t.current_transcript(request, collId, docId, page)
if isinstance(current_transcript,HttpResponse):
return apps.utils.views.error_view(request,current_transcript)
transcript = t.transcript(request, current_transcript.get("tsId"), current_transcript.get("url"))
if isinstance(transcript,HttpResponse):
return apps.utils.views.error_view(request,transcript)
#RM Add arrow-in-breadcrumb-bar navigation to sibling documents
collection = t.collection(request, {'collId': collId})
# nav = navigation.up_next_prev(request,"document",docId,collection,[collId])
navdata = navigation.get_nav(collection,docId,'docId','title')
transcriptId = str(transcript.get("tsId"))
if request.method == 'POST':# This is by JQuery...
if 'content' in request.POST:
content = json.loads(request.POST.get('content'))
transcript_xml = t.transcript_xml(request, transcriptId, current_transcript.get("url"))
if isinstance(transcript_xml,HttpResponse):
return apps.utils.views.error_view(request,transcript_xml)
transcript_root = ElementTree.fromstring(transcript_xml)
# TODO Decide what to do about regionId... It's not necessary....
for text_region in transcript_root.iter('{http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15}TextRegion'):# We have to have the namespace...
regionTextEquiv = ""
for line in text_region.iter('{http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15}TextLine'):
modified_content = content.get(text_region.get("id") + line.get("id"))
if "custom" in modified_content :
line.set("custom", modified_content.get("custom"))
if "Unicode" in modified_content :
modified_text = modified_content.get("Unicode")
regionTextEquiv += modified_text +"\r\n"
t_equiv = line.find('{http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15}TextEquiv')
##############################################################
# RM in cases where the is no TextQuiv (or Unicde) tag already
# We must make one before attempting to add modified text
#############################################################
if t_equiv is None :
t_equiv = ElementTree.SubElement(line,'{http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15}TextEquiv')
ElementTree.SubElement(t_equiv,'{http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15}Unicode')
t_equiv.find('{http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15}Unicode').text = modified_text
r_text_equiv = text_region.find('{http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15}TextEquiv')
##############################################################
# RM in cases where the is no TextQuiv (or Unicde) tag already
# We must make one before attempting to add modified text
#############################################################
if r_text_equiv is None:
r_text_equiv = ElementTree.SubElement(text_region,'{http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15}TextEquiv')
ElementTree.SubElement(r_text_equiv,'{http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15}Unicode')
r_text_equiv.find('{http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15}Unicode').text = regionTextEquiv
t.save_transcript(request, ElementTree.tostring(transcript_root), collId, docId, page, transcriptId)
current_transcript = t.current_transcript(request, collId, docId, page)# We want the updated transcript now.
#RM add some error catching (though somewhat suboptimal)
if isinstance(current_transcript,HttpResponse):
t_log("current_transcript request has failed... %s" % current_transcript)
#For now this will do but there may be other reasons the transckribus request fails...
return apps.utils.views.error_view(request, current_transcript)
success_message = str(_("Transcript saved!"))
return HttpResponse(success_message, content_type="text/plain")
elif 'status' in request.POST:
t.save_page_status(request, request.POST.get('status'), collId, docId, page, transcriptId)
success_message = str(_("Page status changed!"))
return HttpResponse(success_message, content_type="text/plain")
else:
regions = transcript.get("PcGts").get("Page").get("TextRegion");
if isinstance(regions, dict):
regions = [regions]
lineList = []
#regionData = [] # Let's leave this here for now, it might still be needed.
if regions:
for x in regions:
lines = x.get("TextLine") # Region!
region_width = crop(x.get("Coords").get("@points"), 1).get('w')
if lines:
if isinstance(lines, dict):
lines['regionWidth'] = region_width
lines['@id'] = x.get("@id") + lines['@id'] # TODO Figure out why this results in region_blah_region_blah_line instead of just region_blah_line_, the transcript already has the duplicate region_blah for each line
lineList.extend([lines])
#regionData.extend([x.get("@id"), 1])
else: # Assume that lines is a list of lines
for line in lines:
line['regionWidth'] = region_width
line['@id'] = x.get("@id") + line['@id'] # TODO Figure out why this results in region_blah_region_blah_line instead of just region_blah_line_, the transcript already has the duplicate region_blah for each line
lineList.extend([line])
#regionData.extend([x.get("@id"), len(lines)])
content_dict = {}
# TODO Unmessify this, the loop below might be better placed inside the one above
if lineList:
for line in lineList:
line_crop = crop(line.get("Coords").get("@points"))
line['crop'] = line_crop
textEquiv = line.get("TextEquiv")
if textEquiv:
unicode = textEquiv.get("Unicode")
if unicode:
line['Unicode'] = unicode.replace(" ", "\u00A0")
else:
line['Unicode'] = ""
else:
if 'edit' in request.path:
t_log('Redirect user back to view mode since no lines in on page. [from: %s to: %s]' % (request.get_full_path(), request.get_full_path().replace('edit', 'view')))
return HttpResponseRedirect(request.get_full_path().replace('edit', 'view'))
# Get thumbnails
# RM Make one document request here...
# RM need to test whether this has been successful
document = t.document(request, collId, docId, -1)
if isinstance(document,HttpResponse):
return apps.utils.views.error_view(request,document)
# RM and get pages from the result... and also the url further down
pages = document.get('pageList').get('pages')
thumb_urls =[]
for thumb_page in pages:
if 0 < thumb_page.get("tsList").get("transcripts")[0].get("nrOfLines"):
if 0 < thumb_page.get("tsList").get("transcripts")[0].get("nrOfTranscribedLines"):
thumb_urls.append("['" + escape(thumb_page.get("thumbUrl")).replace("&", "&") + "', 'transcribed']")# The JavaScript must get the strings like this.
else:
thumb_urls.append("['" + escape(thumb_page.get("thumbUrl")).replace("&", "&") + "', 'only-segmented']")# The JavaScript must get the strings like this.
else:
thumb_urls.append("['" + escape(thumb_page.get("thumbUrl")).replace("&", "&") + "', 'no-segmentation']")# The JavaScript must get the strings like this.
pageStatus = document.get('pageList').get('pages')[int(page) - 1].get("tsList").get('transcripts')[0].get('status')
if pageStatus == 'GT' and 'edit' in request.path:
t_log('Redirect user back to view mode since page status is GT. [from: %s to: %s]' % (request.get_full_path(), request.get_full_path().replace('edit', 'view')))
return HttpResponseRedirect(request.get_full_path().replace('edit', 'view'))
i = request.GET.get('i') if request.GET.get('i') else 'i'
if i == 'sbs' or i == 't' and 'edit' in request.path:
t_log('Redirect user back to view mode since interface "sbs" and "t" do not support edit. [from: %s to: %s]' % (request.get_full_path(), request.get_full_path().replace('edit', 'view')))
return HttpResponseRedirect(request.get_full_path().replace('edit', 'view'))
tags = [
{"name": "abbrev", "color": "FF0000"},
{"name": "date", "color": "0000FF"},
{"name": "gap", "color": "1CE6FF"},
{"name": "person", "color": "00FF00"},
{"name": "place", "color": "8A2BE2"},
{"name": "unclear", "color": "FFCC66"},
{"name": "organization", "color": "FF00FF"}
]
#RM defined the dict for all the stuff going to the view so...
view_data = {
'imageUrl': document.get('pageList').get('pages')[int(page) - 1].get("url"),
'pageStatus': pageStatus,
'lines': lineList,
'thumbArray': "[" + ", ".join(thumb_urls) + "]",
'collId': collId,
'collName': document.get('collection').get('colName'),
'docId': docId,
'title': document.get('md').get('title'),
'pageNo': page,
'tags': tags,
'i': i,
'role': role,
'metadata' : document.get('md'),
#'regionData': regionData,
}
# we can add the navdata to the end of it
view_data.update(navdata)
return render(request, 'edit/correct.html', view_data)
| gpl-3.0 | 4,320,347,720,505,138,000 | 58.633803 | 237 | 0.606991 | false |
RobLoach/lutris | lutris/util/wineprefix.py | 1 | 2815 | import os
from lutris.util.wineregistry import WineRegistry
from lutris.util.log import logger
from lutris.util import joypad
class WinePrefixManager:
"""Class to allow modification of Wine prefixes without the use of Wine"""
hkcu_prefix = "HKEY_CURRENT_USER"
def __init__(self, path):
self.path = path
def setup_defaults(self):
self.sandbox()
self.override_dll("winemenubuilder.exe", "")
def get_registry_path(self, key):
if key.startswith(self.hkcu_prefix):
return os.path.join(self.path, 'user.reg')
else:
raise ValueError("Unsupported key '{}'".format(key))
def get_key_path(self, key):
if key.startswith(self.hkcu_prefix):
return key[len(self.hkcu_prefix) + 1:]
else:
raise ValueError(
"The key {} is currently not supported by WinePrefixManager".format(key)
)
def set_registry_key(self, key, subkey, value):
registry = WineRegistry(self.get_registry_path(key))
registry.set_value(self.get_key_path(key), subkey, value)
registry.save()
def clear_registry_key(self, key):
registry = WineRegistry(self.get_registry_path(key))
registry.clear_key(self.get_key_path(key))
registry.save()
def override_dll(self, dll, mode):
key = self.hkcu_prefix + "/Software/Wine/DllOverrides"
if mode.startswith("dis"):
mode = ""
if mode not in ("builtin", "native", "builtin,native", "native,builtin", ""):
logger.error("DLL override '%s' mode is not valid", mode)
return
self.set_registry_key(key, dll, mode)
def sandbox(self):
user = os.getenv('USER')
user_dir = os.path.join(self.path, "drive_c/users/", user)
# Replace symlinks
if os.path.exists(user_dir):
for item in os.listdir(user_dir):
path = os.path.join(user_dir, item)
if os.path.islink(path):
os.unlink(path)
os.makedirs(path)
def set_crash_dialogs(self, enabled):
"""Enable or diable Wine crash dialogs"""
key = self.hkcu_prefix + "/Software/Wine/WineDbg"
value = 1 if enabled else 0
self.set_registry_key(key, "ShowCrashDialog", value)
def configure_joypads(self):
joypads = joypad.get_joypads()
key = self.hkcu_prefix + '/Software/Wine/DirectInput/Joysticks'
self.clear_registry_key(key)
for device, joypad_name in joypads:
if 'event' in device:
disabled_joypad = "{} (js)".format(joypad_name)
else:
disabled_joypad = "{} (event)".format(joypad_name)
self.set_registry_key(key, disabled_joypad, 'disabled')
| gpl-3.0 | 4,930,818,353,654,908,000 | 35.558442 | 88 | 0.593961 | false |
gamesun/MyTerm-for-YellowStone | appInfo.py | 1 | 1742 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2013, gamesun
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of gamesun nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY GAMESUN "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL GAMESUN BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
title = 'MyTerm for YellowStone'
version = '1.0'
file_name = title + ' ' + version
url = 'https://github.com/gamesun/MyTerm-for-YellowStone#myterm-for-yellowstone'
author = 'gamesun'
copyright = 'Copyright (C) 2013, gamesun'
| bsd-3-clause | -5,196,408,145,172,072,000 | 42.55 | 80 | 0.762342 | false |
thinker0/aurproxy | tellapart/aurproxy/metrics/publisher.py | 1 | 6667 | # Copyright 2015 TellApart, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base metrics client and derived implementations.
"""
__copyright__ = 'Copyright (C) 2015 TellApart, Inc. All Rights Reserved.'
from abc import (
ABCMeta,
abstractmethod)
from tellapart.aurproxy.util import (
get_logger,
PeriodicTask)
logger = get_logger(__name__)
class FlushEngine(object):
"""Class that uses some scheduling mechanism (threading, gevent, etc.) in
order to periodically call flush_fn.
"""
__metaclass__ = ABCMeta
def __init__(self, period, flush_fn):
"""
Args:
period - The period in seconds at which to flush.
flush_fn - The function to call.
"""
self._period = period
self._flush_fn = flush_fn
@abstractmethod
def start(self):
"""Starts the engine.
"""
@abstractmethod
def stop(self):
"""Stops the engine.
"""
class ThreadFlushEngine(FlushEngine):
"""Class that uses a thread to periodically flush.
"""
def __init__(self, period, flush_fn):
super(ThreadFlushEngine, self).__init__(period, flush_fn)
self._thread = PeriodicTask(self._period, self._flush_fn)
def start(self):
"""Override of base method.
"""
self._thread.start()
def stop(self):
"""Override of base method.
"""
self._thread.stop()
class MetricPublisher(object):
"""Base definition of a class intended to publish metrics to external sources.
"""
__metaclass__ = ABCMeta
def __init__(self, source, period=60, flush_engine=ThreadFlushEngine):
"""
Args:
source - The identifier to use as the source of the data when publishing.
period - The period in seconds at which to publish metrics.
flush_engine - The type or instance of a FlushEngine used to schedule
publication.
"""
self._period = period
self._source = source
if isinstance(flush_engine, type):
self._flush_engine = flush_engine(self._period, self.publish)
else:
self._flush_engine = flush_engine
self._metric_stores = []
self._started = False
@abstractmethod
def publish(self):
"""Publishes metrics to an external endpoint.
"""
def register_store(self, metric_store):
"""Registers a metric store with the publisher.
Args:
metric_store - A MetricStore object.
"""
# Only start flushing after registration has occurred.
if not self._started:
self._flush_engine.start()
self._started = True
self._metric_stores.append(metric_store)
class LibratoMetricPublisher(MetricPublisher):
"""Implementation of a MetricPublisher that publishes to Librato.
"""
def __init__(self, api_user, api_token, source, period=60,
flush_engine=ThreadFlushEngine):
"""
Args:
api_user - The API User for Librato.
api_token - The API Token for Librato.
source - The identifier to use as the source of the data when publishing.
period - The period in seconds at which to publish metrics.
flush_engine - The type or instance of a FlushEngine used to schedule
publication.
"""
self._api_user = api_user
self._api_token = api_token
super(LibratoMetricPublisher, self).__init__(source, period, flush_engine)
def _get_queue(self):
"""Gets a Librato Queue object for bulk submission of metrics.
Returns:
A Librato Queue object.
"""
import librato
from librato import Queue
connection = librato.connect(self._api_user, self._api_token)
return Queue(connection)
def publish(self):
"""Override of base method.
"""
try:
logger.info('Publishing metrics to Librato.')
queue = self._get_queue()
for store in self._metric_stores:
for metric in store.get_metrics():
queue.add(
name=metric.name,
value=metric.value(),
type=metric.metric_type.lower(),
source=self._source,
period=self._period,
# Enable Service-Side aggregation by default.
attributes={'aggregate': True})
# The Librato queue object takes care of chunking the POSTs on submit.
queue.submit()
except Exception:
logger.exception('Failed to publish metrics to Librato!')
class OpenTSDBMetricPublisher(MetricPublisher):
"""Implementation of a MetricPublisher that publishes to OpenTSDB.
"""
def __init__(self, prefix, host, port, source, period=60,
flush_engine=ThreadFlushEngine):
"""
Args:
host - hostname.
port - host port.
source - The identifier to use as the source of the data when publishing.
period - The period in seconds at which to publish metrics.
flush_engine - The type or instance of a FlushEngine used to schedule
publication.
"""
self._prefix = prefix
self._host = host
self._port = int(port)
super(OpenTSDBMetricPublisher, self).__init__(source, period, flush_engine)
def hostname(self):
import socket
return socket.gethostname()
def publish(self):
import os
import time
import struct
from socket import socket, AF_INET, SOCK_STREAM, SOL_SOCKET, SO_REUSEADDR, SO_LINGER, IPPROTO_TCP, TCP_NODELAY
"""Override of base method.
"""
try:
logger.debug('Publishing metrics to OpenTSDB.')
sock = socket(AF_INET, SOCK_STREAM)
sock.settimeout(3)
sock.connect((self._host, self._port))
sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
sock.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
sock.setsockopt(SOL_SOCKET, SO_LINGER, struct.pack('ii', 1, 0))
ts = int(time.time())
for store in self._metric_stores:
for metric in store.get_metrics():
request = "put %s%s%s %d %f host=%s pid=%d" % (self._prefix, self._source, metric.name, ts, metric.value(),
self.hostname(), os.getpid())
logger.debug('Publishing: %s' % (request))
sock.sendall(request + "\n")
sock.close()
except Exception:
logger.exception('Failed to publish metrics to OpenTSDB!')
| apache-2.0 | 4,870,028,771,380,186,000 | 29.865741 | 117 | 0.646468 | false |
HeavenMin/PlantImageRecognition | Dataset Process/Delete_Anaything_Not_Flower.py | 1 | 2191 |
"""
AUTHOR : Lang
PURPOSE : Multi Self Deep Learning
"""
__author__ = 'Lang'
import tensorflow as tf, sys
import os
# change this as you see fit
graph_path_temple = sys.argv[1]
label_path_temple = sys.argv[2]
graph_path = os.path.abspath(graph_path_temple)
label_path = os.path.abspath(label_path_temple)
# Loads label file, strips off carriage return
label_lines = [line.rstrip() for line
in tf.gfile.GFile(label_path)]
# Unpersists graph from file
with tf.gfile.FastGFile(graph_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
count = 0
tracing = open("processing.txt",'w')
tracing.close()
for image_dir_path in os.listdir('.'):
try:
for image_path in os.listdir(image_dir_path):
try:
# Read in the image_data
image_data = tf.gfile.FastGFile(image_dir_path+'/'+image_path, 'rb').read()
with tf.Session() as sess:
# Feed the image_data as input to the graph and get first prediction
softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
predictions = sess.run(softmax_tensor, \
{'DecodeJpeg/contents:0': image_data})
# Sort to show labels of first prediction in order of confidence
top_k = predictions[0].argsort()[-len(predictions[0]):][::-1]
if label_lines[top_k[0]] == "no":
os.remove(image_dir_path+'/'+image_path)
print('removed picture '+image_path)
else:
print('remain picture '+image_path)
except:
os.remove(image_dir_path+'/'+image_path)
print('removed picture'+image_path)
count = count +1
tracing = open("processing.txt",'a')
tracing.write("finish " + str(count) + " kinds of removing not flower pictures\n")
tracing.close()
except:
print('error:'+ image_dir_path)
tracing = open("processing.txt",'a')
tracing.write("all finished")
tracing.close()
| apache-2.0 | -2,854,865,646,833,656,000 | 30.753623 | 91 | 0.572798 | false |
mojolab/LivingData | tools/gdocstest.py | 1 | 1443 | #!/usr/bin/python
import httplib2
import pprint
from apiclient.discovery import build
from apiclient.http import MediaFileUpload
from oauth2client.client import OAuth2WebServerFlow
# Copy your credentials from the APIs Console
CLIENT_ID = "110041408722.apps.googleusercontent.com"
CLIENT_SECRET = "IGeDmFs_w1mieqQ_s9-PJaNN"
# Check https://developers.google.com/drive/scopes for all available scopes
OAUTH_SCOPE = 'https://www.googleapis.com/auth/drive'
# Redirect URI for installed apps
REDIRECT_URI = 'urn:ietf:wg:oauth:2.0:oob'
# Path to the file to upload
FILENAME = '/home/mojoarjun/CSV/HIVOSPOSTLOG-CIRCLES.CSV'
# Run through the OAuth flow and retrieve credentials
flow = OAuth2WebServerFlow(CLIENT_ID, CLIENT_SECRET, OAUTH_SCOPE, REDIRECT_URI)
authorize_url = flow.step1_get_authorize_url()
print 'Go to the following link in your browser: ' + authorize_url
code = raw_input('Enter verification code: ').strip()
credentials = flow.step2_exchange(code)
# Create an httplib2.Http object and authorize it with our credentials
http = httplib2.Http()
http = credentials.authorize(http)
drive_service = build('drive', 'v2', http=http)
# Insert a file
media_body = MediaFileUpload(FILENAME, mimetype='text/plain', resumable=True)
body = {
'title': 'My document',
'description': 'A test document',
'mimeType': 'text/plain'
}
file = drive_service.files().insert(body=body, media_body=media_body).execute()
pprint.pprint(file)
| apache-2.0 | 7,104,577,775,863,679,000 | 30.369565 | 79 | 0.763687 | false |
colour-science/colour-demosaicing | colour_demosaicing/bayer/demosaicing/menon2007.py | 1 | 10345 | # -*- coding: utf-8 -*-
"""
DDFAPD - Menon (2007) Bayer CFA Demosaicing
===========================================
*Bayer* CFA (Colour Filter Array) DDFAPD - *Menon (2007)* demosaicing.
References
----------
- :cite:`Menon2007c` : Menon, D., Andriani, S., & Calvagno, G. (2007).
Demosaicing With Directional Filtering and a posteriori Decision. IEEE
Transactions on Image Processing, 16(1), 132-141.
doi:10.1109/TIP.2006.884928
"""
import numpy as np
from scipy.ndimage.filters import convolve, convolve1d
from colour.utilities import as_float_array, tsplit, tstack
from colour_demosaicing.bayer import masks_CFA_Bayer
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2015-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
__all__ = [
'demosaicing_CFA_Bayer_Menon2007', 'demosaicing_CFA_Bayer_DDFAPD',
'refining_step_Menon2007'
]
def _cnv_h(x, y):
"""
Helper function for horizontal convolution.
"""
return convolve1d(x, y, mode='mirror')
def _cnv_v(x, y):
"""
Helper function for vertical convolution.
"""
return convolve1d(x, y, mode='mirror', axis=0)
def demosaicing_CFA_Bayer_Menon2007(CFA, pattern='RGGB', refining_step=True):
"""
Returns the demosaiced *RGB* colourspace array from given *Bayer* CFA using
DDFAPD - *Menon (2007)* demosaicing algorithm.
Parameters
----------
CFA : array_like
*Bayer* CFA.
pattern : unicode, optional
**{'RGGB', 'BGGR', 'GRBG', 'GBRG'}**,
Arrangement of the colour filters on the pixel array.
refining_step : bool
Perform refining step.
Returns
-------
ndarray
*RGB* colourspace array.
Notes
-----
- The definition output is not clipped in range [0, 1] : this allows for
direct HDRI / radiance image generation on *Bayer* CFA data and post
demosaicing of the high dynamic range data as showcased in this
`Jupyter Notebook <https://github.com/colour-science/colour-hdri/\
blob/develop/colour_hdri/examples/\
examples_merge_from_raw_files_with_post_demosaicing.ipynb>`__.
References
----------
:cite:`Menon2007c`
Examples
--------
>>> CFA = np.array(
... [[ 0.30980393, 0.36078432, 0.30588236, 0.3764706 ],
... [ 0.35686275, 0.39607844, 0.36078432, 0.40000001]])
>>> demosaicing_CFA_Bayer_Menon2007(CFA)
array([[[ 0.30980393, 0.35686275, 0.39215687],
[ 0.30980393, 0.36078432, 0.39607844],
[ 0.30588236, 0.36078432, 0.39019608],
[ 0.32156864, 0.3764706 , 0.40000001]],
<BLANKLINE>
[[ 0.30980393, 0.35686275, 0.39215687],
[ 0.30980393, 0.36078432, 0.39607844],
[ 0.30588236, 0.36078432, 0.39019609],
[ 0.32156864, 0.3764706 , 0.40000001]]])
>>> CFA = np.array(
... [[ 0.3764706 , 0.36078432, 0.40784314, 0.3764706 ],
... [ 0.35686275, 0.30980393, 0.36078432, 0.29803923]])
>>> demosaicing_CFA_Bayer_Menon2007(CFA, 'BGGR')
array([[[ 0.30588236, 0.35686275, 0.3764706 ],
[ 0.30980393, 0.36078432, 0.39411766],
[ 0.29607844, 0.36078432, 0.40784314],
[ 0.29803923, 0.3764706 , 0.42352942]],
<BLANKLINE>
[[ 0.30588236, 0.35686275, 0.3764706 ],
[ 0.30980393, 0.36078432, 0.39411766],
[ 0.29607844, 0.36078432, 0.40784314],
[ 0.29803923, 0.3764706 , 0.42352942]]])
"""
CFA = as_float_array(CFA)
R_m, G_m, B_m = masks_CFA_Bayer(CFA.shape, pattern)
h_0 = np.array([0, 0.5, 0, 0.5, 0])
h_1 = np.array([-0.25, 0, 0.5, 0, -0.25])
R = CFA * R_m
G = CFA * G_m
B = CFA * B_m
G_H = np.where(G_m == 0, _cnv_h(CFA, h_0) + _cnv_h(CFA, h_1), G)
G_V = np.where(G_m == 0, _cnv_v(CFA, h_0) + _cnv_v(CFA, h_1), G)
C_H = np.where(R_m == 1, R - G_H, 0)
C_H = np.where(B_m == 1, B - G_H, C_H)
C_V = np.where(R_m == 1, R - G_V, 0)
C_V = np.where(B_m == 1, B - G_V, C_V)
D_H = np.abs(C_H - np.pad(C_H, ((0, 0),
(0, 2)), mode=str('reflect'))[:, 2:])
D_V = np.abs(C_V - np.pad(C_V, ((0, 2),
(0, 0)), mode=str('reflect'))[2:, :])
del h_0, h_1, CFA, C_V, C_H
k = np.array(
[[0, 0, 1, 0, 1],
[0, 0, 0, 1, 0],
[0, 0, 3, 0, 3],
[0, 0, 0, 1, 0],
[0, 0, 1, 0, 1]]) # yapf: disable
d_H = convolve(D_H, k, mode='constant')
d_V = convolve(D_V, np.transpose(k), mode='constant')
del D_H, D_V
mask = d_V >= d_H
G = np.where(mask, G_H, G_V)
M = np.where(mask, 1, 0)
del d_H, d_V, G_H, G_V
# Red rows.
R_r = np.transpose(np.any(R_m == 1, axis=1)[np.newaxis]) * np.ones(R.shape)
# Blue rows.
B_r = np.transpose(np.any(B_m == 1, axis=1)[np.newaxis]) * np.ones(B.shape)
k_b = np.array([0.5, 0, 0.5])
R = np.where(
np.logical_and(G_m == 1, R_r == 1),
G + _cnv_h(R, k_b) - _cnv_h(G, k_b),
R,
)
R = np.where(
np.logical_and(G_m == 1, B_r == 1) == 1,
G + _cnv_v(R, k_b) - _cnv_v(G, k_b),
R,
)
B = np.where(
np.logical_and(G_m == 1, B_r == 1),
G + _cnv_h(B, k_b) - _cnv_h(G, k_b),
B,
)
B = np.where(
np.logical_and(G_m == 1, R_r == 1) == 1,
G + _cnv_v(B, k_b) - _cnv_v(G, k_b),
B,
)
R = np.where(
np.logical_and(B_r == 1, B_m == 1),
np.where(
M == 1,
B + _cnv_h(R, k_b) - _cnv_h(B, k_b),
B + _cnv_v(R, k_b) - _cnv_v(B, k_b),
),
R,
)
B = np.where(
np.logical_and(R_r == 1, R_m == 1),
np.where(
M == 1,
R + _cnv_h(B, k_b) - _cnv_h(R, k_b),
R + _cnv_v(B, k_b) - _cnv_v(R, k_b),
),
B,
)
RGB = tstack([R, G, B])
del R, G, B, k_b, R_r, B_r
if refining_step:
RGB = refining_step_Menon2007(RGB, tstack([R_m, G_m, B_m]), M)
del M, R_m, G_m, B_m
return RGB
demosaicing_CFA_Bayer_DDFAPD = demosaicing_CFA_Bayer_Menon2007
def refining_step_Menon2007(RGB, RGB_m, M):
"""
Performs the refining step on given *RGB* colourspace array.
Parameters
----------
RGB : array_like
*RGB* colourspace array.
RGB_m : array_like
*Bayer* CFA red, green and blue masks.
M : array_like
Estimation for the best directional reconstruction.
Returns
-------
ndarray
Refined *RGB* colourspace array.
Examples
--------
>>> RGB = np.array(
... [[[0.30588236, 0.35686275, 0.3764706],
... [0.30980393, 0.36078432, 0.39411766],
... [0.29607844, 0.36078432, 0.40784314],
... [0.29803923, 0.37647060, 0.42352942]],
... [[0.30588236, 0.35686275, 0.3764706],
... [0.30980393, 0.36078432, 0.39411766],
... [0.29607844, 0.36078432, 0.40784314],
... [0.29803923, 0.37647060, 0.42352942]]])
>>> RGB_m = np.array(
... [[[0, 0, 1],
... [0, 1, 0],
... [0, 0, 1],
... [0, 1, 0]],
... [[0, 1, 0],
... [1, 0, 0],
... [0, 1, 0],
... [1, 0, 0]]])
>>> M = np.array(
... [[0, 1, 0, 1],
... [1, 0, 1, 0]])
>>> refining_step_Menon2007(RGB, RGB_m, M)
array([[[ 0.30588236, 0.35686275, 0.3764706 ],
[ 0.30980393, 0.36078432, 0.39411765],
[ 0.29607844, 0.36078432, 0.40784314],
[ 0.29803923, 0.3764706 , 0.42352942]],
<BLANKLINE>
[[ 0.30588236, 0.35686275, 0.3764706 ],
[ 0.30980393, 0.36078432, 0.39411766],
[ 0.29607844, 0.36078432, 0.40784314],
[ 0.29803923, 0.3764706 , 0.42352942]]])
"""
R, G, B = tsplit(RGB)
R_m, G_m, B_m = tsplit(RGB_m)
M = as_float_array(M)
del RGB, RGB_m
# Updating of the green component.
R_G = R - G
B_G = B - G
FIR = np.ones(3) / 3
B_G_m = np.where(
B_m == 1,
np.where(M == 1, _cnv_h(B_G, FIR), _cnv_v(B_G, FIR)),
0,
)
R_G_m = np.where(
R_m == 1,
np.where(M == 1, _cnv_h(R_G, FIR), _cnv_v(R_G, FIR)),
0,
)
del B_G, R_G
G = np.where(R_m == 1, R - R_G_m, G)
G = np.where(B_m == 1, B - B_G_m, G)
# Updating of the red and blue components in the green locations.
# Red rows.
R_r = np.transpose(np.any(R_m == 1, axis=1)[np.newaxis]) * np.ones(R.shape)
# Red columns.
R_c = np.any(R_m == 1, axis=0)[np.newaxis] * np.ones(R.shape)
# Blue rows.
B_r = np.transpose(np.any(B_m == 1, axis=1)[np.newaxis]) * np.ones(B.shape)
# Blue columns.
B_c = np.any(B_m == 1, axis=0)[np.newaxis] * np.ones(B.shape)
R_G = R - G
B_G = B - G
k_b = np.array([0.5, 0, 0.5])
R_G_m = np.where(
np.logical_and(G_m == 1, B_r == 1),
_cnv_v(R_G, k_b),
R_G_m,
)
R = np.where(np.logical_and(G_m == 1, B_r == 1), G + R_G_m, R)
R_G_m = np.where(
np.logical_and(G_m == 1, B_c == 1),
_cnv_h(R_G, k_b),
R_G_m,
)
R = np.where(np.logical_and(G_m == 1, B_c == 1), G + R_G_m, R)
del B_r, R_G_m, B_c, R_G
B_G_m = np.where(
np.logical_and(G_m == 1, R_r == 1),
_cnv_v(B_G, k_b),
B_G_m,
)
B = np.where(np.logical_and(G_m == 1, R_r == 1), G + B_G_m, B)
B_G_m = np.where(
np.logical_and(G_m == 1, R_c == 1),
_cnv_h(B_G, k_b),
B_G_m,
)
B = np.where(np.logical_and(G_m == 1, R_c == 1), G + B_G_m, B)
del B_G_m, R_r, R_c, G_m, B_G
# Updating of the red (blue) component in the blue (red) locations.
R_B = R - B
R_B_m = np.where(
B_m == 1,
np.where(M == 1, _cnv_h(R_B, FIR), _cnv_v(R_B, FIR)),
0,
)
R = np.where(B_m == 1, B + R_B_m, R)
R_B_m = np.where(
R_m == 1,
np.where(M == 1, _cnv_h(R_B, FIR), _cnv_v(R_B, FIR)),
0,
)
B = np.where(R_m == 1, R - R_B_m, B)
del R_B, R_B_m, R_m
return tstack([R, G, B])
| bsd-3-clause | -7,104,833,143,231,050,000 | 27.03523 | 79 | 0.487385 | false |
tigeorgia/GeorgiaCorporationScraper | registry/pdfparse.py | 1 | 20874 | # -*- coding: utf-8 -*-
import os, codecs
import itertools
import re
import checkers
from bs4 import BeautifulSoup
# Headers for extracting different types of data from PDF
headers = {
"extract_date": [u"ამონაწერის მომზადების თარიღი:"],
"subject": [u"სუბიექტი"],
"name": [u"საფირმო სახელწოდება:",u"სახელწოდება:"],
"address": [u"იურიდიული მისამართი:"],
"email": [u"ელექტრონული ფოსტა:"],
"email-short": [u"ელ. ფოსტა:"],
"validity_info": [u"დამატებითი ინფორმაციის ნამდვილობაზე პასუხისმგებელია ინფორმაციის მომწოდებელი პირი."],
"more_information": [u"დამატებითი ინფორმაცია:"],
"other_address": [u"სხვა მისამართი:"],
"identification_number": [u"საიდენტიფიკაციო ნომერი:"],
"phone": [u"ტელეფონი:"],
"legal_id_code1": [u"საიდენტიფიკაციო"],
"legal_id_code2": [u"საიდენტიფიკაციო კოდი:"],
"legal_form": [u"სამართლებრივი ფორმა:"],
"reg_date": [u"სახელმწიფო"],
"reg_date2": [u"სახელმწიფო რეგისტრაციის"],
"reg_date3": [u"სახელმწიფო რეგისტრაციის თარიღი:"],
"reg_agency": [u"მარეგისტრირებელი ორგანო:"],
"tax_agency": [u"საგადასახადო ინსპექცია:"],
"directors": [u"ხელმძღვანელობაზე/წარმომადგენლობაზე უფლებამოსილი პირები",u"დირექტორები",],
"partners": [u"პარტნიორები",u"დამფუძნებლები",],
"lien": [u"ყადაღა/აკრძალვა:"],
"leasing": [u"გირავნობა"],
"reorganization": [u"რეორგანიზაცია"],
"founders": [u"დამფუძნებლები"],
}
english_headers = {
"extract_date": ["Extract Preparation Date:"],
"subject": ["Entity"],
"name": ["Firm name:"],
"address": ["Legal address:"],
"email": ["E-mail:"],
"email-short": ["E-mail"],
"phone": [u"ტელეფონი:"],
"legal_id_code": ["Identification code:"],
"legal_form": ["Legal form:"],
"reg_date": ["State registration date:"],
"reg_agency": ["Registering authority:"],
"tax_agency": ["Tax inspection authority:"],
"directors": ["Persons Entitled To Manage / To Represent"],
"partners": ["Partners"],
"lien": ["Lien/Injunction:"],
"leasing": [u"Leasing"],
"reorganization": [u"Reorganization"],
"founders": [u"Founders"],
}
headers_new_format = {
"extract_date": [u"ამონაწერის მომზადების თარიღი:"],
"subject": [u"სუბიექტი"],
"name": [u"სახელწოდება:"],
"address": [u"მისამართი:"],
"email": [u"ფოსტა:"],
"more_information": [u"ინფორმაცია:"],
"other_address": [u"მისამართი:"],
"email-short": [u"ფოსტა:"],
"identification_number": [u"ნომერი:"],
"phone": [u"ტელეფონი:"],
"legal_id_code": [u"კოდი:"],
"legal_form": [u"ფორმა:"],
"reg_date":[u"თარიღი:"],
"reg_agency": [u"ორგანო:"],
"tax_agency": [u"საგადასახადო ინსპექცია:"],
"directors": [u"ხელმძღვანელობაზე/წარმომადგენლობაზე უფლებამოსილი პირები",u"დირექტორები",],
"partners": [u"პარტნიორები",u"დამფუძნებლები",],
"lien": [u"ყადაღა/აკრძალვა:"],
"leasing": [u"გირავნობა"],
"reorganization": [u"რეორგანიზაცია"],
"founders": [u"დამფუძნებლები"],
}
all_splited_headers = {
"extract_date": [u"ამონაწერის მომზადების თარიღი:"],
"subject": [u"სუბიექტი"],
"name1": [u"საფირმო"],
"name2": [u"სახელწოდება:"],
"address1": [u"იურიდიული"],
"address2": [u"მისამართი:"],
"other_address1": [u"სხვა"],
"other_address2": [u"მისამართი:"],
"more_information1": [u"დამატებითი"],
"more_information2": [u"ინფორმაცია:"],
"identification_number1": [u"საიდენტიფიკაციო"],
"identification_number2": [u"ნომერი:"],
"email1": [u"ელექტრონული"],
"email2": [u"ფოსტა:"],
"email-short1": [u"ელ."],
"email-short2": [u"ფოსტა:"],
"phone": [u"ტელეფონი:"],
"legal_id_code1": [u"საიდენტიფიკაციო"],
"legal_id_code2": [u"კოდი:"],
"legal_form1": [u"ფორმა:"],
"legal_form2": [u"ფორმა:"],
"reg_date1": [u"სახელმწიფო"],
"reg_date2":[u"რეგისტრაციის"],
"reg_date3":[u"თარიღი:"],
"reg_agency1": [u"მარეგისტრირებელი"],
"reg_agency2": [u"ორგანო:"],
"tax_agency": [u"საგადასახადო ინსპექცია:"],
"directors": [u"ხელმძღვანელობაზე/წარმომადგენლობაზე უფლებამოსილი პირები",u"დირექტორები",],
"partners": [u"პარტნიორები",u"დამფუძნებლები",],
"lien": [u"ყადაღა/აკრძალვა:"],
"leasing": [u"გირავნობა"],
"reorganization": [u"რეორგანიზაცია"],
"founders": [u"დამფუძნებლები"],
}
headers_to_check_boxes = {
"name": [u"საფირმო სახელწოდება:"],
"address": [u"იურიდიული მისამართი:"],
"email": [u"ელექტრონული ფოსტა:"],
"email-short": [u"ელ. ფოსტა:"],
"validity_info": [u"დამატებითი ინფორმაციის ნამდვილობაზე პასუხისმგებელია ინფორმაციის მომწოდებელი პირი."],
"more_information": [u"დამატებითი ინფორმაცია:"],
"other_address": [u"სხვა მისამართი:"],
"identification_number": [u"საიდენტიფიკაციო ნომერი:"],
"phone": [u"ტელეფონი:"],
"legal_id_code": [u"საიდენტიფიკაციო კოდი:"],
"legal_form": [u"სამართლებრივი ფორმა:"],
"reg_date": [u"სახელმწიფო რეგისტრაციის თარიღი:"],
"reg_agency": [u"მარეგისტრირებელი ორგანო:"],
"tax_agency": [u"საგადასახადო ინსპექცია:"],
}
simple_headers_to_check_boxes = {
"name": [u"სახელწოდება:"],
"address": [u"მისამართი:"],
"email": [u"ელექტრონული ფოსტა:"],
"phone": [u"ტელეფონი:"],
"legal_id_code": [u"კოდი:"],
"legal_form": [u"ფორმა:"],
"reg_date":[u"თარიღი:"],
"reg_agency": [u"ორგანო:"],
"tax_agency": [u"საგადასახადო ინსპექცია:"],
}
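# Roughly, the header tables above play the following roles: `headers` and
# `english_headers` map field names to the header strings of the older
# Georgian and English extract layouts; `headers_new_format` generally keeps
# only the last word of each header, as found in newer extracts;
# `all_splited_headers` lists the individual words of headers that pdftohtml
# may split across separate text boxes; and `headers_to_check_boxes` /
# `simple_headers_to_check_boxes` are used by check_box_values() to separate
# boxes that contain a header together with its value.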
# Find all the text boxes after the start box
# until a box that is in headers is found.
def find_to_next_header(start, headers, search):
results = []
si = 0
for tb in search: #search.index(start) fails with UnicodeError. No idea why
try:
if tb == start:
si += 1
break
except UnicodeError:
print(u"Header: {}".format(start))
print(u"Current: {}".format(tb))
return results
si += 1
all_strings = list(itertools.chain(*headers.values()))
while si < len(search) and search[si] not in all_strings:
#print(u"checking {}".format(search[si+1]))
results.append(search[si])
si += 1
return results
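# Minimal illustration (the entity name is hypothetical): given
#   search = [u"სახელწოდება:", u"Example Ltd", u"მისამართი:", u"Tbilisi"]
# find_to_next_header(u"სახელწოდება:", headers_new_format, search) returns
# [u"Example Ltd"], because u"მისამართი:" appears among the header strings
# and stops the scan.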
# Split text boxes that contain a known header together with its value, and
# return a flat list of stripped strings (each header followed by its value).
def check_box_values(boxList):
checkedBoxes = []
for box in boxList:
if box is not None and box.text is not None:
isHeaderFound = False
for simpleHeader in simple_headers_to_check_boxes:
# we have found a header that may be in the same box as its value
simpleHeaderValue = simple_headers_to_check_boxes[simpleHeader][0]
if simpleHeaderValue in box.text:
isHeaderFound = True
longHeader = headers_to_check_boxes[simpleHeader][0]
if longHeader in box.text:
splitedBox = box.text.split(longHeader)
if len(splitedBox) == 2:
checkedBoxes.append(longHeader)
if splitedBox[1].strip():
checkedBoxes.append(splitedBox[1].strip())
else:
splitedBox = box.text.split(simpleHeaderValue)
if len(splitedBox) == 2:
checkedBoxes.append(simpleHeaderValue)
if splitedBox[1].strip():
checkedBoxes.append(splitedBox[1].strip())
if not isHeaderFound and box.text.strip():
checkedBoxes.append(box.text.strip())
return checkedBoxes
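# Collect the text boxes that follow the given start header up to the next
# known header, choosing the header dictionaries that match the document's
# language/format.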
def get_pdf_lines(start_header,boxes,soup,isEnglishDocument,issecondtry):
headersToCheck = {}
headersToRead = {}
if issecondtry and not isEnglishDocument:
headersToCheck = all_splited_headers
headersToRead = headers_new_format
else:
if isEnglishDocument:
headersToCheck = english_headers
headersToRead = english_headers
else:
headersToCheck = headers
headersToRead = headers
#header_tag = soup.find("text",text=headersToRead[start_header])
header_tag = ""
for b in boxes:
if b == headersToRead[start_header][0]:
header_tag = b
            break
    if header_tag != "":
lines = find_to_next_header(header_tag,headersToCheck,boxes)
return lines
else:
return None
# Find all the text boxes between the two given boxes.
# The definition of "between" is: on the same line,
# after the first box, through on the same line, before the
# second box.
# Relies on a list sorted top to bottom, left to right.
def find_between(start, end,search):
results = []
# Find search indices:
si = search.index(start)
ei = search.index(end)
# This slightly funky search is due to the fact that
# the == operator works on all fields, while the
# other operators operate on only .top.
while search[si] <= search[si-1]:
si -= 1
while search[ei] >= search[ei+1]:
ei += 1
# Search through all text boxes that might fit our criteria
for i in range(si, ei+1):
if search[i].top != start.top and search[i].top != end.top:
results.append(search[i])
elif search[i].top == start.top:
if search[i].left > start.left:
results.append(search[i])
elif search[i].top == end.top:
if search[i].left < end.left:
results.append(search[i])
return results
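# Parse pdftohtml XML output into TextBox objects; 'top' is offset by 1200 per
# page so boxes keep sorting top-to-bottom across page boundaries.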
def boxes_from_xml(text):
soup = BeautifulSoup(text, "xml", from_encoding="utf-8")
boxes = []
for t in soup.find_all("text"):
t['top'] = unicode(int(t['top'])+1200*(int(t.parent['number'])-1))
boxes.append(TextBox(t))
return boxes
# Removes duplicates from a list and returns it sorted.
# Handles items that are almost equal (fuzzy ==) but don't hash equal.
def remove_duplicates(tb_list):
results = sorted(tb_list)
for i in range(0,len(results)):
if (i < len(results)-1 and
results[i] == results[i+1]): # Stop early, avoid out of bounds
# == is defined fuzzily
results[i+1] = results[i]
results = set(results) # But hash isn't fuzzy, so we need to do this to
# get rid of duplicates.
return sorted(results)
# Converts the given PDF file to XML, returns the text.
def pdfToHtml(filename):
# pdftohtml from the poppler suite, executed with the
# following options:
# -q: Don't print messages or errors
# -xml: Output xml, not HTML
# -s: Generate a single document with all PDF pages
# -i: ignore images
# -enc: Encoding
os.system('pdftohtml -q -xml -s -i -enc UTF-8 {} {}'.format(filename,filename+'.xml'))
# Read output
with codecs.open(filename+'.xml', 'rb',encoding="utf-8") as fin:
text = fin.read()
# Clean up after ourselves
#os.remove(tmp+'/'+fname+'.xml')
# Construct new response
return text
# I'm not sure these next two are very useful anymore as currently written.
def parse_address(array):
confidence = 0.0
joined = u''.join(array)
# Some random metrics, currently not used.
if len(joined) > 40 and len(joined) < 90:
confidence += 0.25
if u"საქართველო" in joined:
confidence += 0.30
return [[(u"address", joined, confidence)]]
def parse_email(array):
from django.core.validators import validate_email
from django.core.exceptions import ValidationError
result = []
for s in array:
try:
validate_email(s)
result.append([(u"email", s, 1.0)])
except ValidationError:
pass
return result
def parse_directors(text): # Takes array of strings
# First, flatten the array so we don't have to do a double for-loop
strings = [s.strip() for sr in text for s in sr.split(",")]
# The first item in every record is going to be an ID number
# (probably). So if we can make a reasonably good guess about
# whether a string is an ID number, then we can figure out where
# the new records start (with the unfortunate exception that some
# records have multiple leading IDs).
results = []
record = {"id_code": [],}
for s in strings:
if len(s) == 0:
continue
if checkers.check_id(s) >= 0.5: # Found an ID, might indicate new record.
# Two possibilities: blank / non-blank record
if len(record) == 1:
record["id_code"].append(s)
else: # Non-blank record, create a new record
for key in record:
record[key] = ' '.join(record[key])
results.append(record)
record = {"id_code":[s],}
else: # Not an ID, must be something else.
# Figure out which of our checkers is most confident
# and assume that the string is of that type.
info_types = [(checkers.check_name,"name"),
(checkers.check_nationality,"nationality"),
(checkers.check_position,"position")]
greatest = (0,"unknown")
for typ in info_types:
conf = typ[0](s)
if conf is not None and conf > greatest[0]:
greatest = (conf, typ[1])
try:
record[greatest[1]].append(s)
except KeyError:
record[greatest[1]] = [s]
# Convert arrays into strings
for key in record:
record[key] = ' '.join(record[key])
results.append(record)
return results
def parse_owners(text):
# First, flatten the array so we don't have to do a double for-loop
strings = [s.strip() for sr in text for s in sr.split(",")]
drop = [u"წილი", u"ანგარიშის ნომერი"]
# The first item in every record is going to be an ID number
# (probably). So if we can make a reasonably good guess about
# whether a string is an ID number, then we can figure out where
# the new records start (with the unfortunate exception that some
# records have multiple leading IDs).
results = []
record = {"id_code": [],}
for s in strings:
if s in drop or len(s) == 0:
continue
if checkers.check_share(s) > 0:
share_amt = u""
for part in s.split():
                if re.compile(r'\d{1,3}\.\d{8}%').match(part):
share_amt = part
try:
record["share"].append(share_amt)
except KeyError:
record["share"] = [share_amt]
elif checkers.check_id(s) >= 0.5: # Found an ID, might indicate new record.
# Two possibilities: blank / non-blank record
if len(record) == 1:
record["id_code"].append(s)
else: # Non-blank record, create a new record
for key in record:
record[key] = ' '.join(record[key])
results.append(record)
record = {"id_code":[s],}
else:
# Not an ID, must be something else.
# Figure out which of our checkers is most confident
# and assume that the string is of that type.
info_types = [(checkers.check_name,"name"),(checkers.check_nationality,"nationality"),]
greatest = (0,"unknown")
for typ in info_types:
conf = typ[0](s)
if conf is not None and conf > greatest[0]:
greatest = (conf, typ[1])
try:
record[greatest[1]].append(s)
except KeyError:
record[greatest[1]] = [s]
# Convert arrays into strings
for key in record:
record[key] = ' '.join(record[key])
results.append(record)
return results
from bs4 import Tag
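# A positioned text run from the pdftohtml XML output. Ordering compares 'top'
# then 'left'; __eq__ allows a small tolerance on position so boxes that shift
# slightly still compare equal, while __hash__ stays exact (see
# remove_duplicates above).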
class TextBox:
def __init__(self, tag):
if(isinstance(tag,Tag)):
self.top = int(tag['top'])
self.left = int(tag['left'])
self.width = int(tag['width'])
self.height = int(tag['height'])
self.text = tag.string
else:
raise TypeError(u"Tried to construct TextBox with {}".format(tag.__class__))
def ctr_v(self):
return self.height / 2
def ctr_h(self):
return self.width / 2
    def __lt__(self, other):
        if self.top < other.top:
            return True
        elif self.top == other.top:
            return self.left < other.left
        return False
    def __gt__(self, other):
        if self.top > other.top:
            return True
        elif self.top == other.top:
            return self.left > other.left
        return False
def __le__(self, other):
if self.top <= other.top:
return True
else:
return False
def __ge__(self, other):
if self.top >= other.top:
return True
else:
return False
def __eq__(self, other):
if (isinstance(other, TextBox) and
abs(self.top - other.top) < 2 and
abs(self.left - other.left) < 2 and
self.width == other.width and
self.height == other.height and
self.text == other.text):
return True
else:
return False
def __hash__(self):
return hash((self.top,self.left,self.width,self.height,self.text))
def __repr__(self):
return str(((self.top,self.left),(self.width,self.height)))
def __unicode__(self):
return u"TextBox t="+unicode(self.top)+u", l="+unicode(self.left)+\
u", w="+unicode(self.width)+u", h="+unicode(self.height)#+u", txt="+unicode(self.text)
| mit | -8,582,748,740,361,425,000 | 35.433884 | 108 | 0.572984 | false |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/theano/tensor/nnet/tests/test_conv3d2d.py | 1 | 7537 | from __future__ import absolute_import, print_function, division
import time
from nose.plugins.skip import SkipTest
from nose_parameterized import parameterized
import numpy
try:
from scipy import ndimage
except ImportError:
ndimage = None
from six.moves import xrange
import theano
from theano.gof.opt import check_stack_trace
from theano.tensor.nnet.conv3d2d import conv3d, get_diagonal_subtensor_view, DiagonalSubtensor, IncDiagonalSubtensor
import theano.tests.unittest_tools as utt
if theano.config.mode == 'FAST_COMPILE':
mode_without_gpu = theano.compile.mode.get_mode('FAST_RUN').excluding('gpu')
else:
mode_without_gpu = theano.compile.mode.get_default_mode().excluding('gpu')
def test_get_diagonal_subtensor_view(wrap=lambda a: a):
x = numpy.arange(20).reshape(5, 4).astype('float32')
x = wrap(x)
xv01 = get_diagonal_subtensor_view(x, 0, 1)
# test that it works in 2d
assert numpy.all(numpy.asarray(xv01) == [[12, 9, 6, 3], [16, 13, 10, 7]])
x = numpy.arange(24).reshape(4, 3, 2)
xv01 = get_diagonal_subtensor_view(x, 0, 1)
xv02 = get_diagonal_subtensor_view(x, 0, 2)
xv12 = get_diagonal_subtensor_view(x, 1, 2)
# print 'x', x
# print 'xv01', xv01
# print 'xv02', xv02
assert numpy.all(numpy.asarray(xv01) == [
[[12, 13], [8, 9], [4, 5]],
[[18, 19], [14, 15], [10, 11]]])
assert numpy.all(numpy.asarray(xv02) == [
[[6, 1], [8, 3], [10, 5]],
[[12, 7], [14, 9], [16, 11]],
[[18, 13], [20, 15], [22, 17]],
])
# diagonal views of each leading matrix is the same
# as the slices out of the diagonal view of the entire 3d tensor
for xi, xvi in zip(x, xv12):
assert numpy.all(xvi == get_diagonal_subtensor_view(xi, 0, 1))
def pyconv3d(signals, filters, border_mode='valid'):
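    # Reference 3D convolution computed with scipy.ndimage; used as the
    # expected result when checking the Theano conv3d implementation.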
Ns, Ts, C, Hs, Ws = signals.shape
Nf, Tf, C, Hf, Wf = filters.shape
# if border_mode is not 'valid', the signals need zero-padding
if border_mode == 'full':
Tpad = Tf - 1
Hpad = Hf - 1
Wpad = Wf - 1
elif border_mode == 'half':
Tpad = Tf // 2
Hpad = Hf // 2
Wpad = Wf // 2
else:
Tpad = 0
Hpad = 0
Wpad = 0
if Tpad > 0 or Hpad > 0 or Wpad > 0:
# zero-pad signals
signals_padded = numpy.zeros((Ns, Ts + 2 * Tpad, C,
Hs + 2 * Hpad, Ws + 2 * Wpad), 'float32')
signals_padded[:, Tpad:(Ts + Tpad), :, Hpad:(Hs + Hpad),
Wpad:(Ws + Wpad)] = signals
Ns, Ts, C, Hs, Ws = signals_padded.shape
signals = signals_padded
Tf2 = Tf // 2
Hf2 = Hf // 2
Wf2 = Wf // 2
rval = numpy.zeros((Ns, Ts - Tf + 1, Nf, Hs - Hf + 1, Ws - Wf + 1))
for ns in xrange(Ns):
for nf in xrange(Nf):
for c in xrange(C):
s_i = signals[ns, :, c, :, :]
f_i = filters[nf, :, c, :, :]
r_i = rval[ns, :, nf, :, :]
o_i = ndimage.convolve(s_i, f_i, mode='constant', cval=1)
o_i_sh0 = o_i.shape[0]
# print s_i.shape, f_i.shape, r_i.shape, o_i.shape
r_i += o_i[Tf2:o_i_sh0 - Tf2, Hf2:-Hf2, Wf2:-Wf2]
return rval
def check_diagonal_subtensor_view_traces(fn):
assert check_stack_trace(
fn, ops_to_check=(DiagonalSubtensor, IncDiagonalSubtensor))
@parameterized.expand(('valid', 'full', 'half'), utt.custom_name_func)
def test_conv3d(border_mode):
check_conv3d(border_mode=border_mode,
mode=mode_without_gpu,
shared=theano.tensor._shared)
# This function will also be used in theano/sandbox/cuda/tests/test_tensor_op.py,
# which is not possible if it is decorated by @parameterized.expand
def check_conv3d(border_mode, mode=mode_without_gpu, shared=theano.tensor._shared):
if ndimage is None or not theano.config.cxx:
raise SkipTest("conv3d2d tests need SciPy and a c++ compiler")
Ns, Ts, C, Hs, Ws = 3, 10, 3, 32, 32
Nf, Tf, C, Hf, Wf = 32, 5, 3, 5, 5
signals = numpy.arange(Ns * Ts * C * Hs * Ws).reshape(Ns, Ts, C, Hs, Ws).astype('float32')
filters = numpy.arange(Nf * Tf * C * Hf * Wf).reshape(Nf, Tf, C, Hf, Wf).astype('float32')
t0 = time.time()
pyres = pyconv3d(signals, filters, border_mode)
print(time.time() - t0)
s_signals = shared(signals)
s_filters = shared(filters)
s_output = shared(signals * 0)
out = conv3d(s_signals, s_filters,
signals_shape=signals.shape,
filters_shape=filters.shape,
border_mode=border_mode)
newconv3d = theano.function([], [],
updates={s_output: out},
mode=mode)
check_diagonal_subtensor_view_traces(newconv3d)
t0 = time.time()
newconv3d()
print(time.time() - t0)
utt.assert_allclose(pyres, s_output.get_value(borrow=True))
gsignals, gfilters = theano.grad(out.sum(), [s_signals, s_filters])
gnewconv3d = theano.function([], [],
updates=[(s_filters, gfilters),
(s_signals, gsignals)],
mode=mode,
name='grad')
check_diagonal_subtensor_view_traces(gnewconv3d)
t0 = time.time()
gnewconv3d()
print('grad', time.time() - t0)
Ns, Ts, C, Hs, Ws = 3, 3, 3, 5, 5
Nf, Tf, C, Hf, Wf = 4, 2, 3, 2, 2
signals = numpy.random.rand(Ns, Ts, C, Hs, Ws).astype('float32')
filters = numpy.random.rand(Nf, Tf, C, Hf, Wf).astype('float32')
utt.verify_grad(lambda s, f: conv3d(s, f, border_mode=border_mode),
[signals, filters], eps=1e-1, mode=mode)
# Additional Test that covers the case of patched implementation for filter with Tf=1
Ns, Ts, C, Hs, Ws = 3, 10, 3, 32, 32
Nf, Tf, C, Hf, Wf = 32, 1, 3, 5, 5
signals = numpy.arange(Ns * Ts * C * Hs * Ws).reshape(Ns, Ts, C, Hs, Ws).astype('float32')
filters = numpy.arange(Nf * Tf * C * Hf * Wf).reshape(Nf, Tf, C, Hf, Wf).astype('float32')
t0 = time.time()
pyres = pyconv3d(signals, filters, border_mode)
print(time.time() - t0)
s_signals = shared(signals)
s_filters = shared(filters)
s_output = shared(signals * 0)
out = conv3d(s_signals, s_filters,
signals_shape=signals.shape,
filters_shape=filters.shape,
border_mode=border_mode)
newconv3d = theano.function([], [],
updates={s_output: out},
mode=mode)
t0 = time.time()
newconv3d()
print(time.time() - t0)
utt.assert_allclose(pyres, s_output.get_value(borrow=True))
gsignals, gfilters = theano.grad(out.sum(), [s_signals, s_filters])
gnewconv3d = theano.function([], [],
updates=[(s_filters, gfilters),
(s_signals, gsignals)],
mode=mode,
name='grad')
t0 = time.time()
gnewconv3d()
print('grad', time.time() - t0)
Ns, Ts, C, Hs, Ws = 3, 3, 3, 5, 5
Nf, Tf, C, Hf, Wf = 4, 1, 3, 2, 2
signals = numpy.random.rand(Ns, Ts, C, Hs, Ws).astype('float32')
filters = numpy.random.rand(Nf, Tf, C, Hf, Wf).astype('float32')
utt.verify_grad(lambda s, f: conv3d(s, f, border_mode=border_mode),
[signals, filters], eps=1e-1, mode=mode)
| agpl-3.0 | 3,719,070,918,848,381,000 | 34.384977 | 116 | 0.553934 | false |
reubano/swutils | manage.py | 1 | 2288 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" A script to manage development tasks """
from __future__ import (
absolute_import, division, print_function, with_statement,
unicode_literals)
from os import path as p
from manager import Manager
from subprocess import call
manager = Manager()
BASEDIR = p.dirname(__file__)
@manager.command
def clean():
"""Remove Python file and build artifacts"""
call(p.join(BASEDIR, 'helpers', 'clean'), shell=True)
@manager.command
def check():
"""Check staged changes for lint errors"""
call(p.join(BASEDIR, 'helpers', 'check-stage'), shell=True)
@manager.arg('where', 'w', help='Modules to check')
@manager.command
def lint(where=None):
"""Check style with flake8"""
call('flake8 %s' % (where if where else ''), shell=True)
@manager.command
def pipme():
"""Install requirements.txt"""
call('pip install -r requirements.txt', shell=True)
@manager.command
def require():
"""Create requirements.txt"""
cmd = 'pip freeze -l | grep -vxFf dev-requirements.txt > requirements.txt'
call(cmd, shell=True)
@manager.arg('where', 'w', help='test path', default=None)
@manager.arg(
'stop', 'x', help='Stop after first error', type=bool, default=False)
@manager.command
def test(where=None, stop=False):
"""Run nose and script tests"""
opts = '-xv' if stop else '-v'
opts += 'w %s' % where if where else ''
call([p.join(BASEDIR, 'helpers', 'test'), opts])
@manager.command
def register():
"""Register package with PyPI"""
call('python %s register' % p.join(BASEDIR, 'setup.py'), shell=True)
@manager.command
def release():
"""Package and upload a release"""
sdist()
wheel()
upload()
@manager.command
def build():
"""Create a source distribution and wheel package"""
sdist()
wheel()
@manager.command
def upload():
"""Upload distribution files"""
call('twine upload %s' % p.join(BASEDIR, 'dist', '*'), shell=True)
@manager.command
def sdist():
"""Create a source distribution package"""
call(p.join(BASEDIR, 'helpers', 'srcdist'), shell=True)
@manager.command
def wheel():
"""Create a wheel package"""
call(p.join(BASEDIR, 'helpers', 'wheel'), shell=True)
if __name__ == '__main__':
manager.main()
| mit | -581,464,328,525,125,900 | 21.653465 | 78 | 0.645542 | false |
bstroebl/QGIS | python/plugins/GdalTools/tools/dialogSRS.py | 1 | 2128 | # -*- coding: utf-8 -*-
"""
***************************************************************************
dialogSRS.py
---------------------
Date : June 2010
Copyright : (C) 2010 by Giuseppe Sucameli
Email : brush dot tyler at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Giuseppe Sucameli'
__date__ = 'June 2010'
__copyright__ = '(C) 2010, Giuseppe Sucameli'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from qgis.gui import *
class GdalToolsSRSDialog(QDialog):
def __init__(self, title, parent=None):
QDialog.__init__(self, parent)
self.setWindowTitle( title )
layout = QVBoxLayout()
self.selector = QgsProjectionSelector(self)
buttonBox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Close)
layout.addWidget(self.selector)
layout.addWidget(buttonBox)
self.setLayout(layout)
self.connect(buttonBox, SIGNAL("accepted()"), self.accept)
self.connect(buttonBox, SIGNAL("rejected()"), self.reject)
def epsg(self):
return "EPSG:" + str(self.selector.selectedEpsg())
def proj4string(self):
return self.selector.selectedProj4String()
def getProjection(self):
if self.selector.selectedEpsg() != 0:
return self.epsg()
if not self.selector.selectedProj4String().isEmpty():
return self.proj4string()
return QString()
| gpl-2.0 | 715,995,025,839,427,600 | 33.885246 | 80 | 0.522086 | false |
alaeddine10/ggrc-core | src/ggrc/fulltext/mysql.py | 1 | 1158 |
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By:
# Maintained By:
from ggrc import db
from sqlalchemy import event
from sqlalchemy.sql.expression import text
from sqlalchemy.schema import DDL
from .sql import SqlIndexer
class MysqlRecordProperty(db.Model):
__tablename__ = 'fulltext_record_properties'
__table_args__ = {'mysql_engine': 'myisam'}
key = db.Column(db.Integer, primary_key=True)
type = db.Column(db.String(64), primary_key=True)
tags = db.Column(db.String)
property = db.Column(db.String(64), primary_key=True)
content = db.Column(db.Text)
event.listen(
MysqlRecordProperty.__table__,
'after_create',
DDL('ALTER TABLE {tablename} ADD FULLTEXT INDEX {tablename}_text_idx '
'(content)'.format(tablename=MysqlRecordProperty.__tablename__))
)
class MysqlIndexer(SqlIndexer):
record_type = MysqlRecordProperty
def search(self, terms):
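    # Full-text search backed by the MyISAM FULLTEXT index created above
    # (MATCH (content) AGAINST (:terms)).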
return db.session.query(self.record_type).filter(
'match (content) against (:terms)').params(terms=terms).all()
Indexer = MysqlIndexer | apache-2.0 | 8,088,862,217,565,668,000 | 30.324324 | 78 | 0.722798 | false |
GoogleCloudPlatform/healthcare | imaging/ml/toolkit/hcls_imaging_ml_toolkit/exception.py | 1 | 1060 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom status codes and exceptions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Text
from google.rpc import code_pb2
class CustomExceptionError(Exception):
""""Exceptions that have an status_code attribute of a known error type."""
def __init__(self, message: Text, status_code: code_pb2.Code):
super(CustomExceptionError, self).__init__(message)
self.status_code = status_code
| apache-2.0 | -4,307,747,215,463,142,000 | 35.551724 | 77 | 0.75 | false |
PythonMid/pymidweb | pythonmid/apps/community/migrations/0006_auto_20150624_2211.py | 1 | 1137 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import pythonmid.apps.community.models
class Migration(migrations.Migration):
dependencies = [
('community', '0005_sponsor_level'),
]
operations = [
migrations.AlterField(
model_name='sponsor',
name='level',
field=models.IntegerField(choices=[(1, 'Gold'), (2, 'Platinum')], help_text='Nivel Prioridad Sponsor'),
),
migrations.AlterField(
model_name='sponsor',
name='logo',
field=models.ImageField(help_text='imagen del patrocinador', upload_to=pythonmid.apps.community.models.image_path),
),
migrations.AlterField(
model_name='sponsor',
name='provides',
field=models.CharField(max_length=600, help_text='Recurso o actividad que aporta a la comunidad'),
),
migrations.AlterField(
model_name='sponsor',
name='website',
field=models.URLField(max_length=700, help_text='URL de sitio oficial'),
),
]
| gpl-2.0 | 3,638,746,908,936,171,000 | 31.485714 | 127 | 0.593668 | false |
anushreejangid/csmpe-main | csmpe/core_plugins/csm_install_operations/ios_xe/pre_activate.py | 1 | 8907 | # =============================================================================
#
# Copyright (c) 2016, Cisco Systems
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
import re
from csmpe.plugins import CSMPlugin
from utils import available_space
from utils import number_of_rsp
from utils import install_folder
from utils import check_issu_readiness
from utils import remove_exist_subpkgs
from utils import install_package_family
from utils import create_folder
from utils import xe_show_platform
from utils import check_pkg_conf
class Plugin(CSMPlugin):
"""This plugin performs pre-activate tasks."""
name = "Install Pre-Activate Plugin"
platforms = {'ASR900'}
phases = {'Pre-Activate'}
os = {'XE'}
def run(self):
self.ctx.info("Hardware platform: {}".format(self.ctx._connection.platform))
self.ctx.info("OS Version: {}".format(self.ctx._connection.os_version))
try:
packages = self.ctx.software_packages
except AttributeError:
self.ctx.warning("No package list provided. Skipping calculation of required free bootflash memory.")
return
pkg = ''.join(packages)
con_platforms = ['ASR-902', 'ASR-920']
sub_platforms = ['ASR-903', 'ASR-907']
rsp_count = 1
folder = 'bootflash:'
stby_folder = 'stby-bootflash:'
# check the device type vs the package family
supported_imgs = {}
supported_imgs['asr902'] = ['asr900', 'asr903']
supported_imgs['asr903'] = ['asr900', 'asr903']
supported_imgs['asr907'] = ['asr900', 'asr903']
supported_imgs['asr920'] = ['asr920']
        m = re.search(r'ASR-(\d+)', self.ctx._connection.platform)
if m:
device_family = m.group(1)
device_family = 'asr' + device_family
else:
self.ctx.error("Unspported device: {}".format(self.ctx._connection.platform))
return
pkg_family = install_package_family(pkg)
if not pkg_family:
self.ctx.info("Private device image: {}".format(pkg))
if pkg_family not in supported_imgs[device_family]:
self.ctx.info("Private device image: {} on {}".format(pkg, self.ctx._connection.platform))
# check the RSP type between image and device:
curr_rsp = None
pkg_rsp = None
output = self.ctx.send("show version | include RSP")
if output:
            m = re.search(r'(RSP\d)', output)
if m:
curr_rsp = m.group(0).lower()
        m = re.search(r'(rsp\d)', pkg)
if m:
pkg_rsp = m.group(0)
if curr_rsp and pkg_rsp and curr_rsp != pkg_rsp:
self.ctx.info("Incompatible Route processor in {} for this device {}".format(pkg, curr_rsp))
# Determine one of the following modes: consolidated, subpackage, or issu
if self.ctx._connection.platform in con_platforms:
mode = 'consolidated'
elif self.ctx._connection.platform in sub_platforms:
mode = 'subpackage'
# Determine the number of RSP's in the chassis
rsp_count = number_of_rsp(self.ctx)
if rsp_count == 0:
self.ctx.error("No RSP is discovered")
return
# Determine the install folder
folder = install_folder(self.ctx)
stby_folder = 'stby-' + folder
# Create the folder if it does not exist
if not create_folder(self.ctx, folder):
self.ctx.error("Install folder {} creation failed", format(folder))
return
if rsp_count == 2 and not create_folder(self.ctx, stby_folder):
self.ctx.error("Install folder {} creation "
"failed", format(stby_folder))
return
else:
self.ctx.error("Unsupported platform: {}".format(self.ctx._connection.platform))
return
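        # Budget ~10 MB of overhead on top of the package size found on bootflash: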
total_size = 10000000
valid_pkg_conf = False
if mode == 'subpackage':
# Check if the packages.conf is valid
valid_pkg_conf = check_pkg_conf(self.ctx, folder)
# Remove residual image files from previous installations
if valid_pkg_conf:
remove_exist_subpkgs(self.ctx, folder, pkg)
else:
self.ctx.warning("Empty or invalid {}/packages.conf".format(folder))
self.ctx.warning("Residual packages from previous installations are not "
"automatically removed from bootflash: / stby-bootflash:.")
self.ctx.info("Sub-package mode will be performed to "
"activate package = {}".format(pkg))
cmd = "dir bootflash: | include " + pkg
output = self.ctx.send(cmd)
if output:
            m = re.search(r'-rw-\s+(\d+)\s+', output)
if m:
total_size += int(m.group(1))
flash_free = available_space(self.ctx, 'bootflash:')
self.ctx.info("Total required / bootflash "
"available: {} / {} bytes".format(total_size, flash_free))
if flash_free < total_size:
self.ctx.error("Not enough space on bootflash: to install packages. "
"The install process can't proceed.\n"
"Please erase unused images, crashinfo, "
"core files, and tracelogs")
else:
self.ctx.info("There is enough space on bootflash: to install packages.")
if rsp_count == 2:
if valid_pkg_conf:
remove_exist_subpkgs(self.ctx, stby_folder, pkg)
stby_free = available_space(self.ctx, 'stby-bootflash:')
self.ctx.info("Total required / stby-bootflash "
"available: {} / {} bytes".format(total_size, stby_free))
if stby_free < total_size:
self.ctx.error("Not enough space on stby-bootflash: to "
"install packages. The install process can't proceed.\n"
"Please erase unused images, crashinfo, core files, "
"and tracelogs")
else:
self.ctx.info("There is enough space on stby-bootflash: to install packages.")
# Determine if ISSU is feasible
if mode == 'subpackage' and rsp_count == 2 and valid_pkg_conf:
if check_issu_readiness(self.ctx, pkg, total_size):
mode = 'issu'
self.ctx.info("ISSU will be performed to activate package = {}".format(pkg))
# Log the status of RP and SIP
platform_info = xe_show_platform(self.ctx)
if not platform_info:
self.ctx.error("The CLI 'show platform' is not able to determine the status of RP and SIP ")
return
self.ctx.info("show platform = {}".format(platform_info))
self.ctx.info("Activate number of RSP = {}".format(rsp_count))
self.ctx.info("Activate package = {}".format(pkg))
self.ctx.info("Install folder = {}".format(folder))
self.ctx.info("Activate package mode = {}".format(mode))
self.ctx.save_data('xe_rsp_count', rsp_count)
self.ctx.save_data('xe_activate_pkg', pkg)
self.ctx.save_data('xe_boot_mode', mode)
self.ctx.save_data('xe_install_folder', folder)
self.ctx.save_data('xe_show_platform', platform_info)
return True
| bsd-2-clause | 3,346,183,386,358,608,000 | 41.822115 | 113 | 0.589536 | false |
jonmorehouse/vimhub | lib/issue.py | 1 | 8147 | import utils
import config
import re
import git
import copy
import comment_list
import webbrowser
import github
try:
import vim
except ImportError as e:
vim = False
i_hash = {} # hash individual issues
class Issue:
defaults = {
"title": "",
"assignee": "",
"milestone": "",
"state": "open",
"labels": [],
"body": "",
}
def __init__(self, **kwargs):
# set defaults for class
if not Issue.defaults.get("assignee"):
Issue.defaults["assignee"] = utils.github.user()["login"]
self.repo = kwargs.get("repo")
self.number = kwargs.get("number")
self.issue_uri = "repos/%s/issues/%s" % (self.repo, self.number)
self.comments = comment_list.CommentList(self.number, self.repo)
self._get_data()
@classmethod
def open(cls, *args):
i = cls._issue_from_args(*args)
if not i or not i.repo:
print "Not a valid repository or issue. Please try again or consult help pages"
return
i.post_hook()
@classmethod
def browse(cls, *args):
i = cls._issue_from_args(*args)
if hasattr(i, "url"):
webbrowser.open(i.url)
i.map_buffer()
@classmethod
def save(cls):
i = cls._issue_from_buffer()
if not i:
print "Error has occurred. Issue was not found. Please report an issue on github"
return
# parse the uri from this issue
i.position = vim.current.window.cursor
i.parse() # parse the buffer
i.update() # push to the server
i.post_hook()
@classmethod
def toggle_state(cls):
i = cls._issue_from_buffer()
i.position = vim.current.window.cursor
i.parse() # parse current buffer to correct location
i.change_state()
i.update()
i.post_hook()
@classmethod
def _issue_from_args(cls, *args, **kwargs):
kwargs = utils.args_to_kwargs(args, kwargs)
if not kwargs.get("args") or len(kwargs.get("args")) == 0:
kwargs["number"] = "new"
else:
kwargs["number"] = kwargs.get("args")[0]
del kwargs["args"]
key = "%s/%s" % (kwargs.get("repo"), kwargs.get("number"))
if not i_hash.has_key(key):
i_hash[key] = cls(**kwargs)
return i_hash.get(key)
@classmethod
def _issue_from_buffer(cls):
# bname corresponds to to the issue hash key
bname = vim.current.buffer.name
# check to make sure the correct pieces are here
mg = re.match(r"(?P<user>.*)/(?P<repo>.*)/(?P<issue>.*)", bname)
if not mg:
return None
return i_hash.get(bname)
def change_state(self):
if self.data["state"] == "open":
self.data["state"] = "closed"
else:
self.data["state"] = "open"
def parse(self):
# reset body
self.data["body"] = []
# this is messy - convert to a matchgroup in the future
for index, line in enumerate(vim.current.buffer[1:]):
mg = re.match(r"# (?P<label>[^:]+): (?P<value>.*)", line)
# handle normal attribute
if mg:
value = mg.group("value")
label = mg.group("label").lower()
if label in self.defaults.keys():
if type(self.defaults[label]) == list:
self.data[label] = value.split(",")
else:
self.data[label] = value
            # reached the comments section; hand the rest of the buffer to CommentList
elif re.search(r"^## Comments Issue #%s" % self.number, line):
# pass the comments to the other section
self.comments.parse(vim.current.buffer[index+1:-1])
break
else:
self.data["body"].append(line)
self.data["body"] = utils.trim_lines(self.data["body"])
def post_hook(self):
self.draw()
self.map_buffer()
if hasattr(self, "position"):
vim.command(str(self.position[0]))
#vim.command("|%s" % str(self.position[1]))
def map_buffer(self):
# autocommand to call on post save ...
vim.command("map <buffer> s :python issue.Issue.save()<cr>") # uses current buffer name
# toggle the state of the current issue
vim.command("map <buffer> cc :python issue.Issue.toggle_state()<cr>") # uses current buffer name
# hit enter to browse the current url
vim.command("map <buffer> <cr> :normal! 0<cr>:python issue.Issue.browse(\"%s\", \"%s\")<cr>" % (self.repo, self.number)) # doesn't use current buffer name
def draw(self):
self.buffer_name = "%s/%s" % (self.repo, self.number)
b = utils.get_buffer(self.buffer_name)
vim.command("1,$d")
vim.command("set filetype=markdown")
# print out issue
b.append("## %s # %s" % (self.repo, self.number))
b.append("")
# iterate through all keys that aren't body
keys = self.data.keys()
keys.remove("body")
for key in keys:
value = self.data[key]
if type(value) == list:
value = ",".join(value)
b.append("# %s: %s" % (key.capitalize(), value))
# print out body if applicable
if self.data.has_key("body") and self.data["body"]:
for line in self.data["body"].splitlines():
b.append(line)
# now we need to print the comments
self.comments.draw(b)
# remove leading line
vim.command("1delete _")
def update(self):
if self.number == "new":
self._create_issue()
else:
self._save_issue()
def _get_data(self):
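        # Start from the class-level defaults; for existing issues these are
        # overwritten below with fields fetched from the GitHub issues API.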
self.data = copy.deepcopy(self.defaults)
# get issue from github api if not new
if not self.number == "new":
data, status = github.request(github.url(self.issue_uri))
if not status:
utils.log(data)
return
# issue was successfully requested
for key in self.defaults.keys() + ["assignee", "user"]:
# github will return None
if key in ("assignee", "user") and data.get(key):
self.data[key] = data[key]["login"]
elif key == "labels":
self.data[key] = [str(label["name"]) for label in data[key]]
elif key == "milestone" and data.get("milestone"):
self.data[key] = data[key]["title"]
elif data.get(key):
self.data[key] = data[key]
# grab the browse url
self.url = data["html_url"]
def _create_issue(self):
# create issue on the server
uri = "repos/%s/issues" % self.repo
url = github.url(uri)
data = utils.clean_data(copy.deepcopy(self.data), ["state"])
if not data or len(data.keys()) == 0:
utils.log("New issues require title/body")
return
data, status = github.request(url, "post", data)
if not status:
utils.log(data)
return
# update attributes as needed for object
self.number = str(data["number"])
self.data["user"] = data["user"]["login"]
self.url = data["html_url"]
self.issue_uri = "repos/%s/issues/%s" % (self.repo, self.number)
self.comments.number = self.number
# clean up hash
del i_hash["%s/%s" % (self.repo, "new")]
i_hash["%s/%s" % (self.repo, self.number)] = self
# delete the old buffer that we don't need any more
vim.command("silent new")
vim.command("bdelete %s" % self.buffer_name)
def _save_issue(self):
# get ready for the patch operation
url = github.url(self.issue_uri)
data = utils.clean_data(copy.deepcopy(self.data), ["number", "user", "labels"])
data, status = github.request(url, "patch", data)
| mit | 8,800,810,227,783,466,000 | 30.824219 | 162 | 0.527924 | false |
samba-team/samba | source4/dsdb/tests/python/passwords.py | 1 | 39738 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This tests the password changes over LDAP for AD implementations
#
# Copyright Matthias Dieter Wallnoefer 2010
#
# Notice: This tests will also work against Windows Server if the connection is
# secured enough (SASL with a minimum of 128 Bit encryption) - consider
# MS-ADTS 3.1.1.3.1.5
import optparse
import sys
import base64
import time
import os
sys.path.insert(0, "bin/python")
import samba
from samba.tests.subunitrun import SubunitOptions, TestProgram
from samba.tests.password_test import PasswordTestCase
import samba.getopt as options
from samba.auth import system_session
from samba.credentials import Credentials
from ldb import SCOPE_BASE, LdbError
from ldb import ERR_ATTRIBUTE_OR_VALUE_EXISTS
from ldb import ERR_UNWILLING_TO_PERFORM, ERR_INSUFFICIENT_ACCESS_RIGHTS
from ldb import ERR_NO_SUCH_ATTRIBUTE
from ldb import ERR_CONSTRAINT_VIOLATION
from ldb import Message, MessageElement, Dn
from ldb import FLAG_MOD_ADD, FLAG_MOD_REPLACE, FLAG_MOD_DELETE
from samba import gensec
from samba.samdb import SamDB
import samba.tests
from samba.tests import delete_force
from password_lockout_base import BasePasswordTestCase
parser = optparse.OptionParser("passwords.py [options] <host>")
sambaopts = options.SambaOptions(parser)
parser.add_option_group(sambaopts)
parser.add_option_group(options.VersionOptions(parser))
# use command line creds if available
credopts = options.CredentialsOptions(parser)
parser.add_option_group(credopts)
subunitopts = SubunitOptions(parser)
parser.add_option_group(subunitopts)
opts, args = parser.parse_args()
if len(args) < 1:
parser.print_usage()
sys.exit(1)
host = args[0]
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp)
# Force an encrypted connection
creds.set_gensec_features(creds.get_gensec_features() | gensec.FEATURE_SEAL)
#
# Tests start here
#
class PasswordTests(PasswordTestCase):
def setUp(self):
super(PasswordTests, self).setUp()
self.ldb = SamDB(url=host, session_info=system_session(lp), credentials=creds, lp=lp)
# Gets back the basedn
base_dn = self.ldb.domain_dn()
# Gets back the configuration basedn
configuration_dn = self.ldb.get_config_basedn().get_linearized()
# permit password changes during this test
self.allow_password_changes()
self.base_dn = self.ldb.domain_dn()
# (Re)adds the test user "testuser" with no password atm
delete_force(self.ldb, "cn=testuser,cn=users," + self.base_dn)
self.ldb.add({
"dn": "cn=testuser,cn=users," + self.base_dn,
"objectclass": "user",
"sAMAccountName": "testuser"})
# Tests a password change when we don't have any password yet with a
# wrong old password
try:
self.ldb.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
userPassword: noPassword
add: userPassword
userPassword: thatsAcomplPASS2
""")
self.fail()
except LdbError as e:
(num, msg) = e.args
self.assertEqual(num, ERR_CONSTRAINT_VIOLATION)
# Windows (2008 at least) seems to have some small bug here: it
# returns "0000056A" on longer (always wrong) previous passwords.
self.assertTrue('00000056' in msg)
# Sets the initial user password with a "special" password change
# I think that this internally is a password set operation and it can
# only be performed by someone which has password set privileges on the
# account (at least in s4 we do handle it like that).
self.ldb.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
add: userPassword
userPassword: thatsAcomplPASS1
""")
# But in the other way around this special syntax doesn't work
try:
self.ldb.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
userPassword: thatsAcomplPASS1
add: userPassword
""")
self.fail()
except LdbError as e1:
(num, _) = e1.args
self.assertEqual(num, ERR_CONSTRAINT_VIOLATION)
# Enables the user account
self.ldb.enable_account("(sAMAccountName=testuser)")
# Open a second LDB connection with the user credentials. Use the
# command line credentials for information like the domain, the realm
# and the workstation.
creds2 = Credentials()
creds2.set_username("testuser")
creds2.set_password("thatsAcomplPASS1")
creds2.set_domain(creds.get_domain())
creds2.set_realm(creds.get_realm())
creds2.set_workstation(creds.get_workstation())
creds2.set_gensec_features(creds2.get_gensec_features()
| gensec.FEATURE_SEAL)
self.ldb2 = SamDB(url=host, credentials=creds2, lp=lp)
def test_unicodePwd_hash_set(self):
"""Performs a password hash set operation on 'unicodePwd' which should be prevented"""
# Notice: Direct hash password sets should never work
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["unicodePwd"] = MessageElement("XXXXXXXXXXXXXXXX", FLAG_MOD_REPLACE,
"unicodePwd")
try:
self.ldb.modify(m)
self.fail()
except LdbError as e2:
(num, _) = e2.args
self.assertEqual(num, ERR_UNWILLING_TO_PERFORM)
def test_unicodePwd_hash_change(self):
"""Performs a password hash change operation on 'unicodePwd' which should be prevented"""
# Notice: Direct hash password changes should never work
# Hash password changes should never work
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: unicodePwd
unicodePwd: XXXXXXXXXXXXXXXX
add: unicodePwd
unicodePwd: YYYYYYYYYYYYYYYY
""")
self.fail()
except LdbError as e3:
(num, _) = e3.args
self.assertEqual(num, ERR_CONSTRAINT_VIOLATION)
def test_unicodePwd_clear_set(self):
"""Performs a password cleartext set operation on 'unicodePwd'"""
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["unicodePwd"] = MessageElement("\"thatsAcomplPASS2\"".encode('utf-16-le'),
FLAG_MOD_REPLACE, "unicodePwd")
self.ldb.modify(m)
def test_unicodePwd_clear_change(self):
"""Performs a password cleartext change operation on 'unicodePwd'"""
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: unicodePwd
unicodePwd:: """ + base64.b64encode("\"thatsAcomplPASS1\"".encode('utf-16-le')).decode('utf8') + """
add: unicodePwd
unicodePwd:: """ + base64.b64encode("\"thatsAcomplPASS2\"".encode('utf-16-le')).decode('utf8') + """
""")
# Wrong old password
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: unicodePwd
unicodePwd:: """ + base64.b64encode("\"thatsAcomplPASS3\"".encode('utf-16-le')).decode('utf8') + """
add: unicodePwd
unicodePwd:: """ + base64.b64encode("\"thatsAcomplPASS4\"".encode('utf-16-le')).decode('utf8') + """
""")
self.fail()
except LdbError as e4:
(num, msg) = e4.args
self.assertEqual(num, ERR_CONSTRAINT_VIOLATION)
self.assertTrue('00000056' in msg)
# A change to the same password again will not work (password history)
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: unicodePwd
unicodePwd:: """ + base64.b64encode("\"thatsAcomplPASS2\"".encode('utf-16-le')).decode('utf8') + """
add: unicodePwd
unicodePwd:: """ + base64.b64encode("\"thatsAcomplPASS2\"".encode('utf-16-le')).decode('utf8') + """
""")
self.fail()
except LdbError as e5:
(num, msg) = e5.args
self.assertEqual(num, ERR_CONSTRAINT_VIOLATION)
self.assertTrue('0000052D' in msg)
def test_dBCSPwd_hash_set(self):
"""Performs a password hash set operation on 'dBCSPwd' which should be prevented"""
# Notice: Direct hash password sets should never work
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["dBCSPwd"] = MessageElement("XXXXXXXXXXXXXXXX", FLAG_MOD_REPLACE,
"dBCSPwd")
try:
self.ldb.modify(m)
self.fail()
except LdbError as e6:
(num, _) = e6.args
self.assertEqual(num, ERR_UNWILLING_TO_PERFORM)
def test_dBCSPwd_hash_change(self):
"""Performs a password hash change operation on 'dBCSPwd' which should be prevented"""
# Notice: Direct hash password changes should never work
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: dBCSPwd
dBCSPwd: XXXXXXXXXXXXXXXX
add: dBCSPwd
dBCSPwd: YYYYYYYYYYYYYYYY
""")
self.fail()
except LdbError as e7:
(num, _) = e7.args
self.assertEqual(num, ERR_UNWILLING_TO_PERFORM)
def test_userPassword_clear_set(self):
"""Performs a password cleartext set operation on 'userPassword'"""
# Notice: This works only against Windows if "dSHeuristics" has been set
# properly
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["userPassword"] = MessageElement("thatsAcomplPASS2", FLAG_MOD_REPLACE,
"userPassword")
self.ldb.modify(m)
def test_userPassword_clear_change(self):
"""Performs a password cleartext change operation on 'userPassword'"""
# Notice: This works only against Windows if "dSHeuristics" has been set
# properly
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
userPassword: thatsAcomplPASS1
add: userPassword
userPassword: thatsAcomplPASS2
""")
# Wrong old password
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
userPassword: thatsAcomplPASS3
add: userPassword
userPassword: thatsAcomplPASS4
""")
self.fail()
except LdbError as e8:
(num, msg) = e8.args
self.assertEqual(num, ERR_CONSTRAINT_VIOLATION)
self.assertTrue('00000056' in msg)
# A change to the same password again will not work (password history)
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
userPassword: thatsAcomplPASS2
add: userPassword
userPassword: thatsAcomplPASS2
""")
self.fail()
except LdbError as e9:
(num, msg) = e9.args
self.assertEqual(num, ERR_CONSTRAINT_VIOLATION)
self.assertTrue('0000052D' in msg)
def test_clearTextPassword_clear_set(self):
"""Performs a password cleartext set operation on 'clearTextPassword'"""
# Notice: This never works against Windows - only supported by us
try:
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["clearTextPassword"] = MessageElement("thatsAcomplPASS2".encode('utf-16-le'),
FLAG_MOD_REPLACE, "clearTextPassword")
self.ldb.modify(m)
# this passes against s4
except LdbError as e10:
(num, msg) = e10.args
# "NO_SUCH_ATTRIBUTE" is returned by Windows -> ignore it
if num != ERR_NO_SUCH_ATTRIBUTE:
raise LdbError(num, msg)
def test_clearTextPassword_clear_change(self):
"""Performs a password cleartext change operation on 'clearTextPassword'"""
# Notice: This never works against Windows - only supported by us
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: clearTextPassword
clearTextPassword:: """ + base64.b64encode("thatsAcomplPASS1".encode('utf-16-le')).decode('utf8') + """
add: clearTextPassword
clearTextPassword:: """ + base64.b64encode("thatsAcomplPASS2".encode('utf-16-le')).decode('utf8') + """
""")
# this passes against s4
except LdbError as e11:
(num, msg) = e11.args
# "NO_SUCH_ATTRIBUTE" is returned by Windows -> ignore it
if num != ERR_NO_SUCH_ATTRIBUTE:
raise LdbError(num, msg)
# Wrong old password
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: clearTextPassword
clearTextPassword:: """ + base64.b64encode("thatsAcomplPASS3".encode('utf-16-le')).decode('utf8') + """
add: clearTextPassword
clearTextPassword:: """ + base64.b64encode("thatsAcomplPASS4".encode('utf-16-le')).decode('utf8') + """
""")
self.fail()
except LdbError as e12:
(num, msg) = e12.args
# "NO_SUCH_ATTRIBUTE" is returned by Windows -> ignore it
if num != ERR_NO_SUCH_ATTRIBUTE:
self.assertEqual(num, ERR_CONSTRAINT_VIOLATION)
self.assertTrue('00000056' in msg)
# A change to the same password again will not work (password history)
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: clearTextPassword
clearTextPassword:: """ + base64.b64encode("thatsAcomplPASS2".encode('utf-16-le')).decode('utf8') + """
add: clearTextPassword
clearTextPassword:: """ + base64.b64encode("thatsAcomplPASS2".encode('utf-16-le')).decode('utf8') + """
""")
self.fail()
except LdbError as e13:
(num, msg) = e13.args
# "NO_SUCH_ATTRIBUTE" is returned by Windows -> ignore it
if num != ERR_NO_SUCH_ATTRIBUTE:
self.assertEqual(num, ERR_CONSTRAINT_VIOLATION)
self.assertTrue('0000052D' in msg)
def test_failures(self):
"""Performs some failure testing"""
try:
self.ldb.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
userPassword: thatsAcomplPASS1
""")
self.fail()
except LdbError as e14:
(num, _) = e14.args
self.assertEqual(num, ERR_CONSTRAINT_VIOLATION)
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
userPassword: thatsAcomplPASS1
""")
self.fail()
except LdbError as e15:
(num, _) = e15.args
self.assertEqual(num, ERR_CONSTRAINT_VIOLATION)
try:
self.ldb.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
""")
self.fail()
except LdbError as e16:
(num, _) = e16.args
self.assertEqual(num, ERR_CONSTRAINT_VIOLATION)
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
""")
self.fail()
except LdbError as e17:
(num, _) = e17.args
self.assertEqual(num, ERR_CONSTRAINT_VIOLATION)
try:
self.ldb.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
add: userPassword
userPassword: thatsAcomplPASS1
""")
self.fail()
except LdbError as e18:
(num, _) = e18.args
self.assertEqual(num, ERR_UNWILLING_TO_PERFORM)
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
add: userPassword
userPassword: thatsAcomplPASS1
""")
self.fail()
except LdbError as e19:
(num, _) = e19.args
self.assertEqual(num, ERR_INSUFFICIENT_ACCESS_RIGHTS)
try:
self.ldb.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
userPassword: thatsAcomplPASS1
add: userPassword
userPassword: thatsAcomplPASS2
userPassword: thatsAcomplPASS2
""")
self.fail()
except LdbError as e20:
(num, _) = e20.args
self.assertEqual(num, ERR_CONSTRAINT_VIOLATION)
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
userPassword: thatsAcomplPASS1
add: userPassword
userPassword: thatsAcomplPASS2
userPassword: thatsAcomplPASS2
""")
self.fail()
except LdbError as e21:
(num, _) = e21.args
self.assertEqual(num, ERR_CONSTRAINT_VIOLATION)
try:
self.ldb.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
userPassword: thatsAcomplPASS1
userPassword: thatsAcomplPASS1
add: userPassword
userPassword: thatsAcomplPASS2
""")
self.fail()
except LdbError as e22:
(num, _) = e22.args
self.assertEqual(num, ERR_CONSTRAINT_VIOLATION)
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
userPassword: thatsAcomplPASS1
userPassword: thatsAcomplPASS1
add: userPassword
userPassword: thatsAcomplPASS2
""")
self.fail()
except LdbError as e23:
(num, _) = e23.args
self.assertEqual(num, ERR_CONSTRAINT_VIOLATION)
try:
self.ldb.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
userPassword: thatsAcomplPASS1
add: userPassword
userPassword: thatsAcomplPASS2
add: userPassword
userPassword: thatsAcomplPASS2
""")
self.fail()
except LdbError as e24:
(num, _) = e24.args
self.assertEqual(num, ERR_UNWILLING_TO_PERFORM)
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
userPassword: thatsAcomplPASS1
add: userPassword
userPassword: thatsAcomplPASS2
add: userPassword
userPassword: thatsAcomplPASS2
""")
self.fail()
except LdbError as e25:
(num, _) = e25.args
self.assertEqual(num, ERR_INSUFFICIENT_ACCESS_RIGHTS)
try:
self.ldb.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
userPassword: thatsAcomplPASS1
delete: userPassword
userPassword: thatsAcomplPASS1
add: userPassword
userPassword: thatsAcomplPASS2
""")
self.fail()
except LdbError as e26:
(num, _) = e26.args
self.assertEqual(num, ERR_UNWILLING_TO_PERFORM)
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
userPassword: thatsAcomplPASS1
delete: userPassword
userPassword: thatsAcomplPASS1
add: userPassword
userPassword: thatsAcomplPASS2
""")
self.fail()
except LdbError as e27:
(num, _) = e27.args
self.assertEqual(num, ERR_INSUFFICIENT_ACCESS_RIGHTS)
try:
self.ldb.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
userPassword: thatsAcomplPASS1
add: userPassword
userPassword: thatsAcomplPASS2
replace: userPassword
userPassword: thatsAcomplPASS3
""")
self.fail()
except LdbError as e28:
(num, _) = e28.args
self.assertEqual(num, ERR_UNWILLING_TO_PERFORM)
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
userPassword: thatsAcomplPASS1
add: userPassword
userPassword: thatsAcomplPASS2
replace: userPassword
userPassword: thatsAcomplPASS3
""")
self.fail()
except LdbError as e29:
(num, _) = e29.args
self.assertEqual(num, ERR_INSUFFICIENT_ACCESS_RIGHTS)
# Reverse order does work
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
add: userPassword
userPassword: thatsAcomplPASS2
delete: userPassword
userPassword: thatsAcomplPASS1
""")
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
userPassword: thatsAcomplPASS2
add: unicodePwd
unicodePwd:: """ + base64.b64encode("\"thatsAcomplPASS3\"".encode('utf-16-le')).decode('utf8') + """
""")
# this passes against s4
except LdbError as e30:
(num, _) = e30.args
self.assertEqual(num, ERR_ATTRIBUTE_OR_VALUE_EXISTS)
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: unicodePwd
unicodePwd:: """ + base64.b64encode("\"thatsAcomplPASS3\"".encode('utf-16-le')).decode('utf8') + """
add: userPassword
userPassword: thatsAcomplPASS4
""")
# this passes against s4
except LdbError as e31:
(num, _) = e31.args
self.assertEqual(num, ERR_NO_SUCH_ATTRIBUTE)
# Several password changes at once are allowed
self.ldb.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
replace: userPassword
userPassword: thatsAcomplPASS1
userPassword: thatsAcomplPASS2
""")
# Several password changes at once are allowed
self.ldb.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
replace: userPassword
userPassword: thatsAcomplPASS1
userPassword: thatsAcomplPASS2
replace: userPassword
userPassword: thatsAcomplPASS3
replace: userPassword
userPassword: thatsAcomplPASS4
""")
# This surprisingly should work
delete_force(self.ldb, "cn=testuser2,cn=users," + self.base_dn)
self.ldb.add({
"dn": "cn=testuser2,cn=users," + self.base_dn,
"objectclass": "user",
"userPassword": ["thatsAcomplPASS1", "thatsAcomplPASS2"]})
# This surprisingly should work
delete_force(self.ldb, "cn=testuser2,cn=users," + self.base_dn)
self.ldb.add({
"dn": "cn=testuser2,cn=users," + self.base_dn,
"objectclass": "user",
"userPassword": ["thatsAcomplPASS1", "thatsAcomplPASS1"]})
def test_empty_passwords(self):
print("Performs some empty passwords testing")
try:
self.ldb.add({
"dn": "cn=testuser2,cn=users," + self.base_dn,
"objectclass": "user",
"unicodePwd": []})
self.fail()
except LdbError as e32:
(num, _) = e32.args
self.assertEqual(num, ERR_CONSTRAINT_VIOLATION)
try:
self.ldb.add({
"dn": "cn=testuser2,cn=users," + self.base_dn,
"objectclass": "user",
"dBCSPwd": []})
self.fail()
except LdbError as e33:
(num, _) = e33.args
self.assertEqual(num, ERR_CONSTRAINT_VIOLATION)
try:
self.ldb.add({
"dn": "cn=testuser2,cn=users," + self.base_dn,
"objectclass": "user",
"userPassword": []})
self.fail()
except LdbError as e34:
(num, _) = e34.args
self.assertEqual(num, ERR_CONSTRAINT_VIOLATION)
try:
self.ldb.add({
"dn": "cn=testuser2,cn=users," + self.base_dn,
"objectclass": "user",
"clearTextPassword": []})
self.fail()
except LdbError as e35:
(num, _) = e35.args
self.assertTrue(num == ERR_CONSTRAINT_VIOLATION or
num == ERR_NO_SUCH_ATTRIBUTE) # for Windows
delete_force(self.ldb, "cn=testuser2,cn=users," + self.base_dn)
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["unicodePwd"] = MessageElement([], FLAG_MOD_ADD, "unicodePwd")
try:
self.ldb.modify(m)
self.fail()
except LdbError as e36:
(num, _) = e36.args
self.assertEqual(num, ERR_CONSTRAINT_VIOLATION)
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["dBCSPwd"] = MessageElement([], FLAG_MOD_ADD, "dBCSPwd")
try:
self.ldb.modify(m)
self.fail()
except LdbError as e37:
(num, _) = e37.args
self.assertEqual(num, ERR_CONSTRAINT_VIOLATION)
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["userPassword"] = MessageElement([], FLAG_MOD_ADD, "userPassword")
try:
self.ldb.modify(m)
self.fail()
except LdbError as e38:
(num, _) = e38.args
self.assertEqual(num, ERR_CONSTRAINT_VIOLATION)
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["clearTextPassword"] = MessageElement([], FLAG_MOD_ADD, "clearTextPassword")
try:
self.ldb.modify(m)
self.fail()
except LdbError as e39:
(num, _) = e39.args
self.assertTrue(num == ERR_CONSTRAINT_VIOLATION or
num == ERR_NO_SUCH_ATTRIBUTE) # for Windows
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["unicodePwd"] = MessageElement([], FLAG_MOD_REPLACE, "unicodePwd")
try:
self.ldb.modify(m)
self.fail()
except LdbError as e40:
(num, _) = e40.args
self.assertEqual(num, ERR_UNWILLING_TO_PERFORM)
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["dBCSPwd"] = MessageElement([], FLAG_MOD_REPLACE, "dBCSPwd")
try:
self.ldb.modify(m)
self.fail()
except LdbError as e41:
(num, _) = e41.args
self.assertEqual(num, ERR_UNWILLING_TO_PERFORM)
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["userPassword"] = MessageElement([], FLAG_MOD_REPLACE, "userPassword")
try:
self.ldb.modify(m)
self.fail()
except LdbError as e42:
(num, _) = e42.args
self.assertEqual(num, ERR_UNWILLING_TO_PERFORM)
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["clearTextPassword"] = MessageElement([], FLAG_MOD_REPLACE, "clearTextPassword")
try:
self.ldb.modify(m)
self.fail()
except LdbError as e43:
(num, _) = e43.args
self.assertTrue(num == ERR_UNWILLING_TO_PERFORM or
num == ERR_NO_SUCH_ATTRIBUTE) # for Windows
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["unicodePwd"] = MessageElement([], FLAG_MOD_DELETE, "unicodePwd")
try:
self.ldb.modify(m)
self.fail()
except LdbError as e44:
(num, _) = e44.args
self.assertEqual(num, ERR_UNWILLING_TO_PERFORM)
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["dBCSPwd"] = MessageElement([], FLAG_MOD_DELETE, "dBCSPwd")
try:
self.ldb.modify(m)
self.fail()
except LdbError as e45:
(num, _) = e45.args
self.assertEqual(num, ERR_UNWILLING_TO_PERFORM)
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["userPassword"] = MessageElement([], FLAG_MOD_DELETE, "userPassword")
try:
self.ldb.modify(m)
self.fail()
except LdbError as e46:
(num, _) = e46.args
self.assertEqual(num, ERR_CONSTRAINT_VIOLATION)
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["clearTextPassword"] = MessageElement([], FLAG_MOD_DELETE, "clearTextPassword")
try:
self.ldb.modify(m)
self.fail()
except LdbError as e47:
(num, _) = e47.args
self.assertTrue(num == ERR_CONSTRAINT_VIOLATION or
num == ERR_NO_SUCH_ATTRIBUTE) # for Windows
def test_plain_userPassword(self):
print("Performs testing about the standard 'userPassword' behaviour")
# Delete the "dSHeuristics"
self.ldb.set_dsheuristics(None)
time.sleep(1) # This switching time is strictly needed!
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["userPassword"] = MessageElement("myPassword", FLAG_MOD_ADD,
"userPassword")
self.ldb.modify(m)
res = self.ldb.search("cn=testuser,cn=users," + self.base_dn,
scope=SCOPE_BASE, attrs=["userPassword"])
self.assertTrue(len(res) == 1)
self.assertTrue("userPassword" in res[0])
self.assertEqual(str(res[0]["userPassword"][0]), "myPassword")
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["userPassword"] = MessageElement("myPassword2", FLAG_MOD_REPLACE,
"userPassword")
self.ldb.modify(m)
res = self.ldb.search("cn=testuser,cn=users," + self.base_dn,
scope=SCOPE_BASE, attrs=["userPassword"])
self.assertTrue(len(res) == 1)
self.assertTrue("userPassword" in res[0])
self.assertEqual(str(res[0]["userPassword"][0]), "myPassword2")
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["userPassword"] = MessageElement([], FLAG_MOD_DELETE,
"userPassword")
self.ldb.modify(m)
res = self.ldb.search("cn=testuser,cn=users," + self.base_dn,
scope=SCOPE_BASE, attrs=["userPassword"])
self.assertTrue(len(res) == 1)
self.assertFalse("userPassword" in res[0])
# Set the test "dSHeuristics" to deactivate "userPassword" pwd changes
self.ldb.set_dsheuristics("000000000")
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["userPassword"] = MessageElement("myPassword3", FLAG_MOD_REPLACE,
"userPassword")
self.ldb.modify(m)
res = self.ldb.search("cn=testuser,cn=users," + self.base_dn,
scope=SCOPE_BASE, attrs=["userPassword"])
self.assertTrue(len(res) == 1)
self.assertTrue("userPassword" in res[0])
self.assertEqual(str(res[0]["userPassword"][0]), "myPassword3")
# Set the test "dSHeuristics" to deactivate "userPassword" pwd changes
self.ldb.set_dsheuristics("000000002")
m = Message()
m.dn = Dn(self.ldb, "cn=testuser,cn=users," + self.base_dn)
m["userPassword"] = MessageElement("myPassword4", FLAG_MOD_REPLACE,
"userPassword")
self.ldb.modify(m)
res = self.ldb.search("cn=testuser,cn=users," + self.base_dn,
scope=SCOPE_BASE, attrs=["userPassword"])
self.assertTrue(len(res) == 1)
self.assertTrue("userPassword" in res[0])
self.assertEqual(str(res[0]["userPassword"][0]), "myPassword4")
# Reset the test "dSHeuristics" (reactivate "userPassword" pwd changes)
self.ldb.set_dsheuristics("000000001")
def test_modify_dsheuristics_userPassword(self):
print("Performs testing about reading userPassword between dsHeuristic modifies")
# Make sure userPassword cannot be read
self.ldb.set_dsheuristics("000000000")
# Open a new connection (with dsHeuristic=000000000)
ldb1 = SamDB(url=host, session_info=system_session(lp),
credentials=creds, lp=lp)
# Set userPassword to be read
# This setting only affects newer connections (ldb2)
ldb1.set_dsheuristics("000000001")
time.sleep(1)
m = Message()
m.dn = Dn(ldb1, "cn=testuser,cn=users," + self.base_dn)
m["userPassword"] = MessageElement("thatsAcomplPASS1", FLAG_MOD_REPLACE,
"userPassword")
ldb1.modify(m)
res = ldb1.search("cn=testuser,cn=users," + self.base_dn,
scope=SCOPE_BASE, attrs=["userPassword"])
# userPassword cannot be read, it wasn't set, instead the
# password was
self.assertTrue(len(res) == 1)
self.assertFalse("userPassword" in res[0])
# Open another new connection (with dsHeuristic=000000001)
ldb2 = SamDB(url=host, session_info=system_session(lp),
credentials=creds, lp=lp)
res = ldb2.search("cn=testuser,cn=users," + self.base_dn,
scope=SCOPE_BASE, attrs=["userPassword"])
# Check on the new connection that userPassword was not stored
# from ldb1 or is not readable
self.assertTrue(len(res) == 1)
self.assertFalse("userPassword" in res[0])
# Set userPassword to be readable
# This setting does not affect this connection
ldb2.set_dsheuristics("000000000")
time.sleep(1)
res = ldb2.search("cn=testuser,cn=users," + self.base_dn,
scope=SCOPE_BASE, attrs=["userPassword"])
# Check that userPassword was not stored from ldb1
self.assertTrue(len(res) == 1)
self.assertFalse("userPassword" in res[0])
m = Message()
m.dn = Dn(ldb2, "cn=testuser,cn=users," + self.base_dn)
m["userPassword"] = MessageElement("thatsAcomplPASS2", FLAG_MOD_REPLACE,
"userPassword")
ldb2.modify(m)
res = ldb2.search("cn=testuser,cn=users," + self.base_dn,
scope=SCOPE_BASE, attrs=["userPassword"])
# Check despite setting it with userPassword support disabled
# on this connection it should still not be readable
self.assertTrue(len(res) == 1)
self.assertFalse("userPassword" in res[0])
# Only password from ldb1 is the user's password
creds2 = Credentials()
creds2.set_username("testuser")
creds2.set_password("thatsAcomplPASS1")
creds2.set_domain(creds.get_domain())
creds2.set_realm(creds.get_realm())
creds2.set_workstation(creds.get_workstation())
creds2.set_gensec_features(creds2.get_gensec_features()
| gensec.FEATURE_SEAL)
try:
SamDB(url=host, credentials=creds2, lp=lp)
except:
self.fail("testuser used the wrong password")
ldb3 = SamDB(url=host, session_info=system_session(lp),
credentials=creds, lp=lp)
# Check that userPassword was stored from ldb2
res = ldb3.search("cn=testuser,cn=users," + self.base_dn,
scope=SCOPE_BASE, attrs=["userPassword"])
# userPassword can be read
self.assertTrue(len(res) == 1)
self.assertTrue("userPassword" in res[0])
self.assertEqual(str(res[0]["userPassword"][0]), "thatsAcomplPASS2")
# Reset the test "dSHeuristics" (reactivate "userPassword" pwd changes)
self.ldb.set_dsheuristics("000000001")
ldb4 = SamDB(url=host, session_info=system_session(lp),
credentials=creds, lp=lp)
        # Check the userPassword that was stored from ldb2
res = ldb4.search("cn=testuser,cn=users," + self.base_dn,
scope=SCOPE_BASE, attrs=["userPassword"])
        # userPassword cannot be read
self.assertTrue(len(res) == 1)
self.assertFalse("userPassword" in res[0])
def test_zero_length(self):
# Get the old "minPwdLength"
minPwdLength = self.ldb.get_minPwdLength()
# Set it temporarely to "0"
self.ldb.set_minPwdLength("0")
# Get the old "pwdProperties"
pwdProperties = self.ldb.get_pwdProperties()
# Set them temporarely to "0" (to deactivate eventually the complexity)
self.ldb.set_pwdProperties("0")
self.ldb.setpassword("(sAMAccountName=testuser)", "")
# Reset the "pwdProperties" as they were before
self.ldb.set_pwdProperties(pwdProperties)
# Reset the "minPwdLength" as it was before
self.ldb.set_minPwdLength(minPwdLength)
def test_pw_change_delete_no_value_userPassword(self):
"""Test password change with userPassword where the delete attribute doesn't have a value"""
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: userPassword
add: userPassword
userPassword: thatsAcomplPASS1
""")
except LdbError as e:
(num, msg) = e.args
self.assertEqual(num, ERR_CONSTRAINT_VIOLATION)
else:
self.fail()
def test_pw_change_delete_no_value_clearTextPassword(self):
"""Test password change with clearTextPassword where the delete attribute doesn't have a value"""
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: clearTextPassword
add: clearTextPassword
clearTextPassword: thatsAcomplPASS2
""")
except LdbError as e:
(num, msg) = e.args
self.assertTrue(num == ERR_CONSTRAINT_VIOLATION or
num == ERR_NO_SUCH_ATTRIBUTE) # for Windows
else:
self.fail()
def test_pw_change_delete_no_value_unicodePwd(self):
"""Test password change with unicodePwd where the delete attribute doesn't have a value"""
try:
self.ldb2.modify_ldif("""
dn: cn=testuser,cn=users,""" + self.base_dn + """
changetype: modify
delete: unicodePwd
add: unicodePwd
unicodePwd:: """ + base64.b64encode("\"thatsAcomplPASS3\"".encode('utf-16-le')).decode('utf8') + """
""")
except LdbError as e:
(num, msg) = e.args
self.assertEqual(num, ERR_CONSTRAINT_VIOLATION)
else:
self.fail()
def tearDown(self):
super(PasswordTests, self).tearDown()
delete_force(self.ldb, "cn=testuser,cn=users," + self.base_dn)
delete_force(self.ldb, "cn=testuser2,cn=users," + self.base_dn)
# Close the second LDB connection (with the user credentials)
self.ldb2 = None
if "://" not in host:
if os.path.isfile(host):
host = "tdb://%s" % host
else:
host = "ldap://%s" % host
TestProgram(module=__name__, opts=subunitopts)
| gpl-3.0 | -4,943,850,758,046,627,000 | 33.705677 | 105 | 0.600961 | false |
UofS-CTLE/Projtrack3 | ctleweb/d2lstat/views.py | 1 | 1924 | from django.shortcuts import render
from .d2lstat import process_file, calculateVirtualClassroomStats, facultyNotUsingD2LCalculation
from .forms import UploadFileForm, VirtualClassroomUsageForm, FacultyNotUsingD2LForm
def index(request):
if request.method == 'POST':
process_file(request.FILES['usage'].temporary_file_path(),
request.FILES['full'].temporary_file_path(),
request.FILES['part'].temporary_file_path(),
request.POST['semester'],
request.POST['total_courses'])
return render(request, 'd2lstat/report.html')
else:
form = UploadFileForm()
return render(request, 'd2lstat/index.html', {'form': form})
def virtualClassroomStats(request):
if request.method == 'POST':
statsList = calculateVirtualClassroomStats(request.FILES['usage'].temporary_file_path(),
request.FILES['full'].temporary_file_path(),
request.FILES['part'].temporary_file_path(),
request.FILES['virtualClassroomData'].temporary_file_path())
return render(request, 'd2lstat/virtualClassroomStatsResults.html', {'statsList':statsList})
else:
form = VirtualClassroomUsageForm()
return render(request, 'd2lstat/virtualClassroomStats.html', {'form': form})
def facultyNotUsingD2L(request):
if request.method == 'POST':
statsList = facultyNotUsingD2LCalculation(request.FILES['usage'].temporary_file_path(),
request.FILES['full'].temporary_file_path(),
request.FILES['part'].temporary_file_path(),
request.POST['semester'])
return render(request, 'd2lstat/FacultyNotUsingD2LResults.html', {'statsList':statsList})
else:
form = FacultyNotUsingD2LForm()
return render(request, 'd2lstat/FacultyNotUsingD2L.html', {'form': form})
| gpl-3.0 | 1,134,818,794,261,291,500 | 48.333333 | 100 | 0.650208 | false |
bverdu/onDemand | gui/widgets.py | 1 | 15338 | # encoding: utf-8
'''
Created on 29 May 2015
@author: Bertrand Verdu
'''
from __future__ import print_function
import os
from kivy.clock import Clock
from kivy.core.window import Window
# from kivy.graphics.transformation import Matrix
from kivy.loader import Loader
from kivy.metrics import dp
from kivy.properties import ObjectProperty,\
StringProperty, DictProperty, BooleanProperty, ListProperty
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.bubble import Bubble
from kivy.uix.button import Button
from kivy.uix.filechooser import FileChooserListView
from kivy.uix.gridlayout import GridLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.image import Image, AsyncImage
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivy.uix.scatter import Scatter
from kivy.uix.screenmanager import Screen
from kivy.uix.settings import SettingString, SettingSpacer, SettingPath
from kivy.uix.togglebutton import ToggleButton
Loader.num_workers = 4
Loader.loading_image = 'data/icons/wait.zip'
def button_size():
if Window.size[0] > Window.size[1]:
return Window.size[0] / 5, Window.size[1] / 8
else:
return Window.size[0] / 3, Window.size[1] / 8
class Home(Screen):
background = StringProperty('data/background_ebony.png')
status = DictProperty()
room = ''
def unlock_widgets(self, state):
print('unlock: %s' % state)
if isinstance(self.children[0], FloatLayout):
for w in self.children[0].children:
w.do_translation_x = state
w.do_translation_y = state
class StartPage(GridLayout):
devices = DictProperty()
roomlist = DictProperty()
format = ListProperty([i for i in button_size()])
_rooms = []
_devices = []
rooms = None
lights = None
medias = None
first = True
def on_devices(self, instance, value):
# print('device: %s' % value)
for uid, device in value.items():
if uid not in self._devices:
for dev in self.roomlist[device['room']]['devices']:
if dev['uid'] == uid:
self.add_device(dev)
def on_roomlist(self, instance, value):
# print('room: %s' % value)
# print(len(self.children))
if len(self.children) == 0:
def later(ignored):
self.on_roomlist(instance, value)
# f = lambda ignored: self.on_roomlist(instance, value)
Clock.schedule_once(later, 2)
return
# print(len(self.children))
# print(self.rooms)
# print(self.ids)
if not self.rooms:
self._rooms = []
self._devices = []
if len(self.children) > 0:
for child in self.children:
if len(child.children) > 0:
child = child.children[0]
if child.typ:
print(child.typ)
setattr(self, child.typ, child)
for room, values in value.items():
if room == 'Home':
continue
if room not in self._rooms:
if self.rooms:
# print('add room: %s -- %s' % (room, values['pic']))
self.add_room(room, values['pic'])
if self.rooms:
for device in values['devices']:
if device['uid'] not in self._devices:
# print('add device: %s' % device)
self.add_device(device)
def add_room(self, room, pic):
# print('from Window: %s X %s' % Window.size)
w = button_size()[0]
r = RoomButton(ltext=room,
pic=pic,
width=w,
size_hint=(None, 1))
# size_hint=((.20, 1)
# if Window.size[0] >= Window.size[1]
# else (.4, 1)))
self.rooms.add_widget(r)
self._rooms.append(room)
def add_device(self, device):
w = button_size()[0]
print('dimensions d : %s X %s' %
(self.parent.width, self.parent.height))
if device['type'] == 'Lights':
if self.lights:
b = LightButtonWithText(
pic=type_img(device['type']),
ltext=device['name'],
width=w,
size_hint=(None, 1))
# size_hint=((.20, 1)
# if Window.size[0] >= Window.size[1]
# else (.4, 1)))
self.lights.add_widget(b)
elif device['type'] == 'MediaPlayer':
if self.medias:
b = MediaButtonWithText(
pic=type_img(device['type']),
ltext=device['name'],
device=device,
width=w,
size_hint=(None, 1))
self.medias.add_widget(b)
else:
return
self._devices.append(device['uid'])
class Shutters(Screen):
pass
class Scenarios(Screen):
pass
class RoomButton(Button):
pic = StringProperty()
ltext = StringProperty()
class LightButtonWithText(ToggleButton):
pic = StringProperty()
ltext = StringProperty()
class MediaButtonWithText(ToggleButton):
pic = StringProperty()
ltext = StringProperty()
device = ObjectProperty()
class Pop_device(object):
def __init__(self, parent):
self.parent = parent
content = parent.typ(pop=self)
self.popup = Popup(
title=parent.name,
content=content,
size_hint=(.2, .3))
def display(self):
self.popup.open()
def dismiss(self):
self.popup.dismiss()
def define_size(self, size):
print('Size: %s' % size)
if size[0] < 120:
print('resize2 !')
self.popup.size_hint = self.popup.size_hint[0] * 1.5,\
self.popup.size_hint[1] * 1.5
# self.popup.size = (100, 100)
# self.popup.content.size = (100, 100)
class Bubble_device(Bubble):
pass
class Player_menu(BoxLayout):
pop = ObjectProperty()
class Light_menu(BoxLayout):
pop = ObjectProperty()
class Bubble_player(Bubble):
pass
# class SensorLabel(Label):
class SensorPad(Scatter):
sensors = ListProperty([])
def __init__(self, *args, **kwargs):
super(SensorPad, self).__init__(*args, **kwargs)
def on_sensors(self, instance, value):
self.ids.bl.clear_widgets()
for s in value:
d, v, u = s.get()
if u is None:
l = Label(text='%s: %s' % (d, ('Oui' if v else 'Non')))
else:
l = Label(text='%s: %s %s' % (d, v, u))
self.ids.bl.add_widget(l)
self.size = (self.size[0], 30 * len(value))
# m = Matrix().scale(1, len(value), 1)
# self.apply_transform(m, True)
class DeviceButton(Scatter):
pic_true = StringProperty('data/icons/lamp_1.png')
pic_false = StringProperty('data/icons/lamp_0.png')
state = BooleanProperty(False)
play = ObjectProperty(None)
config = ObjectProperty(None)
open = ObjectProperty(None)
name = StringProperty('Light')
bubble = ObjectProperty(None)
scheduled = False
typ = Light_menu
def pushed(self):
if self.do_translation_x:
# print('unlocked')
return True
else:
Clock.schedule_once(self.show_bubble, 1)
self.scheduled = True
def on_touch_up(self, touch):
if self.scheduled:
Clock.unschedule(self.show_bubble)
self.scheduled = False
# print('locked')
self.state = not self.state
self.play(self)
if self.do_translation_x:
# print('locking')
self.do_translation_x = False
self.do_translation_y = False
if self.config:
self.config.set(
self.name,
'position',
str(self.pos[0]) + '*' + str(self.pos[1]))
self.config.write()
def unlock(self, state=True):
# print('unlock')
self.do_translation_x = state
self.do_translation_y = state
# self.unlocked = True
# self.remove_widget(self.bubb)
# return False
def show_bubble(self, *l):
self.scheduled = False
# self.bubb = bubb = self.bubble()
# bubb.pos = bubb.pos[0] + self.width, bubb.pos[1]
if not self.bubble:
self.bubble = Pop_device(self)
self.bubble.display()
# self.add_widget(bubb)
# def on_touch_down(self, touch):
# # print('touch %s - %s' % (touch.pos, self.pos))
#
# '''.. versionchanged:: 1.4.0'''
# if self.collide_point(*touch.pos):
# self.state = not self.state
# print(self.state)
# return self.play(self)
# if self.locked:
# return True
class LightButton(DeviceButton):
pass
class PlayerButton(DeviceButton):
pic_true = StringProperty('data/icons/multimedia_playing')
pic_false = StringProperty('data/icons/multimedia_stopped.png')
typ = Player_menu
# bubble = Pop_device(self)
class ScatterCross(Scatter):
pass
class HVAC(Screen):
pass
class BgImage(AsyncImage):
pass
class SettingImg(SettingPath):
def _create_popup(self, instance):
from jnius import autoclass # SDcard Android
# Get path to SD card Android
try:
Environment = autoclass('android.os.Environment')
# print(Environment.DIRECTORY_DCIM)
# print(Environment.DIRECTORY_MOVIES)
# print(Environment.DIRECTORY_MUSIC)
env = Environment()
print('two')
sdpath = env.getExternalStorageDirectory().getAbsolutePath()
try:
if not env.isExternalStorageRemovable():
if os.path.lexists('/storage/sdcard1'):
sdpath = '/storage/sdcard1/'\
+ Environment.DIRECTORY_PICTURES
else:
print('removable')
except Exception as err:
print(err)
print('three')
print(':)')
# Not on Android
except:
print(':(')
sdpath = os.path.expanduser('~')
print('popup!')
print(sdpath)
# create popup layout
content = BoxLayout(orientation='vertical', spacing=5)
# popup_width = min(0.95 * Window.width, dp(500))
self.popup = popup = Popup(
title=self.title, content=content, size_hint=(None, 0.9),
width=dp(300))
# create the filechooser
print('1')
if os.path.isfile(self.value):
print('file!')
path = os.path.split(self.value)[0]
if len(sdpath) == 0:
path = os.path.expanduser('~')
elif '/data/living.png' in self.value:
print('living found!')
path = sdpath
else:
path = sdpath
print(path)
self.textinput = textinput = FileChooserListView(
path=path, size_hint=(1, 1), dirselect=True)
textinput.bind(on_path=self._validate)
self.textinput = textinput
# construct the content
content.add_widget(textinput)
content.add_widget(SettingSpacer())
# 2 buttons are created for accept or cancel the current value
btnlayout = BoxLayout(size_hint_y=None, height='50dp', spacing='5dp')
btn = Button(text='Ok')
btn.bind(on_release=self._validate)
btnlayout.add_widget(btn)
btn = Button(text='Cancel')
btn.bind(on_release=self._dismiss)
btnlayout.add_widget(btn)
content.add_widget(btnlayout)
# all done, open the popup !
popup.open()
class SettingPos(SettingString):
'''Implementation of a string setting on top of a :class:`SettingItem`.
It is visualized with a :class:`~kivy.uix.label.Label` widget that, when
clicked, will open a :class:`~kivy.uix.popup.Popup` with a
:class:`~kivy.uix.textinput.Textinput` so the user can enter a custom
value.
'''
popup = ObjectProperty(None, allownone=True)
'''(internal) Used to store the current popup when it's shown.
:attr:`popup` is an :class:`~kivy.properties.ObjectProperty` and defaults
to None.
'''
# position = ObjectProperty(None)
'''(internal) Used to store the current textinput from the popup and
to listen for changes.
:attr:`textinput` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
pic = StringProperty()
position = StringProperty('50*50')
def __init__(self, **kwargs):
super(SettingPos, self).__init__(**kwargs)
self.img = Image(source=self.pic)
def on_panel(self, instance, value):
if value is None:
return
self.bind(on_release=self._create_popup)
def _dismiss(self, *largs):
if self.popup:
self.popup.dismiss()
self.popup = None
def _register(self, instance, touch):
if self.img.collide_point(*touch.pos):
# self.position = '*'.join([str(p) for p in touch.pos])
# print(touch)
# print(self.img.pos)
# print(self.img.size)
# print(Window.size)
x, y = self.img.to_widget(touch.pos[0], touch.pos[1], True)
x = x - self.img.pos[0] - 20.0
y = y + 68.0
# print('%s * %s' % (x, y))
self.position = str(x) + '*' + str(y)
def _validate(self, instance):
value = self.position
self.value = value
# print(self.value)
self._dismiss()
def _create_popup(self, instance):
# create popup layout
content = BoxLayout(orientation='vertical', spacing='5dp')
# popup_width = min(0.95 * Window.width, dp(500))
self.popup = popup = Popup(
title=self.title, content=content)
pos = [float(c) for c in self.value.split('*')]
scat = ScatterCross(size=(20, 20), size_hint=(None, None), pos=pos)
scat.bind(on_touch_up=self._register)
self.img.add_widget(scat)
content.add_widget(self.img)
content.add_widget(SettingSpacer())
# 2 buttons are created for accept or cancel the current value
btnlayout = BoxLayout(size_hint_y=None, height='50dp', spacing='5dp')
btn = Button(text='Ok')
btn.bind(on_release=self._validate)
btnlayout.add_widget(btn)
btn = Button(text='Cancel')
btn.bind(on_release=self._dismiss)
btnlayout.add_widget(btn)
content.add_widget(btnlayout)
# all done, open the popup !
popup.open()
def type_img(typ):
if typ in ['Lights']:
return 'data/icons/lamp_1.png'
elif typ in ['MediaPlayer']:
return 'data/icons/Multimedia.png'
| agpl-3.0 | 1,037,562,344,166,610,300 | 29.923387 | 79 | 0.548703 | false |
tensorflow/models | official/modeling/hyperparams/oneof.py | 1 | 1870 | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config class that supports oneof functionality."""
from typing import Optional
import dataclasses
from official.modeling.hyperparams import base_config
@dataclasses.dataclass
class OneOfConfig(base_config.Config):
"""Configuration for configs with one of feature.
Attributes:
type: 'str', name of the field to select.
"""
type: Optional[str] = None
def as_dict(self):
"""Returns a dict representation of OneOfConfig.
For the nested base_config.Config, a nested dict will be returned.
"""
if self.type is None:
return {'type': None}
elif self.__dict__['type'] not in self.__dict__:
raise ValueError('type: {!r} is not a valid key!'.format(
self.__dict__['type']))
else:
chosen_type = self.type
chosen_value = self.__dict__[chosen_type]
return {'type': self.type, chosen_type: self._export_config(chosen_value)}
def get(self):
"""Returns selected config based on the value of type.
If type is not set (None), None is returned.
"""
chosen_type = self.type
if chosen_type is None:
return None
if chosen_type not in self.__dict__:
raise ValueError('type: {!r} is not a valid key!'.format(self.type))
return self.__dict__[chosen_type]
| apache-2.0 | 4,027,534,071,067,376,000 | 31.807018 | 80 | 0.691444 | false |
Salamek/reader | reader/read.py | 1 | 1502 | from acr122l import acr122l
import time
acr122l = acr122l()
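# Module level flag: the polling loop at the bottom runs while this is True;
# read_card() clears it while a tag is being handled and sets it back when done.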
true = True
def read_card():
global true
true = False
key = [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF]
print acr122l.TAG_Authenticate(0x00, key)
print 'Starting read data'
readed = acr122l.TAG_Read(0x01)
if readed:
acr122l.LCD_Clear()
acr122l.LED_control('0010')
acr122l.LCD_Text(False,'A',0x00,'Done! Wait 10 s to next scan...')
acr122l.LCD_Text(False,'A',0x40,readed)
acr122l.Buzzer_control(1,1,1)
else:
acr122l.LCD_Clear()
acr122l.LED_control('0001')
acr122l.LCD_Text(False,'A',0x00,'Error,Scan again')
acr122l.LCD_Text(False,'A',0x40,'Wait 10 s to next scan...')
acr122l.Buzzer_control(10,10,1)
time.sleep(5)
acr122l.LCD_back_light(True)
acr122l.LED_control('1000')
true = True
acr122l.LCD_Clear()
acr122l.LCD_Text(False,'A',0x00,'Ready')
#cnt = 1
acr122l.LED_control('1000')
acr122l.LCD_back_light(True)
acr122l.LCD_Clear()
acr122l.LCD_Text(False,'A',0x00,'Ready')
while true:
ret = acr122l.TAG_Polling()
if ret:
acr122l.LCD_Clear()
acr122l.LCD_Text(False,'A',0x00,'Reading...')
acr122l.LED_control('0100')
#if cnt != ret[17]:
#cnt = ret[17]
target_number = ret[18] #Target number
sens_res = [ret[19],ret[20]] #SENS_RES
sel_res = ret[21] #SEL_RES
len_uid = ret[22] #Length of the UID
end_uid = 25+len_uid
uid = []
for i in range(25, end_uid):
uid.append(ret[i])
if uid:
read_card()
#break
#else:
# true = False
#else:
# if cnt:
# cnt = 0
| gpl-3.0 | -4,102,813,253,444,852,700 | 19.861111 | 68 | 0.654461 | false |
otsaloma/poor-maps | poor/paths.py | 1 | 1396 | # -*- coding: utf-8 -*-
# Copyright (C) 2014 Osmo Salomaa
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Standard paths to files."""
import os
__all__ = ("CACHE_HOME_DIR", "CONFIG_HOME_DIR", "DATA_DIR", "DATA_HOME_DIR", "LOCALE_DIR")
XDG_CACHE_HOME = os.path.expanduser(os.getenv("XDG_CACHE_HOME", "~/.cache"))
XDG_CONFIG_HOME = os.path.expanduser(os.getenv("XDG_CONFIG_HOME", "~/.config"))
XDG_DATA_HOME = os.path.expanduser(os.getenv("XDG_DATA_HOME", "~/.local/share"))
CACHE_HOME_DIR = os.path.join(XDG_CACHE_HOME, "harbour-poor-maps")
CONFIG_HOME_DIR = os.path.join(XDG_CONFIG_HOME, "harbour-poor-maps")
DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
DATA_HOME_DIR = os.path.join(XDG_DATA_HOME, "harbour-poor-maps")
LOCALE_DIR = "/usr/share/harbour-poor-maps/locale"
| gpl-3.0 | -8,651,152,680,631,865,000 | 42.625 | 90 | 0.717049 | false |
leandrotoledo/python-telegram-bot | telegram/payment/precheckoutquery.py | 2 | 5229 | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2021
# Leandro Toledo de Souza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains an object that represents a Telegram PreCheckoutQuery."""
from typing import TYPE_CHECKING, Any, Optional
from telegram import OrderInfo, TelegramObject, User
from telegram.utils.helpers import DEFAULT_NONE
from telegram.utils.types import JSONDict, ODVInput
if TYPE_CHECKING:
from telegram import Bot
class PreCheckoutQuery(TelegramObject):
"""This object contains information about an incoming pre-checkout query.
Objects of this class are comparable in terms of equality. Two objects of this class are
considered equal, if their :attr:`id` is equal.
Note:
In Python ``from`` is a reserved word, use ``from_user`` instead.
Args:
id (:obj:`str`): Unique query identifier.
from_user (:class:`telegram.User`): User who sent the query.
currency (:obj:`str`): Three-letter ISO 4217 currency code.
total_amount (:obj:`int`): Total price in the smallest units of the currency (integer, not
float/double). For example, for a price of US$ 1.45 pass ``amount = 145``.
See the :obj:`exp` parameter in
`currencies.json <https://core.telegram.org/bots/payments/currencies.json>`_,
it shows the number of digits past the decimal point for each currency
(2 for the majority of currencies).
invoice_payload (:obj:`str`): Bot specified invoice payload.
shipping_option_id (:obj:`str`, optional): Identifier of the shipping option chosen by the
user.
order_info (:class:`telegram.OrderInfo`, optional): Order info provided by the user.
bot (:class:`telegram.Bot`, optional): The Bot to use for instance methods.
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Attributes:
id (:obj:`str`): Unique query identifier.
from_user (:class:`telegram.User`): User who sent the query.
currency (:obj:`str`): Three-letter ISO 4217 currency code.
total_amount (:obj:`int`): Total price in the smallest units of the currency.
invoice_payload (:obj:`str`): Bot specified invoice payload.
shipping_option_id (:obj:`str`): Optional. Identifier of the shipping option chosen by the
user.
order_info (:class:`telegram.OrderInfo`): Optional. Order info provided by the user.
bot (:class:`telegram.Bot`): Optional. The Bot to use for instance methods.
"""
__slots__ = (
'bot',
'invoice_payload',
'shipping_option_id',
'currency',
'order_info',
'total_amount',
'id',
'from_user',
'_id_attrs',
)
def __init__(
self,
id: str, # pylint: disable=W0622
from_user: User,
currency: str,
total_amount: int,
invoice_payload: str,
shipping_option_id: str = None,
order_info: OrderInfo = None,
bot: 'Bot' = None,
**_kwargs: Any,
):
self.id = id # pylint: disable=C0103
self.from_user = from_user
self.currency = currency
self.total_amount = total_amount
self.invoice_payload = invoice_payload
self.shipping_option_id = shipping_option_id
self.order_info = order_info
self.bot = bot
self._id_attrs = (self.id,)
@classmethod
def de_json(cls, data: Optional[JSONDict], bot: 'Bot') -> Optional['PreCheckoutQuery']:
"""See :meth:`telegram.TelegramObject.de_json`."""
data = cls._parse_data(data)
if not data:
return None
data['from_user'] = User.de_json(data.pop('from'), bot)
data['order_info'] = OrderInfo.de_json(data.get('order_info'), bot)
return cls(bot=bot, **data)
def answer( # pylint: disable=C0103
self,
ok: bool,
error_message: str = None,
timeout: ODVInput[float] = DEFAULT_NONE,
api_kwargs: JSONDict = None,
) -> bool:
"""Shortcut for::
bot.answer_pre_checkout_query(update.pre_checkout_query.id, *args, **kwargs)
For the documentation of the arguments, please see
:meth:`telegram.Bot.answer_pre_checkout_query`.
"""
return self.bot.answer_pre_checkout_query(
pre_checkout_query_id=self.id,
ok=ok,
error_message=error_message,
timeout=timeout,
api_kwargs=api_kwargs,
)
| lgpl-3.0 | -7,302,558,104,083,430,000 | 36.35 | 98 | 0.634921 | false |
spapas/django-localflavor-gr | django_localflavor_gr/tests.py | 1 | 2527 | from forms import *
from django.test import SimpleTestCase
class GRLocalFlavorTests(SimpleTestCase):
def test_GRTaxNumberField(self):
""" The valid tests are from greek tax numbers (AFMs) found on the internet
with a google search. """
error = ['Enter a valid greek tax number (9 digits).']
valid = {
'090051291': '090051291',
'997881842': '997881842',
'090220804': '090220804',
'090000045': '090000045',
'099757704': '099757704',
}
invalid = {
'123456789': error,
'123 32 12 3213': error,
'32 123 5345': error,
'0': error,
'00000': error,
'000000000': error,
'1111111': error,
'3123123': error,
'312312334534': error,
'999999999': error,
'123123123': error,
'321000123': error,
}
self.assertFieldOutput(GRTaxNumberCodeField, valid, invalid)
def test_GRPostalCodeField(self):
error = ['Enter a valid 5-digit greek postal code.']
valid = {
'51642': '51642',
'21742': '21742',
'75006': '75006',
'85017': '85017',
}
invalid = {
'12 34': error,
'124567': error,
'04567': error,
'94567': error,
'1345': error,
'134115': error,
}
self.assertFieldOutput(GRPostalCodeField, valid, invalid)
def test_GRPhoneNumberField(self):
error = ['Enter a 10-digit greek phone number.']
valid = {
'2109292921': '2109292921',
'+301109292921': '+301109292921',
}
invalid = {
'12 34': error,
'124567': error,
'21092929211': error,
'661232123': error,
}
self.assertFieldOutput(GRPhoneNumberField, valid, invalid)
def test_GRMobilePhoneNumberField(self):
error = ['Enter a greek mobile phone number starting with 69.']
valid = {
'6945555555': '6945555555',
'6931234567': '6931234567',
'+306971234567': '+306971234567',
}
invalid = {
'12 34': error,
'124567': error,
'21092929211': error,
'2102233444': error,
'2111234567': error,
}
self.assertFieldOutput(GRMobilePhoneNumberField, valid, invalid)
| bsd-3-clause | -981,661,244,806,528,000 | 28.383721 | 83 | 0.504551 | false |
googleapis/googleapis-gen | google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/enums/types/payment_mode.py | 1 | 1182 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v7.enums',
marshal='google.ads.googleads.v7',
manifest={
'PaymentModeEnum',
},
)
class PaymentModeEnum(proto.Message):
r"""Container for enum describing possible payment modes. """
class PaymentMode(proto.Enum):
r"""Enum describing possible payment modes."""
UNSPECIFIED = 0
UNKNOWN = 1
CLICKS = 4
CONVERSION_VALUE = 5
CONVERSIONS = 6
GUEST_STAY = 7
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | -7,034,423,592,130,008,000 | 28.55 | 74 | 0.680203 | false |
classner/pymp | tests/unittests.py | 1 | 10038 | """Unittests for the pymp package."""
# pylint: disable=protected-access, invalid-name
from __future__ import print_function
import logging
import unittest
logging.basicConfig(level=logging.INFO)
class ParallelTest(unittest.TestCase):
"""Test the parallel context."""
def test_init(self):
"""Initialization test."""
import pymp
pymp.config.nested = False
pymp.config.thread_limit = 4
pinst = pymp.Parallel(2)
with pinst as parallel:
if not parallel._is_fork:
self.assertEqual(len(parallel._pids), 1)
nested_parallel = pymp.Parallel(2)
self.assertRaises(AssertionError,
nested_parallel.__enter__)
pymp.config.nested = True
with nested_parallel:
pass
pymp.config.nested = False
self.assertRaises(AssertionError,
pinst.__enter__)
self.assertEqual(pymp.shared._NUM_PROCS.value, 1)
self.assertEqual(pymp.Parallel._level, 0)
def test_num_threads(self):
"""Test num threads property."""
import pymp
import os
pymp.config.nested = False
pymp.config.thread_limit = 4
tlist = pymp.shared.list()
with pymp.Parallel(2) as p:
tlist.append(p.num_threads)
self.assertEqual(list(tlist), [2, 2])
pymp.config.nested = True
tlist = pymp.shared.list()
with pymp.Parallel(2) as p:
with pymp.Parallel(2) as p2:
tlist.append(p2.num_threads)
self.assertEqual(list(tlist), [2, 2, 2, 2])
def test_thread_num(self):
"""Test thread_num property."""
import pymp
pymp.config.nested = True
pymp.config.thread_limit = 4
tlist = pymp.shared.list()
with pymp.Parallel(2) as p:
tlist.append(p.thread_num)
self.assertEqual(sorted(list(tlist)), [0, 1])
tlist = pymp.shared.list()
tlist2 = pymp.shared.list()
tlist3 = pymp.shared.list()
with pymp.Parallel(2) as p:
with pymp.Parallel(2) as p2:
if not p._is_fork:
tlist.append(p2.thread_num)
else:
tlist2.append(p2.thread_num)
tlist3.append(p.thread_num)
self.assertEqual(sorted(list(tlist)), [0, 1])
self.assertEqual(sorted(list(tlist2)), [0, 1])
self.assertEqual(sorted(list(tlist3)), [0, 1])
def test_range(self):
"""Range test."""
import pymp
pymp.config.nested = False
pymp.config.thread_limit = 4
try:
import numpy as np
except ImportError:
return
tarr = pymp.shared.array((5, 1))
with pymp.Parallel(2) as p:
for i in p.range(len(tarr)):
tarr[i, 0] = 1.
self.assertEqual(np.sum(tarr), 5.)
def test_lock(self):
"""Lock test."""
import pymp
pymp.config.nested = False
pymp.config.thread_limit = 4
try:
import numpy as np
except ImportError:
return
tarr = pymp.shared.array((1, 1))
lock = pymp.shared.lock()
with pymp.Parallel(2) as p:
for _ in p.range(1000):
with lock:
tarr[0, 0] += 1.
self.assertEqual(tarr[0, 0], 1000.)
def test_list(self):
"""Shared list test."""
import pymp
pymp.config.nested = False
pymp.config.thread_limit = 4
tlist = pymp.shared.list()
with pymp.Parallel(2) as p:
for _ in p.range(1000):
tlist.append(1.)
self.assertEqual(len(tlist), 1000)
def test_dict(self):
"""Shared dict test."""
import pymp
pymp.config.nested = False
pymp.config.thread_limit = 4
tdict = pymp.shared.dict()
with pymp.Parallel(2) as p:
for iter_idx in p.range(400):
tdict[iter_idx] = 1.
self.assertEqual(len(tdict), 400)
def test_queue(self):
"""Shared queue test."""
import pymp
pymp.config.nested = False
pymp.config.thread_limit = 4
tqueue = pymp.shared.queue()
with pymp.Parallel(2) as p:
for iter_idx in p.range(400):
tqueue.put(iter_idx)
self.assertEqual(tqueue.qsize(), 400)
def test_rlock(self):
"""Shared rlock test."""
import pymp
pymp.config.nested = False
pymp.config.thread_limit = 4
rlock = pymp.shared.rlock()
tlist = pymp.shared.list()
with pymp.Parallel(2):
with rlock:
with rlock:
tlist.append(1.)
self.assertEqual(len(tlist), 2)
def test_thread_limit(self):
"""Thread limit test."""
import pymp
pymp.config.thread_limit = 3
pymp.config.nested = True
thread_list = pymp.shared.list()
with pymp.Parallel(4) as p:
thread_list.append(p.thread_num)
thread_list = list(thread_list)
thread_list.sort()
self.assertEqual(list(thread_list), [0, 1, 2])
thread_list = pymp.shared.list()
with pymp.Parallel(2) as p:
with pymp.Parallel(2) as p2:
thread_list.append(p2.thread_num)
thread_list = list(thread_list)
thread_list.sort()
self.assertTrue(thread_list == [0, 0, 1] or
thread_list == [0, 0, 1, 1])
# Second case if the first two threads were exiting already.
def test_xrange(self):
"""Test the dynamic schedule."""
import pymp
pymp.config.thread_limit = 4
pymp.config.nested = True
tlist = pymp.shared.list()
with pymp.Parallel(2):
with pymp.Parallel(2) as p:
for idx in p.xrange(5):
tlist.append(idx)
self.assertEqual(len(tlist), 10)
def test_exceptions(self):
"""Test raising behavior."""
import pymp
pymp.config.thread_limit = 4
pymp.config.nested = True
def exc_context():
"""Creates a context with an Exception in a subthread."""
with pymp.Parallel(2) as p:
if p.thread_num == 1:
raise Exception()
self.assertRaises(Exception, exc_context)
def test_print(self): # pylint: disable=no-self-use
"""Test the print method."""
import pymp
pymp.config.thread_limit = 3
pymp.config.nested = True
with pymp.Parallel(2):
with pymp.Parallel(2) as p:
p.print("Hi from thread {0}.".format(p.thread_num))
def test_safety_check(self):
"""Test that the methods can only be used within their context."""
import pymp
pymp.config.thread_limit = 3
pymp.config.nested = True
p = pymp.Parallel(2)
# Exception before use.
self.assertRaises(AssertionError, lambda: p.thread_num)
self.assertRaises(AssertionError, lambda: p.num_threads)
self.assertRaises(AssertionError, lambda: p.lock)
self.assertRaises(AssertionError, lambda: p.range(10))
self.assertRaises(AssertionError, lambda: p.xrange(10))
with p:
pass
# Exception after use.
self.assertRaises(AssertionError, lambda: p.thread_num)
self.assertRaises(AssertionError, lambda: p.num_threads)
self.assertRaises(AssertionError, lambda: p.lock)
self.assertRaises(AssertionError, lambda: p.range(10))
self.assertRaises(AssertionError, lambda: p.xrange(10))
def test_if(self):
"""Test the if_ deactivation."""
import pymp
pymp.config.thread_limit = 3
pymp.config.nested = True
with pymp.Parallel(if_=False) as p:
self.assertEqual(p.num_threads, 1)
def test_noreshape(self):
"""Test if reshaping is effectively prevented."""
import pymp
sa = pymp.shared.array((3, 3))
self.assertRaises(ValueError, lambda: sa.reshape((4, 4)))
sa.reshape((1, 3, 3))
def test_iterable_two_threads(self):
"""Test if iterating over an iterable is working correctly."""
import pymp
rnge = iter(range(10))
thread_list = pymp.shared.list()
with pymp.Parallel(2) as p:
for elem in p.iterate(rnge):
thread_list.append((p.thread_num, elem))
elements = [item[1] for item in thread_list]
self.assertEqual(sorted(elements), list(range(10)))
threads = [item[0] for item in thread_list]
for item in threads:
self.assertEqual(item, 1)
def test_iterable_one_thread(self):
"""Test if iterating over an iterable is working correctly."""
import pymp
rnge = iter(range(10))
thread_list = pymp.shared.list()
with pymp.Parallel(1) as p:
for elem in p.iterate(rnge):
thread_list.append((p.thread_num, elem))
elements = [item[1] for item in thread_list]
self.assertEqual(sorted(elements), list(range(10)))
threads = [item[0] for item in thread_list]
for item in threads:
self.assertEqual(item, 0)
def test_iterable_three_threads(self):
"""Test if iterating over an iterable is working correctly."""
import pymp
pymp.config.thread_limit = 3
rnge = iter(range(10))
thread_list = pymp.shared.list()
with pymp.Parallel(3) as p:
for elem in p.iterate(rnge):
thread_list.append((p.thread_num, elem))
elements = [item[1] for item in thread_list]
self.assertEqual(sorted(elements), list(range(10)))
threads = [item[0] for item in thread_list]
for item in threads:
self.assertTrue(item in [1, 2])
if __name__ == '__main__':
unittest.main()
| mit | 925,766,839,590,195,100 | 33.733564 | 74 | 0.560371 | false |
dopplerapp/doppler-agent | doppler/bin/doppler-configure.py | 1 | 4062 | #!/usr/bin/env python
import os
import shutil
import sys
import subprocess
from string import Template
from optparse import OptionParser
import doppler
CONFIG_TEMPLATES_PATH = os.path.join(os.path.dirname(doppler.__file__), "config")
DEFAULT_CONFIG_PATH = "/etc/doppler-agent.conf"
DEFAULT_UPSTART_PATH = "/etc/init/doppler-agent.conf"
# Parse command line options
parser = OptionParser(version="%prog " + doppler.__version__)
parser.add_option(
"-k", "--api-key",
dest="api_key",
help="Specify API key to use for config generation",
)
parser.add_option(
"-e", "--endpoint",
dest="endpoint",
help="Specify endpoint to use for sending metrics",
default="http://notify.doppler.io",
)
parser.add_option(
"-g", "--generate-config",
action="store_true",
dest="generate_config",
help="Generate doppler config file at /etc/doppler-agent.conf",
)
parser.add_option(
"-i", "--install-startup-scripts",
action="store_true",
dest="install_startup_scripts",
help="Install upstart/init.d startup scripts for the agent",
)
parser.add_option(
"-s", "--start-agent",
action="store_true",
dest="start_agent",
help="Start the agent",
)
(options, args) = parser.parse_args()
def run_silently(command):
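    # Run the command with its output discarded; return True if it exited with 0.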
worked = True
with open(os.devnull, "w") as devnull:
try:
subprocess.check_call(command.split(), stdout=devnull, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
worked = False
return worked
def can_write_file(path):
has_write_permission = False
if os.path.isfile(path):
if os.access(path, os.W_OK):
has_write_permission = True
else:
if os.access(os.path.dirname(path), os.W_OK):
has_write_permission = True
return has_write_permission
def machine_uses_upstart():
return os.path.isfile("/sbin/initctl")
# Check options are valid
if not (options.generate_config or options.install_startup_scripts or options.start_agent):
parser.print_help()
# Generate config files
if options.generate_config:
# TODO: Don't overwrite existing config files!!!
# Check for --api-key command line flag
if options.api_key:
if can_write_file(DEFAULT_CONFIG_PATH):
# Generate the config file from the template
config = None
with open(os.path.join(CONFIG_TEMPLATES_PATH, "doppler-agent.conf")) as f:
config_template = f.read()
config = Template(config_template).substitute(api_key=options.api_key, endpoint=options.endpoint)
# Write the new config file
with open(DEFAULT_CONFIG_PATH, "w") as f:
f.write(config)
else:
sys.exit("Error! We don't have permission to write to %s, try running as sudo." % DEFAULT_CONFIG_PATH)
else:
sys.exit("Can't generate config file without an API key")
# Install startup scripts
if options.install_startup_scripts:
# Check which init system this machine uses
if machine_uses_upstart():
if can_write_file(DEFAULT_UPSTART_PATH):
shutil.copyfile(os.path.join(CONFIG_TEMPLATES_PATH, "doppler-agent.upstart"), DEFAULT_UPSTART_PATH)
else:
sys.exit("Error! We don't have permission to write to %s, try running as sudo." % DEFAULT_UPSTART_PATH)
else:
sys.exit("Error! We currently only support starting the agent with upstart")
# Start the agent
if options.start_agent:
if machine_uses_upstart():
if os.path.isfile(DEFAULT_UPSTART_PATH):
worked = run_silently("initctl start doppler-agent") or run_silently("initctl restart doppler-agent")
if not worked:
sys.exit("Got bad return code from upstart, process probably didn't start")
else:
sys.exit("Error! Couldn't find doppler-agent upstart script, try running with --generate-startup-scripts")
else:
sys.exit("Error! We currently only support starting the agent with upstart") | mit | -2,343,837,577,511,789,600 | 32.858333 | 118 | 0.655835 | false |
dchaplinsky/pep.org.ua | pepdb/tasks/management/commands/export_foreign_companies.py | 1 | 2271 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import xlsxwriter
from django.core.management.base import BaseCommand
from tasks.models import BeneficiariesMatching
class Command(BaseCommand):
help = ('Exports the list of foreign companies from declarations of PEPs '
'which aren\'t yet in DB to an excel file for further processing '
'and reconciliation with the registry')
def add_arguments(self, parser):
parser.add_argument(
'target_file',
help='Excel file to export to',
)
def handle(self, *args, **options):
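        # Column layout of the export: the header row is written from this list
        # and every record below is written into the same positions.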
keys = [
"owner_name",
"company_name_declaration",
"company_name_en",
"zip",
"city",
"street",
"appt",
"country",
"company_code",
"notes",
"status",
"company_name_orig",
"link",
"founder_1",
"founder_2",
"founder_3",
"founder_4",
"founder_5",
"founder_6",
"founder_7"
]
workbook = xlsxwriter.Workbook(options["target_file"])
for kind, name in (("f", "Founders"), ("b", "Beneficiaries")):
ws = workbook.add_worksheet(name)
for i, f in enumerate(keys):
ws.write(0, i, f)
row = 1
for t in BeneficiariesMatching.objects.filter(
status="n", type_of_connection=kind).nocache().iterator():
base_res = {
"owner_name": t.person_json["full_name"]
}
for company in t.pep_company_information:
res = base_res.copy()
res["company_name_declaration"] = company["company_name"]
res["company_name_en"] = company["en_name"] or ""
res["country"] = company["country"]
res["zip"] = company["address"] or ""
res["company_code"] = company["beneficial_owner_company_code"]
for i, f in enumerate(keys):
ws.write(row, i, res.get(f, ""))
row += 1
workbook.close()
| mit | -8,220,127,339,190,007,000 | 30.541667 | 82 | 0.484808 | false |
ctools/ctools | test/test_csworkflow.py | 1 | 4902 | #! /usr/bin/env python
# ==========================================================================
# This scripts performs unit tests for the csworkflow script.
#
# Copyright (C) 2016-2018 Juergen Knoedlseder
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ==========================================================================
import gammalib
import cscripts
from testing import test
# ================================ #
# Test class for csworkflow script #
# ================================ #
class Test(test):
"""
Test class for csworkflow script
    This test class makes unit tests for the csworkflow script by using it
from the command line and from Python.
"""
# Constructor
def __init__(self):
"""
Constructor
"""
# Call base class constructor
test.__init__(self)
# Set members
self._workflow = self._datadir + '/workflow.xml'
# Return
return
# Set test functions
def set(self):
"""
Set all test functions
"""
# Set test name
self.name('csworkflow')
# Append tests
self.append(self._test_cmd, 'Test csworkflow on command line')
self.append(self._test_python, 'Test csworkflow from Python')
# Return
return
# Test csworkflow on command line
def _test_cmd(self):
"""
Test csworkflow on the command line
"""
# Set script name
csworkflow = self._script('csworkflow')
# Remove result file
gammalib.GFilename('wf_crab_results.xml').remove()
# Setup csworkflow command
cmd = csworkflow+' inflow="'+self._workflow+'"'+ \
' logfile="csworkflow_cmd1.log" chatter=1'
# Check if execution was successful
self.test_assert(self._execute(cmd) == 0,
'Check successful execution from command line')
# Check fit result
self._check_fit_result('wf_crab_results.xml')
# Check csworkflow --help
self._check_help(csworkflow)
# Return
return
# Test csworkflow from Python
def _test_python(self):
"""
Test csworkflow from Python
"""
# Remove result file
gammalib.GFilename('wf_crab_results.xml').remove()
# Set-up csworkflow
workflow = cscripts.csworkflow()
workflow['inflow'] = self._workflow
workflow['logfile'] = 'csworkflow_py1.log'
workflow['chatter'] = 2
# Run script
workflow.logFileOpen() # Make sure we get a log file
workflow.run()
# Check fit result
self._check_fit_result('wf_crab_results.xml')
# Remove result file
gammalib.GFilename('wf_crab_results.xml').remove()
# Set-up csworkflow
workflow = cscripts.csworkflow()
workflow['inflow'] = self._workflow
workflow['logfile'] = 'csworkflow_py2.log'
workflow['chatter'] = 3
# Execute script
workflow.execute()
# Check fit result
self._check_fit_result('wf_crab_results.xml')
# Return
return
# Check fit result XML values
def _check_fit_result(self, filename):
"""
Check result file
"""
# Load fit results
models = gammalib.GModels(filename)
# Set reference values
prefactor = 3.63152145731529e-16
index = 2.39100016863397
pre_background = 1.30910556742873
index_background = 0.252909973473968
# Check fit result values
self.test_value(models['Crab'][2].value(),
prefactor, 1.0e-4 * prefactor,
'Check Crab prefactor')
self.test_value(models['Crab'][3].value(),
-index, 1.0e-4 * index,
'Check Crab index')
self.test_value(models['Background'][0].value(),
pre_background, 1.0e-4 * pre_background,
'Check background model prefactor')
self.test_value(models['Background'][1].value(),
-index_background, 1.0e-4 * index_background,
'Check background model index')
# Return
return
| gpl-3.0 | -1,305,521,599,451,866,400 | 29.259259 | 76 | 0.559772 | false |
simplegeo/authorize | authorize/gen_xml.py | 1 | 17930 | # -*- encoding: utf-8 -*-
import re
import decimal
from xml.etree.cElementTree import fromstring, tostring
from xml.etree.cElementTree import Element, iselement
from authorize import responses
API_SCHEMA = 'https://api.authorize.net/xml/v1/schema/AnetApiSchema.xsd'
API_SCHEMA_NS = "AnetApi/xml/v1/schema/AnetApiSchema.xsd"
PREFIX = "{AnetApi/xml/v1/schema/AnetApiSchema.xsd}"
INDIVIDUAL = u"individual"
BUSINESS = u"business"
ECHECK_CCD = u"CCD"
ECHECK_PPD = u"PPD"
ECHECK_TEL = u"TEL"
ECHECK_WEB = u"WEB"
BANK = u"bank"
CREDIT_CARD = u"cc"
ECHECK = u"echeck"
DAYS_INTERVAL = u"days"
MONTHS_INTERVAL = u"months"
VALIDATION_NONE = u"none"
VALIDATION_TEST = u"testMode"
VALIDATION_LIVE = u"liveMode"
ACCOUNT_CHECKING = u"checking"
ACCOUNT_SAVINGS = u"savings"
ACCOUNT_BUSINESS_CHECKING = u"businessChecking"
AUTH_ONLY = u"auth_only"
CAPTURE_ONLY = u"capture_only"
AUTH_CAPTURE = u"auth_capture"
CREDIT = u"credit"
PRIOR_AUTH_CAPTURE = u"prior_auth_capture"
VOID = u"void"
class AuthorizeSystemError(Exception):
"""
I'm a serious kind of exception and I'm raised when something
went really bad at a lower level than the application level, like
when Authorize is down or when they return an unparseable response
"""
def __init__(self, *args):
self.args = args
def __str__(self):
return "Exception: %s caused by %s" % self.args
def __repr__(self):
# Here we are printing a tuple, the , at the end is _required_
return "AuthorizeSystemError%s" % (self.args,)
c = re.compile(r'([A-Z]+[a-z_]+)')
def convert(arg):
"""
Convert an object to its xml representation
"""
if iselement(arg):
return arg # the element
if isinstance(arg, dict_accessor):
try:
return arg.text_
except:
raise Exception("Cannot serialize %s, missing text_ attribute" % (arg,))
if isinstance(arg, dict):
return arg # attributes of the element
if isinstance(arg, unicode):
return arg
if isinstance(arg, decimal.Decimal):
return unicode(arg)
if arg is True:
return 'true'
if arg is False:
return 'false'
if isinstance(arg, float):
return unicode(round(arg, 2)) # there's nothing less than cents anyway
if isinstance(arg, (int, long)):
return unicode(arg)
if isinstance(arg, str):
raise Exception("'%s' not unicode: can only accept unicode strings" % (arg,))
raise Exception("Cannot convert %s of type %s" % (arg, type(arg)))
def utf8convert(arg):
"""
Further extend L{convert} to return UTF-8 strings instead of unicode.
"""
value = convert(arg)
if isinstance(value, unicode):
return value.encode('utf-8')
return value
class XMLBuilder(object):
"""
XMLBuilder tries to be slightly clever in order to be easier for
the programmer. If you try to add arguments that are None they
won't be added to the output because empty XML tags are not worth
the bandwidth and actually mean something different than None.
"""
def __getattr__(self, key):
def _wrapper_func(*args):
converted = [convert(arg) for arg in args if arg is not None]
if not converted:
return None
el = Element(key)
settext = False
setatts = False
for arg in converted:
if iselement(arg):
el.append(arg)
elif isinstance(arg, basestring):
assert not settext, "cannot set text twice"
el.text = arg
settext = True
elif isinstance(arg, dict):
assert not setatts, "cannot set attributes twice"
for k, v in arg.iteritems():
el.set(k, v)
setatts = True
else:
raise TypeError("unhandled argument type: %s" % type(arg))
return el
return _wrapper_func
x = XMLBuilder()
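# Illustrative sketch of the builder above (the element names are ones used
# elsewhere in this module; the values are placeholders): attribute access
# creates an element of that name, unicode arguments become its text, nested
# elements are appended, dict arguments become attributes, and None arguments
# are silently dropped.
# >>> el = x.billTo(x.firstName(u'John'), x.lastName(None))  # lastName dropped
# >>> el.tag, el[0].tag, el[0].text
# ('billTo', 'firstName', u'John')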
def flatten(tree):
"""
Return a flattened tree in string format encoded in utf-8
"""
return tostring(tree, "utf-8")
def purify(s):
"""
    s is an etree tag that may also contain namespace information;
    if that information is present, remove it, then convert the
    camelCaseTags to the more Python-friendly underscore_notation.
"""
if s.startswith(PREFIX):
s = s[len(PREFIX):]
return '_'.join(atom.lower() for atom in c.split(s) if atom)
class dict_accessor(dict):
"""
Allow accessing a dictionary content also using dot-notation.
"""
def __getattr__(self, attr):
return super(dict_accessor, self).__getitem__(attr)
def __setattr__(self, attr, value):
super(dict_accessor, self).__setitem__(attr, value)
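# Example of the dot-notation access defined above (the keys and values are
# arbitrary placeholders taken from the direct-response field names below):
# >>> d = dict_accessor({'reason_text': u'Approved'})
# >>> d.reason_text
# u'Approved'
# >>> d.amount = u'3.50'
# >>> d['amount']
# u'3.50'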
def parse_node(node):
"""
Return a dict_accessor representation of the node.
"""
new = dict_accessor({})
if node.text and node.text.strip():
t = node.text
if isinstance(t, unicode):
new['text_'] = t
else:
new['text_'] = t.decode('utf-8', "replace")
if node.attrib:
new['attrib_'] = dict_accessor(node.attrib)
for child in node.getchildren():
tag = purify(child.tag)
child = parse_node(child)
if tag not in new:
new[tag] = child
else:
old = new[tag]
if not isinstance(old, list):
new[tag] = [old]
new[tag].append(child)
return new
def to_dict(s, error_codes, do_raise=True, delimiter=u',', encapsulator=u'', uniform=False):
"""
Return a dict_accessor representation of the given string, if raise_
is True an exception is raised when an error code is present.
"""
try:
t = fromstring(s)
except SyntaxError, e:
raise AuthorizeSystemError(e, s)
parsed = dict_accessor(parse_node(t)) # discard the root node which is useless
try:
if isinstance(parsed.messages.message, list): # there's more than a child
return parsed
code = parsed.messages.message.code.text_
if uniform:
parsed.messages.message = [parsed.messages.message]
except KeyError:
return parsed
if code in error_codes:
if do_raise:
raise error_codes[code]
dr = None
if parsed.get('direct_response') is not None:
dr = parsed.direct_response.text_
elif parsed.get('validation_direct_response') is not None:
dr = parsed.validation_direct_response.text_
if dr is not None:
parsed.direct_response = parse_direct_response(dr,
delimiter,
encapsulator)
return parsed
m = ['code', 'subcode', 'reason_code', 'reason_text', 'auth_code',
'avs', 'trans_id', 'invoice_number', 'description', 'amount', 'method',
'trans_type', 'customer_id', 'first_name', 'last_name', 'company',
'address', 'city', 'state', 'zip', 'country', 'phone', 'fax', 'email',
'ship_first_name', 'ship_last_name', 'ship_company', 'ship_address',
'ship_city', 'ship_state', 'ship_zip', 'ship_country', 'tax', 'duty',
'freight', 'tax_exempt', 'po_number', 'md5_hash', 'ccv',
'holder_verification']
def parse_direct_response(s, delimiter=u',', encapsulator=u''):
"""
Very simple format but made of many fields, the most complex ones
have the following meanings:
code:
see L{responses.aim_codes} for all the codes
avs:
see L{responses.avs_codes} for all the codes
method: CC or ECHECK
trans_type:
AUTH_CAPTURE
AUTH_ONLY
CAPTURE_ONLY
CREDIT
PRIOR_AUTH_CAPTURE
VOID
tax_exempt: true, false, T, F, YES, NO, Y, N, 1, 0
ccv:
see L{responses.ccv_codes} for all the codes
holder_verification:
see L{responses.holder_verification_codes} for all the codes
"""
if not isinstance(s, unicode):
s = s.decode('utf-8', 'replace')
    # with <e> as the encapsulator and <d> as the delimiter,
# this is the format of the direct response:
# <e>field<e><d><e>field<e><d><e>field<e>
#
# Here's a regexp that would parse this:
# "\<e>([^\<d>\<e>]*)\<e>\<d>?"
# But it has a problem when <e> is '' and I don't
# have the will to do the much harder one that actually
# does it well... So let's just split and strip.
e = encapsulator
d = delimiter
v = s.split(e+d+e)
v[0] = v[0].lstrip(e)
v[-1] = v[-1].rstrip(e)
if not len(v) >= len(m):
d = dict_accessor({'error': "Couldn't parse the direct response"})
else:
d = dict_accessor(dict(zip(m, v)))
d.original = s
return d
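# Sketch of the field mapping above using a made-up, numbered response string
# (a real AIM direct response carries the len(m) fields listed in m):
# >>> fake = u','.join(unicode(i) for i in range(len(m)))
# >>> resp = parse_direct_response(fake)
# >>> resp.code, resp.avs, resp.amount
# (u'0', u'5', u'9')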
def macro(action, login, key, *body):
"""
Main XML structure re-used by every request.
"""
return getattr(x, action)(
{'xmlns': API_SCHEMA_NS},
x.merchantAuthentication(
x.name(login),
x.transactionKey(key)
),
*body
)
def _address(pre='', kw={}, *extra):
"""
Basic address components with extension capability.
"""
return [
x.firstName(kw.get(pre+'first_name')), # optional
x.lastName(kw.get(pre+'last_name')), # optional
x.company(kw.get(pre+'company')), # optional
x.address(kw.get(pre+'address')), # optional
x.city(kw.get(pre+'city')), # optional
x.state(kw.get(pre+'state')), # optional
x.zip(kw.get(pre+'zip')), # optional
x.country(kw.get(pre+'country')) # optional
] + list(extra)
def address(pre='', **kw):
"""
Simple address with prefixing possibility
"""
return x.address(
*_address(pre, kw)
)
def address_2(pre='', **kw):
"""
Extended address with phoneNumber and faxNumber in the same tag
"""
return x.address(
*_address(pre, kw,
x.phoneNumber(kw.get(pre+'phone')),
x.faxNumber(kw.get(pre+'fax'))
)
)
def update_address(**kw):
return x.address(
*_address('ship_', kw,
x.phoneNumber(kw.get('ship_phone')),
x.faxNumber(kw.get('ship_fax')),
x.customerAddressId(kw['customer_address_id'])
)
)
def billTo(**kw):
return x.billTo(
*_address('bill_', kw,
x.phoneNumber(kw.get('bill_phone')), # optional
x.faxNumber(kw.get('bill_fax')) # optional
)# optional
)
def arbBillTo(**kw):
# This is just to be sure that they were passed.
# as the spec requires
kw['bill_first_name']
kw['bill_last_name']
return x.billTo(
*_address('bill_', kw)
)
def _shipTo(**kw):
return _address('ship_', kw,
x.phoneNumber(kw.get('ship_phone')),
x.faxNumber(kw.get('ship_fax'))
)
def shipToList(**kw):
return x.shipToList(
*_shipTo(**kw)
)
def shipTo(**kw):
return x.shipTo(
*_shipTo(**kw)
)
def payment(**kw):
profile_type = kw.get('profile_type', CREDIT_CARD)
if profile_type == CREDIT_CARD:
return x.payment(
x.creditCard(
x.cardNumber(kw['card_number']),
x.expirationDate(kw['expiration_date']), # YYYY-MM
x.cardCode(kw['csc'])
)
)
elif profile_type == BANK:
return x.payment(
x.bankAccount(
x.accountType(kw.get('account_type')), # optional: checking, savings, businessChecking
x.routingNumber(kw['routing_number']), # 9 digits
x.accountNumber(kw['account_number']), # 5 to 17 digits
x.nameOnAccount(kw['name_on_account']),
x.echeckType(kw.get('echeck_type')), # optional: CCD, PPD, TEL, WEB
x.bankName(kw.get('bank_name')) # optional
)
)
def transaction(**kw):
assert len(kw.get('line_items', [])) <= 30
content = [
x.amount(kw['amount']),
x.tax(
x.amount(kw.get('tax_amount')),
x.name(kw.get('tax_name')),
x.description(kw.get('tax_descr'))
),
x.shipping(
x.amount(kw.get('ship_amount')),
x.name(kw.get('ship_name')),
x.name(kw.get('ship_description'))
),
x.duty(
x.amount(kw.get('duty_amount')),
x.name(kw.get('duty_name')),
x.description(kw.get('duty_description'))
)
] + list(
x.lineItems(
x.itemId(line.get('item_id')),
x.name(line['name']),
x.description(line.get('description')),
x.quantity(line.get('quantity')),
x.unitPrice(line.get('unit_price')),
x.taxable(line.get('taxable'))
)
for line in kw.get('line_items', [])
) + [
x.customerProfileId(kw['customer_profile_id']),
x.customerPaymentProfileId(kw['customer_payment_profile_id']),
x.customerAddressId(kw.get('customer_address_id')),
]
ptype = kw.get('profile_type', AUTH_ONLY)
if ptype in (AUTH_ONLY, CAPTURE_ONLY, AUTH_CAPTURE, CREDIT):
content += [
x.order(
x.invoiceNumber(kw.get('invoice_number')),
x.description(kw.get('description')),
x.purchaseOrderNumber(kw.get('purchase_order_number'))
)
]
if ptype in (AUTH_ONLY, CAPTURE_ONLY, AUTH_CAPTURE):
content += [
x.taxExempt(kw.get('tax_exempt', False)),
x.recurringBilling(kw.get('recurring', False)),
x.cardCode(kw.get('ccv'))
]
if ptype == AUTH_ONLY:
profile_type = x.profileTransAuthOnly(
*content
)
elif ptype == CAPTURE_ONLY:
profile_type = x.profileTransCaptureOnly(
*(content + [x.approvalCode(kw['approval_code'])])
)
elif ptype == AUTH_CAPTURE:
profile_type = x.profileTransAuthCapture(
*content
)
elif ptype == PRIOR_AUTH_CAPTURE:
profile_type = x.profileTransPriorAuthCapture(
*(content + [x.transId(kw['trans_id'])])
)
# NOTE: It is possible to issue a refund without the customerProfileId and
# the customerPaymentProfileId being supplied. However, this is not
# currently supported, and requires sending the masked credit card number.
elif ptype == CREDIT:
profile_type = x.profileTransRefund(
*(content + [x.transId(kw['trans_id'])])
)
elif ptype == VOID:
profile_type = x.profileTransVoid(
*(content + [x.transId(kw['trans_id'])])
)
else:
raise Exception("Unsupported profile type: %r" % (ptype,))
return x.transaction(profile_type)
def paymentProfiles(**kw):
return x.paymentProfiles(
x.customerType(kw.get('customer_type')), # optional: individual, business
billTo(**kw),
payment(**kw)
)
def update_paymentProfile(**kw):
return x.paymentProfile(
x.customerType(kw.get('customer_type')), # optional
billTo(**kw),
payment(**kw),
x.customerPaymentProfileId(kw['customer_payment_profile_id'])
)
def paymentProfile(**kw):
return x.paymentProfile(
x.customerType(kw.get('customer_type')), # optional
billTo(**kw),
payment(**kw)
)
def profile(**kw):
content = [
x.merchantCustomerId(kw['customer_id']),
x.description(kw.get('description')),
x.email(kw.get('email')),
]
payment_profiles = kw.get('payment_profiles', None)
if payment_profiles is not None:
content = content + list(
paymentProfiles(**prof)
for prof in payment_profiles
)
else:
if kw.get('card_number') or kw.get("routing_number"):
content = content + [paymentProfiles(**kw)]
return x.profile(
*(content + [shipToList(**kw)])
)
def subscription(**kw):
trial_occurrences = kw.get('trial_occurrences')
trial_amount = None
if trial_occurrences is not None:
trial_amount = kw['trial_amount']
return x.subscription(
x.name(kw.get('subscription_name')),
x.paymentSchedule(
x.interval(
x.length(kw.get('interval_length')), # up to 3 digits, 1-12 for months, 7-365 days
x.unit(kw.get('interval_unit')) # days or months
),
x.startDate(kw.get('start_date')), # YYYY-MM-DD
x.totalOccurrences(kw.get('total_occurrences', 9999)),
x.trialOccurrences(trial_occurrences)
),
x.amount(kw.get('amount')),
x.trialAmount(trial_amount),
payment(**kw),
x.order(
x.invoiceNumber(kw.get('invoice_number')),
x.description(kw.get('description'))
),
x.customer(
x.type(kw.get('customer_type')), # individual, business
x.id(kw.get('customer_id')),
x.email(kw.get('customer_email')),
x.phoneNumber(kw.get('phone')),
x.faxNumber(kw.get('fax')),
x.driversLicense(
x.number(kw.get('driver_number')),
x.state(kw.get('driver_state')),
x.dateOfBirth(kw.get('driver_birth'))
),
x.taxId(kw.get('tax_id'))
),
arbBillTo(**kw),
shipTo(**kw)
)
def base(action, login, key, kw, *main):
return flatten(
macro(action, login, key,
x.refId(kw.get('ref_id')),
*main
)
)
__doc__ = """\
Please refer to http://www.authorize.net/support/CIM_XML_guide.pdf
for documentation on the XML protocol implemented here.
"""
| mit | 7,084,245,678,588,358,000 | 30.236934 | 102 | 0.566481 | false |
singingwolfboy/flask-dance | flask_dance/contrib/gitlab.py | 1 | 4319 | from flask_dance.consumer import OAuth2ConsumerBlueprint
from flask_dance.consumer.requests import OAuth2Session
from functools import partial
from flask.globals import LocalProxy, _lookup_app_object
from flask import _app_ctx_stack as stack
__maintainer__ = "Justin Georgeson <[email protected]>"
class NoVerifyOAuth2Session(OAuth2Session):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.verify = False
def make_gitlab_blueprint(
client_id=None,
client_secret=None,
*,
scope=None,
redirect_url=None,
redirect_to=None,
login_url=None,
authorized_url=None,
session_class=None,
storage=None,
hostname="gitlab.com",
verify_tls_certificates=True,
rule_kwargs=None,
):
"""
Make a blueprint for authenticating with GitLab using OAuth 2. This requires
a client ID and client secret from GitLab. You should either pass them to
this constructor, or make sure that your Flask application config defines
them, using the variables :envvar:`GITLAB_OAUTH_CLIENT_ID` and
:envvar:`GITLAB_OAUTH_CLIENT_SECRET`.
Args:
client_id (str): The client ID for your application on GitLab.
client_secret (str): The client secret for your application on GitLab
scope (str, optional): comma-separated list of scopes for the OAuth token
redirect_url (str): the URL to redirect to after the authentication
dance is complete
redirect_to (str): if ``redirect_url`` is not defined, the name of the
view to redirect to after the authentication dance is complete.
The actual URL will be determined by :func:`flask.url_for`
login_url (str, optional): the URL path for the ``login`` view.
Defaults to ``/gitlab``
authorized_url (str, optional): the URL path for the ``authorized`` view.
Defaults to ``/gitlab/authorized``.
session_class (class, optional): The class to use for creating a
Requests session. Defaults to
:class:`~flask_dance.consumer.requests.OAuth2Session`.
storage: A token storage class, or an instance of a token storage
class, to use for this blueprint. Defaults to
:class:`~flask_dance.consumer.storage.session.SessionStorage`.
hostname (str, optional): If using a private instance of GitLab CE/EE,
specify the hostname, default is ``gitlab.com``.
verify_tls_certificates (bool, optional): Specify whether TLS
certificates should be verified. Set this to ``False`` if
certificates fail to validate for self-hosted GitLab instances.
rule_kwargs (dict, optional): Additional arguments that should be passed when adding
the login and authorized routes. Defaults to ``None``.
:rtype: :class:`~flask_dance.consumer.OAuth2ConsumerBlueprint`
:returns: A :doc:`blueprint <flask:blueprints>` to attach to your Flask app.
"""
if not verify_tls_certificates:
if session_class:
raise ValueError(
"cannot override session_class and disable certificate validation"
)
else:
session_class = NoVerifyOAuth2Session
gitlab_bp = OAuth2ConsumerBlueprint(
"gitlab",
__name__,
client_id=client_id,
client_secret=client_secret,
scope=scope,
base_url=f"https://{hostname}/api/v4/",
authorization_url=f"https://{hostname}/oauth/authorize",
token_url=f"https://{hostname}/oauth/token",
redirect_url=redirect_url,
redirect_to=redirect_to,
login_url=login_url,
authorized_url=authorized_url,
session_class=session_class,
storage=storage,
token_url_params={"verify": verify_tls_certificates},
rule_kwargs=rule_kwargs,
)
gitlab_bp.from_config["client_id"] = "GITLAB_OAUTH_CLIENT_ID"
gitlab_bp.from_config["client_secret"] = "GITLAB_OAUTH_CLIENT_SECRET"
@gitlab_bp.before_app_request
def set_applocal_session():
ctx = stack.top
ctx.gitlab_oauth = gitlab_bp.session
return gitlab_bp
gitlab = LocalProxy(partial(_lookup_app_object, "gitlab_oauth"))
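# Typical wiring, shown as a sketch; the app, secret key and credentials are
# placeholders, not values supplied by this module:
# >>> from flask import Flask
# >>> app = Flask(__name__)
# >>> app.secret_key = "replace-me"
# >>> blueprint = make_gitlab_blueprint(client_id="...", client_secret="...")
# >>> app.register_blueprint(blueprint, url_prefix="/login")
# Inside a request context, ``gitlab`` behaves like a requests session bound
# to the GitLab API base URL, e.g. ``gitlab.get("user")``.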
| mit | -1,558,331,981,668,643,000 | 38.990741 | 92 | 0.659875 | false |
badbytes/pymeg | pdf2py/update_data_header.py | 1 | 2127 | # update_data_header.py
#
# Copyright 2010 dan collins <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
'''update_data_header.py
updates the p.data.hdr header to prepare for rewrite of 4D format data.
accommodates cases where fewer channels are read than in the original file, or the window changes, etc.'''
from numpy import *
def cutchannels(data):
data.hdr.header_data.total_chans = array([data.channels.indexlist.__len__()], dtype=data.hdr.header_data.total_chans.dtype)
channel_ref_data = arange(data.channels.indexlist.__len__()).tolist()
#channel_ref_data = []
#for i in data.channels.indexlist:
for i in range(0, data.channels.indexlist.__len__()):
#print data.channels.reverseindex[i], channel_ref_data, data.hdr.channel_ref_data[data.channels.reverseindex[i]]
try:
channel_ref_data[i] = data.hdr.channel_ref_data[data.channels.indexlist[i]]
#channel_ref_data.append(data.hdr.channel_ref_data[i])
#data.hdr.channel_ref_data[i].index = array([channel_ref_data.__len__()], dtype=data.hdr.channel_ref_data[i].index.dtype)
channel_ref_data[i].index = array([i], dtype=data.hdr.channel_ref_data[data.channels.indexlist[i]].index.dtype)
except IndexError:
print 'IndexError... NEED TO FIX'
#print channel_ref_data[i].index
data.hdr.channel_ref_data = channel_ref_data
| gpl-3.0 | -5,053,035,822,500,065,000 | 47.340909 | 129 | 0.684532 | false |
cliftonmcintosh/openstates | openstates/il/__init__.py | 1 | 5376 | from billy.utils.fulltext import text_after_line_numbers
import lxml.html
from .bills import ILBillScraper
from .legislators import ILLegislatorScraper
from .committees import ILCommitteeScraper
from .events import ILEventScraper
metadata = {
'abbreviation': 'il',
'name': 'Illinois',
'legislature_name': 'Illinois General Assembly',
'legislature_url': 'http://www.ilga.gov/',
'capitol_timezone': 'America/Chicago',
'chambers': {
'upper': {'name': 'Senate', 'title': 'Senator'},
'lower': {'name': 'House', 'title': 'Representative'},
},
'terms': [
{'name': '93rd', 'sessions': ['93rd', 'Special_93rd'],
'start_year': 2003, 'end_year': 2004},
{'name': '94th', 'sessions': ['94th'],
'start_year': 2005, 'end_year': 2006},
{'name': '95th', 'sessions': ['95th', 'Special_95th'],
'start_year': 2007, 'end_year': 2008},
{'name': '96th', 'sessions': ['96th', 'Special_96th'],
'start_year': 2009, 'end_year': 2010},
{'name': '97th', 'sessions': ['97th'],
'start_year': 2011, 'end_year': 2012},
{'name': '98th', 'sessions': ['98th'],
'start_year': 2013, 'end_year': 2014},
{'name': '99th', 'sessions': ['99th'],
'start_year': 2015, 'end_year': 2016},
{'name': '100th', 'sessions': ['100th'],
'start_year': 2017, 'end_year': 2018},
],
'feature_flags': [ 'events', 'influenceexplorer' ],
'session_details': {
'100th': {'display_name': '100th Regular Session (2017-2018)',
'_scraped_name': '100 (2017-2018)',
'speaker': 'Madigan',
'president': 'Cullerton',
'params': { 'GA': '100', 'SessionId': '91' },
},
'99th': {'display_name': '99th Regular Session (2015-2016)',
'_scraped_name': '99 (2015-2016)',
'speaker': 'Madigan',
'president': 'Cullerton',
'params': { 'GA': '99', 'SessionId': '88' },
},
'98th': {'display_name': '98th Regular Session (2013-2014)',
'_scraped_name': '98 (2013-2014)',
'speaker': 'Madigan',
'president': 'Cullerton',
'params': { 'GA': '98', 'SessionId': '85' },
},
'97th': {'display_name': '97th Regular Session (2011-2012)',
'_scraped_name': '',
'params': { 'GA': '97', 'SessionId': '84' },
'speaker': 'Madigan',
'president': 'Cullerton',
},
'96th': {'display_name': '96th Regular Session (2009-2010)',
'_scraped_name': '96 (2009-2010)',
'params': { 'GA': '96', 'SessionId': '76' },
'speaker': 'Madigan',
'president': 'Cullerton',
},
'Special_96th': {'display_name': '96th Special Session (2009-2010)',
'params': { 'GA': '96', 'SessionId': '82', 'SpecSess': '1' },
'speaker': 'Madigan',
'president': 'Cullerton',
},
'95th': {'display_name': '95th Regular Session (2007-2008)',
'_scraped_name': '95 (2007-2008)',
'params': { 'GA': '95', 'SessionId': '51' },
'speaker': 'Madigan',
'president': 'Jones, E.',
},
'Special_95th': {'display_name': '95th Special Session (2007-2008)',
'params': { 'GA': '95', 'SessionId': '52', 'SpecSess': '1' },
'speaker': 'Madigan',
'president': 'Jones, E.',
},
'94th': {'display_name': '94th Regular Session (2005-2006)',
'_scraped_name': '94 (2005-2006)',
'params': { 'GA': '94', 'SessionId': '50' },
'speaker': 'Madigan',
'president': 'Jones, E.',
},
'93rd': {'display_name': '93rd Regular Session (2003-2004)',
'_scraped_name': '93 (2003-2004)',
'params': { 'GA': '93', 'SessionId': '3' },
'speaker': 'Madigan',
'president': 'Jones, E.',
},
'Special_93rd': {'display_name': '93rd Special Session (2003-2004)',
'params': { 'GA': '93', 'SessionID': '14', 'SpecSess': '1' },
'speaker': 'Madigan',
'president': 'Jones, E.',
},
},
'_ignored_scraped_sessions': [
'97 (2011-2012)',
'92 (2001-2002)',
'91 (1999-2000)',
'90 (1997-1998)',
'89 (1995-1996)',
'88 (1993-1994)',
'87 (1991-1992)',
'86 (1989-1990)',
'85 (1987-1988)',
'84 (1985-1986)',
'83 (1983-1984)',
'82 (1981-1982)',
'81 (1979-1980)',
'80 (1977-1978)',
'79 (1975-1976)',
'78 (1973-1974)',
'77 (1971-1972)']
}
def session_list():
from billy.scrape.utils import url_xpath
return url_xpath('http://ilga.gov/PreviousGA.asp',
'//option/text()')
def extract_text(doc, data):
doc = lxml.html.fromstring(data)
text = ' '.join(x.text_content() for x in doc.xpath('//td[@class="xsl"]'))
return text
| gpl-3.0 | -7,407,785,103,301,975,000 | 37.956522 | 86 | 0.456101 | false |
google/revisiting-self-supervised | self_supervision/patch_utils.py | 1 | 12719 | #!/usr/bin/python
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for patch based image processing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import struct
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
import preprocess
import utils
from models.utils import get_net
from trainer import make_estimator
FLAGS = tf.flags.FLAGS
PATCH_H_COUNT = 3
PATCH_W_COUNT = 3
PATCH_COUNT = PATCH_H_COUNT * PATCH_W_COUNT
# It's supposed to be in the root folder, which is also pwd when running, if the
# instructions in the README are followed. Hence not a flag.
PERMUTATION_PATH = 'permutations_100_max.bin'
def apply_model(image_fn,
is_training,
num_outputs,
perms,
make_signature=False):
"""Creates the patch based model output from patches representations.
Args:
image_fn: function returns image tensor.
is_training: is training flag used for batch norm and drop out.
num_outputs: number of output classes.
perms: numpy array with shape [m, k], element range [0, PATCH_COUNT). k
stands for the patch numbers used in a permutation. m stands forthe number
of permutations. Each permutation is used to concat the patch inputs
[n*PATCH_COUNT, h, w, c] into tensor with shape [n*m, h, w, c*k].
make_signature: whether to create signature for hub module.
Returns:
out: output tensor with shape [n*m, 1, 1, num_outputs].
Raises:
ValueError: An error occurred when the architecture is unknown.
"""
images = image_fn()
net = get_net(num_classes=FLAGS.get_flag_value('embed_dim', 1000))
out, end_points = net(images, is_training,
weight_decay=FLAGS.get_flag_value('weight_decay', 1e-4))
print(end_points)
if not make_signature:
out = permutate_and_concat_batch_patches(out, perms)
out = fully_connected(out, num_outputs, is_training=is_training)
out = tf.squeeze(out, [1, 2])
if make_signature:
hub.add_signature(inputs={'image': images}, outputs=out)
hub.add_signature(
name='representation',
inputs={'image': images},
outputs=end_points)
return out
def image_grid(images, ny, nx, padding=0):
"""Create a batch of image grids from a batch of images.
Args:
images: A batch of patches (B,N,H,W,C)
ny: vertical number of images
nx: horizontal number of images
padding: number of zeros between images, if any.
Returns:
A tensor batch of image grids shaped (B,H*ny,W*nx,C), although that is a
simplifying lie: if padding is used h/w will be different.
"""
with tf.name_scope('grid_image'):
if padding:
padding = [padding, padding]
images = tf.pad(images, [[0, 0], [0, 0], padding, padding, [0, 0]])
return tf.concat([
tf.concat([images[:, y * nx + x] for x in range(nx)], axis=-2)
for y in range(ny)], axis=-3)
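# Shape sketch for image_grid (the sizes below are placeholders): nine 32x32x3
# patches per example arranged on a 3x3 grid give
# >>> patches = tf.zeros([2, 9, 32, 32, 3])
# >>> image_grid(patches, ny=3, nx=3).shape.as_list()
# [2, 96, 96, 3]
# With padding=p, each patch is first zero-padded to (32 + 2*p, 32 + 2*p).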
def creates_estimator_model(images, labels, perms, num_classes, mode):
"""Creates EstimatorSpec for the patch based self supervised models.
Args:
images: images
labels: self supervised labels (class indices)
perms: patch permutations
num_classes: number of different permutations
mode: model's mode: training, eval or prediction
Returns:
EstimatorSpec
"""
print(' +++ Mode: %s, images: %s, labels: %s' % (mode, images, labels))
images = tf.reshape(images, shape=[-1] + images.get_shape().as_list()[-3:])
if mode in [tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL]:
with tf.variable_scope('module'):
image_fn = lambda: images
logits = apply_model(
image_fn=image_fn,
is_training=(mode == tf.estimator.ModeKeys.TRAIN),
num_outputs=num_classes,
perms=perms,
make_signature=False)
else:
input_shape = utils.str2intlist(
FLAGS.get_flag_value('serving_input_shape', 'None,None,None,3'))
image_fn = lambda: tf.placeholder( # pylint: disable=g-long-lambda
shape=input_shape,
dtype=tf.float32)
apply_model_function = functools.partial(
apply_model,
image_fn=image_fn,
num_outputs=num_classes,
perms=perms,
make_signature=True)
tf_hub_module_spec = hub.create_module_spec(
apply_model_function, [(utils.TAGS_IS_TRAINING, {
'is_training': True
}), (set(), {
'is_training': False
})],
drop_collections=['summaries'])
tf_hub_module = hub.Module(tf_hub_module_spec, trainable=False, tags=set())
hub.register_module_for_export(tf_hub_module, export_name='module')
logits = tf_hub_module(images)
return make_estimator(mode, predictions=logits)
# build loss and accuracy
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
loss = tf.reduce_mean(loss)
eval_metrics = (
lambda labels, logits: { # pylint: disable=g-long-lambda
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=tf.argmax(logits, axis=-1))},
[labels, logits])
return make_estimator(mode, loss, eval_metrics, logits)
def fully_connected(inputs,
num_classes=100,
weight_decay=5e-4,
keep_prob=0.5,
is_training=True):
"""Two layers fully connected network copied from Alexnet fc7-fc8."""
net = inputs
_, _, w, _ = net.get_shape().as_list()
kernel_regularizer = tf.contrib.layers.l2_regularizer(scale=weight_decay)
net = tf.layers.conv2d(
net,
filters=4096,
kernel_size=w,
padding='same',
kernel_initializer=tf.truncated_normal_initializer(0.0, 0.005),
bias_initializer=tf.constant_initializer(0.1),
kernel_regularizer=kernel_regularizer)
net = tf.layers.batch_normalization(
net, momentum=0.997, epsilon=1e-5, fused=None, training=is_training)
net = tf.nn.relu(net)
if is_training:
net = tf.nn.dropout(net, keep_prob=keep_prob)
net = tf.layers.conv2d(
net,
filters=num_classes,
kernel_size=1,
padding='same',
kernel_initializer=tf.truncated_normal_initializer(0.0, 0.005),
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=kernel_regularizer)
return net
def generate_patch_locations():
"""Generates relative patch locations."""
perms = np.array([(i, 4) for i in range(9) if i != 4])
return perms, len(perms)
def load_permutations():
"""Loads a set of pre-defined permutations."""
with tf.gfile.Open(PERMUTATION_PATH, 'rb') as f:
int32_size = 4
s = f.read(int32_size * 2)
[num_perms, c] = struct.unpack('<ll', s)
perms = []
for _ in range(num_perms * c):
s = f.read(int32_size)
x = struct.unpack('<l', s)
perms.append(x[0])
perms = np.reshape(perms, [num_perms, c])
# The bin file used index [1,9] for permutation, updated to [0, 8] for index.
perms = perms - 1
assert np.min(perms) == 0 and np.max(perms) == PATCH_COUNT - 1
return perms, num_perms
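# Sketch of the binary layout read above: two little-endian int32s
# [num_perms, c] followed by num_perms * c int32 entries in the 1-based
# range [1, 9]. A tiny compatible file (the path is only an example) could
# be produced with:
# >>> perms = np.array([np.arange(1, 10), np.arange(9, 0, -1)], dtype=np.int32)
# >>> with tf.gfile.Open('/tmp/permutations_2_max.bin', 'wb') as f:
# ...   f.write(struct.pack('<ll', perms.shape[0], perms.shape[1]))
# ...   f.write(struct.pack('<%dl' % perms.size, *perms.flatten()))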
def permutate_and_concat_image_patches(patch_embeddings, perms):
"""Permutates patches from an image according to permutations.
Args:
patch_embeddings: input tensor with shape [PATCH_COUNT, h, w, c], where
PATCH_COUNT is the patch number per image.
perms: numpy array with shape [m, k], with element in range
[0, PATCH_COUNT). Permutation is used to concat the patches.
Returns:
out: output tensor with shape [m, h, w, c*k].
"""
_, h, w, c = patch_embeddings.get_shape().as_list()
if isinstance(perms, np.ndarray):
num_perms, perm_len = perms.shape
else:
num_perms, perm_len = perms.get_shape().as_list()
def permutate_patch(perm):
permed = tf.gather(patch_embeddings, perm, axis=0)
concat_tensor = tf.transpose(permed, perm=[1, 2, 3, 0])
concat_tensor = tf.reshape(
concat_tensor, shape=[-1, h, w, perm_len * c])
return concat_tensor
permed_patches = tf.stack([
permutate_patch(perms[i]) for i in range(num_perms)
])
return permed_patches
def permutate_and_concat_batch_patches(batch_patch_embeddings, perms):
"""Permutates patches from a mini batch according to permutations.
Args:
batch_patch_embeddings: input tensor with shape [n*PATCH_COUNT, h, w, c] or
[n*PATCH_COUNT, c], where PATCH_COUNT is the patch number per image
and n is the number of images in this mini batch.
perms: numpy array with shape [m, k], with element in range
[0, PATCH_COUNT). Permutation is used to concat the patches.
Returns:
out: output tensor with shape [n*m, h, w, c*k].
"""
print(' +++ permutate patches input: %s' % batch_patch_embeddings)
if len(batch_patch_embeddings.get_shape().as_list()) == 4:
_, h, w, c = batch_patch_embeddings.get_shape().as_list()
elif len(batch_patch_embeddings.get_shape().as_list()) == 2:
_, c = batch_patch_embeddings.get_shape().as_list()
h, w = (1, 1)
else:
raise ValueError('Unexpected batch_patch_embeddings shape: %s' %
batch_patch_embeddings.get_shape().as_list())
patches = tf.reshape(batch_patch_embeddings, shape=[-1, PATCH_COUNT, h, w, c])
patches = tf.stack([
permutate_and_concat_image_patches(patches[i], perms)
for i in range(patches.get_shape().as_list()[0])
])
patches = tf.reshape(patches, shape=[-1, h, w, perms.shape[1] * c])
print(' +++ permutate patches output: %s' % batch_patch_embeddings)
return patches
def get_patch_representation(
images,
hub_module,
patch_preprocess='crop_patches,standardization',
is_training=False,
target_features=9000,
pooling_fn=None,
combine_patches='concat',
signature='representation'):
"""Permutates patches from a mini batch according to permutations.
Args:
images: input images, can be full image (NHWC) or image patchs (NPHWC).
hub_module: hub module.
patch_preprocess: preprocess applied to the image. Note that preprocess may
require setting parameters in the FLAGS.config file.
is_training: is training mode.
target_features: target feature dimension. Note that the features might
exceed this number if there're too many channels.
pooling_fn: pooling method applied to the features.
combine_patches: one of {'concat', 'max_pool', 'avg_pool'}.
signature: signature for the hub module.
Returns:
out: output representation tensors.
Raises:
ValueError: unsupported combine_patches.
"""
if patch_preprocess:
preprocess_fn = preprocess.get_preprocess_fn(patch_preprocess, is_training)
images = preprocess_fn({'image': images})['image']
assert len(images.get_shape().as_list()) == 5, 'Shape must match NPHWC.'
_, num_of_patches, h, w, c = images.get_shape().as_list()
images = tf.reshape(images, shape=[-1, h, w, c])
out_tensors = hub_module(
images,
signature=signature,
as_dict=True)
if combine_patches == 'concat':
target_features = target_features // num_of_patches
if pooling_fn is not None:
out_tensors = pooling_fn(out_tensors)
for k, t in out_tensors.iteritems():
if len(t.get_shape().as_list()) == 2:
t = t[:, None, None, :]
assert len(t.get_shape().as_list()) == 4, 'Unsupported rank %d' % len(
t.get_shape().as_list())
# Take patch-dimension out of batch-dimension: [NP]HWC -> NPHWC
t = tf.reshape(t, [-1, num_of_patches] + t.get_shape().as_list()[-3:])
if combine_patches == 'concat':
# [N, P, H, W, C] -> [N, H, W, P*C]
_, p, h, w, c = t.get_shape().as_list()
out_tensors[k] = tf.reshape(
tf.transpose(t, perm=[0, 2, 3, 4, 1]), tf.stack([-1, h, w, p * c]))
elif combine_patches == 'max_pool':
# Reduce max on P channel of NPHWC.
out_tensors[k] = tf.reduce_max(t, axis=1)
elif combine_patches == 'avg_pool':
# Reduce mean on P channel of NPHWC.
out_tensors[k] = tf.reduce_mean(t, axis=1)
else:
raise ValueError(
'Unsupported combine patches method %s.' % combine_patches)
return out_tensors
| apache-2.0 | 1,947,931,036,470,123,300 | 33.008021 | 80 | 0.652174 | false |
QISKit/qiskit-sdk-py | test/python/quantum_info/test_operators.py | 1 | 1485 | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Quick program to test the quantum operators modules."""
import unittest
import numpy as np
from scipy.linalg import expm
from qiskit.quantum_info import process_fidelity, Pauli
from qiskit.test import QiskitTestCase
class TestOperators(QiskitTestCase):
"""Tests for qi.py"""
def test_process_fidelity(self):
"""Test the process_fidelity function"""
unitary1 = Pauli(label='XI').to_matrix()
unitary2 = np.kron(np.array([[0, 1], [1, 0]]), np.eye(2))
process_fidelity(unitary1, unitary2)
self.assertAlmostEqual(process_fidelity(unitary1, unitary2), 1.0, places=7)
theta = 0.2
unitary1 = expm(-1j*theta*Pauli(label='X').to_matrix()/2)
unitary2 = np.array([[np.cos(theta/2), -1j*np.sin(theta/2)],
[-1j*np.sin(theta/2), np.cos(theta/2)]])
self.assertAlmostEqual(process_fidelity(unitary1, unitary2), 1.0, places=7)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 6,324,461,908,390,927,000 | 34.357143 | 83 | 0.672054 | false |
DevynCJohnson/Pybooster | pylib/code_interpreter.py | 1 | 5388 | #!/usr/bin/env python3
# -*- coding: utf-8; Mode: Python; indent-tabs-mode: nil; tab-width: 4 -*-
# vim: set fileencoding=utf-8 filetype=python syntax=python.doxygen fileformat=unix tabstop=4 expandtab :
# kate: encoding utf-8; bom off; syntax python; indent-mode python; eol unix; replace-tabs off; indent-width 4; tab-width 4; remove-trailing-space on;
"""@brief Interpret various computer languages using installed interpreters.
@file code_interpreter.py
@package pybooster.code_interpreter
@version 2019.07.14
@author Devyn Collier Johnson <[email protected]>
@copyright LGPLv3
@section LICENSE
GNU Lesser General Public License v3
Copyright (c) Devyn Collier Johnson, All rights reserved.
This software is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this software.
"""
from subprocess import getoutput # nosec
from sys import stdout
__all__: list = [
# CLISP #
r'execclispfile',
# COFFEESCRIPT #
r'execcoffeescript',
# JAVASCRIPT #
r'execjs',
r'execjsfile',
# LUA #
r'execlua',
r'execluafile',
# PERL #
r'execperl',
r'execperlfile',
r'initperl',
# PHP #
r'execphp',
r'execphpfile',
# RUBY #
r'execruby',
r'execrubyfile',
# SCALA #
r'execscala',
r'execscala',
# SHELL #
r'execsh',
r'execshfile',
r'initsh'
]
# CLISP #
def execclispfile(_filename: str) -> str:
"""Execute a CLisp file given as a str and return the output as a str."""
return getoutput(r'clisp ' + _filename)
# COFFEESCRIPT #
def execcoffeescript(_code: str) -> str:
"""Execute Coffeescript code given as a str and return the output as a str."""
return getoutput('coffeescript --eval \'' + _code.replace('\'', '\\\'') + '\'')
# JAVASCRIPT #
def execjs(_code: str) -> str:
"""Execute JavaScript code given as a str and return the output as a str."""
return getoutput('jsc -e \'' + _code.replace('\'', '\\\'') + '\'')
def execjsfile(_filename: str) -> str:
"""Execute a JavaScript file given as a str and return the output as a str."""
return getoutput(r'jsc -e ' + _filename)
# LUA #
def execlua(_code: str) -> str:
"""Execute Lua code given as a str and return the output as a str."""
return getoutput('lua -e \'' + _code.replace('\'', '\\\'') + '\'')
def execluafile(_filename: str) -> str:
"""Execute a Lua script given as a str and return the output as a str."""
return getoutput(r'lua ' + _filename)
# PERL #
def execperl(_code: str) -> str:
"""Execute Perl code given as a str and return the output as a str."""
return getoutput('perl -e \'' + _code.replace('\'', '\\\'') + '\'')
def execperlfile(_filename: str) -> str:
"""Execute a Perl script given as a str and return the output as a str."""
return getoutput(r'perl ' + _filename)
def initperl() -> None:
"""Run a Perl REP-Loop (Read-Evaluate-Print-Loop)."""
_input: str = r''
while 1:
_input = input(r'Perl > ').replace('\'', '\\\'') # nosec
if _input in {r'exit', r'quit'}:
break
stdout.write(getoutput('perl -e \'' + _input + '\'') + '\n')
# PHP #
def execphp(_code: str) -> str:
"""Execute PHP code given as a str and return the output as a str."""
return getoutput('php -r \'' + _code.replace('\'', '\\\'') + '\'')
def execphpfile(_filename: str) -> str:
"""Execute a PHP script given as a str and return the output as a str."""
return getoutput(r'php -f ' + _filename)
# RUBY #
def execruby(_code: str) -> str:
"""Execute Ruby code given as a str and return the output as a str."""
return getoutput('ruby -e \'' + _code.replace('\'', '\\\'') + '\'')
def execrubyfile(_filename: str) -> str:
"""Execute a Ruby script given as a str and return the output as a str."""
return getoutput(r'ruby ' + _filename)
# SCALA #
def execscala(_code: str) -> str:
"""Execute Scala code given as a str and return the output as a str."""
return getoutput('scala -e \'' + _code.replace('\'', '\\\'') + '\'')
def execscalafile(_filename: str) -> str:
"""Execute a Scala file given as a str and return the output as a str."""
return getoutput(r'scala ' + _filename)
# SHELL #
def execsh(_code: str) -> str:
"""Execute Shell code given as a str and return the output as a str."""
return getoutput('sh -c \'' + _code.replace('\'', '\\\'') + '\'')
def execshfile(_filename: str) -> str:
"""Execute a Shell script given as a str and return the output as a str."""
return getoutput(r'sh ' + _filename)
def initsh() -> None:
"""Run a shell REP-Loop (Read-Evaluate-Print-Loop)."""
_input: str = r''
while 1:
_input = input(r'Shell: $ ').replace('\'', '\\\'') # nosec
if _input in {r'exit', r'quit'}:
break
stdout.write(getoutput('sh -c \'' + _input + '\'') + '\n')
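# Example usage (assumes the corresponding interpreters are installed and on
# the PATH):
# >>> execsh(r'echo hello')
# 'hello'
# >>> execperl(r'print 2 + 2')
# '4'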
| lgpl-3.0 | 9,036,564,200,472,079,000 | 27.209424 | 150 | 0.627506 | false |
ArcherSys/ArcherSys | node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/eclipse.py | 1 | 35822 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""GYP backend that generates Eclipse CDT settings files.
This backend DOES NOT generate Eclipse CDT projects. Instead, it generates XML
files that can be imported into an Eclipse CDT project. The XML file contains a
list of include paths and symbols (i.e. defines).
Because a full .cproject definition is not created by this generator, it's not
possible to properly define the include dirs and symbols for each file
individually. Instead, one set of includes/symbols is generated for the entire
project. This works fairly well (and is a vast improvement in general), but may
still result in a few indexer issues here and there.
This generator has no automated tests, so expect it to be broken.
"""
from xml.sax.saxutils import escape
import os.path
import subprocess
import gyp
import gyp.common
import gyp.msvs_emulation
import shlex
generator_wants_static_library_dependencies_adjusted = False
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'PRODUCT_DIR', 'LIB_DIR', 'SHARED_LIB_DIR']:
# Some gyp steps fail if these are empty(!).
generator_default_variables[dirname] = 'dir'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
'CONFIGURATION_NAME']:
generator_default_variables[unused] = ''
# Include dirs will occasionally use the SHARED_INTERMEDIATE_DIR variable as
# part of the path when dealing with generated headers. This value will be
# replaced dynamically for each configuration.
generator_default_variables['SHARED_INTERMEDIATE_DIR'] = \
'$SHARED_INTERMEDIATE_DIR'
def CalculateVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
for key, val in generator_flags.items():
default_variables.setdefault(key, val)
flavor = gyp.common.GetFlavor(params)
default_variables.setdefault('OS', flavor)
if flavor == 'win':
# Copy additional generator configuration data from VS, which is shared
# by the Eclipse generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
def CalculateGeneratorInputInfo(params):
"""Calculate the generator specific info that gets fed to input (called by
gyp)."""
generator_flags = params.get('generator_flags', {})
if generator_flags.get('adjust_static_libraries', False):
global generator_wants_static_library_dependencies_adjusted
generator_wants_static_library_dependencies_adjusted = True
def GetAllIncludeDirectories(target_list, target_dicts,
shared_intermediate_dirs, config_name, params):
"""Calculate the set of include directories to be used.
Returns:
A list including all the include_dir's specified for every target followed
by any include directories that were added as cflag compiler options.
"""
gyp_includes_set = set()
compiler_includes_list = []
flavor = gyp.common.GetFlavor(params)
if flavor == 'win':
generator_flags = params.get('generator_flags', {})
for target_name in target_list:
target = target_dicts[target_name]
if config_name in target['configurations']:
config = target['configurations'][config_name]
# Look for any include dirs that were explicitly added via cflags. This
# may be done in gyp files to force certain includes to come at the end.
# TODO(jgreenwald): Change the gyp files to not abuse cflags for this, and
# remove this.
if flavor == 'win':
msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)
cflags = msvs_settings.GetCflags(config_name)
else:
cflags = config['cflags']
for cflag in cflags:
include_dir = ''
if cflag.startswith('-I'):
include_dir = cflag[2:]
if include_dir and not include_dir in compiler_includes_list:
compiler_includes_list.append(include_dir)
# Find standard gyp include dirs.
if config.has_key('include_dirs'):
include_dirs = config['include_dirs']
for shared_intermediate_dir in shared_intermediate_dirs:
for include_dir in include_dirs:
include_dir = include_dir.replace('$SHARED_INTERMEDIATE_DIR',
shared_intermediate_dir)
if not os.path.isabs(include_dir):
base_dir = os.path.dirname(target_name)
include_dir = base_dir + '/' + include_dir
include_dir = os.path.abspath(include_dir)
if not include_dir in gyp_includes_set:
gyp_includes_set.add(include_dir)
# Generate a list that has all the include dirs.
all_includes_list = list(gyp_includes_set)
all_includes_list.sort()
for compiler_include in compiler_includes_list:
if not compiler_include in gyp_includes_set:
all_includes_list.append(compiler_include)
# All done.
return all_includes_list
def GetCompilerPath(target_list, target_dicts, data):
"""Determine a command that can be used to invoke the compiler.
Returns:
If this is a gyp project that has explicit make settings, try to determine
the compiler from that. Otherwise, see if a compiler was specified via the
CC_target environment variable.
"""
# First, see if the compiler is configured in make's settings.
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings_dict = data[build_file].get('make_global_settings', {})
for key, value in make_global_settings_dict:
if key in ['CC', 'CXX']:
return value
# Check to see if the compiler was specified as an environment variable.
for key in ['CC_target', 'CC', 'CXX']:
compiler = os.environ.get(key)
if compiler:
return compiler
return 'gcc'
def GetAllDefines(target_list, target_dicts, data, config_name, params):
"""Calculate the defines for a project.
Returns:
A dict that includes explict defines declared in gyp files along with all of
the default defines that the compiler uses.
"""
# Get defines declared in the gyp files.
all_defines = {}
flavor = gyp.common.GetFlavor(params)
if flavor == 'win':
generator_flags = params.get('generator_flags', {})
for target_name in target_list:
target = target_dicts[target_name]
if flavor == 'win':
msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)
extra_defines = msvs_settings.GetComputedDefines(config_name)
else:
extra_defines = []
if config_name in target['configurations']:
config = target['configurations'][config_name]
target_defines = config['defines']
else:
target_defines = []
for define in target_defines + extra_defines:
split_define = define.split('=', 1)
if len(split_define) == 1:
split_define.append('1')
if split_define[0].strip() in all_defines:
# Already defined
continue
all_defines[split_define[0].strip()] = split_define[1].strip()
# Get default compiler defines (if possible).
if flavor == 'win':
return all_defines # Default defines already processed in the loop above.
cc_target = GetCompilerPath(target_list, target_dicts, data)
if cc_target:
command = shlex.split(cc_target)
command.extend(['-E', '-dM', '-'])
cpp_proc = subprocess.Popen(args=command, cwd='.',
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
cpp_output = cpp_proc.communicate()[0]
cpp_lines = cpp_output.split('\n')
for cpp_line in cpp_lines:
if not cpp_line.strip():
continue
cpp_line_parts = cpp_line.split(' ', 2)
key = cpp_line_parts[1]
if len(cpp_line_parts) >= 3:
val = cpp_line_parts[2]
else:
val = '1'
all_defines[key] = val
return all_defines
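# Sketch of the parsing above: each line of "<compiler> -E -dM -" output has
# the form "#define NAME VALUE", so splitting on the first two spaces yields
# the macro name and its (possibly multi-token) value, e.g.
#   "#define __STDC__ 1"  ->  all_defines['__STDC__'] = '1'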
def WriteIncludePaths(out, eclipse_langs, include_dirs):
"""Write the includes section of a CDT settings export file."""
out.write(' <section name="org.eclipse.cdt.internal.ui.wizards.' \
'settingswizards.IncludePaths">\n')
out.write(' <language name="holder for library settings"></language>\n')
for lang in eclipse_langs:
out.write(' <language name="%s">\n' % lang)
for include_dir in include_dirs:
out.write(' <includepath workspace_path="false">%s</includepath>\n' %
include_dir)
out.write(' </language>\n')
out.write(' </section>\n')
def WriteMacros(out, eclipse_langs, defines):
"""Write the macros section of a CDT settings export file."""
out.write(' <section name="org.eclipse.cdt.internal.ui.wizards.' \
'settingswizards.Macros">\n')
out.write(' <language name="holder for library settings"></language>\n')
for lang in eclipse_langs:
out.write(' <language name="%s">\n' % lang)
for key in sorted(defines.iterkeys()):
out.write(' <macro><name>%s</name><value>%s</value></macro>\n' %
(escape(key), escape(defines[key])))
out.write(' </language>\n')
out.write(' </section>\n')
def GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name):
options = params['options']
generator_flags = params.get('generator_flags', {})
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.join(generator_flags.get('output_dir', 'out'),
config_name)
toplevel_build = os.path.join(options.toplevel_dir, build_dir)
# Ninja uses out/Debug/gen while make uses out/Debug/obj/gen as the
# SHARED_INTERMEDIATE_DIR. Include both possible locations.
shared_intermediate_dirs = [os.path.join(toplevel_build, 'obj', 'gen'),
os.path.join(toplevel_build, 'gen')]
out_name = os.path.join(toplevel_build, 'eclipse-cdt-settings.xml')
gyp.common.EnsureDirExists(out_name)
out = open(out_name, 'w')
out.write('<?xml version="1.0" encoding="UTF-8"?>\n')
out.write('<cdtprojectproperties>\n')
eclipse_langs = ['C++ Source File', 'C Source File', 'Assembly Source File',
'GNU C++', 'GNU C', 'Assembly']
include_dirs = GetAllIncludeDirectories(target_list, target_dicts,
shared_intermediate_dirs, config_name,
params)
WriteIncludePaths(out, eclipse_langs, include_dirs)
defines = GetAllDefines(target_list, target_dicts, data, config_name, params)
WriteMacros(out, eclipse_langs, defines)
out.write('</cdtprojectproperties>\n')
out.close()
def GenerateOutput(target_list, target_dicts, data, params):
"""Generate an XML settings file that can be imported into a CDT project."""
if params['options'].generator_output:
raise NotImplementedError, "--generator_output not implemented for eclipse"
user_config = params.get('generator_flags', {}).get('config', None)
if user_config:
GenerateOutputForConfig(target_list, target_dicts, data, params,
user_config)
else:
config_names = target_dicts[target_list[0]]['configurations'].keys()
for config_name in config_names:
GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name)
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""GYP backend that generates Eclipse CDT settings files.
This backend DOES NOT generate Eclipse CDT projects. Instead, it generates XML
files that can be imported into an Eclipse CDT project. The XML file contains a
list of include paths and symbols (i.e. defines).
Because a full .cproject definition is not created by this generator, it's not
possible to properly define the include dirs and symbols for each file
individually. Instead, one set of includes/symbols is generated for the entire
project. This works fairly well (and is a vast improvement in general), but may
still result in a few indexer issues here and there.
This generator has no automated tests, so expect it to be broken.
"""
from xml.sax.saxutils import escape
import os.path
import subprocess
import gyp
import gyp.common
import gyp.msvs_emulation
import shlex
generator_wants_static_library_dependencies_adjusted = False
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'PRODUCT_DIR', 'LIB_DIR', 'SHARED_LIB_DIR']:
# Some gyp steps fail if these are empty(!).
generator_default_variables[dirname] = 'dir'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
'CONFIGURATION_NAME']:
generator_default_variables[unused] = ''
# Include dirs will occasionally use the SHARED_INTERMEDIATE_DIR variable as
# part of the path when dealing with generated headers. This value will be
# replaced dynamically for each configuration.
generator_default_variables['SHARED_INTERMEDIATE_DIR'] = \
'$SHARED_INTERMEDIATE_DIR'
def CalculateVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
for key, val in generator_flags.items():
default_variables.setdefault(key, val)
flavor = gyp.common.GetFlavor(params)
default_variables.setdefault('OS', flavor)
if flavor == 'win':
# Copy additional generator configuration data from VS, which is shared
# by the Eclipse generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
def CalculateGeneratorInputInfo(params):
"""Calculate the generator specific info that gets fed to input (called by
gyp)."""
generator_flags = params.get('generator_flags', {})
if generator_flags.get('adjust_static_libraries', False):
global generator_wants_static_library_dependencies_adjusted
generator_wants_static_library_dependencies_adjusted = True
def GetAllIncludeDirectories(target_list, target_dicts,
shared_intermediate_dirs, config_name, params):
"""Calculate the set of include directories to be used.
Returns:
A list including all the include_dir's specified for every target followed
by any include directories that were added as cflag compiler options.
"""
gyp_includes_set = set()
compiler_includes_list = []
flavor = gyp.common.GetFlavor(params)
if flavor == 'win':
generator_flags = params.get('generator_flags', {})
for target_name in target_list:
target = target_dicts[target_name]
if config_name in target['configurations']:
config = target['configurations'][config_name]
# Look for any include dirs that were explicitly added via cflags. This
# may be done in gyp files to force certain includes to come at the end.
# TODO(jgreenwald): Change the gyp files to not abuse cflags for this, and
# remove this.
if flavor == 'win':
msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)
cflags = msvs_settings.GetCflags(config_name)
else:
cflags = config['cflags']
for cflag in cflags:
include_dir = ''
if cflag.startswith('-I'):
include_dir = cflag[2:]
if include_dir and not include_dir in compiler_includes_list:
compiler_includes_list.append(include_dir)
# Find standard gyp include dirs.
if config.has_key('include_dirs'):
include_dirs = config['include_dirs']
for shared_intermediate_dir in shared_intermediate_dirs:
for include_dir in include_dirs:
include_dir = include_dir.replace('$SHARED_INTERMEDIATE_DIR',
shared_intermediate_dir)
if not os.path.isabs(include_dir):
base_dir = os.path.dirname(target_name)
include_dir = base_dir + '/' + include_dir
include_dir = os.path.abspath(include_dir)
if not include_dir in gyp_includes_set:
gyp_includes_set.add(include_dir)
# Generate a list that has all the include dirs.
all_includes_list = list(gyp_includes_set)
all_includes_list.sort()
for compiler_include in compiler_includes_list:
if not compiler_include in gyp_includes_set:
all_includes_list.append(compiler_include)
# All done.
return all_includes_list
def GetCompilerPath(target_list, target_dicts, data):
"""Determine a command that can be used to invoke the compiler.
Returns:
If this is a gyp project that has explicit make settings, try to determine
the compiler from that. Otherwise, see if a compiler was specified via the
CC_target environment variable.
"""
# First, see if the compiler is configured in make's settings.
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings_dict = data[build_file].get('make_global_settings', {})
for key, value in make_global_settings_dict:
if key in ['CC', 'CXX']:
return value
# Check to see if the compiler was specified as an environment variable.
for key in ['CC_target', 'CC', 'CXX']:
compiler = os.environ.get(key)
if compiler:
return compiler
return 'gcc'
def GetAllDefines(target_list, target_dicts, data, config_name, params):
"""Calculate the defines for a project.
Returns:
A dict that includes explict defines declared in gyp files along with all of
the default defines that the compiler uses.
"""
# Get defines declared in the gyp files.
all_defines = {}
flavor = gyp.common.GetFlavor(params)
if flavor == 'win':
generator_flags = params.get('generator_flags', {})
for target_name in target_list:
target = target_dicts[target_name]
if flavor == 'win':
msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)
extra_defines = msvs_settings.GetComputedDefines(config_name)
else:
extra_defines = []
if config_name in target['configurations']:
config = target['configurations'][config_name]
target_defines = config['defines']
else:
target_defines = []
for define in target_defines + extra_defines:
split_define = define.split('=', 1)
if len(split_define) == 1:
split_define.append('1')
if split_define[0].strip() in all_defines:
# Already defined
continue
all_defines[split_define[0].strip()] = split_define[1].strip()
# Get default compiler defines (if possible).
if flavor == 'win':
return all_defines # Default defines already processed in the loop above.
cc_target = GetCompilerPath(target_list, target_dicts, data)
if cc_target:
command = shlex.split(cc_target)
command.extend(['-E', '-dM', '-'])
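    # '-E -dM' runs only the preprocessor and dumps every predefined macro as a
    # '#define NAME VALUE' line (input is read from stdin via '-'); the loop
    # below parses that listing into the all_defines dict.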
cpp_proc = subprocess.Popen(args=command, cwd='.',
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
cpp_output = cpp_proc.communicate()[0]
cpp_lines = cpp_output.split('\n')
for cpp_line in cpp_lines:
if not cpp_line.strip():
continue
cpp_line_parts = cpp_line.split(' ', 2)
key = cpp_line_parts[1]
if len(cpp_line_parts) >= 3:
val = cpp_line_parts[2]
else:
val = '1'
all_defines[key] = val
return all_defines
def WriteIncludePaths(out, eclipse_langs, include_dirs):
"""Write the includes section of a CDT settings export file."""
out.write(' <section name="org.eclipse.cdt.internal.ui.wizards.' \
'settingswizards.IncludePaths">\n')
out.write(' <language name="holder for library settings"></language>\n')
for lang in eclipse_langs:
out.write(' <language name="%s">\n' % lang)
for include_dir in include_dirs:
out.write(' <includepath workspace_path="false">%s</includepath>\n' %
include_dir)
out.write(' </language>\n')
out.write(' </section>\n')
def WriteMacros(out, eclipse_langs, defines):
"""Write the macros section of a CDT settings export file."""
out.write(' <section name="org.eclipse.cdt.internal.ui.wizards.' \
'settingswizards.Macros">\n')
out.write(' <language name="holder for library settings"></language>\n')
for lang in eclipse_langs:
out.write(' <language name="%s">\n' % lang)
for key in sorted(defines.iterkeys()):
out.write(' <macro><name>%s</name><value>%s</value></macro>\n' %
(escape(key), escape(defines[key])))
out.write(' </language>\n')
out.write(' </section>\n')
def GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name):
options = params['options']
generator_flags = params.get('generator_flags', {})
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.join(generator_flags.get('output_dir', 'out'),
config_name)
toplevel_build = os.path.join(options.toplevel_dir, build_dir)
# Ninja uses out/Debug/gen while make uses out/Debug/obj/gen as the
# SHARED_INTERMEDIATE_DIR. Include both possible locations.
shared_intermediate_dirs = [os.path.join(toplevel_build, 'obj', 'gen'),
os.path.join(toplevel_build, 'gen')]
out_name = os.path.join(toplevel_build, 'eclipse-cdt-settings.xml')
gyp.common.EnsureDirExists(out_name)
out = open(out_name, 'w')
out.write('<?xml version="1.0" encoding="UTF-8"?>\n')
out.write('<cdtprojectproperties>\n')
eclipse_langs = ['C++ Source File', 'C Source File', 'Assembly Source File',
'GNU C++', 'GNU C', 'Assembly']
include_dirs = GetAllIncludeDirectories(target_list, target_dicts,
shared_intermediate_dirs, config_name,
params)
WriteIncludePaths(out, eclipse_langs, include_dirs)
defines = GetAllDefines(target_list, target_dicts, data, config_name, params)
WriteMacros(out, eclipse_langs, defines)
out.write('</cdtprojectproperties>\n')
out.close()
def GenerateOutput(target_list, target_dicts, data, params):
"""Generate an XML settings file that can be imported into a CDT project."""
if params['options'].generator_output:
raise NotImplementedError, "--generator_output not implemented for eclipse"
user_config = params.get('generator_flags', {}).get('config', None)
if user_config:
GenerateOutputForConfig(target_list, target_dicts, data, params,
user_config)
else:
config_names = target_dicts[target_list[0]]['configurations'].keys()
for config_name in config_names:
GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name)
| mit | -5,302,436,504,743,628,000 | 37.51828 | 80 | 0.668249 | false |
Azure/azure-sdk-for-python | sdk/translation/azure-ai-translation-document/tests/test_supported_formats.py | 1 | 1419 | # coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import functools
from testcase import DocumentTranslationTest
from preparer import DocumentTranslationPreparer, DocumentTranslationClientPreparer as _DocumentTranslationClientPreparer
from azure.ai.translation.document import DocumentTranslationClient
DocumentTranslationClientPreparer = functools.partial(_DocumentTranslationClientPreparer, DocumentTranslationClient)
class TestSupportedFormats(DocumentTranslationTest):
@DocumentTranslationPreparer()
@DocumentTranslationClientPreparer()
def test_supported_document_formats(self, client):
# get supported formats
supported_doc_formats = client.get_supported_document_formats()
self.assertIsNotNone(supported_doc_formats)
# validate
for doc_format in supported_doc_formats:
self._validate_format(doc_format)
@DocumentTranslationPreparer()
@DocumentTranslationClientPreparer()
def test_supported_glossary_formats(self, client):
# get supported formats
supported_glossary_formats = client.get_supported_glossary_formats()
self.assertIsNotNone(supported_glossary_formats)
# validate
for glossary_format in supported_glossary_formats:
self._validate_format(glossary_format) | mit | 510,935,908,131,361,900 | 40.764706 | 121 | 0.727273 | false |
dcluna/screenkey | setup.py | 1 | 1475 | #!/usr/bin/env python
from setuptools import setup
setup(name='screenkey', version='0.5',
description='A screencast tool to display keys',
author='Pablo Seminario',
author_email='[email protected]',
maintainer='Yuri D\'Elia',
maintainer_email='[email protected]',
license='GPLv3+',
keywords='screencast keyboard keys',
url='https://github.com/wavexx/screenkey',
download_url='https://github.com/wavexx/screenkey/releases',
classifiers=['Development Status :: 5 - Production/Stable',
'Environment :: X11 Applications :: GTK',
'Intended Audience :: Education',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
'Operating System :: POSIX',
'Operating System :: Unix',
'Topic :: Education',
'Topic :: Multimedia :: Graphics :: Presentation',
'Topic :: Multimedia :: Video :: Capture'],
long_description="""
Screenkey is a useful tool for presentations or screencasts.
Inspired by ScreenFlick and initially based on the key-mon project code.
""",
scripts=['screenkey'],
packages=['Screenkey'],
data_files=[('share/applications', ['data/screenkey.desktop']),
('share/doc/screenkey', ['README.rst', 'NEWS.rst'])],
)
| gpl-3.0 | -1,585,662,621,625,723,100 | 39.972222 | 96 | 0.583051 | false |
ryfeus/lambda-packs | Opencv_pil/source36/numpy/core/tests/test_shape_base.py | 1 | 24251 | from __future__ import division, absolute_import, print_function
import pytest
import sys
import numpy as np
from numpy.core import (
array, arange, atleast_1d, atleast_2d, atleast_3d, block, vstack, hstack,
newaxis, concatenate, stack
)
from numpy.core.shape_base import (_block_dispatcher, _block_setup,
_block_concatenate, _block_slicing)
from numpy.testing import (
assert_, assert_raises, assert_array_equal, assert_equal,
assert_raises_regex, assert_warns
)
from numpy.compat import long
class TestAtleast1d(object):
def test_0D_array(self):
a = array(1)
b = array(2)
res = [atleast_1d(a), atleast_1d(b)]
desired = [array([1]), array([2])]
assert_array_equal(res, desired)
def test_1D_array(self):
a = array([1, 2])
b = array([2, 3])
res = [atleast_1d(a), atleast_1d(b)]
desired = [array([1, 2]), array([2, 3])]
assert_array_equal(res, desired)
def test_2D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
res = [atleast_1d(a), atleast_1d(b)]
desired = [a, b]
assert_array_equal(res, desired)
def test_3D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
a = array([a, a])
b = array([b, b])
res = [atleast_1d(a), atleast_1d(b)]
desired = [a, b]
assert_array_equal(res, desired)
def test_r1array(self):
""" Test to make sure equivalent Travis O's r1array function
"""
assert_(atleast_1d(3).shape == (1,))
assert_(atleast_1d(3j).shape == (1,))
assert_(atleast_1d(long(3)).shape == (1,))
assert_(atleast_1d(3.0).shape == (1,))
assert_(atleast_1d([[2, 3], [4, 5]]).shape == (2, 2))
class TestAtleast2d(object):
def test_0D_array(self):
a = array(1)
b = array(2)
res = [atleast_2d(a), atleast_2d(b)]
desired = [array([[1]]), array([[2]])]
assert_array_equal(res, desired)
def test_1D_array(self):
a = array([1, 2])
b = array([2, 3])
res = [atleast_2d(a), atleast_2d(b)]
desired = [array([[1, 2]]), array([[2, 3]])]
assert_array_equal(res, desired)
def test_2D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
res = [atleast_2d(a), atleast_2d(b)]
desired = [a, b]
assert_array_equal(res, desired)
def test_3D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
a = array([a, a])
b = array([b, b])
res = [atleast_2d(a), atleast_2d(b)]
desired = [a, b]
assert_array_equal(res, desired)
def test_r2array(self):
""" Test to make sure equivalent Travis O's r2array function
"""
assert_(atleast_2d(3).shape == (1, 1))
assert_(atleast_2d([3j, 1]).shape == (1, 2))
assert_(atleast_2d([[[3, 1], [4, 5]], [[3, 5], [1, 2]]]).shape == (2, 2, 2))
class TestAtleast3d(object):
def test_0D_array(self):
a = array(1)
b = array(2)
res = [atleast_3d(a), atleast_3d(b)]
desired = [array([[[1]]]), array([[[2]]])]
assert_array_equal(res, desired)
def test_1D_array(self):
a = array([1, 2])
b = array([2, 3])
res = [atleast_3d(a), atleast_3d(b)]
desired = [array([[[1], [2]]]), array([[[2], [3]]])]
assert_array_equal(res, desired)
def test_2D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
res = [atleast_3d(a), atleast_3d(b)]
desired = [a[:,:, newaxis], b[:,:, newaxis]]
assert_array_equal(res, desired)
def test_3D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
a = array([a, a])
b = array([b, b])
res = [atleast_3d(a), atleast_3d(b)]
desired = [a, b]
assert_array_equal(res, desired)
class TestHstack(object):
def test_non_iterable(self):
assert_raises(TypeError, hstack, 1)
def test_empty_input(self):
assert_raises(ValueError, hstack, ())
def test_0D_array(self):
a = array(1)
b = array(2)
res = hstack([a, b])
desired = array([1, 2])
assert_array_equal(res, desired)
def test_1D_array(self):
a = array([1])
b = array([2])
res = hstack([a, b])
desired = array([1, 2])
assert_array_equal(res, desired)
def test_2D_array(self):
a = array([[1], [2]])
b = array([[1], [2]])
res = hstack([a, b])
desired = array([[1, 1], [2, 2]])
assert_array_equal(res, desired)
def test_generator(self):
with assert_warns(FutureWarning):
hstack((np.arange(3) for _ in range(2)))
if sys.version_info.major > 2:
# map returns a list on Python 2
with assert_warns(FutureWarning):
hstack(map(lambda x: x, np.ones((3, 2))))
class TestVstack(object):
def test_non_iterable(self):
assert_raises(TypeError, vstack, 1)
def test_empty_input(self):
assert_raises(ValueError, vstack, ())
def test_0D_array(self):
a = array(1)
b = array(2)
res = vstack([a, b])
desired = array([[1], [2]])
assert_array_equal(res, desired)
def test_1D_array(self):
a = array([1])
b = array([2])
res = vstack([a, b])
desired = array([[1], [2]])
assert_array_equal(res, desired)
def test_2D_array(self):
a = array([[1], [2]])
b = array([[1], [2]])
res = vstack([a, b])
desired = array([[1], [2], [1], [2]])
assert_array_equal(res, desired)
def test_2D_array2(self):
a = array([1, 2])
b = array([1, 2])
res = vstack([a, b])
desired = array([[1, 2], [1, 2]])
assert_array_equal(res, desired)
def test_generator(self):
with assert_warns(FutureWarning):
vstack((np.arange(3) for _ in range(2)))
class TestConcatenate(object):
def test_returns_copy(self):
a = np.eye(3)
b = np.concatenate([a])
b[0, 0] = 2
assert b[0, 0] != a[0, 0]
def test_exceptions(self):
# test axis must be in bounds
for ndim in [1, 2, 3]:
a = np.ones((1,)*ndim)
np.concatenate((a, a), axis=0) # OK
assert_raises(np.AxisError, np.concatenate, (a, a), axis=ndim)
assert_raises(np.AxisError, np.concatenate, (a, a), axis=-(ndim + 1))
# Scalars cannot be concatenated
assert_raises(ValueError, concatenate, (0,))
assert_raises(ValueError, concatenate, (np.array(0),))
# test shapes must match except for concatenation axis
a = np.ones((1, 2, 3))
b = np.ones((2, 2, 3))
axis = list(range(3))
for i in range(3):
np.concatenate((a, b), axis=axis[0]) # OK
assert_raises(ValueError, np.concatenate, (a, b), axis=axis[1])
assert_raises(ValueError, np.concatenate, (a, b), axis=axis[2])
a = np.moveaxis(a, -1, 0)
b = np.moveaxis(b, -1, 0)
axis.append(axis.pop(0))
# No arrays to concatenate raises ValueError
assert_raises(ValueError, concatenate, ())
def test_concatenate_axis_None(self):
a = np.arange(4, dtype=np.float64).reshape((2, 2))
b = list(range(3))
c = ['x']
r = np.concatenate((a, a), axis=None)
assert_equal(r.dtype, a.dtype)
assert_equal(r.ndim, 1)
r = np.concatenate((a, b), axis=None)
assert_equal(r.size, a.size + len(b))
assert_equal(r.dtype, a.dtype)
r = np.concatenate((a, b, c), axis=None)
d = array(['0.0', '1.0', '2.0', '3.0',
'0', '1', '2', 'x'])
assert_array_equal(r, d)
out = np.zeros(a.size + len(b))
r = np.concatenate((a, b), axis=None)
rout = np.concatenate((a, b), axis=None, out=out)
assert_(out is rout)
assert_equal(r, rout)
def test_large_concatenate_axis_None(self):
# When no axis is given, concatenate uses flattened versions.
# This also had a bug with many arrays (see gh-5979).
x = np.arange(1, 100)
r = np.concatenate(x, None)
assert_array_equal(x, r)
# This should probably be deprecated:
r = np.concatenate(x, 100) # axis is >= MAXDIMS
assert_array_equal(x, r)
def test_concatenate(self):
# Test concatenate function
# One sequence returns unmodified (but as array)
r4 = list(range(4))
assert_array_equal(concatenate((r4,)), r4)
# Any sequence
assert_array_equal(concatenate((tuple(r4),)), r4)
assert_array_equal(concatenate((array(r4),)), r4)
# 1D default concatenation
r3 = list(range(3))
assert_array_equal(concatenate((r4, r3)), r4 + r3)
# Mixed sequence types
assert_array_equal(concatenate((tuple(r4), r3)), r4 + r3)
assert_array_equal(concatenate((array(r4), r3)), r4 + r3)
# Explicit axis specification
assert_array_equal(concatenate((r4, r3), 0), r4 + r3)
# Including negative
assert_array_equal(concatenate((r4, r3), -1), r4 + r3)
# 2D
a23 = array([[10, 11, 12], [13, 14, 15]])
a13 = array([[0, 1, 2]])
res = array([[10, 11, 12], [13, 14, 15], [0, 1, 2]])
assert_array_equal(concatenate((a23, a13)), res)
assert_array_equal(concatenate((a23, a13), 0), res)
assert_array_equal(concatenate((a23.T, a13.T), 1), res.T)
assert_array_equal(concatenate((a23.T, a13.T), -1), res.T)
# Arrays much match shape
assert_raises(ValueError, concatenate, (a23.T, a13.T), 0)
# 3D
res = arange(2 * 3 * 7).reshape((2, 3, 7))
a0 = res[..., :4]
a1 = res[..., 4:6]
a2 = res[..., 6:]
assert_array_equal(concatenate((a0, a1, a2), 2), res)
assert_array_equal(concatenate((a0, a1, a2), -1), res)
assert_array_equal(concatenate((a0.T, a1.T, a2.T), 0), res.T)
out = res.copy()
rout = concatenate((a0, a1, a2), 2, out=out)
assert_(out is rout)
assert_equal(res, rout)
def test_bad_out_shape(self):
a = array([1, 2])
b = array([3, 4])
assert_raises(ValueError, concatenate, (a, b), out=np.empty(5))
assert_raises(ValueError, concatenate, (a, b), out=np.empty((4,1)))
assert_raises(ValueError, concatenate, (a, b), out=np.empty((1,4)))
concatenate((a, b), out=np.empty(4))
def test_out_dtype(self):
out = np.empty(4, np.float32)
res = concatenate((array([1, 2]), array([3, 4])), out=out)
assert_(out is res)
out = np.empty(4, np.complex64)
res = concatenate((array([0.1, 0.2]), array([0.3, 0.4])), out=out)
assert_(out is res)
# invalid cast
out = np.empty(4, np.int32)
assert_raises(TypeError, concatenate,
(array([0.1, 0.2]), array([0.3, 0.4])), out=out)
def test_stack():
# non-iterable input
assert_raises(TypeError, stack, 1)
# 0d input
for input_ in [(1, 2, 3),
[np.int32(1), np.int32(2), np.int32(3)],
[np.array(1), np.array(2), np.array(3)]]:
assert_array_equal(stack(input_), [1, 2, 3])
# 1d input examples
a = np.array([1, 2, 3])
b = np.array([4, 5, 6])
r1 = array([[1, 2, 3], [4, 5, 6]])
assert_array_equal(np.stack((a, b)), r1)
assert_array_equal(np.stack((a, b), axis=1), r1.T)
# all input types
assert_array_equal(np.stack(list([a, b])), r1)
assert_array_equal(np.stack(array([a, b])), r1)
# all shapes for 1d input
arrays = [np.random.randn(3) for _ in range(10)]
axes = [0, 1, -1, -2]
expected_shapes = [(10, 3), (3, 10), (3, 10), (10, 3)]
for axis, expected_shape in zip(axes, expected_shapes):
assert_equal(np.stack(arrays, axis).shape, expected_shape)
assert_raises_regex(np.AxisError, 'out of bounds', stack, arrays, axis=2)
assert_raises_regex(np.AxisError, 'out of bounds', stack, arrays, axis=-3)
# all shapes for 2d input
arrays = [np.random.randn(3, 4) for _ in range(10)]
axes = [0, 1, 2, -1, -2, -3]
expected_shapes = [(10, 3, 4), (3, 10, 4), (3, 4, 10),
(3, 4, 10), (3, 10, 4), (10, 3, 4)]
for axis, expected_shape in zip(axes, expected_shapes):
assert_equal(np.stack(arrays, axis).shape, expected_shape)
# empty arrays
assert_(stack([[], [], []]).shape == (3, 0))
assert_(stack([[], [], []], axis=1).shape == (0, 3))
# edge cases
assert_raises_regex(ValueError, 'need at least one array', stack, [])
assert_raises_regex(ValueError, 'must have the same shape',
stack, [1, np.arange(3)])
assert_raises_regex(ValueError, 'must have the same shape',
stack, [np.arange(3), 1])
assert_raises_regex(ValueError, 'must have the same shape',
stack, [np.arange(3), 1], axis=1)
assert_raises_regex(ValueError, 'must have the same shape',
stack, [np.zeros((3, 3)), np.zeros(3)], axis=1)
assert_raises_regex(ValueError, 'must have the same shape',
stack, [np.arange(2), np.arange(3)])
# generator is deprecated
with assert_warns(FutureWarning):
result = stack((x for x in range(3)))
assert_array_equal(result, np.array([0, 1, 2]))
class TestBlock(object):
@pytest.fixture(params=['block', 'force_concatenate', 'force_slicing'])
def block(self, request):
# blocking small arrays and large arrays go through different paths.
# the algorithm is triggered depending on the number of element
# copies required.
# We define a test fixture that forces most tests to go through
# both code paths.
# Ultimately, this should be removed if a single algorithm is found
# to be faster for both small and large arrays.
def _block_force_concatenate(arrays):
arrays, list_ndim, result_ndim, _ = _block_setup(arrays)
return _block_concatenate(arrays, list_ndim, result_ndim)
def _block_force_slicing(arrays):
arrays, list_ndim, result_ndim, _ = _block_setup(arrays)
return _block_slicing(arrays, list_ndim, result_ndim)
if request.param == 'force_concatenate':
return _block_force_concatenate
elif request.param == 'force_slicing':
return _block_force_slicing
elif request.param == 'block':
return block
else:
raise ValueError('Unknown blocking request. There is a typo in the tests.')
def test_returns_copy(self, block):
a = np.eye(3)
b = block(a)
b[0, 0] = 2
assert b[0, 0] != a[0, 0]
def test_block_total_size_estimate(self, block):
_, _, _, total_size = _block_setup([1])
assert total_size == 1
_, _, _, total_size = _block_setup([[1]])
assert total_size == 1
_, _, _, total_size = _block_setup([[1, 1]])
assert total_size == 2
_, _, _, total_size = _block_setup([[1], [1]])
assert total_size == 2
_, _, _, total_size = _block_setup([[1, 2], [3, 4]])
assert total_size == 4
def test_block_simple_row_wise(self, block):
a_2d = np.ones((2, 2))
b_2d = 2 * a_2d
desired = np.array([[1, 1, 2, 2],
[1, 1, 2, 2]])
result = block([a_2d, b_2d])
assert_equal(desired, result)
def test_block_simple_column_wise(self, block):
a_2d = np.ones((2, 2))
b_2d = 2 * a_2d
expected = np.array([[1, 1],
[1, 1],
[2, 2],
[2, 2]])
result = block([[a_2d], [b_2d]])
assert_equal(expected, result)
def test_block_with_1d_arrays_row_wise(self, block):
# # # 1-D vectors are treated as row arrays
a = np.array([1, 2, 3])
b = np.array([2, 3, 4])
expected = np.array([1, 2, 3, 2, 3, 4])
result = block([a, b])
assert_equal(expected, result)
def test_block_with_1d_arrays_multiple_rows(self, block):
a = np.array([1, 2, 3])
b = np.array([2, 3, 4])
expected = np.array([[1, 2, 3, 2, 3, 4],
[1, 2, 3, 2, 3, 4]])
result = block([[a, b], [a, b]])
assert_equal(expected, result)
def test_block_with_1d_arrays_column_wise(self, block):
# # # 1-D vectors are treated as row arrays
a_1d = np.array([1, 2, 3])
b_1d = np.array([2, 3, 4])
expected = np.array([[1, 2, 3],
[2, 3, 4]])
result = block([[a_1d], [b_1d]])
assert_equal(expected, result)
def test_block_mixed_1d_and_2d(self, block):
a_2d = np.ones((2, 2))
b_1d = np.array([2, 2])
result = block([[a_2d], [b_1d]])
expected = np.array([[1, 1],
[1, 1],
[2, 2]])
assert_equal(expected, result)
def test_block_complicated(self, block):
# a bit more complicated
one_2d = np.array([[1, 1, 1]])
two_2d = np.array([[2, 2, 2]])
three_2d = np.array([[3, 3, 3, 3, 3, 3]])
four_1d = np.array([4, 4, 4, 4, 4, 4])
five_0d = np.array(5)
six_1d = np.array([6, 6, 6, 6, 6])
zero_2d = np.zeros((2, 6))
expected = np.array([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 3, 3, 3],
[4, 4, 4, 4, 4, 4],
[5, 6, 6, 6, 6, 6],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]])
result = block([[one_2d, two_2d],
[three_2d],
[four_1d],
[five_0d, six_1d],
[zero_2d]])
assert_equal(result, expected)
def test_nested(self, block):
one = np.array([1, 1, 1])
two = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]])
three = np.array([3, 3, 3])
four = np.array([4, 4, 4])
five = np.array(5)
six = np.array([6, 6, 6, 6, 6])
zero = np.zeros((2, 6))
result = block([
[
block([
[one],
[three],
[four]
]),
two
],
[five, six],
[zero]
])
expected = np.array([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 2, 2, 2],
[4, 4, 4, 2, 2, 2],
[5, 6, 6, 6, 6, 6],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]])
assert_equal(result, expected)
def test_3d(self, block):
a000 = np.ones((2, 2, 2), int) * 1
a100 = np.ones((3, 2, 2), int) * 2
a010 = np.ones((2, 3, 2), int) * 3
a001 = np.ones((2, 2, 3), int) * 4
a011 = np.ones((2, 3, 3), int) * 5
a101 = np.ones((3, 2, 3), int) * 6
a110 = np.ones((3, 3, 2), int) * 7
a111 = np.ones((3, 3, 3), int) * 8
result = block([
[
[a000, a001],
[a010, a011],
],
[
[a100, a101],
[a110, a111],
]
])
expected = array([[[1, 1, 4, 4, 4],
[1, 1, 4, 4, 4],
[3, 3, 5, 5, 5],
[3, 3, 5, 5, 5],
[3, 3, 5, 5, 5]],
[[1, 1, 4, 4, 4],
[1, 1, 4, 4, 4],
[3, 3, 5, 5, 5],
[3, 3, 5, 5, 5],
[3, 3, 5, 5, 5]],
[[2, 2, 6, 6, 6],
[2, 2, 6, 6, 6],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8]],
[[2, 2, 6, 6, 6],
[2, 2, 6, 6, 6],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8]],
[[2, 2, 6, 6, 6],
[2, 2, 6, 6, 6],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8]]])
assert_array_equal(result, expected)
def test_block_with_mismatched_shape(self, block):
a = np.array([0, 0])
b = np.eye(2)
assert_raises(ValueError, block, [a, b])
assert_raises(ValueError, block, [b, a])
to_block = [[np.ones((2,3)), np.ones((2,2))],
[np.ones((2,2)), np.ones((2,2))]]
assert_raises(ValueError, block, to_block)
def test_no_lists(self, block):
assert_equal(block(1), np.array(1))
assert_equal(block(np.eye(3)), np.eye(3))
def test_invalid_nesting(self, block):
msg = 'depths are mismatched'
assert_raises_regex(ValueError, msg, block, [1, [2]])
assert_raises_regex(ValueError, msg, block, [1, []])
assert_raises_regex(ValueError, msg, block, [[1], 2])
assert_raises_regex(ValueError, msg, block, [[], 2])
assert_raises_regex(ValueError, msg, block, [
[[1], [2]],
[[3, 4]],
[5] # missing brackets
])
def test_empty_lists(self, block):
assert_raises_regex(ValueError, 'empty', block, [])
assert_raises_regex(ValueError, 'empty', block, [[]])
assert_raises_regex(ValueError, 'empty', block, [[1], []])
def test_tuple(self, block):
assert_raises_regex(TypeError, 'tuple', block, ([1, 2], [3, 4]))
assert_raises_regex(TypeError, 'tuple', block, [(1, 2), (3, 4)])
def test_different_ndims(self, block):
a = 1.
b = 2 * np.ones((1, 2))
c = 3 * np.ones((1, 1, 3))
result = block([a, b, c])
expected = np.array([[[1., 2., 2., 3., 3., 3.]]])
assert_equal(result, expected)
def test_different_ndims_depths(self, block):
a = 1.
b = 2 * np.ones((1, 2))
c = 3 * np.ones((1, 2, 3))
result = block([[a, b], [c]])
expected = np.array([[[1., 2., 2.],
[3., 3., 3.],
[3., 3., 3.]]])
assert_equal(result, expected)
def test_block_memory_order(self, block):
# 3D
arr_c = np.zeros((3,)*3, order='C')
arr_f = np.zeros((3,)*3, order='F')
b_c = [[[arr_c, arr_c],
[arr_c, arr_c]],
[[arr_c, arr_c],
[arr_c, arr_c]]]
b_f = [[[arr_f, arr_f],
[arr_f, arr_f]],
[[arr_f, arr_f],
[arr_f, arr_f]]]
assert block(b_c).flags['C_CONTIGUOUS']
assert block(b_f).flags['F_CONTIGUOUS']
arr_c = np.zeros((3, 3), order='C')
arr_f = np.zeros((3, 3), order='F')
# 2D
b_c = [[arr_c, arr_c],
[arr_c, arr_c]]
b_f = [[arr_f, arr_f],
[arr_f, arr_f]]
assert block(b_c).flags['C_CONTIGUOUS']
assert block(b_f).flags['F_CONTIGUOUS']
def test_block_dispatcher():
class ArrayLike(object):
pass
a = ArrayLike()
b = ArrayLike()
c = ArrayLike()
assert_equal(list(_block_dispatcher(a)), [a])
assert_equal(list(_block_dispatcher([a])), [a])
assert_equal(list(_block_dispatcher([a, b])), [a, b])
assert_equal(list(_block_dispatcher([[a], [b, [c]]])), [a, b, c])
# don't recurse into non-lists
assert_equal(list(_block_dispatcher((a, b))), [(a, b)])
| mit | -6,761,586,550,664,110,000 | 33.545584 | 87 | 0.478619 | false |
auvsi-suas/interop | server/auvsi_suas/views/odlcs.py | 1 | 16208 | """Odlcs view."""
from PIL import Image
import io
import json
import logging
import os
import os.path
import re
from auvsi_suas.models.gps_position import GpsPosition
from auvsi_suas.models.mission_config import MissionConfig
from auvsi_suas.models.odlc import Odlc
from auvsi_suas.proto import interop_admin_api_pb2
from auvsi_suas.proto import interop_api_pb2
from auvsi_suas.views.decorators import require_login
from auvsi_suas.views.decorators import require_superuser
from auvsi_suas.views.json import ProtoJsonEncoder
from django.contrib.auth.models import User
from django.core.files.images import ImageFile
from django.http import HttpResponse
from django.http import HttpResponseBadRequest
from django.http import HttpResponseForbidden
from django.http import HttpResponseNotFound
from django.utils.decorators import method_decorator
from django.views.generic import View
from google.protobuf import json_format
from sendfile import sendfile
logger = logging.getLogger(__name__)
ALPHANUMERIC_RE = re.compile(r"^[A-Z0-9]$")
ODLC_MAX = 20 # Limit in the rules.
ODLC_BUFFER = 2 # Buffer for swaps.
ODLC_UPLOAD_LIMIT = (ODLC_MAX + ODLC_BUFFER) * 2 # Account for auto/not.
def odlc_to_proto(odlc):
"""Converts an ODLC into protobuf format."""
odlc_proto = interop_api_pb2.Odlc()
odlc_proto.id = odlc.pk
odlc_proto.mission = odlc.mission.pk
odlc_proto.type = odlc.odlc_type
if odlc.location is not None:
odlc_proto.latitude = odlc.location.latitude
odlc_proto.longitude = odlc.location.longitude
if odlc.orientation is not None:
odlc_proto.orientation = odlc.orientation
if odlc.shape is not None:
odlc_proto.shape = odlc.shape
if odlc.alphanumeric:
odlc_proto.alphanumeric = odlc.alphanumeric
if odlc.shape_color is not None:
odlc_proto.shape_color = odlc.shape_color
if odlc.alphanumeric_color is not None:
odlc_proto.alphanumeric_color = odlc.alphanumeric_color
if odlc.description:
odlc_proto.description = odlc.description
odlc_proto.autonomous = odlc.autonomous
return odlc_proto
def validate_odlc_proto(odlc_proto):
"""Validates ODLC proto, raising ValueError if invalid."""
if not odlc_proto.HasField('mission'):
raise ValueError('ODLC mission is required.')
try:
MissionConfig.objects.get(pk=odlc_proto.mission)
except MissionConfig.DoesNotExist:
raise ValueError('Mission for ODLC does not exist.')
if not odlc_proto.HasField('type'):
raise ValueError('ODLC type is required.')
if odlc_proto.HasField('latitude') != odlc_proto.HasField('longitude'):
raise ValueError('Must specify both latitude and longitude.')
if odlc_proto.HasField('latitude') and (odlc_proto.latitude < -90
or odlc_proto.latitude > 90):
raise ValueError('Invalid latitude "%f", must be -90 <= lat <= 90' %
odlc_proto.latitude)
if odlc_proto.HasField('longitude') and (odlc_proto.longitude < -180
or odlc_proto.longitude > 180):
raise ValueError('Invalid longitude "%s", must be -180 <= lat <= 180' %
odlc_proto.longitude)
if (odlc_proto.HasField('alphanumeric')
and ALPHANUMERIC_RE.fullmatch(odlc_proto.alphanumeric) is None):
raise ValueError('Alphanumeric is invalid.')
def update_odlc_from_proto(odlc, odlc_proto):
"""Sets fields of the ODLC from the proto format."""
odlc.mission_id = odlc_proto.mission
odlc.odlc_type = odlc_proto.type
if odlc_proto.HasField('latitude') and odlc_proto.HasField('longitude'):
if odlc.location is None:
l = GpsPosition(latitude=odlc_proto.latitude,
longitude=odlc_proto.longitude)
l.save()
odlc.location = l
else:
odlc.location.latitude = odlc_proto.latitude
odlc.location.longitude = odlc_proto.longitude
odlc.location.save()
else:
# Don't delete underlying GPS position in case it's shared by admin.
# Just unreference it.
odlc.location = None
if odlc_proto.HasField('orientation'):
odlc.orientation = odlc_proto.orientation
else:
odlc.orientation = None
if odlc_proto.HasField('shape'):
odlc.shape = odlc_proto.shape
else:
odlc.shape = None
if odlc_proto.HasField('alphanumeric'):
odlc.alphanumeric = odlc_proto.alphanumeric
else:
odlc.alphanumeric = ''
if odlc_proto.HasField('shape_color'):
odlc.shape_color = odlc_proto.shape_color
else:
odlc.shape_color = None
if odlc_proto.HasField('alphanumeric_color'):
odlc.alphanumeric_color = odlc_proto.alphanumeric_color
else:
odlc.alphanumeric_color = None
if odlc_proto.HasField('description'):
odlc.description = odlc_proto.description
else:
odlc.description = ''
if odlc_proto.HasField('autonomous'):
odlc.autonomous = odlc_proto.autonomous
else:
odlc.autonomous = False
class Odlcs(View):
"""POST new odlc."""
@method_decorator(require_login)
def dispatch(self, *args, **kwargs):
return super(Odlcs, self).dispatch(*args, **kwargs)
def get(self, request):
# Restrict ODLCs to those for user, and optionally a mission.
odlcs = Odlc.objects.filter(user=request.user)
if 'mission' in request.GET:
try:
mission_id = int(request.GET['mission'])
except:
return HttpResponseBadRequest('Provided invalid mission ID.')
odlcs = odlcs.filter(mission=mission_id)
# Limit serving to 100 odlcs to prevent slowdown and isolation problems.
odlcs = odlcs.all()[:100]
odlc_protos = [odlc_to_proto(o) for o in odlcs]
return HttpResponse(json.dumps(odlc_protos, cls=ProtoJsonEncoder),
content_type="application/json")
def post(self, request):
odlc_proto = interop_api_pb2.Odlc()
try:
json_format.Parse(request.body, odlc_proto)
except Exception as e:
return HttpResponseBadRequest(
'Failed to parse request. Error: %s' % str(e))
# Validate ODLC proto fields.
try:
validate_odlc_proto(odlc_proto)
except ValueError as e:
return HttpResponseBadRequest(str(e))
# Cannot set ODLC ID on a post.
if odlc_proto.HasField('id'):
return HttpResponseBadRequest(
'Cannot specify ID for POST request.')
# Check that there aren't too many ODLCs uploaded already.
odlc_count = Odlc.objects.filter(user=request.user).filter(
mission=odlc_proto.mission).count()
if odlc_count >= ODLC_UPLOAD_LIMIT:
return HttpResponseBadRequest(
'Reached upload limit for ODLCs for mission.')
# Build the ODLC object from the request.
odlc = Odlc()
odlc.user = request.user
update_odlc_from_proto(odlc, odlc_proto)
odlc.save()
return HttpResponse(json_format.MessageToJson(odlc_to_proto(odlc)),
content_type="application/json")
def find_odlc(request, pk):
"""Lookup requested Odlc model.
Only the request's user's odlcs will be returned.
Args:
request: Request object
pk: Odlc primary key
Raises:
Odlc.DoesNotExist: pk not found
ValueError: Odlc not owned by this user.
"""
odlc = Odlc.objects.get(pk=pk)
# We only let users get their own odlcs, unless a superuser.
if odlc.user == request.user or request.user.is_superuser:
return odlc
else:
raise ValueError("Accessing odlc %d not allowed" % pk)
class OdlcsId(View):
"""Get or update a specific odlc."""
@method_decorator(require_login)
def dispatch(self, *args, **kwargs):
return super(OdlcsId, self).dispatch(*args, **kwargs)
def get(self, request, pk):
try:
odlc = find_odlc(request, int(pk))
except Odlc.DoesNotExist:
return HttpResponseNotFound('Odlc %s not found' % pk)
except ValueError as e:
return HttpResponseForbidden(str(e))
return HttpResponse(json_format.MessageToJson(odlc_to_proto(odlc)),
content_type="application/json")
def put(self, request, pk):
try:
odlc = find_odlc(request, int(pk))
except Odlc.DoesNotExist:
return HttpResponseNotFound('Odlc %s not found' % pk)
except ValueError as e:
return HttpResponseForbidden(str(e))
odlc_proto = interop_api_pb2.Odlc()
try:
json_format.Parse(request.body, odlc_proto)
except Exception as e:
return HttpResponseBadRequest(
'Failed to parse request. Error: %s' % str(e))
# Validate ODLC proto fields.
try:
validate_odlc_proto(odlc_proto)
except ValueError as e:
return HttpResponseBadRequest(str(e))
# ID provided in proto must match object.
if odlc_proto.HasField('id') and odlc_proto.id != odlc.pk:
return HttpResponseBadRequest('ID in request does not match URL.')
# Update the ODLC object from the request.
update_odlc_from_proto(odlc, odlc_proto)
odlc.update_last_modified()
odlc.save()
return HttpResponse(json_format.MessageToJson(odlc_to_proto(odlc)),
content_type="application/json")
def delete(self, request, pk):
try:
odlc = find_odlc(request, int(pk))
except Odlc.DoesNotExist:
return HttpResponseNotFound('Odlc %s not found' % pk)
except ValueError as e:
return HttpResponseForbidden(str(e))
# Remember the thumbnail path so we can delete it from disk.
thumbnail = odlc.thumbnail.path if odlc.thumbnail else None
odlc.delete()
if thumbnail:
try:
os.remove(thumbnail)
except OSError as e:
logger.warning("Unable to delete thumbnail: %s", e)
return HttpResponse("Odlc deleted.")
class OdlcsIdImage(View):
"""Get or add/update odlc image."""
@method_decorator(require_login)
def dispatch(self, *args, **kwargs):
return super(OdlcsIdImage, self).dispatch(*args, **kwargs)
def get(self, request, pk):
try:
odlc = find_odlc(request, int(pk))
except Odlc.DoesNotExist:
return HttpResponseNotFound('Odlc %s not found' % pk)
except ValueError as e:
return HttpResponseForbidden(str(e))
if not odlc.thumbnail or not odlc.thumbnail.name:
return HttpResponseNotFound('Odlc %s has no image' % pk)
# Tell sendfile to serve the thumbnail.
return sendfile(request, odlc.thumbnail.path)
def post(self, request, pk):
try:
odlc = find_odlc(request, int(pk))
except Odlc.DoesNotExist:
return HttpResponseNotFound('Odlc %s not found' % pk)
except ValueError as e:
return HttpResponseForbidden(str(e))
# Request body is the file
f = io.BytesIO(request.body)
# Verify that this is a valid image
try:
i = Image.open(f)
i.verify()
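            # Image.verify() only checks file/header consistency and does not
            # decode pixel data; the untouched bytes in f are what get saved
            # to the thumbnail field below.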
except IOError as e:
return HttpResponseBadRequest(str(e))
if i.format not in ['JPEG', 'PNG']:
return HttpResponseBadRequest(
'Invalid image format %s, only JPEG and PNG allowed' %
(i.format))
# Clear thumbnail review state.
if odlc.thumbnail_approved is not None:
odlc.thumbnail_approved = None
# Save the thumbnail, note old path.
old_path = odlc.thumbnail.path if odlc.thumbnail else None
odlc.thumbnail.save('%d.%s' % (odlc.pk, i.format), ImageFile(f))
# ODLC has been modified.
odlc.update_last_modified()
odlc.save()
# Check whether old thumbnail should be deleted. Ignore errors.
if old_path and odlc.thumbnail.path != old_path:
try:
os.remove(old_path)
except OSError as e:
logger.warning("Unable to delete old thumbnail: %s", e)
return HttpResponse("Image uploaded.")
def put(self, request, pk):
"""We simply make PUT do the same as POST."""
return self.post(request, pk)
def delete(self, request, pk):
try:
odlc = find_odlc(request, int(pk))
except Odlc.DoesNotExist:
return HttpResponseNotFound('Odlc %s not found' % pk)
except ValueError as e:
return HttpResponseForbidden(str(e))
if not odlc.thumbnail or not odlc.thumbnail.path:
return HttpResponseNotFound('Odlc %s has no image' % pk)
# Clear thumbnail review state.
if odlc.thumbnail_approved is not None:
odlc.thumbnail_approved = None
odlc.save()
path = odlc.thumbnail.path
# Remove the thumbnail from the odlc.
# Note that this does not delete it from disk!
odlc.thumbnail.delete()
try:
os.remove(path)
except OSError as e:
logger.warning("Unable to delete thumbnail: %s", e)
return HttpResponse("Image deleted.")
def odlc_to_review_proto(odlc):
"""Converts an ODLC into a review proto."""
review_proto = interop_admin_api_pb2.OdlcReview()
review_proto.odlc.CopyFrom(odlc_to_proto(odlc))
review_proto.last_modified_timestamp = odlc.last_modified_time.isoformat()
if odlc.thumbnail_approved is not None:
review_proto.thumbnail_approved = odlc.thumbnail_approved
if odlc.description_approved is not None:
review_proto.description_approved = odlc.description_approved
return review_proto
def update_odlc_from_review_proto(odlc, review_proto):
"""Sets fields of the ODLC from the review."""
if review_proto.HasField('thumbnail_approved'):
odlc.thumbnail_approved = review_proto.thumbnail_approved
else:
odlc.thumbnail_approved = False
if review_proto.HasField('description_approved'):
odlc.description_approved = review_proto.description_approved
else:
odlc.description_approved = False
class OdlcsAdminReview(View):
"""Get or update review status for odlcs."""
@method_decorator(require_superuser)
def dispatch(self, *args, **kwargs):
return super(OdlcsAdminReview, self).dispatch(*args, **kwargs)
def get(self, request):
"""Gets all of the odlcs ready for review."""
# Get all odlcs which have a thumbnail to review.
odlcs = [t for t in Odlc.objects.all() if t.thumbnail]
# Sort odlcs by last edit time.
odlcs.sort(key=lambda t: t.last_modified_time)
# Convert to review protos.
odlc_review_protos = [odlc_to_review_proto(odlc) for odlc in odlcs]
return HttpResponse(json.dumps(odlc_review_protos,
cls=ProtoJsonEncoder),
content_type="application/json")
def put(self, request, pk):
"""Updates the review status of a odlc."""
review_proto = interop_admin_api_pb2.OdlcReview()
try:
json_format.Parse(request.body, review_proto)
except Exception:
return HttpResponseBadRequest('Failed to parse review proto.')
try:
odlc = find_odlc(request, int(pk))
except Odlc.DoesNotExist:
return HttpResponseNotFound('Odlc %s not found' % pk)
except ValueError as e:
return HttpResponseForbidden(str(e))
update_odlc_from_review_proto(odlc, review_proto)
odlc.save()
return HttpResponse(json_format.MessageToJson(
odlc_to_review_proto(odlc)),
content_type="application/json")
| apache-2.0 | -4,131,356,739,532,436,000 | 34.234783 | 80 | 0.627159 | false |
CoinEXchange/CoinX | coindb/db.py | 1 | 2156 | import peewee
from peewee import *
DEBUG = True
db = MySQLDatabase(None)
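# Passing None creates a deferred peewee database: the real connection settings
# are presumably supplied elsewhere at runtime via db.init(...) before any query runs.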
class COINModel(Model):
class Meta:
database = db
class Order(COINModel):
class Meta:
db_table = 'order'
## CREATE TABLE `order` (
## `address` varchar(40) NOT NULL DEFAULT '',
## `btc_address` varchar(40) DEFAULT NULL,
## `ltc_address` varchar(40) DEFAULT NULL,
## `order_created` timestamp NULL DEFAULT '0000-00-00 00:00:00',
## `order_type` varchar(1) DEFAULT NULL,
## `price` bigint(11) DEFAULT NULL,
## `amount` bigint(20) DEFAULT NULL,
## `amount_settled` bigint(20) DEFAULT NULL,
## `valid_until` timestamp NULL DEFAULT '0000-00-00 00:00:00',
## `confirmations` int(11) DEFAULT NULL,
## PRIMARY KEY (`address`)
## ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
## CREATE INDEX created ON btcltc (order_created);
## CREATE VIEW orders AS SELECT * FROM btcltc WHERE confirmations > '5';
send_to_address = CharField(primary_key=True,max_length=40)
sender_address = CharField(max_length=40, default="")
receiver_address = CharField(max_length=40, default="")
created = DateTimeField(default='0000-00-00 00:00:00')
active = DateTimeField(default='0000-00-00 00:00:00')
last_update = DateTimeField(default='0000-00-00 00:00:00')
order_type = IntegerField(default=0)
status = IntegerField(default=0)
source = CharField(max_length=3, default=' ')
target = CharField(max_length=3, default=' ')
amount = BigIntegerField(default=0)
amount_settled = BigIntegerField(default=0)
price_ask = DecimalField(default=0)
price_bought = DecimalField(default=0)
amount_ask = BigIntegerField(default=0)
amount_bought = BigIntegerField(default=0)
amount_send = BigIntegerField(default=0)
tid_send = CharField(max_length=40, default="")
valid_until = DateTimeField(default='0000-00-00 00:00:00')
confirmations = IntegerField(default=0)
# status values
STATUS_POOL_FREE = 0
STATUS_PREORDER = 100
STATUS_ACTIVE = 200
STATUS_SETTLED = 300
STATUS_TOPAY = 400
STATUS_PAYED = 500
STATUS_CONFIRMED = 550
STATUS_DELETE = 999
| gpl-2.0 | -7,011,038,118,931,080,000 | 31.179104 | 72 | 0.673469 | false |
dario-chiappetta/Due | due/test_event.py | 1 | 2458 | import unittest
import tempfile
import os
from due.persistence import serialize, deserialize
from due.models.dummy import DummyAgent
from due.event import *
from due.action import Action, RecordedAction
from datetime import datetime
T_0 = datetime(2018, 1, 1, 12, 0, 0, 0)
class TestEvent(unittest.TestCase):
def test_mark_acted(self):
e1 = Event(Event.Type.Utterance, T_0, None, "hello there")
self.assertIsNone(e1.acted)
now = datetime.now()
e1.mark_acted()
self.assertIsNotNone(e1.acted)
self.assertGreaterEqual(e1.acted, now)
self.assertLessEqual(e1.acted, datetime.now())
e1 = Event(Event.Type.Utterance, T_0, None, "hello there")
e1.mark_acted(datetime(2018, 2, 4, 18, 5, 25, 261308))
self.assertEqual(e1.acted, datetime(2018, 2, 4, 18, 5, 25, 261308))
def test_clone(self):
e1 = Event(Event.Type.Utterance, T_0, None, "hello there")
e2 = e1.clone()
self.assertEqual(e1, e2)
e1 = Event(Event.Type.Utterance, T_0, None, "hello there")
e1.mark_acted()
e2 = e1.clone()
self.assertEqual(e1, e2)
self.assertIsNone(e2.acted)
def test_equal(self):
a = DummyAgent('Alice')
e0 = Event(Event.Type.Utterance, T_0, None, "hello there")
e1 = Event(Event.Type.Utterance, T_0, None, "hello there")
e2 = Event(Event.Type.Action, T_0, None, "hello there")
e3 = Event(Event.Type.Utterance, T_0, None, "general Kenobi!")
e4 = Event(Event.Type.Utterance, datetime.now(), None, "hello there")
e5 = Event(Event.Type.Utterance, T_0, a.id, "hello there")
self.assertEqual(e0, e1)
self.assertNotEqual(e0, e3)
self.assertNotEqual(e0, e4)
self.assertNotEqual(e0, e5)
def test_event_save(self):
a = DummyAgent('Alice')
now = datetime.now()
e = Event(Event.Type.Utterance, now, a.id, "hello there")
test_dir = tempfile.mkdtemp()
test_path = os.path.join(test_dir, 'test_event_save.pkl')
serialize(e.save(), test_path)
loaded_e = Event.load(deserialize(test_path))
self.assertEqual(loaded_e[0], Event.Type.Utterance)
self.assertEqual(loaded_e[1], now)
self.assertEqual(loaded_e[2], a.id)
self.assertEqual(loaded_e[3], 'hello there')
def test_event_save_action(self):
"""Save and load an Action event that contains an object payload"""
a = RecordedAction()
event = Event(Event.Type.Action, datetime.now(), 'fake-agent-id', a)
saved_event = event.save()
loaded_event = Event.load(saved_event)
assert event == loaded_event
assert isinstance(loaded_event.payload, Action)
| gpl-3.0 | 3,232,078,789,086,964,000 | 30.922078 | 71 | 0.70057 | false |
dbroudy/django-apollo | apollo/views.py | 1 | 2271 | from django.contrib.sites.models import get_current_site
from django.http import HttpResponse, HttpResponseNotAllowed, HttpResponseBadRequest
from django.shortcuts import render, get_object_or_404
import json
import re
from apollo.models import Page, Button, Survey, SurveyAnswer, Answer
from apollo.forms import SurveyForm, SurveyAnswerFormSet
_content_idx = re.compile('^(.+)\[(\d+)\]$')
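# Matches content keys of the form "name[index]" (e.g. "features[2]") so that
# indexed keys can be folded into Python lists by page() below.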
def page(request, slug):
page = get_object_or_404(Page, slug=slug, site=get_current_site(request))
content = dict()
arrays = dict()
for c in page.content.all():
m = _content_idx.match(c.key)
if m:
base = m.group(1)
idx = int(m.group(2))
if not base in arrays:
arrays[base] = list()
l = len(arrays[base])
if idx >= l:
arrays[base] = arrays[base] + [''] * (idx-l+1)
arrays[base][idx] = c.content
else:
content[c.key] = c.content
for k,a in arrays.items():
content[k] = a
#content = dict((c.key, c.content) for c in page.content.all())
return render(request, page.template, {
'content': content,
'buttons': page.buttons.all(),
'button_width': int(12 / page.buttons.count()),
})
def register(request, button_id):
button = Button.objects.get(id=button_id)
button.clicks += 1
button.save()
survey = Survey(button=button)
survey.save()
for q in button.questions.all():
survey.answers.create(question=q)
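    # One blank Answer row is created per button question so that the inline
    # formset below renders an input for each question.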
form = SurveyForm(instance=survey)
formset = SurveyAnswerFormSet(instance=survey)
return render(request, 'apollo/confirm.html', {
'button': button,
'surveyform': form,
'answerform': formset
})
def questions(request, survey_id):
if request.method != 'POST':
return HttpResponseNotAllowed(['POST'])
survey = get_object_or_404(Survey, id=survey_id)
form = SurveyForm(request.POST, instance=survey)
if form.is_valid():
form.save()
formset = SurveyAnswerFormSet(request.POST, instance=survey)
formset.save()
if not form.is_valid() or not formset.is_valid():
return render(request, 'apollo/forms.html', {
'surveyform': form,
'answerform': formset
}, status=202)
# return 200 when complete
return HttpResponse(status=200)
| mit | 8,743,457,726,549,913,000 | 27.3875 | 84 | 0.646852 | false |
spiralx/mypy | mypy/spiralx/rest.py | 1 | 1666 | #! python3
import logging
import requests
import re
from urllib.parse import urlsplit, urlunsplit, urljoin
_logger = logging.getLogger("spiralx.rest")
_logger.setLevel(logging.DEBUG)
# -------------------------------------------------------------
_has_scheme = lambda u: re.match(r"(?:https?:)?//", u) is not None
def slack_urlsplit(url, scheme="http"):
if not _has_scheme(url):
url = "//" + url
if url[-1] != "/":
url += "/"
return urlsplit(url, scheme=scheme)
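# A small usage sketch (illustrative only): slack_urlsplit("example.com") first
# prefixes "//" and appends a trailing "/", so it should return roughly
# SplitResult(scheme='http', netloc='example.com', path='/', query='', fragment='').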
# -------------------------------------------------------------
class RestApi:
"""
Wrapper around REST API requests.
>>> api = RestApi()
>>> api.base
http://localhost/
>>> api.get_url("photos")
http://localhost/photos
>>> api.get_url("photos", 1)
http://localhost/photos/1
"""
def __init__(self, url="http://localhost/"):
self._base = urlunsplit(slack_urlsplit(url))
@property
def base(self):
return self._base
def get_url(self, *params):
path = "/".join(str(p) for p in params)
        return urljoin(self.base, path)
# -------------------------------------------------------------
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description="Make requests to a server with a REST API.")
parser.add_argument("-u", "--uri", default="localhost", help="URI of the REST API")
parser.add_argument("-t", "--host", default="localhost", help="")
parser.add_argument("params", nargs="*", type=int, default=8000, help="Parameters to pass to the request")
args = parser.parse_args()
print(args)
| mit | 7,442,002,649,318,131,000 | 23.865672 | 110 | 0.528812 | false |
chinapnr/fish_base | fishbase/fish_crypt.py | 1 | 7173 | # coding=utf-8
"""
``fish_crypt`` contains functions for encrypting and encoding data, such as MD5 and SHA256 digests.
These functions originally lived in the ``fish_common`` module; because ``fish_common`` had become too cluttered, they were split out and reorganized here.
"""
# 2019.01.21 v1.1.6 created by Hu Jun
import hashlib
import hmac
import base64
# 2018.5.8 edit by David Yi, edit from Jia Chunying,#19026
# 2018.6.12 edit by Hu Jun, edit from Jia Chunying,#37
# 2018.10.28 edit by Hu Jun, #99
# 2019.01.06 edit by Hu Jun, #152
# 2019.01.21 v1.1.6 edit by Hu Jun, #200 move fish_common.FishMD5 to fish_crypt.FishMD5
class FishMD5(object):
"""
    Compute MD5 digests for ordinary strings and files; large files are read in chunks, so they can be hashed quickly as well. A wrapper and extension around Python's hashlib.md5().
    Example::
        print('--- md5 demo ---')
        print('string md5:', FishMD5.string('hello world!'))
        file_path = get_abs_filename_with_sub_path('test_conf', 'test_conf.ini')[1]
        print('file md5:', FishMD5.file(file_path))
        big_file_path = get_abs_filename_with_sub_path('test_conf', 'test_conf.ini')[1]
        print('big file md5:', FishMD5.big_file(big_file_path))
        print('string hmac_md5:', FishMD5.hmac_md5('hello world!', 'salt'))
        print('---')
    Output::
        --- md5 demo ---
        string md5: fc3ff98e8c6a0d3087d515c0473f8677
        file md5: fb7528c9778b2377e30b0f7e4c26fef0
        big file md5: fb7528c9778b2377e30b0f7e4c26fef0
        string hmac_md5: 191f82804523bfdafe0188bbbddd6587
        ---
"""
@staticmethod
def string(s, salt=None):
"""
        Get the MD5 digest of a string.
        :param:
            * s: (string) the string to hash
            * salt: (string) an optional salt string, defaults to None
        :return:
            * result: (string) 32-character lowercase hex MD5 digest
"""
m = hashlib.md5()
s = s.encode('utf-8') + salt.encode('utf-8') if salt is not None else s.encode('utf-8')
m.update(s)
result = m.hexdigest()
return result
@staticmethod
def file(filename):
"""
        Get the MD5 digest of a file.
        :param:
            * filename: (string) path of the file to hash
        :return:
            * result: (string) 32-character lowercase hex MD5 digest
"""
m = hashlib.md5()
with open(filename, 'rb') as f:
m.update(f.read())
result = m.hexdigest()
return result
@staticmethod
def big_file(filename):
"""
        Get the MD5 digest of a large file.
        :param:
            * filename: (string) path of the large file to hash
        :return:
            * result: (string) 32-character lowercase hex MD5 digest
"""
md5 = hashlib.md5()
with open(filename, 'rb') as f:
for chunk in iter(lambda: f.read(8192), b''):
md5.update(chunk)
result = md5.hexdigest()
return result
@staticmethod
def hmac_md5(s, salt):
"""
        Get the HMAC-MD5 digest of a string keyed with the given salt.
        :param:
            * s: (string) the string to hash
            * salt: (string) the key string
        :return:
            * result: (string) 32-character lowercase hex MD5 digest
"""
hmac_md5 = hmac.new(salt.encode('utf-8'), s.encode('utf-8'),
digestmod=hashlib.md5).hexdigest()
return hmac_md5
# v1.0.14 edit by Hu Jun, #59
# 2019.01.21 v1.1.6 edit by Hu Jun, #200 move fish_common.Base64 to fish_crypt.Base64
class FishBase64(object):
"""
    Return the base64 encoding of files and strings, and decode base64 data.
    Example::
        print('--- FishBase64 demo ---')
        print('string base64:', FishBase64.string('hello world!'))
        file_path = get_abs_filename_with_sub_path('test_conf', 'test_conf.ini')[1]
        print('file base64:', FishBase64.file(file_path))
        print('decode base64:', FishBase64.decode(b'aGVsbG8gd29ybGQ='))
        print('---')
    Output::
        --- FishBase64 demo ---
        string base64: b'aGVsbG8gd29ybGQ='
        file base64: (b'IyEvYmluL2Jhc2gKCmNkIC9yb290L3d3dy9zaW5nbGVfcWEKCm5vaHVwIC9yb2
        90L2FwcC9weXRob24zNjIvYmluL2d1bmljb3JuIC1jIGd1bmljb3JuLmNvbmYgc2luZ2xlX3NlcnZlcjphcHAK')
        decode base64: b'hello world'
        ---
"""
@staticmethod
def string(s):
"""
        Get the base64 encoding of a string.
        :param:
            * s: (string) the string to base64-encode
        :return:
            * (bytes) the base64-encoded result
"""
return base64.b64encode(s.encode('utf-8'))
@staticmethod
def file(filename):
"""
        Get the base64 encoding of a file.
        :param:
            * filename: (string) path of the file to base64-encode
        :return:
            * (bytes) the base64-encoded result
"""
with open(filename, 'rb') as f:
return base64.b64encode(f.read())
@staticmethod
def decode(s):
"""
        Decode base64-encoded data.
        :param:
            * s: (string or bytes) the base64 data to decode
        :return:
            * (bytes) the decoded result
"""
return base64.b64decode(s)
# v1.1.3 edit by Hu Jun, #100
# 2019.01.06 v1.1.6 edit by Hu Jun, #152
# 2019.01.21 v1.1.6 edit by Hu Jun, #200 move fish_common.FishSha256 to fish_crypt.FishSha256
class FishSha256(object):
"""
    Compute SHA-256 hashes of strings, optionally keyed with a secret.
    Example::
        print('--- FishSha256 demo ---')
        # the message to hash
        message = 'Hello HMAC'
        # the secret key
        secret = '12345678'
        print('hmac_sha256:', FishSha256.hmac_sha256(secret, message))
        print('hashlib_sha256:', FishSha256.hashlib_sha256(message))
        print('---')
    Output::
        --- FishSha256 demo ---
        hmac_sha256: 5eb8bdabdaa43f61fb220473028e49d40728444b4322f3093decd9a356afd18f
        hashlib_sha256: 4a1601381dfb85d6e713853a414f6b43daa76a82956911108512202f5a1c0ce4
        ---
"""
@staticmethod
def hmac_sha256(secret, message):
"""
        Get the HMAC-SHA256 hash of a string keyed with the given secret.
        :param:
            * secret: (string) the key for the hash
            * message: (string) the string to hash
        :return:
            * hashed_str: the SHA-256 hash as a hex string
"""
hashed_str = hmac.new(secret.encode('utf-8'),
message.encode('utf-8'),
digestmod=hashlib.sha256).hexdigest()
return hashed_str
@staticmethod
def hashlib_sha256(message):
"""
        Get the SHA-256 hash of a string.
        :param:
            * message: (string) the string to hash
        :return:
            * hashed_str: the SHA-256 hash as a hex string
"""
hashlib_sha256 = hashlib.sha256()
hashlib_sha256.update(message.encode('utf-8'))
hashed_str = hashlib_sha256.hexdigest()
return hashed_str
| mit | 3,705,349,012,075,000,300 | 26.469828 | 96 | 0.555625 | false |
mirestrepo/voxels-at-lems | bvpl/bvpl_octree/PCA/release/reconstruction_error/compute_pca_error_scene.py | 1 | 5454 | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 14, 2011
@author:Isabel Restrepo
Compuets PCA reconstruction error. Each block is processed in a separate thread.
This script assumes that the pca basis has been computed as gone by extract_pca_kernels.py
"""
import os;
import bvpl_octree_batch
import multiprocessing
import Queue
import time
import random
import optparse
import sys
#time.sleep(30);
class dbvalue:
def __init__(self, index, type):
self.id = index # unsigned integer
self.type = type # string
class pca_error_job():
def __init__(self, pca_info, pca_error_scenes, block_i, block_j, block_k, dim):
self.pca_info = pca_info;
self.pca_error_scenes = pca_error_scenes;
self.block_i = block_i;
self.block_j = block_j;
self.block_k = block_k;
self.dim=dim;
def execute_jobs(jobs, num_procs=4):
work_queue=multiprocessing.Queue();
result_queue=multiprocessing.Queue();
for job in jobs:
work_queue.put(job)
for i in range(num_procs):
worker= pca_error_worker(work_queue,result_queue)
worker.start();
print("worker with name ",worker.name," started!")
class pca_error_worker(multiprocessing.Process):
def __init__(self,work_queue,result_queue):
# base class initialization
multiprocessing.Process.__init__(self)
# job management stuff
self.work_queue = work_queue
self.result_queue = result_queue
self.kill_received = False
def run(self):
while not self.kill_received:
# get a task
try:
job = self.work_queue.get_nowait()
except Queue.Empty:
break
start_time = time.time();
print("Computing Error Scene");
bvpl_octree_batch.init_process("bvplComputePCAErrorBlockProcess");
bvpl_octree_batch.set_input_from_db(0,job.pca_info);
bvpl_octree_batch.set_input_from_db(1,job.pca_error_scenes);
bvpl_octree_batch.set_input_int(2, job.block_i);
bvpl_octree_batch.set_input_int(3, job.block_j);
bvpl_octree_batch.set_input_int(4, job.block_k);
bvpl_octree_batch.set_input_unsigned(5, job.dim);
bvpl_octree_batch.run_process();
print ("Runing time for worker:", self.name)
print(time.time() - start_time);
#*******************The Main Algorithm ************************#
if __name__=="__main__":
bvpl_octree_batch.register_processes();
bvpl_octree_batch.register_datatypes();
#Parse inputs
parser = optparse.OptionParser(description='Compute PCA Error Scene');
parser.add_option('--model_dir', action="store", dest="model_dir");
parser.add_option('--pca_dir', action="store", dest="pca_dir");
parser.add_option('--num_cores', action="store", dest="num_cores", type="int", default=4);
parser.add_option('--nblocks_x', action="store", dest="nblocks_x", type="int");
parser.add_option('--nblocks_y', action="store", dest="nblocks_y", type="int");
parser.add_option('--nblocks_z', action="store", dest="nblocks_z", type="int");
parser.add_option('--dimension', action="store", dest="dimension", type="int");
options, args = parser.parse_args()
model_dir = options.model_dir;
pca_dir = options.pca_dir;
nblocks_x = options.nblocks_x;
nblocks_y = options.nblocks_y;
nblocks_z = options.nblocks_z;
num_cores = options.num_cores;
dimension = options.dimension;
if not os.path.isdir(model_dir +"/"):
print "Invalid Model Dir"
sys.exit(-1);
if not os.path.isdir(pca_dir +"/"):
print "Invalid PCA Dir"
sys.exit(-1);
print("Loading Data Scene");
bvpl_octree_batch.init_process("boxmCreateSceneProcess");
bvpl_octree_batch.set_input_string(0, model_dir +"/mean_color_scene.xml");
bvpl_octree_batch.run_process();
(scene_id, scene_type) = bvpl_octree_batch.commit_output(0);
data_scene= dbvalue(scene_id, scene_type);
#Load pca scenes
pca_feature_dim = 125;
print("Loading PCA Error Scenes");
bvpl_octree_batch.init_process("bvplLoadPCAErrorSceneProcess");
bvpl_octree_batch.set_input_from_db(0, data_scene);
bvpl_octree_batch.set_input_string(1, pca_dir);
bvpl_octree_batch.set_input_unsigned(2, pca_feature_dim); #dimension pca feature
bvpl_octree_batch.run_process();
(id, type) = bvpl_octree_batch.commit_output(0);
pca_scenes = dbvalue(id, type);
print("Loading PCA Info");
bvpl_octree_batch.init_process("bvplLoadPCAInfoProcess");
bvpl_octree_batch.set_input_string(0, pca_dir);
bvpl_octree_batch.run_process();
(id, type) = bvpl_octree_batch.commit_output(0);
pca_info = dbvalue(id, type);
#Begin multiprocessing
work_queue=multiprocessing.Queue();
job_list=[];
#Enqueue jobs
all_indeces=[]
for block_i in range(0,nblocks_x):
for block_j in range(0,nblocks_y):
for block_k in range(0,nblocks_z):
idx = [block_i, block_j, block_k];
all_indeces.append(idx);
random.shuffle(all_indeces);
for i in range (0, len(all_indeces)):
idx = all_indeces[i];
current_job = pca_error_job(pca_info, pca_scenes, idx[0], idx[1], idx[2], dimension);
job_list.append(current_job);
execute_jobs(job_list, num_cores);
| bsd-2-clause | -1,480,236,010,235,916,800 | 32.060606 | 92 | 0.623029 | false |
eroicaleo/LearningPython | PythonForDA/ch05/DataFrame.py | 1 | 1827 | import pandas as pd
import numpy as np
data = {'state': ['Ohio', 'Ohio', 'Ohio', 'Nevada', 'Nevada', 'Nevada'],
'year': [2000, 2001, 2002, 2001, 2002, 2003],
'pop': [1.5, 1.7, 3.6, 2.4, 2.9, 3.2]}
frame = pd.DataFrame(data)
frame
frame.head()
frame = pd.DataFrame(data, columns=['year', 'state'])
frame
frame = pd.DataFrame(data, columns=['year', 'state', 'pop'])
frame
frame = pd.DataFrame(data, columns=['year', 'state', 'pop', 'debt'], index = ['one', 'two', 'three', 'four', 'five', 'six'])
frame
frame = pd.DataFrame(data, columns=['year', 'state', 'pop', 'debt'], index = ['one', 'two', 'three', 'four', 'five', 'six', 'seven'])
frame = pd.DataFrame(data, columns=['year', 'state', 'pop', 'debt'], index = ['one', 'two', 'three', 'four', 'five', 'six'])
frame
frame.columns
frame.year
frame['state']
frame['pop']
frame['debt']
frame[2]
frame.loc('three')
frame.loc['three']
frame.debt = 1.6
frame
frame.debt = np.arange(6)
frame
frame.debt = np.arange(6.)
frame
frame[0, debt]
frame[0, 'debt']
frame.debt = np.nan
frame
val = pd.Series([-1.2, -1.5, -1.7], index=['two', 'four', 'five'])
val
frame.debt = val
val
frame
frame.state == 'Ohio'
type(frame.state == 'Ohio')
frame['eastern'] = frame.state == 'Ohio'
frame
del frame.eastern
del frame['eastern']
frame
frame.columns
pop = {'Nevada': {2001: 2.4, 2002: 2.9}, 'Ohio': {2000: 1.5, 2001: 1.7, 2002: 3.6}}
pop
frame3 = pd.DataFrame(pop)
frame3
frame3.T
pd.DataFrame(pop, index=[2001, 2002, 2003]
)
pd.DataFrame(pop, index=[2001, 2002])
pd.DataFrame(pop)
pd.DataFrame(pop, index=[2001, 2002])
pop
pd.DataFrame(pop, index=np.array([2001, 2002]))
pdata = {'Ohio': frame3['Ohio'][:-1], 'Nevada': frame3['Nevada'][:2]}
pd.DataFrame(pdata)
frame3
frame3.columns.name = 'state'
frame3
frame3.index.name = 'year'
frame3
frame
frame.values
frame3.values
| mit | 2,222,649,027,941,006,600 | 25.1 | 133 | 0.635468 | false |
SKIRT/PTS | evolve/core/utils.py | 1 | 11501 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.evolve.utils This is the utility module, with some utility functions of general
# use, like list item swap, random utilities and etc.
# -----------------------------------------------------------------
# Import standard modules
from math import sqrt as math_sqrt
# Import the relevant PTS classes and modules
from ...core.basics.log import log
from ...core.tools.random import prng
# -----------------------------------------------------------------
def randomFlipCoin(p):
"""Returns True with the *p* probability. If *p* is 1, the
function will always return True. If *p* is 0, the function will
return always False.
Example:
>>> randomFlipCoin(1.0)
True
:param p: probability, between 0.0 and 1.0
:rtype: True or False
"""
if p == 1.0:
return True
if p == 0.0:
return False
return prng.random_sample() <= p
# -----------------------------------------------------------------
def listSwapElement(lst, indexa, indexb):
""" Swaps elements A and B in a list.
Example:
>>> l = [1, 2, 3]
>>> listSwapElement(l, 1, 2)
>>> l
[1, 3, 2]
:param lst: the list
:param indexa: the swap element A
:param indexb: the swap element B
:rtype: None
"""
lst[indexa], lst[indexb] = lst[indexb], lst[indexa]
# -----------------------------------------------------------------
def list2DSwapElement(lst, indexa, indexb):
""" Swaps elements A and B in a 2D list (matrix).
Example:
>>> l = [ [1,2,3], [4,5,6] ]
>>> list2DSwapElement(l, (0,1), (1,1) )
>>> l
[[1, 5, 3], [4, 2, 6]]
:param lst: the list
:param indexa: the swap element A
:param indexb: the swap element B
:rtype: None
"""
temp = lst[indexa[0]][indexa[1]]
lst[indexa[0]][indexa[1]] = lst[indexb[0]][indexb[1]]
lst[indexb[0]][indexb[1]] = temp
# -----------------------------------------------------------------
def raiseException(message, expt=None):
""" Raise an exception and logs the message.
Example:
>>> raiseException('The value is not an integer', ValueError)
:param message: the message of exception
:param expt: the exception class
:rtype: None
"""
log.critical(message)
if expt is None:
raise Exception(message)
else:
        raise expt(message)
# -----------------------------------------------------------------
def cmp_individual_raw(a, b):
""" Compares two individual raw scores
Example:
>>> cmp_individual_raw(a, b)
:param a: the A individual instance
:param b: the B individual instance
:rtype: 0 if the two individuals raw score are the same,
-1 if the B individual raw score is greater than A and
1 if the A individual raw score is greater than B.
    .. note:: this function is used to sort the population individuals
"""
if a.score < b.score:
return -1
if a.score > b.score:
return 1
return 0
# -----------------------------------------------------------------
def cmp_individual_scaled(a, b):
"""
Compares two individual fitness scores, used for sorting population
Example:
>>> cmp_individual_scaled(a, b)
:param a: the A individual instance
:param b: the B individual instance
:rtype: 0 if the two individuals fitness score are the same,
-1 if the B individual fitness score is greater than A and
1 if the A individual fitness score is greater than B.
.. note:: this function is used to sort the population individuals
"""
if a.fitness < b.fitness:
return -1
if a.fitness > b.fitness:
return 1
return 0
# -----------------------------------------------------------------
def importSpecial(name):
"""
This function will import the *name* module, if fails,
it will raise an ImportError exception and a message
:param name: the module name
:rtype: the module object
.. versionadded:: 0.6
The *import_special* function
"""
from . import constants
try:
imp_mod = __import__(name)
except ImportError:
raiseException("Cannot import module %s: %s" % (name, constants.CDefImportList[name]), expt=ImportError)
return imp_mod
# -----------------------------------------------------------------
class ErrorAccumulator(object):
""" An accumulator for the Root Mean Square Error (RMSE) and the
Mean Square Error (MSE)
"""
def __init__(self):
"""
The constructor ...
"""
self.acc = 0.0
self.acc_square = 0.0
self.acc_len = 0
# -----------------------------------------------------------------
def reset(self):
""" Reset the accumulator """
self.acc_square = 0.0
self.acc = 0.0
self.acc_len = 0
# -----------------------------------------------------------------
def append(self, target, evaluated):
""" Add value to the accumulator
:param target: the target value
:param evaluated: the evaluated value
"""
self.acc_square += (target - evaluated) ** 2
self.acc += abs(target - evaluated)
self.acc_len += 1
# -----------------------------------------------------------------
def __iadd__(self, value):
""" The same as append, but you must pass a tuple """
self.append(*value)
return self
# -----------------------------------------------------------------
def getMean(self):
""" Return the mean of the non-squared accumulator """
return self.acc / self.acc_len
# -----------------------------------------------------------------
def getSquared(self):
""" Returns the squared accumulator """
return self.acc_square
# -----------------------------------------------------------------
def getNonSquared(self):
""" Returns the non-squared accumulator """
return self.acc
# -----------------------------------------------------------------
def getAdjusted(self):
""" Returns the adjusted fitness
This fitness is calculated as 1 / (1 + standardized fitness)
"""
return 1.0 / (1.0 + self.acc)
# -----------------------------------------------------------------
def getRMSE(self):
""" Return the root mean square error
:rtype: float RMSE
"""
return math_sqrt(self.acc_square / float(self.acc_len))
# -----------------------------------------------------------------
def getMSE(self):
""" Return the mean square error
:rtype: float MSE
"""
return self.acc_square / float(self.acc_len)
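    # Worked example (illustrative values): after append(1.0, 2.0) and
    # append(3.0, 5.0), acc = 1 + 2 = 3, acc_square = 1 + 4 = 5 and acc_len = 2,
    # so getMSE() = 5 / 2 = 2.5 and getRMSE() = sqrt(2.5) ~= 1.58.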
# -----------------------------------------------------------------
class Graph(object):
""" The Graph class
Example:
>>> g = Graph()
>>> g.addEdge("a", "b")
>>> g.addEdge("b", "c")
>>> for node in g:
... print node
a
b
c
.. versionadded:: 0.6
The *Graph* class.
"""
def __init__(self):
""" The constructor """
self.adjacent = {}
# -----------------------------------------------------------------
def __iter__(self):
""" Returns an iterator to the all graph elements """
return iter(self.adjacent)
# -----------------------------------------------------------------
def addNode(self, node):
""" Add the node
:param node: the node to add
"""
if node not in self.adjacent:
self.adjacent[node] = {}
# -----------------------------------------------------------------
def __iadd__(self, node):
""" Add a node using the += operator """
self.addNode(node)
return self
# -----------------------------------------------------------------
def addEdge(self, a, b):
""" Add an edge between two nodes, if the nodes
doesn't exists, they will be created
:param a: the first node
:param b: the second node
"""
if a not in self.adjacent:
self.adjacent[a] = {}
if b not in self.adjacent:
self.adjacent[b] = {}
self.adjacent[a][b] = True
self.adjacent[b][a] = True
# -----------------------------------------------------------------
def getNodes(self):
""" Returns all the current nodes on the graph
:rtype: the list of nodes
"""
return self.adjacent.keys()
# -----------------------------------------------------------------
def reset(self):
""" Deletes all nodes of the graph """
self.adjacent.clear()
# -----------------------------------------------------------------
def getNeighbors(self, node):
""" Returns the neighbors of the node
:param node: the node
"""
return self.adjacent[node].keys()
# -----------------------------------------------------------------
def __getitem__(self, node):
""" Returns the adjacent nodes of the node """
return self.adjacent[node].keys()
# -----------------------------------------------------------------
def __repr__(self):
ret = "- Graph\n"
ret += "\tNode list:\n"
for node in self:
ret += "\t\tNode [%s] = %s\n" % (node, self.getNeighbors(node))
return ret
# -----------------------------------------------------------------
def G1DListGetEdgesComposite(mom, dad):
"""
Get the edges and the merge between the edges of two G1DList individuals
:param mom: the mom G1DList individual
:param dad: the dad G1DList individual
:rtype: a tuple (mom edges, dad edges, merge)
"""
mom_edges = G1DListGetEdges(mom)
dad_edges = G1DListGetEdges(dad)
return mom_edges, dad_edges, G1DListMergeEdges(mom_edges, dad_edges)
# -----------------------------------------------------------------
def G1DListGetEdges(individual):
"""
Get the edges of a G1DList individual
:param individual: the G1DList individual
:rtype: the edges dictionary
"""
edg = {}
ind_list = individual.getInternalList()
for i in xrange(len(ind_list)):
a, b = ind_list[i], ind_list[i - 1]
if a not in edg:
edg[a] = []
else:
edg[a].append(b)
if b not in edg:
edg[b] = []
else:
edg[b].append(a)
return edg
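# Rough trace (illustrative input): for a G1DList holding [1, 2, 3] the loop
# pairs each element with its predecessor, i.e. (1, 3), (2, 1), (3, 2), and --
# because the first visit of a node only creates its empty list -- the result
# is {1: [2], 2: [3], 3: [2]}.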
# -----------------------------------------------------------------
def G1DListMergeEdges(eda, edb):
"""
Get the merge between the two individual edges
:param eda: the edges of the first G1DList genome
:param edb: the edges of the second G1DList genome
:rtype: the merged dictionary
"""
edges = {}
for value, near in eda.items():
for adj in near:
if (value in edb) and (adj in edb[value]):
edges.setdefault(value, []).append(adj)
return edges
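# Illustrative example: merging eda = {1: [2], 2: [1]} with edb = {1: [2, 3]}
# keeps only the adjacencies present in both parents, giving {1: [2]}.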
# -----------------------------------------------------------------
| agpl-3.0 | -2,755,724,891,698,071,600 | 24.612472 | 112 | 0.453565 | false |
aetilley/revscoring | setup.py | 1 | 1234 | import os
from setuptools import find_packages, setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
def requirements(fname):
return [line.strip()
for line in open(os.path.join(os.path.dirname(__file__), fname))]
setup(
name="revscoring",
version="0.4.10", # change in revscoring/__init__.py
author="Aaron Halfaker",
author_email="[email protected]",
description=("A set of utilities for generating quality scores for " + \
"MediaWiki revisions"),
license="MIT",
entry_points = {
'console_scripts': [
'revscoring = revscoring.revscoring:main',
],
},
url="https://github.com/halfak/Revision-Scores",
packages=find_packages(),
long_description=read('README.rst'),
install_requires=requirements("requirements.txt"),
classifiers=[
"Development Status :: 3 - Alpha",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Environment :: Other Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent"
],
)
| mit | -5,696,906,683,495,392,000 | 29.85 | 77 | 0.60859 | false |
PaddlePaddle/Paddle | python/paddle/fluid/tests/unittests/test_conv2d_op.py | 1 | 35379 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
from op_test import OpTest, convert_float_to_uint16, get_numeric_gradient
from paddle.fluid.tests.unittests.testsuite import create_op
from paddle.fluid import Program, program_guard
def conv2d_forward_naive(input,
filter,
group,
conv_param,
padding_algorithm='EXPLICIT',
data_format='NCHW'):
if padding_algorithm not in ["SAME", "VALID", "EXPLICIT"]:
raise ValueError("Unknown Attr(padding_algorithm): '%s'. "
"It can only be 'SAME' or 'VALID'." %
str(padding_algorithm))
if data_format not in ["NCHW", "NHWC"]:
raise ValueError("Unknown Attr(data_format): '%s' ."
"It can only be 'NCHW' or 'NHWC'." % str(data_format))
channel_last = (data_format == "NHWC")
if channel_last:
input = np.transpose(input, [0, 3, 1, 2])
in_n, in_c, in_h, in_w = input.shape
f_n, f_c, f_h, f_w = filter.shape
out_n = in_n
out_c = f_n
assert f_c * group == in_c
assert np.mod(out_c, group) == 0
sub_out_c = out_c // group
sub_f_n = f_n // group
stride, pad, dilation = conv_param['stride'], conv_param['pad'], conv_param[
'dilation']
# update pad and dilation
def _get_padding_with_SAME(input_shape, pool_size, pool_stride):
padding = []
for input_size, filter_size, stride_size in zip(input_shape, pool_size,
pool_stride):
out_size = int((input_size + stride_size - 1) / stride_size)
pad_sum = np.max((
(out_size - 1) * stride_size + filter_size - input_size, 0))
pad_0 = int(pad_sum / 2)
pad_1 = int(pad_sum - pad_0)
padding.append(pad_0)
padding.append(pad_1)
return padding
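    # Worked SAME-padding example (numbers are illustrative): for input_size=5,
    # filter_size=3, stride=2 the helper above gives out_size = ceil(5 / 2) = 3,
    # pad_sum = max((3 - 1) * 2 + 3 - 5, 0) = 2, hence pad_0 = 1 and pad_1 = 1.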
ksize = filter.shape[2:4]
if padding_algorithm == "VALID":
pad = [0, 0, 0, 0]
elif padding_algorithm == "SAME":
dilation = [1, 1]
input_data_shape = input.shape[2:4]
pad = _get_padding_with_SAME(input_data_shape, ksize, stride)
pad_h_0, pad_h_1 = pad[0], pad[0]
pad_w_0, pad_w_1 = pad[1], pad[1]
if len(pad) == 4:
pad_h_0, pad_h_1 = pad[0], pad[1]
pad_w_0, pad_w_1 = pad[2], pad[3]
out_h = 1 + (in_h + pad_h_0 + pad_h_1 - (dilation[0] *
(f_h - 1) + 1)) // stride[0]
out_w = 1 + (in_w + pad_w_0 + pad_w_1 - (dilation[1] *
(f_w - 1) + 1)) // stride[1]
out = np.zeros((out_n, out_c, out_h, out_w))
d_bolck_h = (dilation[0] * (f_h - 1) + 1)
d_bolck_w = (dilation[1] * (f_w - 1) + 1)
input_pad = np.pad(input, ((0, 0), (0, 0), (pad_h_0, pad_h_1),
(pad_w_0, pad_w_1)),
mode='constant',
constant_values=0)
filter_dilation = np.zeros((f_n, f_c, d_bolck_h, d_bolck_w))
filter_dilation[:, :, 0:d_bolck_h:dilation[0], 0:d_bolck_w:dilation[
1]] = filter
for i in range(out_h):
for j in range(out_w):
for g in range(group):
input_pad_masked = \
input_pad[:, g * f_c:(g + 1) * f_c,
i * stride[0]:i * stride[0] + d_bolck_h,
j * stride[1]:j * stride[1] + d_bolck_w]
f_sub = filter_dilation[g * sub_f_n:(g + 1) * sub_f_n, :, :, :]
# sub_f_n == sub_out_c
for k in range(sub_out_c):
# Multiplication of Corresponding Elements, then sum all
out[:, g * sub_out_c + k, i, j] = \
np.sum(input_pad_masked * f_sub[k, :, :, :],
axis=(1, 2, 3))
if channel_last:
out = np.transpose(out, [0, 2, 3, 1])
return out, in_n, out_h, out_w, out_c
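# A minimal usage sketch of the reference implementation above (shapes are
# illustrative, not taken from any test case below):
#
#   x = np.random.random((1, 3, 5, 5)).astype(np.float64)   # NCHW input
#   w = np.random.random((6, 3, 3, 3)).astype(np.float64)   # OIHW filter
#   y, _, out_h, out_w, out_c = conv2d_forward_naive(
#       x, w, 1, {'stride': [1, 1], 'pad': [0, 0], 'dilation': [1, 1]})
#   # with no padding and unit stride/dilation: y.shape == (1, 6, 3, 3)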
def create_test_cudnn_class(parent):
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestCUDNNCase(parent):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float32 if core.is_compiled_with_rocm(
) else np.float64
cls_name = "{0}_{1}".format(parent.__name__, "CUDNN")
TestCUDNNCase.__name__ = cls_name
globals()[cls_name] = TestCUDNNCase
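# Note on the create_test_* helpers in this file: each one subclasses the given
# parent test case, overrides a few init_* hooks (kernel type, padding, layout,
# dtype) and registers the new class in globals() so that unittest discovery
# picks it up under a derived name such as TestConv2DOp_CUDNN.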
def create_test_cudnn_fp16_class(parent, grad_check=True):
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestConv2DCUDNNFp16(parent):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float16
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(place, atol=2e-2)
def test_check_grad_no_filter(self):
place = core.CUDAPlace(0)
if core.is_float16_supported(place) and grad_check:
self.check_grad_with_place(
place, ['Input'], 'Output', no_grad_set=set(['Filter']))
def test_check_grad_no_input(self):
place = core.CUDAPlace(0)
if core.is_float16_supported(place) and grad_check:
self.check_grad_with_place(
place, ['Filter'], 'Output', no_grad_set=set(['Input']))
cls_name = "{0}_{1}".format(parent.__name__, "CUDNNFp16")
TestConv2DCUDNNFp16.__name__ = cls_name
globals()[cls_name] = TestConv2DCUDNNFp16
def create_test_cudnn_bf16_class(parent):
@unittest.skipIf(
not core.is_compiled_with_cuda() or core.cudnn_version() < 8100,
"core is not compiled with CUDA and cudnn version need larger than 8.1.0"
)
class TestConv2DCUDNNBF16(parent):
def get_numeric_grad(self, place, check_name):
scope = core.Scope()
self._check_grad_helper()
op = create_op(scope, self.op_type, self.inputs, self.outputs,
self.attrs)
return get_numeric_gradient(place, scope, op, self.inputs_fp32,
check_name, ['Output'])
def init_kernel_type(self):
self.use_cudnn = True
self.no_need_check_grad = True
self.dtype = np.uint16
def test_check_output(self):
place = core.CUDAPlace(0)
self.check_output_with_place(place, atol=1e-2)
def test_check_grad_no_filter(self):
place = core.CUDAPlace(0)
numeric_grads = self.get_numeric_grad(place, 'Input')
self.check_grad_with_place(
place, ['Input'],
'Output',
no_grad_set=set(['Filter']),
user_defined_grads=[numeric_grads])
def test_check_grad_no_input(self):
place = core.CUDAPlace(0)
numeric_grads = self.get_numeric_grad(place, 'Filter')
self.check_grad_with_place(
place, ['Filter'],
'Output',
no_grad_set=set(['Input']),
user_defined_grads=[numeric_grads])
cls_name = "{0}_{1}".format(parent.__name__, "CUDNNBF16")
TestConv2DCUDNNBF16.__name__ = cls_name
globals()[cls_name] = TestConv2DCUDNNBF16
def create_test_channel_last_class(parent):
class TestChannelLastCase(parent):
def init_data_format(self):
self.data_format = "NHWC"
def init_test_case_2(self):
N, C, H, W = self.input_size
self.input_size = [N, H, W, C]
cls_name = "{0}_{1}".format(parent.__name__, "ChannelLast")
TestChannelLastCase.__name__ = cls_name
globals()[cls_name] = TestChannelLastCase
def create_test_cudnn_channel_last_class(parent):
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestCudnnChannelLastCase(parent):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float32 if core.is_compiled_with_rocm(
) else np.float64
def init_data_format(self):
self.data_format = "NHWC"
def init_test_case_2(self):
N, C, H, W = self.input_size
self.input_size = [N, H, W, C]
cls_name = "{0}_{1}".format(parent.__name__, "CudnnChannelLast")
TestCudnnChannelLastCase.__name__ = cls_name
globals()[cls_name] = TestCudnnChannelLastCase
def create_test_cudnn_channel_last_fp16_class(parent, grad_check=True):
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestCudnnChannelLastFp16(parent):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float16
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(place, atol=2e-2)
def test_check_grad_no_filter(self):
place = core.CUDAPlace(0)
if core.is_float16_supported(place) and grad_check:
self.check_grad_with_place(
place, ['Input'], 'Output', no_grad_set=set(['Filter']))
def test_check_grad_no_input(self):
place = core.CUDAPlace(0)
if core.is_float16_supported(place) and grad_check:
self.check_grad_with_place(
place, ['Filter'], 'Output', no_grad_set=set(['Input']))
def init_data_format(self):
self.data_format = "NHWC"
def init_test_case_2(self):
N, C, H, W = self.input_size
self.input_size = [N, H, W, C]
cls_name = "{0}_{1}".format(parent.__name__, "CudnnChannelLastFp16")
TestCudnnChannelLastFp16.__name__ = cls_name
globals()[cls_name] = TestCudnnChannelLastFp16
def create_test_padding_SAME_class(parent):
class TestPaddingSMAECase(parent):
def init_paddings(self):
self.pad = [0, 0]
self.padding_algorithm = "SAME"
cls_name = "{0}_{1}".format(parent.__name__, "PaddingSAMEOp")
TestPaddingSMAECase.__name__ = cls_name
globals()[cls_name] = TestPaddingSMAECase
def create_test_padding_VALID_class(parent):
class TestPaddingVALIDCase(parent):
def init_paddings(self):
self.pad = [1, 1]
self.padding_algorithm = "VALID"
cls_name = "{0}_{1}".format(parent.__name__, "PaddingVALIDOp")
TestPaddingVALIDCase.__name__ = cls_name
globals()[cls_name] = TestPaddingVALIDCase
def create_test_cudnn_padding_SAME_class(parent):
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestCUDNNPaddingSMAECase(parent):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float32 if core.is_compiled_with_rocm(
) else np.float64
def init_paddings(self):
self.pad = [1, 1]
self.padding_algorithm = "SAME"
cls_name = "{0}_{1}".format(parent.__name__, "CudnnPaddingSAMEOp")
TestCUDNNPaddingSMAECase.__name__ = cls_name
globals()[cls_name] = TestCUDNNPaddingSMAECase
def create_test_cudnn_padding_VALID_class(parent):
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestCUDNNPaddingVALIDCase(parent):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float32 if core.is_compiled_with_rocm(
) else np.float64
def init_paddings(self):
self.pad = [1, 1]
self.padding_algorithm = "VALID"
cls_name = "{0}_{1}".format(parent.__name__, "CudnnPaddingVALIDOp")
TestCUDNNPaddingVALIDCase.__name__ = cls_name
globals()[cls_name] = TestCUDNNPaddingVALIDCase
class TestConv2DOp(OpTest):
def setUp(self):
self.op_type = "conv2d"
self.use_cudnn = False
self.exhaustive_search = False
self.use_cuda = False
self.use_mkldnn = False
self.fuse_relu_before_depthwise_conv = False
self.data_format = "AnyLayout"
self.dtype = np.float64
self.init_kernel_type()
self.init_group()
self.init_dilation()
self.init_test_case()
conv2d_param = {
'stride': self.stride,
'pad': self.pad,
'dilation': self.dilations
}
if self.is_bfloat16_op():
input = np.random.random(self.input_size).astype(np.float32)
filter = np.random.uniform(-1, 1,
self.filter_size).astype(np.float32)
else:
input = np.random.random(self.input_size).astype(self.dtype)
filter = np.random.uniform(-1, 1,
self.filter_size).astype(self.dtype)
if not self.has_cuda():
self.fuse_relu_before_depthwise_conv = False
if self.fuse_relu_before_depthwise_conv:
input = input - 0.5
input -= (input < 0) * 0.1
input += (input >= 0) * 0.1
input2 = np.maximum(input, 0.0)
else:
input2 = input
output, _, _, _, _ = conv2d_forward_naive(input2, filter, self.groups,
conv2d_param)
if self.is_bfloat16_op():
output = output.astype(np.float32)
self.inputs = {
'Input': convert_float_to_uint16(input),
'Filter': convert_float_to_uint16(filter)
}
self.inputs_fp32 = {
'Input': OpTest.np_dtype_to_fluid_dtype(input),
'Filter': OpTest.np_dtype_to_fluid_dtype(filter)
}
else:
output = output.astype(self.dtype)
self.inputs = {
'Input': OpTest.np_dtype_to_fluid_dtype(input),
'Filter': OpTest.np_dtype_to_fluid_dtype(filter)
}
self.attrs = {
'strides': self.stride,
'paddings': self.pad,
'groups': self.groups,
'dilations': self.dilations,
'use_cudnn': self.use_cudnn,
'use_mkldnn': self.use_mkldnn,
'data_format': self.data_format,
'fuse_relu_before_depthwise_conv':
self.fuse_relu_before_depthwise_conv,
'exhaustive_search': self.exhaustive_search
}
self.outputs = {'Output': output}
def has_cuda(self):
return core.is_compiled_with_cuda() and (self.use_cudnn or
self.use_cuda)
def test_check_output(self):
place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
# TODO(wangzhongpu): support mkldnn op in dygraph mode
self.check_output_with_place(
place, atol=1e-5, check_dygraph=(self.use_mkldnn == False))
def test_check_grad(self):
if self.dtype == np.float16 or (hasattr(self, "no_need_check_grad") and
self.no_need_check_grad == True):
return
place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
# TODO(wangzhongpu): support mkldnn op in dygraph mode
self.check_grad_with_place(
place, {'Input', 'Filter'},
'Output',
max_relative_error=0.02,
check_dygraph=(self.use_mkldnn == False))
def test_check_grad_no_filter(self):
if self.dtype == np.float16 or (hasattr(self, "no_need_check_grad") and
self.no_need_check_grad == True):
return
place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
# TODO(wangzhongpu): support mkldnn op in dygraph mode
self.check_grad_with_place(
place, ['Input'],
'Output',
max_relative_error=0.02,
no_grad_set=set(['Filter']),
check_dygraph=(self.use_mkldnn == False))
def test_check_grad_no_input(self):
if self.dtype == np.float16 or (hasattr(self, "no_need_check_grad") and
self.no_need_check_grad == True):
return
place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
# TODO(wangzhongpu): support mkldnn op in dygraph mode
self.check_grad_with_place(
place, ['Filter'],
'Output',
no_grad_set=set(['Input']),
check_dygraph=(self.use_mkldnn == False))
def init_test_case(self):
self.pad = [0, 0]
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [6, f_c, 3, 3]
def init_test_case_2(self):
pass
def init_dilation(self):
self.dilations = [1, 1]
def init_group(self):
self.groups = 1
def init_kernel_type(self):
pass
class TestWithPad(TestConv2DOp):
def init_test_case(self):
self.pad = [1, 1]
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [6, f_c, 3, 3]
class TestWithStride(TestConv2DOp):
def init_test_case(self):
self.pad = [1, 1]
self.stride = [2, 2]
self.input_size = [2, 3, 6, 6] # NCHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [6, f_c, 3, 3]
class TestWithGroup(TestConv2DOp):
def init_test_case(self):
self.pad = [0, 0]
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
self.group = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [18, f_c, 3, 3]
class TestWith1x1(TestConv2DOp):
def init_test_case(self):
self.pad = [0, 0]
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [120, f_c, 1, 1]
def init_group(self):
self.groups = 3
class TestWithDepthWise3x3(TestConv2DOp):
def init_test_case(self):
self.pad = [1, 1]
self.stride = [1, 1]
self.input_size = [3, 4, 10, 10] # NCHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [12, f_c, 3, 3]
def init_dilation(self):
self.dilations = [2, 2]
def init_group(self):
self.groups = 4
class TestWithDepthWise5x5(TestConv2DOp):
def init_test_case(self):
self.pad = [0, 0]
self.stride = [1, 1]
self.input_size = [2, 4, 10, 10] # NCHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [8, f_c, 5, 5]
def init_group(self):
self.groups = 4
class TestWithDepthWise7x7(TestConv2DOp):
def init_test_case(self):
self.pad = [1, 1]
self.stride = [2, 2]
self.input_size = [2, 8, 10, 10] # NCHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [16, f_c, 7, 7]
def init_group(self):
self.groups = 8
class TestWithDilation(TestConv2DOp):
def init_test_case(self):
self.pad = [0, 0]
self.stride = [1, 1]
self.input_size = [2, 3, 10, 10] # NCHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [12, f_c, 3, 3]
def init_dilation(self):
self.dilations = [2, 2]
def init_group(self):
self.groups = 3
class TestWithInput1x1Filter1x1(TestConv2DOp):
def init_test_case(self):
self.pad = [0, 0]
self.stride = [1, 1]
self.input_size = [100, 3, 1, 1] # NCHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [120, f_c, 1, 1]
def init_group(self):
self.groups = 3
#----------------Conv2DCUDNN----------------
create_test_cudnn_class(TestConv2DOp)
create_test_cudnn_class(TestWithPad)
create_test_cudnn_class(TestWithStride)
create_test_cudnn_class(TestWithGroup)
create_test_cudnn_class(TestWith1x1)
create_test_cudnn_class(TestWithInput1x1Filter1x1)
#----------------Conv2DCUDNN fp16----------------
create_test_cudnn_fp16_class(TestConv2DOp, grad_check=False)
create_test_cudnn_fp16_class(TestWithPad, grad_check=False)
create_test_cudnn_fp16_class(TestWithStride, grad_check=False)
create_test_cudnn_fp16_class(TestWithGroup, grad_check=False)
create_test_cudnn_fp16_class(TestWith1x1, grad_check=False)
create_test_cudnn_fp16_class(TestWithInput1x1Filter1x1, grad_check=False)
#----------------Conv2DCUDNN bf16----------------
create_test_cudnn_bf16_class(TestConv2DOp)
create_test_cudnn_bf16_class(TestWithPad)
create_test_cudnn_bf16_class(TestWithStride)
create_test_cudnn_bf16_class(TestWithGroup)
create_test_cudnn_bf16_class(TestWith1x1)
create_test_cudnn_bf16_class(TestWithInput1x1Filter1x1)
class TestCUDNNExhaustiveSearch(TestConv2DOp):
def init_kernel_type(self):
self.use_cudnn = True
self.exhaustive_search = True
self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64
class TestConv2DOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
def test_Variable():
# the input of conv2d must be Variable.
x1 = fluid.create_lod_tensor(
np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace())
fluid.layers.conv2d(x1, 1, 1)
self.assertRaises(TypeError, test_Variable)
def test_dtype():
# the input dtype of conv2d must be float16 or float32 or float64
# float16 only can be set on GPU place
x2 = fluid.layers.data(
name='x2', shape=[3, 4, 5, 6], dtype="int32")
fluid.layers.conv2d(x2, 1, 1)
self.assertRaises(TypeError, test_dtype)
# Please Don't remove the following code.
# Currently, CI use cudnn V5.0 which not support dilation conv.
# class TestCUDNNWithDilation(TestWithDilation):
# def init_op_type(self):
# self.op_type = "conv_cudnn"
# ---- test asymmetric padding ----
class TestConv2DOp_v2(OpTest):
def setUp(self):
self.op_type = "conv2d"
self.use_cudnn = False
self.exhaustive_search = False
self.use_cuda = False
self.use_mkldnn = False
self.fuse_relu_before_depthwise_conv = False
self.dtype = np.float64
self.init_kernel_type()
self.init_group()
self.init_dilation()
self.init_data_format()
self.init_test_case()
self.init_paddings()
self.init_test_case_2()
conv2d_param = {
'stride': self.stride,
'pad': self.pad,
'dilation': self.dilations
}
input = np.random.random(self.input_size).astype(self.dtype)
if not self.has_cuda():
self.fuse_relu_before_depthwise_conv = False
if self.fuse_relu_before_depthwise_conv:
input = input - 0.5
input -= (input < 0) * 0.1
input += (input >= 0) * 0.1
input2 = np.maximum(input, 0.0)
else:
input2 = input
filter = np.random.uniform(-1, 1, self.filter_size).astype(self.dtype)
output, _, _, _, _ = conv2d_forward_naive(
input2, filter, self.groups, conv2d_param, self.padding_algorithm,
self.data_format)
output = output.astype(self.dtype)
self.inputs = {
'Input': OpTest.np_dtype_to_fluid_dtype(input),
'Filter': OpTest.np_dtype_to_fluid_dtype(filter)
}
self.attrs = {
'strides': self.stride,
'paddings': self.pad,
'padding_algorithm': self.padding_algorithm,
'groups': self.groups,
'dilations': self.dilations,
'use_cudnn': self.use_cudnn,
'use_mkldnn': self.use_mkldnn,
'data_format': self.data_format,
'fuse_relu_before_depthwise_conv':
self.fuse_relu_before_depthwise_conv,
'exhaustive_search': self.exhaustive_search
}
self.outputs = {'Output': output}
def has_cuda(self):
return core.is_compiled_with_cuda() and (self.use_cudnn or
self.use_cuda)
def test_check_output(self):
# TODO(wangzhongpu): support mkldnn op in dygraph mode
place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
self.check_output_with_place(
place, atol=1e-5, check_dygraph=(self.use_mkldnn == False))
def test_check_grad(self):
# TODO(wangzhongpu): support mkldnn op in dygraph mode
if self.dtype == np.float16:
return
place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
self.check_grad_with_place(
place, {'Input', 'Filter'},
'Output',
max_relative_error=0.02,
check_dygraph=(self.use_mkldnn == False))
def test_check_grad_no_filter(self):
# TODO(wangzhongpu): support mkldnn op in dygraph mode
if self.dtype == np.float16:
return
place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
self.check_grad_with_place(
place, ['Input'],
'Output',
max_relative_error=0.02,
no_grad_set=set(['Filter']),
check_dygraph=(self.use_mkldnn == False))
def test_check_grad_no_input(self):
# TODO(wangzhongpu): support mkldnn op in dygraph mode
if self.dtype == np.float16:
return
place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
self.check_grad_with_place(
place, ['Filter'],
'Output',
no_grad_set=set(['Input']),
check_dygraph=(self.use_mkldnn == False))
def init_test_case(self):
self.pad = [0, 0]
self.stride = [1, 2]
self.input_size = [2, 3, 5, 5] # NCHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [6, f_c, 4, 3]
def init_dilation(self):
self.dilations = [1, 1]
def init_group(self):
self.groups = 1
def init_kernel_type(self):
pass
def init_paddings(self):
self.pad = [0, 0]
self.padding_algorithm = "EXPLICIT"
def init_data_format(self):
self.data_format = "NCHW"
def init_test_case_2(self):
pass
class TestConv2DOp_AsyPadding(TestConv2DOp_v2):
def init_paddings(self):
self.pad = [0, 0, 1, 2]
self.padding_algorithm = "EXPLICIT"
class TestWithPad_AsyPadding(TestConv2DOp_v2):
def init_test_case(self):
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [6, f_c, 3, 3]
def init_paddings(self):
self.pad = [2, 1, 3, 2]
self.padding_algorithm = "EXPLICIT"
class TestWithStride_AsyPadding(TestConv2DOp_v2):
def init_test_case(self):
self.stride = [2, 2]
self.input_size = [2, 3, 6, 6] # NCHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [6, f_c, 3, 3]
def init_paddings(self):
self.pad = [2, 1, 3, 2]
self.padding_algorithm = "EXPLICIT"
class TestWithGroup_AsyPadding(TestConv2DOp_v2):
def init_test_case(self):
self.pad = [0, 0]
self.stride = [1, 2]
self.input_size = [2, 3, 5, 5] # NCHW
self.group = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [24, f_c, 4, 3]
class TestWith1x1_AsyPadding(TestConv2DOp_v2):
def init_test_case(self):
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [120, f_c, 1, 1]
def init_group(self):
self.groups = 3
def init_paddings(self):
self.pad = [2, 2, 4, 0]
self.padding_algorithm = "EXPLICIT"
class TestWithDepthWise3x3_AsyPadding(TestConv2DOp_v2):
def init_test_case(self):
self.stride = [1, 1]
self.input_size = [3, 4, 10, 10] # NCHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [16, f_c, 3, 3]
def init_dilation(self):
self.dilations = [2, 2]
def init_group(self):
self.groups = 4
def init_paddings(self):
self.pad = [1, 3, 2, 1]
self.padding_algorithm = "EXPLICIT"
class TestWithDepthWise5x5_AsyPadding(TestConv2DOp_v2):
def init_test_case(self):
self.stride = [1, 1]
self.input_size = [2, 4, 10, 10] # NCHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [8, f_c, 5, 5]
def init_group(self):
self.groups = 4
def init_paddings(self):
self.pad = [0, 1, 1, 0]
self.padding_algorithm = "EXPLICIT"
class TestWithDepthWise7x7_AsyPadding(TestConv2DOp_v2):
def init_test_case(self):
self.stride = [2, 2]
self.input_size = [2, 8, 10, 10] # NCHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [16, f_c, 7, 7]
def init_group(self):
self.groups = 8
def init_paddings(self):
self.pad = [1, 3, 4, 1]
self.padding_algorithm = "EXPLICIT"
class TestWithDilation_AsyPadding(TestConv2DOp_v2):
def init_test_case(self):
self.stride = [1, 1]
self.input_size = [2, 3, 10, 10] # NCHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [24, f_c, 3, 3]
def init_dilation(self):
self.dilations = [2, 2]
def init_group(self):
self.groups = 3
def init_paddings(self):
self.pad = [0, 1, 3, 0]
self.padding_algorithm = "EXPLICIT"
class TestWithInput1x1Filter1x1_AsyPadding(TestConv2DOp_v2):
def init_test_case(self):
self.stride = [1, 1]
self.input_size = [40, 3, 1, 1] # NCHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [120, f_c, 1, 1]
def init_group(self):
self.groups = 3
def init_paddings(self):
self.pad = [0, 3, 4, 0]
self.padding_algorithm = "EXPLICIT"
create_test_cudnn_class(TestConv2DOp_AsyPadding)
create_test_cudnn_class(TestWithPad_AsyPadding)
create_test_cudnn_class(TestWithStride_AsyPadding)
create_test_cudnn_class(TestWithGroup_AsyPadding)
create_test_cudnn_class(TestWith1x1_AsyPadding)
create_test_cudnn_class(TestWithInput1x1Filter1x1_AsyPadding)
#---------- test SAME VALID -----------
create_test_padding_SAME_class(TestConv2DOp_AsyPadding)
create_test_padding_SAME_class(TestWithPad_AsyPadding)
create_test_padding_SAME_class(TestWithStride_AsyPadding)
create_test_padding_SAME_class(TestWithGroup_AsyPadding)
create_test_padding_SAME_class(TestWithInput1x1Filter1x1_AsyPadding)
create_test_padding_VALID_class(TestConv2DOp_AsyPadding)
create_test_padding_VALID_class(TestWithPad_AsyPadding)
create_test_padding_VALID_class(TestWithStride_AsyPadding)
create_test_padding_VALID_class(TestWithGroup_AsyPadding)
create_test_padding_VALID_class(TestWithInput1x1Filter1x1_AsyPadding)
create_test_cudnn_padding_SAME_class(TestConv2DOp_AsyPadding)
create_test_cudnn_padding_SAME_class(TestWithPad_AsyPadding)
create_test_cudnn_padding_SAME_class(TestWithStride_AsyPadding)
create_test_cudnn_padding_SAME_class(TestWithGroup_AsyPadding)
create_test_cudnn_padding_SAME_class(TestWithInput1x1Filter1x1_AsyPadding)
create_test_cudnn_padding_VALID_class(TestConv2DOp_AsyPadding)
create_test_cudnn_padding_VALID_class(TestWithPad_AsyPadding)
create_test_cudnn_padding_VALID_class(TestWithStride_AsyPadding)
create_test_cudnn_padding_VALID_class(TestWithGroup_AsyPadding)
create_test_cudnn_padding_VALID_class(TestWithInput1x1Filter1x1_AsyPadding)
# ------------ test channel last ---------
create_test_channel_last_class(TestConv2DOp_AsyPadding)
create_test_channel_last_class(TestWithPad_AsyPadding)
create_test_channel_last_class(TestWithGroup_AsyPadding)
create_test_channel_last_class(TestWith1x1_AsyPadding)
create_test_channel_last_class(TestWithInput1x1Filter1x1_AsyPadding)
create_test_cudnn_channel_last_class(TestConv2DOp_AsyPadding)
create_test_cudnn_channel_last_class(TestWithPad_AsyPadding)
create_test_cudnn_channel_last_class(TestWithStride_AsyPadding)
create_test_cudnn_channel_last_class(TestWithGroup_AsyPadding)
create_test_cudnn_channel_last_class(TestWithDilation_AsyPadding)
create_test_cudnn_channel_last_fp16_class(
TestConv2DOp_AsyPadding, grad_check=False)
create_test_cudnn_channel_last_fp16_class(
TestWithPad_AsyPadding, grad_check=False)
create_test_cudnn_channel_last_fp16_class(
TestWithStride_AsyPadding, grad_check=False)
create_test_cudnn_channel_last_fp16_class(
TestWithGroup_AsyPadding, grad_check=False)
create_test_cudnn_channel_last_fp16_class(
TestWithDilation_AsyPadding, grad_check=False)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -7,622,533,692,147,087,000 | 34.27318 | 81 | 0.578027 | false |
mizdebsk/javapackages | java-utils/maven_depmap.py | 1 | 11246 | #
# Copyright (c) 2014, Red Hat, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
# 3. Neither the name of Red Hat nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Stanislav Ochotnicky <[email protected]>
#
# this script is used by add_maven_depmap rpm macro to generate
# mapping between maven groupId:artifactId and jar file in our local
# filesystem (i.e. %{_javadir})
# rpm macro expects to find this file as %{_javadir}-utils/maven_depmap.py
from optparse import OptionParser
import os
import shutil
import sys
from os.path import basename, dirname
import zipfile
from time import gmtime, strftime
from copy import deepcopy
from javapackages.maven.pom import POM
from javapackages.metadata.artifact import MetadataArtifact
from javapackages.metadata.alias import MetadataAlias
from javapackages.metadata.metadata import Metadata
from javapackages.common.exception import JavaPackagesToolsException
class PackagingTypeMissingFile(JavaPackagesToolsException):
def __init__(self, pom_path):
self.args=("Packaging type is not 'pom' and no artifact path has been provided for POM %s" % pom_path,)
class IncompatibleFilenames(JavaPackagesToolsException):
def __init__(self, pom_path, jar_path):
self.args=("Filenames of POM %s and JAR %s does not match properly. Check that JAR subdirectories matches '.' in pom name." % (pom_path, jar_path),)
class ExtensionsDontMatch(JavaPackagesToolsException):
def __init__(self, coordinates_ext, file_ext):
self.args=("Extensions don't match: '%s' != '%s'" % (coordinates_ext, file_ext),)
class MissingJarFile(JavaPackagesToolsException):
def __init__(self):
self.args=("JAR seems to be missing in standard directories. Make sure you have installed it",)
class UnknownFileExtension(JavaPackagesToolsException):
def __init__(self, jar_path):
self.args=("Unknown file extension: %s" % (jar_path),)
def _print_path_with_dirs(path, base):
print(path)
path = dirname(path)
while path != base and path != '/':
print("%dir " + path)
path = dirname(path)
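# Illustrative example (paths are made up): with base "/usr/share/maven-poms"
# and path "/usr/share/maven-poms/foo/bar.pom", the helper above prints the
# file itself followed by "%dir /usr/share/maven-poms/foo", i.e. it emits %dir
# entries for the intermediate directories so the generated file list owns them.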
def _make_files_versioned(versions, pom_path, jar_path, pom_base, jar_base):
"""Make pom and jar file versioned"""
versions = list(set(versions.split(',')))
vpom_path = pom_path
vjar_path = jar_path
ret_pom_path = pom_path
ret_jar_path = jar_path
# pom
if ':' not in vpom_path:
root, ext = os.path.splitext(vpom_path)
symlink = False
for ver in sorted(versions):
dest = "%s-%s%s" % (root, ver, ext)
if not symlink:
shutil.copy(os.path.realpath(vpom_path), dest)
symlink = True
vpom_path = dest
ret_pom_path = dest
else:
os.symlink(basename(vpom_path), dest)
# output file path for file lists
_print_path_with_dirs(dest, pom_base)
# remove unversioned pom
os.remove(pom_path)
# jar
if vjar_path:
root, ext = os.path.splitext(vjar_path)
symlink = False
for ver in sorted(versions):
dest = "%s-%s%s" % (root, ver, ext)
if not symlink:
shutil.copy(os.path.realpath(vjar_path), dest)
symlink = True
vjar_path = dest
ret_jar_path = dest
else:
os.symlink(basename(vjar_path), dest)
# output file path for file lists
_print_path_with_dirs(dest, jar_base)
# remove unversioned jar
os.remove(jar_path)
# return paths to versioned, but regular files (not symlinks)
return ret_pom_path, ret_jar_path
# Add a file to a ZIP archive (or JAR, WAR, ...) unless the file
# already exists in the archive. Provided by Tomas Radej.
def append_if_missing(archive_name, file_name, file_contents):
archive = zipfile.ZipFile(archive_name, 'a')
try:
if file_name not in archive.namelist():
archive.writestr(file_name, file_contents)
finally:
archive.close()
# Inject pom.properties if JAR doesn't have one. This is necessary to
# identify the origin of JAR files that are present in the repository.
def inject_pom_properties(jar_path, artifact):
if not zipfile.is_zipfile(jar_path):
return
props_path = "META-INF/maven/{a.groupId}/{a.artifactId}/pom.properties".format(a=artifact)
properties = """#Generated by Java Packages Tools
version={a.version}
groupId={a.groupId}
artifactId={a.artifactId}
""".format(a=artifact)
if artifact.extension:
properties = properties + \
"extension={ext}\n".format(ext=artifact.extension)
if artifact.classifier:
properties = properties + \
"classifier={clas}\n".format(clas=artifact.classifier)
append_if_missing(jar_path, props_path, properties)
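# For instance, an artifact with groupId "org.example" and artifactId "demo"
# (hypothetical coordinates) gets "META-INF/maven/org.example/demo/pom.properties"
# added to the JAR, carrying at least version, groupId and artifactId, unless the
# archive already contains that entry.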
def add_compat_versions(artifact, versions):
if not versions:
return artifact
artifact.compatVersions = versions.split(',')
return artifact
def add_aliases(artifact, additions):
if not additions:
return artifact
aliases = additions.split(',')
result = set()
for a in aliases:
alias = MetadataAlias.from_mvn_str(a)
alias.extension = artifact.extension
result.add(alias)
artifact.aliases = result
return artifact
def write_metadata(metadata_file, artifacts):
if os.path.exists(metadata_file):
metadata = Metadata.create_from_file(metadata_file)
else:
metadata = Metadata()
# pylint:disable=E1103
metadata.artifacts += deepcopy(artifacts)
metadata.write_to_file(metadata_file)
def _main():
usage="usage: %prog [options] metadata_path pom_path|<MVN spec> [jar_path]"
parser = OptionParser(usage=usage)
parser.add_option("-a","--append",type="str",
help="Additional depmaps to add (gid:aid) [default: %default]")
parser.add_option('-r', '--versions', type="str",
help='Additional versions to add for each depmap')
parser.add_option('-n', '--namespace', type="str",
help='Namespace to use for generated fragments', default="")
parser.add_option('--pom-base', type="str",
help='Base path under which POM files are installed', default="")
parser.add_option('--jar-base', type="str",
help='Base path under which JAR files are installed', default="")
parser.set_defaults(append=None)
(options, args) = parser.parse_args()
append_deps = options.append
add_versions = options.versions
namespace = options.namespace
pom_base = options.pom_base
jar_base = options.jar_base
if len(args) < 2:
parser.error("Incorrect number of arguments")
# These will fail when incorrect number of arguments is given.
metadata_path = args[0].strip()
pom_path = args[1].strip()
jar_path = None
artifact = None
have_pom = False
if len(args) == 3:
jar_path = args[2].strip()
if ':' in pom_path:
pom_str = pom_path.rsplit('/')[-1]
artifact = MetadataArtifact.from_mvn_str(pom_str)
artifact_ext = artifact.extension or "jar"
file_ext = os.path.splitext(jar_path)[1][1:]
if artifact_ext != file_ext:
raise ExtensionsDontMatch(artifact_ext, file_ext)
if artifact.extension == 'jar':
artifact.extension = ''
if not artifact.version:
parser.error("Artifact definition has to include version")
else:
artifact = MetadataArtifact.from_pom(pom_path)
ext = os.path.splitext(jar_path)[1][1:]
if ext != "jar":
artifact.extension = ext
have_pom = True
if artifact:
inject_pom_properties(jar_path, artifact)
else:
# looks like POM only artifact
if ':' not in pom_path:
artifact = MetadataArtifact.from_pom(pom_path)
have_pom = True
if POM(pom_path).packaging != "pom":
raise PackagingTypeMissingFile(pom_path)
else:
sys.exit("JAR file path must be specified when using artifact coordinates")
# output file path for file lists
print(metadata_path)
artifact = add_compat_versions(artifact, add_versions)
if add_versions:
pom_path, jar_path = _make_files_versioned(add_versions, pom_path, jar_path, pom_base, jar_base)
if namespace:
artifact.namespace = namespace
artifact.properties["xmvn.resolver.disableEffectivePom"] = "true"
buildroot = os.environ.get('RPM_BUILD_ROOT')
am = []
if jar_path:
metadata_jar_path = os.path.abspath(jar_path)
artifact.path = metadata_jar_path.replace(buildroot, "") if buildroot else metadata_jar_path
artifact = add_aliases(artifact, append_deps)
if artifact.extension == "jar":
artifact.extension = ""
am.append(artifact.copy())
# output file path for file list (if it's not versioned)
if not add_versions:
_print_path_with_dirs(jar_path, jar_base)
if have_pom:
metadata_pom_path = os.path.abspath(pom_path)
artifact.path = metadata_pom_path.replace(buildroot, "") if buildroot else metadata_pom_path
artifact.extension = "pom"
artifact.aliases = None
artifact = add_aliases(artifact, append_deps)
am.append(artifact.copy())
# output file path for file list (if it's not versioned)
if not add_versions:
_print_path_with_dirs(pom_path, pom_base)
write_metadata(metadata_path, am)
if __name__ == "__main__":
try:
_main()
except JavaPackagesToolsException as e:
sys.exit(e)
| bsd-3-clause | -2,782,331,628,701,944,000 | 34.701587 | 156 | 0.649475 | false |
jonathonwalz/ansible | lib/ansible/modules/cloud/amazon/ec2_group.py | 2 | 23019 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: ec2_group
author: "Andrew de Quincey (@adq)"
version_added: "1.3"
short_description: maintain an ec2 VPC security group.
description:
- maintains ec2 security groups. This module has a dependency on python-boto >= 2.5
options:
name:
description:
- Name of the security group.
- One of and only one of I(name) or I(group_id) is required.
- Required if I(state=present).
required: false
group_id:
description:
- Id of group to delete (works only with absent).
- One of and only one of I(name) or I(group_id) is required.
required: false
version_added: "2.4"
description:
description:
- Description of the security group. Required when C(state) is C(present).
required: false
vpc_id:
description:
- ID of the VPC to create the group in.
required: false
rules:
description:
- List of firewall inbound rules to enforce in this group (see example). If none are supplied,
no inbound rules will be enabled. Rules list may include its own name in `group_name`.
This allows idempotent loopback additions (e.g. allow group to access itself).
           Rule sources list support was added in version 2.4. This makes it possible to define multiple sources per
           source type as well as multiple source types per rule. Prior to 2.4, only an individual source was allowed.
required: false
rules_egress:
description:
- List of firewall outbound rules to enforce in this group (see example). If none are supplied,
a default all-out rule is assumed. If an empty list is supplied, no outbound rules will be enabled.
Rule Egress sources list support was added in version 2.4.
required: false
version_added: "1.6"
state:
version_added: "1.4"
description:
- Create or delete a security group
required: false
default: 'present'
choices: [ "present", "absent" ]
aliases: []
purge_rules:
version_added: "1.8"
description:
- Purge existing rules on security group that are not found in rules
required: false
default: 'true'
aliases: []
purge_rules_egress:
version_added: "1.8"
description:
- Purge existing rules_egress on security group that are not found in rules_egress
required: false
default: 'true'
aliases: []
extends_documentation_fragment:
- aws
- ec2
notes:
- If a rule declares a group_name and that group doesn't exist, it will be
automatically created. In that case, group_desc should be provided as well.
The module will refuse to create a depended-on group without a description.
'''
EXAMPLES = '''
- name: example ec2 group
ec2_group:
name: example
description: an example EC2 group
vpc_id: 12345
region: eu-west-1
aws_secret_key: SECRET
aws_access_key: ACCESS
rules:
- proto: tcp
from_port: 80
to_port: 80
cidr_ip: 0.0.0.0/0
- proto: tcp
from_port: 22
to_port: 22
cidr_ip: 10.0.0.0/8
- proto: tcp
from_port: 443
to_port: 443
group_id: amazon-elb/sg-87654321/amazon-elb-sg
- proto: tcp
from_port: 3306
to_port: 3306
group_id: 123412341234/sg-87654321/exact-name-of-sg
- proto: udp
from_port: 10050
to_port: 10050
cidr_ip: 10.0.0.0/8
- proto: udp
from_port: 10051
to_port: 10051
group_id: sg-12345678
- proto: icmp
from_port: 8 # icmp type, -1 = any type
to_port: -1 # icmp subtype, -1 = any subtype
cidr_ip: 10.0.0.0/8
- proto: all
# the containing group name may be specified here
group_name: example
rules_egress:
- proto: tcp
from_port: 80
to_port: 80
cidr_ip: 0.0.0.0/0
group_name: example-other
# description to use if example-other needs to be created
group_desc: other example EC2 group
- name: example2 ec2 group
ec2_group:
name: example2
description: an example2 EC2 group
vpc_id: 12345
region: eu-west-1
rules:
# 'ports' rule keyword was introduced in version 2.4. It accepts a single port value or a list of values including ranges (from_port-to_port).
- proto: tcp
ports: 22
group_name: example-vpn
- proto: tcp
ports:
- 80
- 443
- 8080-8099
cidr_ip: 0.0.0.0/0
    # Rule sources list support was added in version 2.4. This makes it possible to define multiple sources per source type as well as multiple source types per rule.
- proto: tcp
ports:
- 6379
- 26379
group_name:
- example-vpn
- example-redis
- proto: tcp
ports: 5665
group_name: example-vpn
cidr_ip:
- 172.16.1.0/24
- 172.16.17.0/24
group_id:
- sg-edcd9784
- name: "Delete group by its id"
ec2_group:
group_id: sg-33b4ee5b
state: absent
'''
import json
import re
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import ec2_connect, ec2_argument_spec
try:
import boto.ec2
from boto.ec2.securitygroup import SecurityGroup
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
import traceback
def deduplicate_rules_args(rules):
"""Returns unique rules"""
if rules is None:
return None
return list(dict(zip((json.dumps(r, sort_keys=True) for r in rules), rules)).values())
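# Rules that are textually identical after expansion (same proto, ports and source)
# therefore collapse into a single entry; the relative ordering of the surviving
# rules is not guaranteed.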
def make_rule_key(prefix, rule, group_id, cidr_ip):
"""Creates a unique key for an individual group rule"""
if isinstance(rule, dict):
proto, from_port, to_port = [rule.get(x, None) for x in ('proto', 'from_port', 'to_port')]
# fix for 11177
if proto not in ['icmp', 'tcp', 'udp'] and from_port == -1 and to_port == -1:
from_port = 'none'
to_port = 'none'
else: # isinstance boto.ec2.securitygroup.IPPermissions
proto, from_port, to_port = [getattr(rule, x, None) for x in ('ip_protocol', 'from_port', 'to_port')]
key = "%s-%s-%s-%s-%s-%s" % (prefix, proto, from_port, to_port, group_id, cidr_ip)
return key.lower().replace('-none', '-None')
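# Example: make_rule_key('in', {'proto': 'tcp', 'from_port': 80, 'to_port': 80},
# None, '10.0.0.0/8') yields "in-tcp-80-80-None-10.0.0.0/8".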
def addRulesToLookup(rules, prefix, rules_dict):
for rule in rules:
for grant in rule.grants:
rules_dict[make_rule_key(prefix, rule, grant.group_id, grant.cidr_ip)] = (rule, grant)
def validate_rule(module, rule):
VALID_PARAMS = ('cidr_ip',
'group_id', 'group_name', 'group_desc',
'proto', 'from_port', 'to_port')
if not isinstance(rule, dict):
module.fail_json(msg='Invalid rule parameter type [%s].' % type(rule))
for k in rule:
if k not in VALID_PARAMS:
module.fail_json(msg='Invalid rule parameter \'{}\''.format(k))
if 'group_id' in rule and 'cidr_ip' in rule:
module.fail_json(msg='Specify group_id OR cidr_ip, not both')
elif 'group_name' in rule and 'cidr_ip' in rule:
module.fail_json(msg='Specify group_name OR cidr_ip, not both')
elif 'group_id' in rule and 'group_name' in rule:
module.fail_json(msg='Specify group_id OR group_name, not both')
def get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id):
"""
    Returns a tuple of (group_id, ip, target_group_created) after validating rule params.
    rule: Dict describing a rule.
    name: Name of the security group being managed.
    groups: Dict of all available security groups.
    AWS accepts an ip range or a security group as the target of a rule. This
    function validates the rule specification and returns either a non-None
    group_id or a non-None ip range, plus a flag indicating whether a
    depended-on group had to be created on the fly.
"""
FOREIGN_SECURITY_GROUP_REGEX = '^(\S+)/(sg-\S+)/(\S+)'
group_id = None
group_name = None
ip = None
target_group_created = False
if 'group_id' in rule and 'cidr_ip' in rule:
module.fail_json(msg="Specify group_id OR cidr_ip, not both")
elif 'group_name' in rule and 'cidr_ip' in rule:
module.fail_json(msg="Specify group_name OR cidr_ip, not both")
elif 'group_id' in rule and 'group_name' in rule:
module.fail_json(msg="Specify group_id OR group_name, not both")
elif 'group_id' in rule and re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']):
# this is a foreign Security Group. Since you can't fetch it you must create an instance of it
owner_id, group_id, group_name = re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']).groups()
group_instance = SecurityGroup(owner_id=owner_id, name=group_name, id=group_id)
groups[group_id] = group_instance
groups[group_name] = group_instance
elif 'group_id' in rule:
group_id = rule['group_id']
elif 'group_name' in rule:
group_name = rule['group_name']
if group_name == name:
group_id = group.id
groups[group_id] = group
groups[group_name] = group
elif group_name in groups and (vpc_id is None or groups[group_name].vpc_id == vpc_id):
group_id = groups[group_name].id
else:
if not rule.get('group_desc', '').strip():
module.fail_json(msg="group %s will be automatically created by rule %s and no description was provided" % (group_name, rule))
if not module.check_mode:
auto_group = ec2.create_security_group(group_name, rule['group_desc'], vpc_id=vpc_id)
group_id = auto_group.id
groups[group_id] = auto_group
groups[group_name] = auto_group
target_group_created = True
elif 'cidr_ip' in rule:
ip = rule['cidr_ip']
return group_id, ip, target_group_created
def ports_expand(ports):
# takes a list of ports and returns a list of (port_from, port_to)
ports_expanded = []
for port in ports:
if not isinstance(port, str):
ports_expanded.append((port,) * 2)
elif '-' in port:
ports_expanded.append(tuple(p.strip() for p in port.split('-', 1)))
else:
ports_expanded.append((port.strip(),) * 2)
return ports_expanded
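# Example: ports_expand([22, '8080-8099']) -> [(22, 22), ('8080', '8099')].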
def rule_expand_ports(rule):
# takes a rule dict and returns a list of expanded rule dicts
if 'ports' not in rule:
return [rule]
ports = rule['ports'] if isinstance(rule['ports'], list) else [rule['ports']]
rule_expanded = []
for from_to in ports_expand(ports):
temp_rule = rule.copy()
del temp_rule['ports']
temp_rule['from_port'], temp_rule['to_port'] = from_to
rule_expanded.append(temp_rule)
return rule_expanded
def rules_expand_ports(rules):
# takes a list of rules and expands it based on 'ports'
if not rules:
return rules
return [rule for rule_complex in rules
for rule in rule_expand_ports(rule_complex)]
def rule_expand_source(rule, source_type):
# takes a rule dict and returns a list of expanded rule dicts for specified source_type
sources = rule[source_type] if isinstance(rule[source_type], list) else [rule[source_type]]
source_types_all = ('cidr_ip', 'group_id', 'group_name')
rule_expanded = []
for source in sources:
temp_rule = rule.copy()
for s in source_types_all:
temp_rule.pop(s, None)
temp_rule[source_type] = source
rule_expanded.append(temp_rule)
return rule_expanded
def rule_expand_sources(rule):
    # takes a rule dict and returns a list of expanded rule dicts
source_types = (stype for stype in ('cidr_ip', 'group_id', 'group_name') if stype in rule)
return [r for stype in source_types
for r in rule_expand_source(rule, stype)]
def rules_expand_sources(rules):
# takes a list of rules and expands it based on 'cidr_ip', 'group_id', 'group_name'
if not rules:
return rules
return [rule for rule_complex in rules
for rule in rule_expand_sources(rule_complex)]
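# Example: a single rule carrying cidr_ip: ['10.0.0.0/8', '172.16.0.0/12'] (and no
# other source key) expands into two rules, one per CIDR block; group_id and
# group_name lists are expanded the same way.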
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
name=dict(),
group_id=dict(),
description=dict(),
vpc_id=dict(),
rules=dict(type='list'),
rules_egress=dict(type='list'),
state=dict(default='present', type='str', choices=['present', 'absent']),
purge_rules=dict(default=True, required=False, type='bool'),
purge_rules_egress=dict(default=True, required=False, type='bool'),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=[['name', 'group_id']],
required_if=[['state', 'present', ['name']]],
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
name = module.params['name']
group_id = module.params['group_id']
description = module.params['description']
vpc_id = module.params['vpc_id']
rules = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(module.params['rules'])))
rules_egress = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(module.params['rules_egress'])))
state = module.params.get('state')
purge_rules = module.params['purge_rules']
purge_rules_egress = module.params['purge_rules_egress']
if state == 'present' and not description:
module.fail_json(msg='Must provide description when state is present.')
changed = False
ec2 = ec2_connect(module)
# find the group if present
group = None
groups = {}
try:
security_groups = ec2.get_all_security_groups()
except BotoServerError as e:
module.fail_json(msg="Error in get_all_security_groups: %s" % e.message, exception=traceback.format_exc())
for curGroup in security_groups:
groups[curGroup.id] = curGroup
if curGroup.name in groups:
# Prioritise groups from the current VPC
if vpc_id is None or curGroup.vpc_id == vpc_id:
groups[curGroup.name] = curGroup
else:
groups[curGroup.name] = curGroup
if group_id:
if curGroup.id == group_id:
group = curGroup
else:
if curGroup.name == name and (vpc_id is None or curGroup.vpc_id == vpc_id):
group = curGroup
# Ensure requested group is absent
if state == 'absent':
if group:
# found a match, delete it
try:
if not module.check_mode:
group.delete()
except BotoServerError as e:
module.fail_json(msg="Unable to delete security group '%s' - %s" % (group, e.message), exception=traceback.format_exc())
else:
group = None
changed = True
else:
# no match found, no changes required
pass
# Ensure requested group is present
elif state == 'present':
if group:
# existing group
if group.description != description:
module.fail_json(msg="Group description does not match existing group. ec2_group does not support this case.")
# if the group doesn't exist, create it now
else:
# no match found, create it
if not module.check_mode:
group = ec2.create_security_group(name, description, vpc_id=vpc_id)
# When a group is created, an egress_rule ALLOW ALL
# to 0.0.0.0/0 is added automatically but it's not
# reflected in the object returned by the AWS API
# call. We re-read the group for getting an updated object
# amazon sometimes takes a couple seconds to update the security group so wait till it exists
while len(ec2.get_all_security_groups(filters={'group_id': group.id})) == 0:
time.sleep(0.1)
group = ec2.get_all_security_groups(group_ids=(group.id,))[0]
changed = True
else:
module.fail_json(msg="Unsupported state requested: %s" % state)
# create a lookup for all existing rules on the group
if group:
# Manage ingress rules
groupRules = {}
addRulesToLookup(group.rules, 'in', groupRules)
# Now, go through all provided rules and ensure they are there.
if rules is not None:
for rule in rules:
validate_rule(module, rule)
group_id, ip, target_group_created = get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id)
if target_group_created:
changed = True
if rule['proto'] in ('all', '-1', -1):
rule['proto'] = -1
rule['from_port'] = None
rule['to_port'] = None
# Convert ip to list we can iterate over
if not isinstance(ip, list):
ip = [ip]
# If rule already exists, don't later delete it
for thisip in ip:
ruleId = make_rule_key('in', rule, group_id, thisip)
if ruleId not in groupRules:
grantGroup = None
if group_id:
grantGroup = groups[group_id]
if not module.check_mode:
group.authorize(rule['proto'], rule['from_port'], rule['to_port'], thisip, grantGroup)
changed = True
else:
del groupRules[ruleId]
# Finally, remove anything left in the groupRules -- these will be defunct rules
if purge_rules:
for (rule, grant) in groupRules.values():
grantGroup = None
if grant.group_id:
if grant.owner_id != group.owner_id:
# this is a foreign Security Group. Since you can't fetch it you must create an instance of it
group_instance = SecurityGroup(owner_id=grant.owner_id, name=grant.name, id=grant.group_id)
groups[grant.group_id] = group_instance
groups[grant.name] = group_instance
grantGroup = groups[grant.group_id]
if not module.check_mode:
group.revoke(rule.ip_protocol, rule.from_port, rule.to_port, grant.cidr_ip, grantGroup)
changed = True
# Manage egress rules
groupRules = {}
addRulesToLookup(group.rules_egress, 'out', groupRules)
# Now, go through all provided rules and ensure they are there.
if rules_egress is not None:
for rule in rules_egress:
validate_rule(module, rule)
group_id, ip, target_group_created = get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id)
if target_group_created:
changed = True
if rule['proto'] in ('all', '-1', -1):
rule['proto'] = -1
rule['from_port'] = None
rule['to_port'] = None
# Convert ip to list we can iterate over
if not isinstance(ip, list):
ip = [ip]
# If rule already exists, don't later delete it
for thisip in ip:
ruleId = make_rule_key('out', rule, group_id, thisip)
if ruleId in groupRules:
del groupRules[ruleId]
# Otherwise, add new rule
else:
grantGroup = None
if group_id:
grantGroup = groups[group_id].id
if not module.check_mode:
ec2.authorize_security_group_egress(
group_id=group.id,
ip_protocol=rule['proto'],
from_port=rule['from_port'],
to_port=rule['to_port'],
src_group_id=grantGroup,
cidr_ip=thisip)
changed = True
else:
# when no egress rules are specified,
# we add in a default allow all out rule, which was the
# default behavior before egress rules were added
default_egress_rule = 'out--1-None-None-None-0.0.0.0/0'
if default_egress_rule not in groupRules:
if not module.check_mode:
ec2.authorize_security_group_egress(
group_id=group.id,
ip_protocol=-1,
from_port=None,
to_port=None,
src_group_id=None,
cidr_ip='0.0.0.0/0'
)
changed = True
else:
# make sure the default egress rule is not removed
del groupRules[default_egress_rule]
# Finally, remove anything left in the groupRules -- these will be defunct rules
if purge_rules_egress:
for (rule, grant) in groupRules.values():
grantGroup = None
if grant.group_id:
grantGroup = groups[grant.group_id].id
if not module.check_mode:
ec2.revoke_security_group_egress(
group_id=group.id,
ip_protocol=rule.ip_protocol,
from_port=rule.from_port,
to_port=rule.to_port,
src_group_id=grantGroup,
cidr_ip=grant.cidr_ip)
changed = True
if group:
module.exit_json(changed=changed, group_id=group.id)
else:
module.exit_json(changed=changed, group_id=None)
if __name__ == '__main__':
main()
| gpl-3.0 | -2,607,996,531,438,057,500 | 35.422468 | 157 | 0.58039 | false |
Torrib/gradestats | grades/migrations/0001_initial.py | 1 | 2914 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-03-28 17:10
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Course',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('norwegian_name', models.CharField(max_length=255, verbose_name='Norwegian Name')),
('short_name', models.CharField(max_length=50, verbose_name='Short name')),
('code', models.CharField(max_length=15, verbose_name='Code')),
('faculty_code', models.IntegerField(default=0, verbose_name='Faculty Code')),
('english_name', models.CharField(max_length=255, verbose_name='English name')),
('credit', models.FloatField(default=7.5, verbose_name='Credit')),
('study_level', models.SmallIntegerField()),
('taught_in_spring', models.BooleanField(default=False)),
('taught_in_autumn', models.BooleanField(default=False)),
('taught_from', models.IntegerField()),
('taught_in_english', models.BooleanField(default=False)),
('last_year_taught', models.IntegerField(default=0)),
('content', models.TextField()),
('learning_form', models.TextField()),
('learning_goal', models.TextField()),
],
),
migrations.CreateModel(
name='Grade',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('semester_code', models.CharField(max_length=10, verbose_name='Semester')),
('average_grade', models.FloatField()),
('passed', models.IntegerField(default=0)),
('a', models.SmallIntegerField(default=0)),
('b', models.SmallIntegerField(default=0)),
('c', models.SmallIntegerField(default=0)),
('d', models.SmallIntegerField(default=0)),
('e', models.SmallIntegerField(default=0)),
('f', models.SmallIntegerField(default=0)),
('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='grades.Course')),
],
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tag', models.CharField(max_length=32, verbose_name='Tag text')),
('courses', models.ManyToManyField(to='grades.Course')),
],
),
]
| apache-2.0 | 7,684,794,871,829,306,000 | 46 | 114 | 0.563143 | false |
chslion/raspy | raspytasks/rcsocket/socket.py | 1 | 8753 | # -*- coding: utf-8 -*-
import random
from raspysystem.raspycollection import RasPyCollection
from raspysystem.raspysamplelogger import RasPySampleLogger
from raspysystem.raspyenergymeter import RasPyEnergyMeter
from raspysystem.raspytask import RasPySimpleTask
from rcswitch import SwitchTypeB
class Socket(object):
AUTOMAT_UNKNOWN = "?"
# 2% noise
NOISE_FULLSCALE = 0.02
MODE_USER = 0
MODE_AUTO = 1
def __init__(
self,
location,
name,
prms,
address,
period,
maxlogs,
db_path,
db_prefix
):
self._automat = self.AUTOMAT_UNKNOWN
self._automat_msg = self.AUTOMAT_UNKNOWN
self._mode = self.MODE_USER
self._name = name
self._location = location
self._address = address
self._state_user = False
self._state_auto = False
self._last_state = False
self._state_changed = False
self._prms = prms
self._meter = RasPyEnergyMeter(
period,
db_path,
db_prefix
)
self._log = RasPySampleLogger(maxlogs)
def get_mode(self):
return self._mode
def get_state_user(self):
return self._state_user
def get_state_auto(self):
return self._state_auto
def get_state_changed(self):
return self._state_changed
def get_name(self):
return self._name
def get_location(self):
return self._location
def get_address(self):
return self._address
def get_meter(self):
return self._meter
def switch_auto(self, state, automat, msg):
self._automat = automat
self._automat_msg = msg
self._state_auto = state
def mode_auto(self):
self._mode = self.MODE_AUTO
def mode_user(self, newstate):
self._mode = self.MODE_USER
self._state_user = newstate
def get_state(self):
if self._mode == self.MODE_AUTO:
return self._state_auto
return self._state_user
def update(self, time):
# change event generation
self._state_changed = self.get_state() != self._last_state
self._last_state = self.get_state()
state = self.get_state()
if state:
noise = self._prms * self.NOISE_FULLSCALE
power = self._prms + random.uniform(-noise, +noise)
else:
power = 0
self._meter.update(time, power)
self._log.log(time.jstimestamp(), 1 if state else 0)
def serialize(self):
return dict(
name=self._name,
location=self._location,
state=self.get_state(),
automat=self._automat,
automat_msg=self._automat_msg,
mode=self._mode,
prms=self._prms,
log=self._log.serialize(),
energy=self._meter.serialize()
)
class SocketCollection(RasPyCollection):
def __init__(self, collection):
RasPyCollection.__init__(self, collection)
def find_name(self, name, invert=False):
socks = filter(lambda s: (s.get_name() == name) ^ invert, self._items)
return SocketCollection(socks)
def find_location(self, location, invert=False):
socks = filter(lambda s: (s.get_location() == location) ^ invert, self._items)
return SocketCollection(socks)
def get(self, address):
for socket in self._items:
if socket.get_address() == address:
return socket
return None
def get_index(self, address):
for index in range(len(self._items)):
socket = self._items[index]
if socket.get_address() == address:
return index
return -1
class SocketControllerTask(RasPySimpleTask):
RF_TX_PIN = 4
def __init__(self, parent):
RasPySimpleTask.__init__(self, parent, "socketctrl")
self._rcswitch = SwitchTypeB(self.RF_TX_PIN, True)
self._last_switch_count = 0
self._sockets = list()
# if user wants to switch all
self._force_all = False
def get_rcswitch(self):
return self._rcswitch
def get_sockets(self):
return SocketCollection(self._sockets)
    # Switch a socket over RF only if its new state differs from its previous one,
    # so we don't spam the RF link every minute.
def _switch_all_sockets(self, time, force):
for socket in self._sockets:
if not force and not socket.get_state_changed():
continue
self._rcswitch.switch(
time,
socket.get_address(),
socket.get_state()
)
self._last_switch_count += 1
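    # With force=True every socket's current state is re-transmitted (run_event
    # forces this once an hour and whenever a 'force_all' request is pending);
    # otherwise only sockets whose state changed since the last update are sent,
    # which keeps RF traffic low.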
def _req_force_all(self, args, update):
if not update:
return self.req_statecheck(
"force_all",
self._force_all == True
)
self._force_all = True
return self.REQ_PASS
def _req_mode_user(self, args, update):
address = args["address"]
state = args["state"]
socket = self.get_sockets().get(address)
if not update:
if socket is None:
self.loge("Socket was not found: {}".format(address))
return self.REQ_FAIL
index = self.get_sockets().get_index(address)
return self.req_statecheck(
"socket{}mode".format(index),
(
socket.get_mode() == Socket.MODE_USER and
socket.get_state_user() == state
)
)
socket.mode_user(state)
return self.REQ_PASS
def _req_mode_auto(self, args, update):
address = args["address"]
socket = self.get_sockets().get(address)
if not update:
if socket is None:
self.loge("Socket was not found: {}".format(address))
return self.REQ_FAIL
index = self.get_sockets().get_index(address)
return self.req_statecheck(
"socket{}mode".format(index),
socket.get_mode() == Socket.MODE_AUTO
)
socket.mode_auto()
return self.REQ_PASS
def startup_event(self, db, cfg):
maxlogs = self.kernel().get_updates24h()
# 1) init switch
self._rcswitch.hw_init()
# 2) load from database
# create tables if not exist
db.execute(
"CREATE TABLE IF NOT EXISTS '{}' ({}, {}, {}, {})".format(
"rcsocket_sockets",
"'address' TEXT PRIMARY KEY",
"'location' TEXT",
"'name' TEXT",
"'prms' REAL"
)
)
db.execute("SELECT * FROM rcsocket_sockets")
for r in db.fetchall():
loc = str(r["location"])
name = str(r["name"])
address = str(r["address"])
# check address
if self.get_sockets().get(address) is not None:
                self.loge("Socket address {} is already taken (socket {})".format(address, name))
return False
if not self._rcswitch.is_valid_address(address):
self.loge("Socket address is invalid: {}".format(address))
return False
socket = Socket(
loc,
name,
float(r["prms"]),
address,
self.period(),
maxlogs,
self.kernel().get_databasepath(),
"socket{}".format(len(self._sockets))
)
self._sockets.append(socket)
# 3) register requests
if not self.add_requests([
["force_all", dict()],
["mode_user", dict(address="string",state="bool")],
["mode_auto", dict(address="string")]
]):
return False
return True
def run_event(self):
time = self.time()
# update state, energy and log
for socket in self._sockets:
socket.update(time)
self._last_switch_count = 0
self._force_all = False
if time.new_hour():
# force new state every hour
self._switch_all_sockets(time, True)
else:
# switch sockets if needed
self._switch_all_sockets(time, self._force_all)
return True
def report_event(self):
return dict(
sockets=[so.serialize() for so in self._sockets],
switch=dict(
count=self._rcswitch.get_txcount(),
last_count=self._last_switch_count,
code=self._rcswitch.get_txcode(),
timestamp=self._rcswitch.get_txtimestamp()
)
)
| gpl-3.0 | 6,210,325,488,518,129,000 | 27.235484 | 86 | 0.534902 | false |
haje01/mersoz | mersoz/merge.py | 1 | 1768 | import os
import re
import codecs
from optparse import OptionParser
import ConfigParser
from StringIO import StringIO
def main():
parser = OptionParser("Usage: %prog [options] cfgpath cfgsect "
"catalog-path")
(options, args) = parser.parse_args()
if len(args) < 3:
parser.print_usage()
return
cfg = ConfigParser.RawConfigParser(dict(sep=' ',
merge_charset='utf8',
merge_skip_head=0))
cfgpath = os.path.expanduser(args[0])
cfg.read(cfgpath)
cfgsect = args[1]
catalog = args[2]
path_ptrn = re.compile(cfg.get(cfgsect, 'path_ptrn'))
charset = cfg.get(cfgsect, 'merge_charset')
sep = cfg.get(cfgsect, 'seperator')
sep = '\t' if sep == '\\t' else sep
line_head = cfg.get(cfgsect, 'merge_line_head')
merge_skip_head = int(cfg.get(cfgsect, 'merge_skip_head'))
with open(catalog, 'r') as cf:
for cline in cf:
afile = cline.rstrip().split('\t')[0]
match = path_ptrn.search(afile)
if match is None:
continue
ginfo = match.groupdict()
lhead = sep.join(line_head.format(**ginfo).split(','))
buf = StringIO()
with codecs.open(afile, 'r', charset, errors='ignore') as f:
for i, line in enumerate(f.readlines()):
if i < merge_skip_head:
continue
line = line.rstrip()
if len(line) > 0:
buf.write(u'{}{}{}\n'.format(lhead, sep, line))
print buf.getvalue().rstrip().encode('utf8')
buf.close()
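# A minimal illustration (hypothetical config values): with path_ptrn capturing
# '(?P<date>\d{8})' from the file path and merge_line_head set to '{date}', every
# emitted line is prefixed with that captured date plus the configured separator.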
if __name__ == "__main__":
main()
| mit | -994,023,040,797,049,200 | 31.740741 | 72 | 0.516968 | false |
hmunfru/fiware-sdc | automatization_scripts/get_software_catalogue.py | 1 | 3580 | '''
Created on 16/04/2013
@author: henar
'''
import httplib
import sys
import os
from xml.dom.minidom import parse, parseString
from xml.dom.minidom import getDOMImplementation
from xml.etree.ElementTree import Element, SubElement, tostring
import md5
import httplib, urllib
import utils
domine = "130.206.80.119"
port = "8080"
resource = "/sdc/rest/catalog/product"
vm_fqn = 'fqn6'
vm_ip = '130.206.80.114'
product_name = 'test'
product_version = '0.1'
#vdc = 'test3'
keystone_ip = "130.206.80.63"
keystone_port = "35357"
vdc = '60b4125450fc4a109f50357894ba2e28'
user = 'henar'
password='vallelado'
project ='henarproject'
token = utils.obtainToken (keystone_ip, keystone_port, user, password, project)
print(token)
headers = {'Content-Type': 'application/xml', 'X-Auth-Token': token , 'Tenant-ID': vdc}
print(headers)
print('Get products in the software catalogue: ')
resource = "/sdc/rest/catalog/product"
data1 = utils.doRequestHttpOperation(domine, port, resource, 'GET',None,headers)
dom = parseString(data1)
try:
product = (dom.getElementsByTagName('product'))[0]
productname = product.firstChild.firstChild.nodeValue
print('First product in the software catalogue: '+productname)
except:
print ("Error in the request to get products")
sys.exit(1)
print('Get Product Details ' + product_name )
data1 = utils.doRequestHttpOperation(domine, port, "/sdc/rest/catalog/product/"+product_name, 'GET',None, headers)
print(" OK")
print('Get Product Releases ' + product_name )
data1 = utils.doRequestHttpOperation(domine, port, "/sdc/rest/catalog/product/"+product_name+"/release", 'GET',None, headers)
print(" OK")
print('Get Product Release Info ' + product_name + " " + product_version )
data1 = utils.doRequestHttpOperation(domine, port, "/sdc/rest/catalog/product/"+product_name+"/release/"+ product_version , 'GET',None, headers)
print(" OK")
print('Get Product Attributes ' + product_name )
data1 = utils.doRequestHttpOperation(domine, port, "/sdc/rest/catalog/product/"+product_name+'/attributes', 'GET',None, headers)
print(" OK")
resource_product_instance = "/sdc/rest/vdc/"+vdc+"/productInstance"
print('Install a product in VM. Product ' + product_name )
productInstanceDto = utils.createProductInstanceDto (vm_ip,vm_fqn, product_name, product_version)
print (tostring(productInstanceDto))
task = utils.doRequestHttpOperation(domine, port, resource_product_instance, 'POST',tostring(productInstanceDto), headers)
print (task)
status = utils.processTask (domine, port, task)
print (" " + status)
resource_get_info_product_instance = "/sdc/rest/vdc/"+vdc+"/productInstance/" + vm_fqn+'_'+product_name+'_'+product_version
print('Get Product Instance Info. Product ' + product_name )
data = utils.doRequestHttpOperation(domine, port, resource_get_info_product_instance, 'GET',None)
print(data)
status = utils.processProductInstanceStatus(data)
#if status != 'INSTALLED':
# print("Status not correct" + status)
resource_delete_product_instance = "/sdc/rest/vdc/"+vdc+"/productInstance/" + vm_fqn+'_'+product_name+'_'+product_version
print('Get Delete Product Instance ' + product_name )
task = utils.doRequestHttpOperation(domine, port, resource_delete_product_instance, 'DELETE',None)
status = utils.processTask (domine, port, task)
print(" OK")
data = utils.doRequestHttpOperation(domine, port, resource_delete_product_instance, 'GET',None)
statusProduct = utils.processProductInstanceStatus(data)
#if status != 'UNINSTALLED':
# print("Status not correct" + statusProduct)
| apache-2.0 | 7,094,310,390,625,184,000 | 30.130435 | 144 | 0.731006 | false |
babyliynfg/cross | tools/project-creator/Python2.6.6/Lib/test/test_tcl.py | 1 | 5625 | #!/usr/bin/env python
import unittest
import os
from test import test_support
from Tkinter import Tcl
from _tkinter import TclError
class TclTest(unittest.TestCase):
def setUp(self):
self.interp = Tcl()
def testEval(self):
tcl = self.interp
tcl.eval('set a 1')
self.assertEqual(tcl.eval('set a'),'1')
def testEvalException(self):
tcl = self.interp
self.assertRaises(TclError,tcl.eval,'set a')
def testEvalException2(self):
tcl = self.interp
self.assertRaises(TclError,tcl.eval,'this is wrong')
def testCall(self):
tcl = self.interp
tcl.call('set','a','1')
self.assertEqual(tcl.call('set','a'),'1')
def testCallException(self):
tcl = self.interp
self.assertRaises(TclError,tcl.call,'set','a')
def testCallException2(self):
tcl = self.interp
self.assertRaises(TclError,tcl.call,'this','is','wrong')
def testSetVar(self):
tcl = self.interp
tcl.setvar('a','1')
self.assertEqual(tcl.eval('set a'),'1')
def testSetVarArray(self):
tcl = self.interp
tcl.setvar('a(1)','1')
self.assertEqual(tcl.eval('set a(1)'),'1')
def testGetVar(self):
tcl = self.interp
tcl.eval('set a 1')
self.assertEqual(tcl.getvar('a'),'1')
def testGetVarArray(self):
tcl = self.interp
tcl.eval('set a(1) 1')
self.assertEqual(tcl.getvar('a(1)'),'1')
def testGetVarException(self):
tcl = self.interp
self.assertRaises(TclError,tcl.getvar,'a')
def testGetVarArrayException(self):
tcl = self.interp
self.assertRaises(TclError,tcl.getvar,'a(1)')
def testUnsetVar(self):
tcl = self.interp
tcl.setvar('a',1)
self.assertEqual(tcl.eval('info exists a'),'1')
tcl.unsetvar('a')
self.assertEqual(tcl.eval('info exists a'),'0')
def testUnsetVarArray(self):
tcl = self.interp
tcl.setvar('a(1)',1)
tcl.setvar('a(2)',2)
self.assertEqual(tcl.eval('info exists a(1)'),'1')
self.assertEqual(tcl.eval('info exists a(2)'),'1')
tcl.unsetvar('a(1)')
self.assertEqual(tcl.eval('info exists a(1)'),'0')
self.assertEqual(tcl.eval('info exists a(2)'),'1')
def testUnsetVarException(self):
tcl = self.interp
self.assertRaises(TclError,tcl.unsetvar,'a')
def testEvalFile(self):
tcl = self.interp
filename = "testEvalFile.tcl"
fd = open(filename,'w')
script = """set a 1
set b 2
set c [ expr $a + $b ]
"""
fd.write(script)
fd.close()
tcl.evalfile(filename)
os.remove(filename)
self.assertEqual(tcl.eval('set a'),'1')
self.assertEqual(tcl.eval('set b'),'2')
self.assertEqual(tcl.eval('set c'),'3')
def testEvalFileException(self):
tcl = self.interp
filename = "doesnotexists"
try:
os.remove(filename)
except Exception,e:
pass
self.assertRaises(TclError,tcl.evalfile,filename)
def testPackageRequireException(self):
tcl = self.interp
self.assertRaises(TclError,tcl.eval,'package require DNE')
def testLoadTk(self):
import os
if 'DISPLAY' not in os.environ:
# skipping test of clean upgradeability
return
tcl = Tcl()
self.assertRaises(TclError,tcl.winfo_geometry)
tcl.loadtk()
self.assertEqual('1x1+0+0', tcl.winfo_geometry())
tcl.destroy()
def testLoadTkFailure(self):
import os
old_display = None
import sys
if sys.platform.startswith(('win', 'darwin', 'cygwin')):
return # no failure possible on windows?
if 'DISPLAY' in os.environ:
old_display = os.environ['DISPLAY']
del os.environ['DISPLAY']
# on some platforms, deleting environment variables
# doesn't actually carry through to the process level
# because they don't support unsetenv
# If that's the case, abort.
display = os.popen('echo $DISPLAY').read().strip()
if display:
return
try:
tcl = Tcl()
self.assertRaises(TclError, tcl.winfo_geometry)
self.assertRaises(TclError, tcl.loadtk)
finally:
if old_display is not None:
os.environ['DISPLAY'] = old_display
def testLoadWithUNC(self):
import sys
if sys.platform != 'win32':
return
# Build a UNC path from the regular path.
# Something like
# \\%COMPUTERNAME%\c$\python27\python.exe
fullname = os.path.abspath(sys.executable)
if fullname[1] != ':':
return
unc_name = r'\\%s\%s$\%s' % (os.environ['COMPUTERNAME'],
fullname[0],
fullname[3:])
with test_support.EnvironmentVarGuard() as env:
env.unset("TCL_LIBRARY")
f = os.popen('%s -c "import Tkinter; print Tkinter"' % (unc_name,))
self.assert_('Tkinter.py' in f.read())
# exit code must be zero
self.assertEqual(f.close(), None)
def test_main():
test_support.run_unittest(TclTest)
if __name__ == "__main__":
test_main()
| mit | 7,405,655,839,311,175,000 | 28.906593 | 79 | 0.546667 | false |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/misc/tight_bbox_test.py | 1 | 1357 | """
===============
Tight Bbox Test
===============
"""
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
# nodebox section
if __name__ == '__builtin__':
    # we're in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
ax = plt.axes([0.1, 0.3, 0.5, 0.5])
ax.pcolormesh(np.array([[1, 2], [3, 4]]))
plt.yticks([0.5, 1.5], ["long long tick label",
"tick label"])
plt.ylabel("My y-label")
plt.title("Check saved figures for their bboxes")
for ext in ["png", "pdf", "svg", "svgz", "eps"]:
print("saving tight_bbox_test.%s" % (ext,))
plt.savefig("tight_bbox_test.%s" % (ext,), bbox_inches="tight")
pltshow(plt)
| mit | -3,163,077,550,626,046,000 | 24.12963 | 82 | 0.560796 | false |
harshavardhana/minio-py | setup.py | 1 | 2454 | # Minio Python Library for Amazon S3 Compatible Cloud Storage, (C) 2015 Minio, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys
from codecs import open
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
version = ''
with open('minio/__init__.py', 'r') as fd:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
with open('README.rst', 'r', 'utf-8') as f:
readme = f.read()
packages = [
'minio',
]
requires = [
'urllib3',
'pytz',
'certifi<=2015.4.28',
]
tests_requires = [
'nose',
'mock',
'fake-factory',
]
setup(
name='minio',
description='Minio Python Library for Amazon S3 Compatible Cloud Storage for Python',
author='Minio, Inc.',
url='https://github.com/minio/minio-py',
download_url='https://github.com/minio/minio-py',
author_email='[email protected]',
version=version,
package_dir={'minio': 'minio'},
packages=packages,
install_requires=requires,
tests_require=tests_requires,
setup_requires=['nose>=1.0'],
license='Apache License 2.0',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development :: Libraries :: Python Modules',
],
long_description=readme,
package_data={'': ['LICENSE', 'README.rst']},
include_package_data=True,
)
| apache-2.0 | 3,117,691,991,688,107,500 | 28.214286 | 89 | 0.639364 | false |
nicolacimmino/LoP-RAN | LoPAccessPoint/MacroIP.py | 1 | 2793 | # MacroIP is part of MacroIP Core. Provides Access to IP services through simple
# textual macros.
# Copyright (C) 2014 Nicola Cimmino
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
# This service expects a LoPNode connected on serial port ttyUSB0 and set
# to access point mode already (ATAP1). In due time autodiscovery and
# configuration will be built.
import MacroIP_DHCP
import MacroIP_UDP
import MacroIP_ICMP
import MacroIP_STUN
import MacroIP_HTTP
import MacroIP_MSGP2P
outputMacrosQueue = []
# Processes one macro
def processMacroIPMacro(clientid, macro):
if macro.startswith("dhcp."):
MacroIP_DHCP.processMacro(clientid, macro)
if macro.startswith("udp."):
MacroIP_UDP.processMacro(clientid, macro)
if macro.startswith("stun."):
MacroIP_STUN.processMacro(clientid, macro)
if macro.startswith("icmp."):
MacroIP_ICMP.processMacro(clientid, macro)
if macro.startswith("http."):
MacroIP_HTTP.processMacro(clientid, macro)
if macro.startswith("msgp2p."):
MacroIP_MSGP2P.processMacro(clientid, macro)
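# Routing is purely prefix based: a macro such as "dhcp.request" or "udp.send"
# (hypothetical macro names) is handed to the matching handler above, and a macro
# with an unknown prefix is silently ignored.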
# Fetches a macro to be sent to a client of the
# host application.
def getOutputMacroIPMacro():
(clientid, macro) = MacroIP_DHCP.getOutputMacroIPMacro()
if(clientid != None):
outputMacrosQueue.append((clientid, macro))
(clientid, macro) = MacroIP_UDP.getOutputMacroIPMacro()
if(clientid != None):
outputMacrosQueue.append((clientid, macro))
(clientid, macro) = MacroIP_ICMP.getOutputMacroIPMacro()
if(clientid != None):
outputMacrosQueue.append((clientid, macro))
(clientid, macro) = MacroIP_STUN.getOutputMacroIPMacro()
if(clientid != None):
outputMacrosQueue.append((clientid, macro))
(clientid, macro) = MacroIP_HTTP.getOutputMacroIPMacro()
if(clientid != None):
outputMacrosQueue.append((clientid, macro))
(clientid, macro) = MacroIP_MSGP2P.getOutputMacroIPMacro()
if(clientid != None):
outputMacrosQueue.append((clientid, macro))
if len(outputMacrosQueue) > 0:
return outputMacrosQueue.pop(0)
else:
return (None, None)
def startActivity():
MacroIP_UDP.startActivity()
MacroIP_ICMP.startActivity()
| gpl-3.0 | -7,017,552,991,264,904,000 | 32.662651 | 80 | 0.723595 | false |
MicroPyramid/Django-CRM | teams/api_views.py | 1 | 8866 | from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import PermissionDenied
from django.db.models import Q
from teams import swagger_params
from teams.models import Teams
from teams.tasks import update_team_users, remove_users
from teams.serializer import TeamsSerializer, TeamCreateSerializer
from common.models import User
from common.custom_auth import JSONWebTokenAuthentication
from common.serializer import UserSerializer
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from rest_framework.pagination import LimitOffsetPagination
from drf_yasg.utils import swagger_auto_schema
import json
class TeamsListView(APIView, LimitOffsetPagination):
model = Teams
authentication_classes = (JSONWebTokenAuthentication,)
permission_classes = (IsAuthenticated,)
def get_context_data(self, **kwargs):
params = (
self.request.query_params
if len(self.request.data) == 0
else self.request.data
)
queryset = self.model.objects.all()
request_post = params
if request_post:
if request_post.get("team_name"):
queryset = queryset.filter(
name__icontains=request_post.get("team_name")
)
if request_post.get("created_by"):
queryset = queryset.filter(created_by=request_post.get("created_by"))
if request_post.get("assigned_users"):
queryset = queryset.filter(
users__id__in=json.loads(request_post.get("assigned_users"))
)
context = {}
search = False
if (
params.get("team_name")
or params.get("created_by")
or params.get("assigned_users")
):
search = True
context["search"] = search
results_teams = self.paginate_queryset(
queryset.distinct(), self.request, view=self
)
teams = TeamsSerializer(results_teams, many=True).data
context["per_page"] = 10
context.update(
{
"teams_count": self.count,
"next": self.get_next_link(),
"previous": self.get_previous_link(),
"page_number": int(self.offset / 10) + 1,
}
)
context["teams"] = teams
users = User.objects.filter(
is_active=True,
).order_by("id")
context["users"] = UserSerializer(users, many=True).data
return context
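    # For example (hypothetical request): GET /teams/?team_name=sales&assigned_users=[1,2]
    # filters on a case-insensitive name match and on membership of users 1 and 2;
    # "assigned_users" is expected to be a JSON-encoded list of user ids.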
@swagger_auto_schema(
tags=["Teams"], manual_parameters=swagger_params.teams_list_get_params
)
def get(self, request, *args, **kwargs):
if self.request.user.role != "ADMIN" and not self.request.user.is_superuser:
return Response(
{
"error": True,
"errors": "You don't have permission to perform this action.",
},
status=status.HTTP_403_FORBIDDEN,
)
context = self.get_context_data(**kwargs)
return Response(context)
@swagger_auto_schema(
tags=["Teams"], manual_parameters=swagger_params.teams_create_post_params
)
def post(self, request, *args, **kwargs):
if self.request.user.role != "ADMIN" and not self.request.user.is_superuser:
return Response(
{
"error": True,
"errors": "You don't have permission to perform this action.",
},
status=status.HTTP_403_FORBIDDEN,
)
params = (
self.request.query_params
if len(self.request.data) == 0
else self.request.data
)
serializer = TeamCreateSerializer(data=params, request_obj=request)
data = {}
if serializer.is_valid():
team_obj = serializer.save(created_by=request.user)
if params.get("assign_users"):
                assigned_to_users_ids = json.loads(params.get("assign_users"))
                for user_id in assigned_to_users_ids:
user = User.objects.filter(id=user_id)
if user.exists():
team_obj.users.add(user_id)
else:
team_obj.delete()
data["users"] = "Please enter valid user"
return Response(
{"error": True, "errors": data},
status=status.HTTP_400_BAD_REQUEST,
)
return Response(
{"error": False, "message": "Team Created Successfully"},
status=status.HTTP_200_OK,
)
return Response(
{"error": True, "errors": serializer.errors},
status=status.HTTP_400_BAD_REQUEST,
)
class TeamsDetailView(APIView):
model = Teams
authentication_classes = (JSONWebTokenAuthentication,)
permission_classes = (IsAuthenticated,)
def get_object(self, pk):
return self.model.objects.get(pk=pk)
@swagger_auto_schema(
tags=["Teams"],
)
def get(self, request, pk, **kwargs):
if self.request.user.role != "ADMIN" and not self.request.user.is_superuser:
return Response(
{
"error": True,
"errors": "You don't have permission to perform this action.",
},
status=status.HTTP_403_FORBIDDEN,
)
self.team_obj = self.get_object(pk)
context = {}
context["team"] = TeamsSerializer(self.team_obj).data
context["users"] = UserSerializer(
User.objects.filter(is_active=True).order_by("email"),
many=True,
).data
return Response(context)
@swagger_auto_schema(
tags=["Teams"], manual_parameters=swagger_params.teams_create_post_params
)
def put(self, request, pk, *args, **kwargs):
if self.request.user.role != "ADMIN" and not self.request.user.is_superuser:
return Response(
{
"error": True,
"errors": "You don't have permission to perform this action.",
},
status=status.HTTP_403_FORBIDDEN,
)
params = (
self.request.query_params
if len(self.request.data) == 0
else self.request.data
)
self.team = self.get_object(pk)
actual_users = self.team.get_users()
removed_users = []
serializer = TeamCreateSerializer(
data=params, instance=self.team, request_obj=request
)
data = {}
if serializer.is_valid():
team_obj = serializer.save()
team_obj.users.clear()
if params.get("assign_users"):
                assigned_to_users_ids = json.loads(params.get("assign_users"))
                for user_id in assigned_to_users_ids:
user = User.objects.filter(id=user_id)
if user.exists():
team_obj.users.add(user_id)
else:
data["users"] = "Please enter valid user"
return Response(
{"error": True, "errors": data},
status=status.HTTP_400_BAD_REQUEST,
)
update_team_users.delay(pk)
latest_users = team_obj.get_users()
for user in actual_users:
if user in latest_users:
pass
else:
removed_users.append(user)
remove_users.delay(removed_users, pk)
return Response(
{"error": False, "message": "Team Updated Successfully"},
status=status.HTTP_200_OK,
)
return Response(
{"error": True, "errors": serializer.errors},
status=status.HTTP_400_BAD_REQUEST,
)
@swagger_auto_schema(
tags=["Teams"],
)
def delete(self, request, pk, **kwargs):
if self.request.user.role != "ADMIN" and not self.request.user.is_superuser:
return Response(
{
"error": True,
"errors": "You don't have permission to perform this action.",
},
status=status.HTTP_403_FORBIDDEN,
)
self.team_obj = self.get_object(pk)
self.team_obj.delete()
return Response(
{"error": False, "message": "Team Deleted Successfully"},
status=status.HTTP_200_OK,
)
| mit | -466,532,188,539,851,400 | 35.636364 | 85 | 0.538236 | false |
CaliOpen/CaliOpen | src/backend/interfaces/REST/py.server/caliopen_api/message/message.py | 1 | 4304 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import logging
from cornice.resource import resource, view
from pyramid.response import Response
from caliopen_main.message.objects.message import Message as ObjectMessage
from caliopen_main.message.core import RawMessage
from caliopen_storage.exception import NotFound
from ..base import Api
from ..base.exception import (ResourceNotFound,
MergePatchError)
from pyramid.httpexceptions import HTTPServerError, HTTPMovedPermanently
from caliopen_pi.features import marshal_features
log = logging.getLogger(__name__)
@resource(collection_path='/messages',
path='/messages/{message_id}')
class Message(Api):
def __init__(self, request):
self.request = request
self.user = request.authenticated_userid
@view(renderer='json', permission='authenticated')
def collection_post(self):
data = self.request.json
if 'privacy_features' in data:
features = marshal_features(data['privacy_features'])
data['privacy_features'] = features
# ^ json payload should have been validated by swagger module
try:
message = ObjectMessage.create_draft(user=self.user, **data)
except Exception as exc:
log.exception(exc)
raise MergePatchError(error=exc)
message_url = self.request.route_path('message',
message_id=str(
message.message_id))
message_url = message_url.replace("/v1/", "/v2/")
self.request.response.location = message_url.encode('utf-8')
return {'location': message_url}
@view(renderer='json', permission='authenticated')
def patch(self):
"""Update a message with payload.
        The method follows the RFC 5789 PATCH and RFC 7396 Merge Patch
        specifications, plus Caliopen's own 'current_state' spec.
        Stored messages are modified according to the fields within the payload,
        i.e. payload fields squash existing db fields; no other modification is done.
        If the message doesn't exist, the response is 404.
        If payload fields do not conform to the message db schema, the response is
        422 (Unprocessable Entity).
        A successful response is 204, without a body.
"""
message_id = self.request.swagger_data["message_id"]
patch = self.request.json
if 'privacy_features' in patch:
features = marshal_features(patch['privacy_features'])
patch['privacy_features'] = features
if 'privacy_features' in patch.get('current_state', {}):
current = patch['current_state']['privacy_features']
features = marshal_features(current)
patch['current_state']['privacy_features'] = features
message = ObjectMessage(user=self.user, message_id=message_id)
try:
message.patch_draft(self.user, patch, db=True, index=True,
with_validation=True)
except Exception as exc:
raise MergePatchError(exc)
return Response(None, 204)
@view(renderer='json', permission='authenticated')
def delete(self):
message_id = self.request.swagger_data["message_id"]
message = ObjectMessage(user=self.user, message_id=message_id)
try:
message.get_db()
message.get_index()
except NotFound:
raise ResourceNotFound
try:
message.delete_db()
message.delete_index()
except Exception as exc:
raise HTTPServerError(exc)
return Response(None, 204)
@resource(path='/raws/{raw_msg_id}')
class Raw(Api):
"""returns a raw message"""
def __init__(self, request):
self.request = request
self.user = request.authenticated_userid
@view(renderer='text_plain', permission='authenticated')
def get(self):
# XXX how to check privacy_index ?
raw_msg_id = self.request.matchdict.get('raw_msg_id')
raw = RawMessage.get_for_user(self.user.user_id, raw_msg_id)
if raw:
return raw.raw_data
raise ResourceNotFound('No such message')
| gpl-3.0 | -7,047,381,982,959,401,000 | 35.474576 | 80 | 0.62895 | false |
cloud-ark/cloudark | server/gcloud_handler.py | 1 | 4809 | import ast
from os.path import expanduser
from stevedore import extension
from common import common_functions
from common import fm_logger
from dbmodule.objects import app as app_db
from dbmodule.objects import environment as env_db
from server.server_plugins.gcloud import gcloud_helper
home_dir = expanduser("~")
APP_AND_ENV_STORE_PATH = ("{home_dir}/.cld/data/deployments/").format(home_dir=home_dir)
fmlogger = fm_logger.Logging()
class GCloudHandler(object):
res_mgr = extension.ExtensionManager(
namespace='server.server_plugins.gcloud.resource',
invoke_on_load=True,
)
coe_mgr = extension.ExtensionManager(
namespace='server.server_plugins.gcloud.coe',
invoke_on_load=True,
)
app_mgr = extension.ExtensionManager(
namespace='server.server_plugins.gcloud.app',
invoke_on_load=True,
)
gcloudhelper = gcloud_helper.GCloudHelper()
def create_resources(self, env_id, resource_list):
fmlogger.debug("GCloudHandler create_resources")
resource_details = ''
ret_status_list = []
for resource_defs in resource_list:
resource_details = resource_defs['resource']
type = resource_details['type']
env_db.Environment().update(env_id, {'status': 'creating_' + type})
for name, ext in GCloudHandler.res_mgr.items():
if name == type:
status = ext.obj.create(env_id, resource_details)
if status: ret_status_list.append(status)
return ret_status_list
def delete_resource(self, env_id, resource):
fmlogger.debug("GCloudHandler delete_resource")
type = resource.type
env_db.Environment().update(env_id, {'status': 'deleting_' + type})
for name, ext in GCloudHandler.res_mgr.items():
if name == type:
ext.obj.delete(resource)
def run_command(self, env_id, env_name, resource, command_string):
fmlogger.debug("GCloudHandler run_command")
type = resource.type
command_type = GCloudHandler.gcloudhelper.resource_type_for_command(command_string)
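        # The command is routed twice below: once through the resource plugins
        # and once through the cluster (COE) plugins. A plugin only runs it when
        # its name matches both the resource/COE type and the command's target
        # (or when the command is simply 'help').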
command_output_all = []
for name, ext in GCloudHandler.res_mgr.items():
if name == type:
if name == command_type or command_string == 'help':
command_output = ext.obj.run_command(env_id, env_name, resource, command_string)
command_output_all.extend(command_output)
coe_type = common_functions.get_coe_type(env_id)
for name, ext in GCloudHandler.coe_mgr.items():
if name == coe_type:
if name == command_type or command_string == 'help':
command_output = ext.obj.run_command(env_id, env_name, resource, command_string)
command_output_all.extend(command_output)
return command_output_all
def create_cluster(self, env_id, env_info):
coe_type = common_functions.get_coe_type(env_id)
for name, ext in GCloudHandler.coe_mgr.items():
if name == coe_type:
status = ext.obj.create_cluster(env_id, env_info)
return status
def delete_cluster(self, env_id, env_info, resource):
coe_type = common_functions.get_coe_type(env_id)
for name, ext in GCloudHandler.coe_mgr.items():
if name == coe_type:
ext.obj.delete_cluster(env_id, env_info, resource)
def create_container(self, cont_name, cont_info):
repo_type = cont_info['dep_target']
for name, ext in GCloudHandler.res_mgr.items():
if name == repo_type:
ext.obj.create(cont_name, cont_info)
def delete_container(self, cont_name, cont_info):
repo_type = cont_info['dep_target']
for name, ext in GCloudHandler.res_mgr.items():
if name == repo_type:
ext.obj.delete(cont_name, cont_info)
# App functions
def deploy_application(self, app_id, app_info):
app_type = common_functions.get_app_type(app_id)
for name, ext in GCloudHandler.app_mgr.items():
if name == app_type:
ext.obj.deploy_application(app_id, app_info)
def delete_application(self, app_id, app_info):
app_type = common_functions.get_app_type(app_id)
for name, ext in GCloudHandler.app_mgr.items():
if name == app_type:
ext.obj.delete_application(app_id, app_info)
def get_logs(self, app_id, app_info):
log_lines = ''
app_type = common_functions.get_app_type(app_id)
for name, ext in GCloudHandler.app_mgr.items():
if name == app_type:
log_lines = ext.obj.get_logs(app_id, app_info)
return log_lines | apache-2.0 | -7,426,348,535,844,958,000 | 36.578125 | 100 | 0.614681 | false |
kiruto/Weixin-Article-Spider | storage/sqlite_storage.py | 1 | 6856 | # -*- coding: utf-8 -*-
import hashlib
import json
import re
import sqlite3
import time
import datetime
import common
from storage import db
version = '1.0'
class SQLiteStorage:
def __init__(self):
self._connect = sqlite3.connect(db)
self._connect.text_factory = str
self._create_table()
def subscribe(self, wxid):
c = self._connect.cursor()
c.execute("INSERT INTO wxid(name) VALUES (?)", [wxid])
self._connect.commit()
c.close()
def unsubscribe(self, wxid):
c = self._connect.cursor()
c.execute("DELETE FROM wxid WHERE name=?", [wxid])
self._connect.commit()
c.close()
def batch_subscribe(self, id_list):
c = self._connect.cursor()
data = []
for i in id_list:
i = i.strip()
if len(i) == 0:
continue
if not common.is_wxid(i):
continue
p = (i, )
data.append(p)
try:
c.executemany("INSERT OR REPLACE INTO wxid(name) VALUES (?)", data)
self._connect.commit()
except Exception as e:
print(e)
common.save_raw_error_log(exception=e)
c.close()
def edit_extra(self, wxid, extra_dict):
"""
        :param wxid: string, the WeChat account id
        :type extra_dict: dict
            name: official account name
            wechatid: official account id
            jieshao: introduction
            renzhen: verification; empty means unverified
            qrcode: QR code
            img: avatar image
            url: URL of the latest articles
"""
if not wxid or not extra_dict:
return
if isinstance(extra_dict, dict):
extra_dict['version'] = version
extra = json.dumps(extra_dict)
c = self._connect.cursor()
c.execute("UPDATE wxid SET extra=? WHERE name=?", [extra, wxid])
self._connect.commit()
c.close()
def get_wxid_list(self):
c = self._connect.cursor()
result = c.execute("SELECT * FROM wxid").fetchall()
c.close()
result_list = list()
for r in result:
result_list.append(WXIDRecord(r))
result_list.reverse()
return result_list
def insert_article(self, article, local_url, author_name=''):
c = self._connect.cursor()
m = hashlib.md5()
m.update(article['title'])
hash_id = m.hexdigest()
date_time = time.localtime(int(article['datetime']))
date_time = time.strftime("%Y-%m-%d", date_time)
extra = json.dumps(article)
data = (hash_id, date_time, article['title'], "", extra, local_url, version, author_name)
c.execute("""INSERT INTO article(hash_id, date_time, title, info, extra, content, version, author)
VALUES (?, ?, ?, ?, ?, ?, ?, ?)""", data)
self._connect.commit()
c.close()
def get_article(self, hash_id):
c = self._connect.cursor()
result = c.execute("SELECT * FROM article WHERE hash_id=?", [hash_id]).fetchone()
c.close()
if not result:
return None
else:
return ArticleRecord(result)
def get_articles_by_date_created(self, date):
c = self._connect.cursor()
result = c.execute("SELECT * FROM article"
" WHERE created_at BETWEEN date(?) AND date(?, '+1 day')", [date, date]).fetchall()
articles = list()
for r in result:
articles.append(ArticleRecord(r))
c.close()
return articles
def get_articles_by_date_written(self, date):
c = self._connect.cursor()
result = c.execute("SELECT * FROM article WHERE date_time=?", [date]).fetchall()
articles = list()
for r in result:
articles.append(ArticleRecord(r))
c.close()
return articles
def get_articles_by_author(self, author):
c = self._connect.cursor()
result = c.execute("SELECT * FROM article WHERE author=?", [author]).fetchall()
articles = list()
for r in result:
articles.append(ArticleRecord(r))
c.close()
return articles
def get_date_by_created(self):
d = datetime.datetime.now()
offset = datetime.timedelta(days=7)
day = d - offset
date_from = datetime.datetime(day.year, day.month, day.day, 0, 0, 0)
date = str(date_from)
c = self._connect.cursor()
result = c.execute("SELECT strftime('%Y-%m-%d', created_at) FROM article"
" WHERE datetime(created_at)>=datetime(?)"
" GROUP BY strftime('%Y-%m-%d', created_at)", [date]).fetchall()
return result
def get_date_by_written(self):
d = datetime.datetime.now()
offset = datetime.timedelta(days=7)
day = d - offset
date_from = datetime.datetime(day.year, day.month, day.day, 0, 0, 0)
date = str(date_from)
c = self._connect.cursor()
result = c.execute("SELECT strftime('%Y-%m-%d', date_time) FROM article"
" WHERE datetime(date_time)>=datetime(?)"
" GROUP BY strftime('%Y-%m-%d', date_time)", [date]).fetchall()
return result
def close(self):
self._connect.close()
def _create_table(self):
c = self._connect.cursor()
create_table_article = """CREATE TABLE IF NOT EXISTS article (
hash_id text PRIMARY KEY,
date_time text,
created_at text NOT NULL DEFAULT (datetime('now', 'localtime')),
title text,
info text,
extra text,
content text,
version text,
author text)"""
create_table_wxid = "CREATE TABLE IF NOT EXISTS wxid (name text PRIMARY KEY, extra text)"
c.execute(create_table_article)
c.execute(create_table_wxid)
self._connect.commit()
c.close()
class WXIDRecord(dict):
def __init__(self, row, **kwargs):
super(WXIDRecord, self).__init__(name=row[0], extra=row[1], **kwargs)
if self['extra']:
try:
self['extra'] = json.loads(self['extra'])
except Exception as e:
print(e)
class ArticleRecord(dict):
def __init__(self, row, **kwargs):
"""
        :param row: the raw result row from SELECT * FROM article
:param kwargs:
"""
super(ArticleRecord, self).__init__(
hash_id=row[0],
date_time=row[1],
created_at=row[2],
title=row[3],
info=row[4],
extra=row[5],
content=row[6],
version=row[7],
author=row[8],
**kwargs)
self['extra'] = json.loads(self['extra'])
| gpl-3.0 | -1,672,044,169,058,816,300 | 30.774648 | 110 | 0.531324 | false |
havencruise/django-utils | templatetags/fieldset_form.py | 1 | 1863 | from django import template
register = template.Library()
@register.filter('get_form_field')
def get_form_field(form, field):
return form[field]
@register.inclusion_tag('form_as_fieldset.html')
def form_as_fieldset_fields(form, fieldsets=None):
"""
Render the form as a fieldset form.
    Example usage in a template with 'myform' and 'myfieldsets' as context attributes:
{% form_as_fieldset_fields myform myfieldsets %}
Sample fieldset:
MY_FIELDSETS = (
(
'info',
('first_name', 'middle_name', 'last_name', 'is_published')
),
(
'image',
('profile_image', 'avatar_image', 'profile_image_crop')
),
(
'profile',
('title', 'location', 'profile_full', 'profile_brief',
'website_url', 'average_artwork_cost', 'born_year',
'deceased_year')
),
(
'focus area',
('styles', 'mediums')
)
)
"""
if not fieldsets:
fieldsets = (
(
'',
tuple(form.fields.keys()),
),
)
return {'form': form, 'fieldsets' : fieldsets}
@register.filter('field_type')
def field_type(field):
"""
    Get the lowercased name of the field's widget class, minus any 'Input' suffix.
"""
if hasattr(field, 'field'):
field = field.field
s = (type(field.widget).__name__).replace('Input', '').lower()
return s
@register.filter('strongify')
def strongify(name):
"""
    Take a string and return a version with everything after the first
    word wrapped in <strong> tags, as in the example:
    Input: "My name is"
    Output: "My <strong>name is</strong>"
"""
names = name.split(' ')
if names[1:]:
strong_string = "<strong>" + " ".join(names[1:]) + "</strong>"
return names[0] +" " + strong_string
else:
return name | mit | -6,858,266,544,679,391,000 | 24.534247 | 83 | 0.531938 | false |
codycollier/booster | test/test_appserver_create_webdav.py | 1 | 5138 | #!/usr/bin/env python
import time
import unittest
import boostertest
class TestAppserverCreateWebdav(boostertest.BoosterTestCase):
""" Test the appserver-create-webdav action """
def setUp(self):
""" Set the action and other commonly used fixture data """
self.params = {}
self.params['action'] = "appserver-create-webdav"
self.params['appserver-name'] = "some-web-app"
self.params['group-name'] = "Default"
self.params['database-name'] = "Documents"
self.params['root'] = "/Docs"
self.params['port'] = "8801"
# collect app server names for later teardown
self.teardown_appservers = []
def tearDown(self):
""" Remove items from server created during tests """
params = {}
params['action'] = "appserver-delete"
params['group-name'] = "Default"
for appserver in self.teardown_appservers:
params['appserver-name'] = appserver
response, body = self.booster.request(params)
self.assertTrue(response.status in (404, 200))
time.sleep(3)
def test_basic_webdav_appserver_creation_results_in_201(self):
""" A successful webdav appserver creation should result in a 201 """
params = self.params
params['appserver-name'] = "webdav-loops"
self.teardown_appservers.append("webdav-loops")
response, body = self.booster.request(params)
err = response.get("x-booster-error", "none")
self.assertEqual(response.status, 201)
self.assertEqual(err, "none")
time.sleep(3)
def test_create_webdav_appserver_with_existing_name_results_in_409(self):
""" Attempting to create a pre-existing webdav appserver should result in 409 """
params = self.params
params['appserver-name'] = "grape-nuts"
self.teardown_appservers.append("grape-nuts")
# create the appserver
response, body = self.booster.request(params)
self.assertEqual(response.status, 201)
time.sleep(3)
# second create should result in 409
response, body = self.booster.request(params)
err = response.get("x-booster-error", "none")
self.assertEqual(response.status, 409)
self.assertTrue(err.find("already exists") != -1)
def test_create_webdav_appserver_in_nonexistent_group_results_in_500(self):
""" An appserver-create-webdav should fail with 500 if group does not exist """
params = self.params
params['appserver-name'] = "webdav-crunch"
params['group-name'] = "there-is-no-such-group"
self.teardown_appservers.append("webdav-crunch")
response, body = self.booster.request(params)
err = response.get("x-booster-error", "none")
self.assertEqual(response.status, 500)
self.assertTrue(err.find("Error running action 'appserver-create-webdav'. Error: No such group") > -1)
def test_create_webdav_appserver_with_invalid_name_results_in_500(self):
""" An appserver-create-webdav with invalid appserver-name should be rejected by api and result in 500 """
badnames = ("%%zxcggg", "$fbbhhjh$")
for badname in badnames:
params = self.params
params['appserver-name'] = badname
# create should result in 500
response, body = self.booster.request(params)
err = response.get("x-booster-error", "none")
self.assertEqual(response.status, 500)
self.assertTrue(err.find("Error running action 'appserver-create-webdav'") != -1)
self.assertTrue(err.find("Error: Invalid lexical value") != -1)
def test_create_webdav_appserver_with_missing_required_parameter_results_in_400(self):
""" A missing but required parameters should result in 400 """
required_parameters = ("appserver-name", "group-name", "database-name",
"root", "port")
for rp in required_parameters:
params = self.params.copy()
del params[rp]
response, body = self.booster.request(params)
err = response.get("x-booster-error", "")
self.assertEqual(response.status, 400)
self.assertTrue(err.find("valid set of arguments was not provided") != 1)
def test_create_webdav_appserver_with_empty_required_parameter_results_in_500(self):
""" An empty but required parameters should result in 500 """
required_parameters = ("appserver-name", "group-name", "database-name",
"root", "port")
for rp in required_parameters:
params = self.params.copy()
params[rp] = ""
# create should result in 500
response, body = self.booster.request(params)
err = response.get("x-booster-error", "none")
self.assertEqual(response.status, 500)
self.assertTrue(err.find("Error running action 'appserver-create-webdav'") != -1)
self.assertTrue(err.find("Error: ") != -1)
if __name__=="__main__":
unittest.main()
| apache-2.0 | -315,975,134,436,366,100 | 43.293103 | 114 | 0.619502 | false |
CroissanceCommune/autonomie | autonomie/views/tests.py | 1 | 1588 | # -*- coding: utf-8 -*-
# * Copyright (C) 2012-2013 Croissance Commune
# * Authors:
# * Arezki Feth <[email protected]>;
# * Miotte Julien <[email protected]>;
# * Pettier Gabriel;
# * TJEBBES Gaston <[email protected]>
#
# This file is part of Autonomie : Progiciel de gestion de CAE.
#
# Autonomie is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Autonomie is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Autonomie. If not, see <http://www.gnu.org/licenses/>.
#
"""
View for testing js scripts with qunit
It's not really automated, but it's better than nuts
"""
from autonomie.resources import test_js
def testjs(request):
"""
Only the template is interesting in this view
"""
test_js.need()
return dict(title=u"Page de test pour les composantes javascript")
def includeme(config):
"""
    Add the route and view for js tests, useful to test browser problems
"""
config.add_route("testjs", "/testjs")
config.add_view(
testjs,
route_name='testjs',
permission="admin",
renderer='/tests/base.mako',
)
| gpl-3.0 | -6,851,757,328,240,035,000 | 30.76 | 75 | 0.665617 | false |
ROB-Seismology/oq-hazardlib | openquake/hazardlib/tests/gsim/abrahamson_silva_2008_test.py | 1 | 1690 | # The Hazard Library
# Copyright (C) 2012 GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openquake.hazardlib.gsim.abrahamson_silva_2008 import AbrahamsonSilva2008
from openquake.hazardlib.tests.gsim.utils import BaseGSIMTestCase
# Test data have been generated from Fortran implementation
# of Dave Boore available at:
# http://www.daveboore.com/software_online.html
# Note that the Fortran implementation has been modified not
# to compute the 'Constant Displacement Model' term
class AbrahamsonSilva2008TestCase(BaseGSIMTestCase):
GSIM_CLASS = AbrahamsonSilva2008
def test_mean(self):
self.check('AS08/AS08_MEAN.csv',
max_discrep_percentage=0.1)
def test_std_inter(self):
self.check('AS08/AS08_STD_INTER.csv',
max_discrep_percentage=0.1)
def test_std_intra(self):
self.check('AS08/AS08_STD_INTRA.csv',
max_discrep_percentage=0.1)
def test_std_total(self):
self.check('AS08/AS08_STD_TOTAL.csv',
max_discrep_percentage=0.1)
| agpl-3.0 | -2,014,857,389,687,193,000 | 37.409091 | 78 | 0.721302 | false |
rickerc/cinder_audit | cinder/tests/keymgr/mock_key_mgr.py | 1 | 4301 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A mock implementation of a key manager that stores keys in a dictionary.
This key manager implementation is primarily intended for testing. In
particular, it does not store keys persistently. Lack of a centralized key
store also makes this implementation unsuitable for use among different
services.
Note: Instantiating this class multiple times will create separate key stores.
Keys created in one instance will not be accessible from other instances of
this class.
"""
import array
from cinder import exception
from cinder.keymgr import key
from cinder.keymgr import key_mgr
from cinder.openstack.common import uuidutils
from cinder import utils
class MockKeyManager(key_mgr.KeyManager):
"""
This mock key manager implementation supports all the methods specified
by the key manager interface. This implementation stores keys within a
dictionary, and as a result, it is not acceptable for use across different
services. Side effects (e.g., raising exceptions) for each method are
handled as specified by the key manager interface.
This key manager is not suitable for use in production deployments.
"""
def __init__(self):
self.keys = {}
def _generate_hex_key(self, **kwargs):
key_length = kwargs.get('key_length', 256)
# hex digit => 4 bits
hex_encoded = utils.generate_password(length=key_length / 4,
symbolgroups='0123456789ABCDEF')
return hex_encoded
def _generate_key(self, **kwargs):
_hex = self._generate_hex_key(**kwargs)
return key.SymmetricKey('AES',
array.array('B', _hex.decode('hex')).tolist())
def create_key(self, ctxt, **kwargs):
"""Creates a key.
This implementation returns a UUID for the created key. A
NotAuthorized exception is raised if the specified context is None.
"""
if ctxt is None:
raise exception.NotAuthorized()
key = self._generate_key(**kwargs)
return self.store_key(ctxt, key)
def _generate_key_id(self):
key_id = uuidutils.generate_uuid()
while key_id in self.keys:
key_id = uuidutils.generate_uuid()
return key_id
def store_key(self, ctxt, key, **kwargs):
"""Stores (i.e., registers) a key with the key manager."""
if ctxt is None:
raise exception.NotAuthorized()
key_id = self._generate_key_id()
self.keys[key_id] = key
return key_id
def copy_key(self, ctxt, key_id, **kwargs):
if ctxt is None:
raise exception.NotAuthorized()
copied_key_id = self._generate_key_id()
self.keys[copied_key_id] = self.keys[key_id]
return copied_key_id
def get_key(self, ctxt, key_id, **kwargs):
"""Retrieves the key identified by the specified id.
This implementation returns the key that is associated with the
specified UUID. A NotAuthorized exception is raised if the specified
context is None; a KeyError is raised if the UUID is invalid.
"""
if ctxt is None:
raise exception.NotAuthorized()
return self.keys[key_id]
def delete_key(self, ctxt, key_id, **kwargs):
"""Deletes the key identified by the specified id.
A NotAuthorized exception is raised if the context is None and a
KeyError is raised if the UUID is invalid.
"""
if ctxt is None:
raise exception.NotAuthorized()
del self.keys[key_id]
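# Illustrative usage (not part of the original module):
#   mgr = MockKeyManager()
#   key_id = mgr.create_key(context)
#   key = mgr.get_key(context, key_id)
#   mgr.delete_key(context, key_id)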
| apache-2.0 | 2,807,719,877,161,916,400 | 33.685484 | 78 | 0.662404 | false |
lingtools/lingtools | lingtools/util/datamanager.py | 1 | 1638 | """
Functions for managing data.
"""
# Copyright 2011-2013 Constantine Lignos
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib2
import posixpath
import zipfile
def download(url, path=None):
"""Download a url, save under the same filename or the specified path, and return the path."""
print "Downloading %s..." % url
try:
url_file = urllib2.urlopen(url)
except urllib2.HTTPError:
raise IOError("Couldn't open URL %s." % repr(url))
# Use the provided path, or default to the basename
filename = path if path else posixpath.basename(url)
try:
local_file = open(filename, 'wb')
local_file.write(url_file.read())
local_file.close()
except IOError:
raise IOError("Couldn't write filename %s." % repr(filename))
return filename
def unzip(filepath, destpath='.'):
"""Unzip a file."""
print "Unzipping %s..." % repr(filepath)
try:
zfile = zipfile.ZipFile(filepath, 'r')
except (IOError, zipfile.BadZipfile):
raise IOError("The zip file %s could not be opened." % repr(filepath))
zfile.extractall(destpath)
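# Illustrative usage (not part of the original module):
#   path = download("http://example.com/corpus.zip")
#   unzip(path, "corpus")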
| apache-2.0 | 6,173,010,345,642,779,000 | 30.5 | 98 | 0.686813 | false |
titeuf87/evennia | evennia/server/sessionhandler.py | 1 | 26285 | """
This module defines handlers for storing sessions of users
connecting to the server.
There are two similar but separate stores of sessions:
- ServerSessionHandler - this stores generic game sessions
    for the game. These sessions have no knowledge about
how they are connected to the world.
- PortalSessionHandler - this stores sessions created by
twisted protocols. These are dumb connectors that
    handle network communication but hold no game info.
"""
from builtins import object
from future.utils import listvalues
from time import time
from django.conf import settings
from evennia.commands.cmdhandler import CMD_LOGINSTART
from evennia.utils.logger import log_trace
from evennia.utils.utils import (variable_from_module, is_iter,
to_str, to_unicode,
make_iter,
callables_from_module)
from evennia.utils.inlinefuncs import parse_inlinefunc
try:
import cPickle as pickle
except ImportError:
import pickle
_INLINEFUNC_ENABLED = settings.INLINEFUNC_ENABLED
# delayed imports
_PlayerDB = None
_ServerSession = None
_ServerConfig = None
_ScriptDB = None
_OOB_HANDLER = None
class DummySession(object):
sessid = 0
DUMMYSESSION = DummySession()
# AMP signals
PCONN = chr(1) # portal session connect
PDISCONN = chr(2) # portal session disconnect
PSYNC = chr(3) # portal session sync
SLOGIN = chr(4) # server session login
SDISCONN = chr(5) # server session disconnect
SDISCONNALL = chr(6) # server session disconnect all
SSHUTD = chr(7) # server shutdown
SSYNC = chr(8) # server session sync
SCONN = chr(11) # server portal connection (for bots)
PCONNSYNC = chr(12) # portal post-syncing session
PDISCONNALL = chr(13) # portal session disconnect all
# i18n
from django.utils.translation import ugettext as _
_SERVERNAME = settings.SERVERNAME
_MULTISESSION_MODE = settings.MULTISESSION_MODE
_IDLE_TIMEOUT = settings.IDLE_TIMEOUT
_MAX_SERVER_COMMANDS_PER_SECOND = 100.0
_MAX_SESSION_COMMANDS_PER_SECOND = 5.0
_MODEL_MAP = None
# input handlers
_INPUT_FUNCS = {}
for modname in make_iter(settings.INPUT_FUNC_MODULES):
_INPUT_FUNCS.update(callables_from_module(modname))
def delayed_import():
"""
Helper method for delayed import of all needed entities.
"""
global _ServerSession, _PlayerDB, _ServerConfig, _ScriptDB
if not _ServerSession:
# we allow optional arbitrary serversession class for overloading
modulename, classname = settings.SERVER_SESSION_CLASS.rsplit(".", 1)
_ServerSession = variable_from_module(modulename, classname)
if not _PlayerDB:
from evennia.players.models import PlayerDB as _PlayerDB
if not _ServerConfig:
from evennia.server.models import ServerConfig as _ServerConfig
if not _ScriptDB:
from evennia.scripts.models import ScriptDB as _ScriptDB
# including once to avoid warnings in Python syntax checkers
_ServerSession, _PlayerDB, _ServerConfig, _ScriptDB
#-----------------------------------------------------------
# SessionHandler base class
#------------------------------------------------------------
class SessionHandler(dict):
"""
This handler holds a stack of sessions.
"""
def get_sessions(self, include_unloggedin=False):
"""
Returns the connected session objects.
Args:
include_unloggedin (bool, optional): Also list Sessions
that have not yet authenticated.
Returns:
sessions (list): A list of `Session` objects.
"""
if include_unloggedin:
return listvalues(self)
else:
return [session for session in self.values() if session.logged_in]
def get_all_sync_data(self):
"""
Create a dictionary of sessdata dicts representing all
sessions in store.
Returns:
syncdata (dict): A dict of sync data.
"""
return dict((sessid, sess.get_sync_data()) for sessid, sess in self.items())
def clean_senddata(self, session, kwargs):
"""
Clean up data for sending across the AMP wire. Also apply INLINEFUNCS.
Args:
session (Session): The relevant session instance.
kwargs (dict) Each keyword represents a
send-instruction, with the keyword itself being the name
of the instruction (like "text"). Suitable values for each
keyword are:
- arg -> [[arg], {}]
- [args] -> [[args], {}]
- {kwargs} -> [[], {kwargs}]
- [args, {kwargs}] -> [[arg], {kwargs}]
- [[args], {kwargs}] -> [[args], {kwargs}]
Returns:
kwargs (dict): A cleaned dictionary of cmdname:[[args],{kwargs}] pairs,
where the keys, args and kwargs have all been converted to
send-safe entities (strings or numbers), and inlinefuncs have been
applied.
"""
options = kwargs.pop("options", None) or {}
raw = options.get("raw", False)
strip_inlinefunc = options.get("strip_inlinefunc", False)
def _validate(data):
"Helper function to convert data to AMP-safe (picketable) values"
if isinstance(data, dict):
newdict = {}
for key, part in data.items():
newdict[key] = _validate(part)
return newdict
elif hasattr(data, "__iter__"):
return [_validate(part) for part in data]
elif isinstance(data, basestring):
# make sure strings are in a valid encoding
try:
data = data and to_str(to_unicode(data), encoding=session.protocol_flags["ENCODING"])
except LookupError:
# wrong encoding set on the session. Set it to a safe one
session.protocol_flags["ENCODING"] = "utf-8"
data = to_str(to_unicode(data), encoding=session.protocol_flags["ENCODING"])
if _INLINEFUNC_ENABLED and not raw and isinstance(self, ServerSessionHandler):
# only parse inlinefuncs on the outgoing path (sessionhandler->)
data = parse_inlinefunc(data, strip=strip_inlinefunc, session=session)
return data
elif hasattr(data, "id") and hasattr(data, "db_date_created") \
and hasattr(data, '__dbclass__'):
# convert database-object to their string representation.
return _validate(unicode(data))
else:
return data
rkwargs = {}
for key, data in kwargs.iteritems():
key = _validate(key)
if not data:
if key == "text":
# we don't allow sending text = None, this must mean
# that the text command is not to be used.
continue
rkwargs[key] = [ [], {} ]
elif isinstance(data, dict):
rkwargs[key] = [ [], _validate(data) ]
elif hasattr(data, "__iter__"):
if isinstance(data[-1], dict):
if len(data) == 2:
if hasattr(data[0], "__iter__"):
rkwargs[key] = [_validate(data[0]), _validate(data[1])]
else:
rkwargs[key] = [[_validate(data[0])], _validate(data[1])]
else:
rkwargs[key] = [ _validate(data[:-1]), _validate(data[-1]) ]
else:
rkwargs[key] = [ _validate(data), {} ]
else:
rkwargs[key] = [ [_validate(data)], {} ]
rkwargs[key][1]["options"] = options
return rkwargs
#------------------------------------------------------------
# Server-SessionHandler class
#------------------------------------------------------------
class ServerSessionHandler(SessionHandler):
"""
This object holds the stack of sessions active in the game at
any time.
    A session registers with the handler in two steps: first by
    registering itself with the connect() method. This indicates a
    non-authenticated session. Whenever the session is authenticated,
    the session together with the related player is sent to the login()
    method.
"""
# AMP communication methods
def __init__(self, *args, **kwargs):
"""
Init the handler.
"""
super(ServerSessionHandler, self).__init__(*args, **kwargs)
self.server = None
self.server_data = {"servername": _SERVERNAME}
def portal_connect(self, portalsessiondata):
"""
Called by Portal when a new session has connected.
Creates a new, unlogged-in game session.
Args:
portalsessiondata (dict): a dictionary of all property:value
keys defining the session and which is marked to be
synced.
"""
delayed_import()
global _ServerSession, _PlayerDB, _ScriptDB
sess = _ServerSession()
sess.sessionhandler = self
sess.load_sync_data(portalsessiondata)
sess.at_sync()
# validate all scripts
_ScriptDB.objects.validate()
self[sess.sessid] = sess
if sess.logged_in and sess.uid:
# Session is already logged in. This can happen in the
# case of auto-authenticating protocols like SSH or
# webclient's session sharing
player = _PlayerDB.objects.get_player_from_uid(sess.uid)
if player:
# this will set player.is_connected too
self.login(sess, player, force=True)
return
else:
sess.logged_in = False
sess.uid = None
# show the first login command
self.data_in(sess, text=[[CMD_LOGINSTART],{}])
def portal_session_sync(self, portalsessiondata):
"""
        Called by Portal when it wants to update a single session (e.g.
        because all negotiation protocols have finally replied).
Args:
portalsessiondata (dict): a dictionary of all property:value
keys defining the session and which is marked to be
synced.
"""
sessid = portalsessiondata.get("sessid")
session = self.get(sessid)
if session:
# since some of the session properties may have had
# a chance to change already before the portal gets here
# the portal doesn't send all sessiondata but only
# ones which should only be changed from portal (like
# protocol_flags etc)
session.load_sync_data(portalsessiondata)
def portal_sessions_sync(self, portalsessionsdata):
"""
Syncing all session ids of the portal with the ones of the
server. This is instantiated by the portal when reconnecting.
Args:
portalsessionsdata (dict): A dictionary
`{sessid: {property:value},...}` defining each session and
the properties in it which should be synced.
"""
delayed_import()
global _ServerSession, _PlayerDB, _ServerConfig, _ScriptDB
for sess in self.values():
# we delete the old session to make sure to catch eventual
# lingering references.
del sess
for sessid, sessdict in portalsessionsdata.items():
sess = _ServerSession()
sess.sessionhandler = self
sess.load_sync_data(sessdict)
if sess.uid:
sess.player = _PlayerDB.objects.get_player_from_uid(sess.uid)
self[sessid] = sess
sess.at_sync()
# after sync is complete we force-validate all scripts
# (this also starts them)
init_mode = _ServerConfig.objects.conf("server_restart_mode", default=None)
_ScriptDB.objects.validate(init_mode=init_mode)
_ServerConfig.objects.conf("server_restart_mode", delete=True)
# announce the reconnection
self.announce_all(_(" ... Server restarted."))
def portal_disconnect(self, session):
"""
Called from Portal when Portal session closed from the portal
side. There is no message to report in this case.
Args:
session (Session): The Session to disconnect
"""
# disconnect us without calling Portal since
# Portal already knows.
self.disconnect(session, reason="", sync_portal=False)
def portal_disconnect_all(self):
"""
Called from Portal when Portal is closing down. All
Sessions should die. The Portal should not be informed.
"""
# set a watchdog to avoid self.disconnect from deleting
# the session while we are looping over them
self._disconnect_all = True
        for session in self.values():
session.disconnect()
del self._disconnect_all
# server-side access methods
def start_bot_session(self, protocol_path, configdict):
"""
This method allows the server-side to force the Portal to
create a new bot session.
Args:
protocol_path (str): The full python path to the bot's
class.
configdict (dict): This dict will be used to configure
the bot (this depends on the bot protocol).
Examples:
start_bot_session("evennia.server.portal.irc.IRCClient",
{"uid":1, "botname":"evbot", "channel":"#evennia",
"network:"irc.freenode.net", "port": 6667})
Notes:
The new session will use the supplied player-bot uid to
initiate an already logged-in connection. The Portal will
treat this as a normal connection and henceforth so will
the Server.
"""
self.server.amp_protocol.send_AdminServer2Portal(DUMMYSESSION, operation=SCONN,
protocol_path=protocol_path, config=configdict)
def portal_shutdown(self):
"""
Called by server when shutting down the portal.
"""
self.server.amp_protocol.send_AdminServer2Portal(DUMMYSESSION,
operation=SSHUTD)
def login(self, session, player, force=False, testmode=False):
"""
Log in the previously unloggedin session and the player we by
now should know is connected to it. After this point we assume
the session to be logged in one way or another.
Args:
session (Session): The Session to authenticate.
player (Player): The Player identified as associated with this Session.
force (bool): Login also if the session thinks it's already logged in
(this can happen for auto-authenticating protocols)
testmode (bool, optional): This is used by unittesting for
faking login without any AMP being actually active.
"""
if session.logged_in and not force:
# don't log in a session that is already logged in.
return
player.is_connected = True
# sets up and assigns all properties on the session
session.at_login(player)
# player init
player.at_init()
# Check if this is the first time the *player* logs in
if player.db.FIRST_LOGIN:
player.at_first_login()
del player.db.FIRST_LOGIN
player.at_pre_login()
if _MULTISESSION_MODE == 0:
# disconnect all previous sessions.
self.disconnect_duplicate_sessions(session)
nsess = len(self.sessions_from_player(player))
string = "Logged in: {player} {address} ({nsessions} session(s) total)"
        string = string.format(player=player, address=session.address, nsessions=nsess)
session.log(string)
session.logged_in = True
# sync the portal to the session
if not testmode:
self.server.amp_protocol.send_AdminServer2Portal(session,
operation=SLOGIN,
sessiondata={"logged_in": True})
player.at_post_login(session=session)
def disconnect(self, session, reason="", sync_portal=True):
"""
Called from server side to remove session and inform portal
of this fact.
Args:
session (Session): The Session to disconnect.
reason (str, optional): A motivation for the disconnect.
sync_portal (bool, optional): Sync the disconnect to
Portal side. This should be done unless this was
called by self.portal_disconnect().
"""
session = self.get(session.sessid)
if not session:
return
if hasattr(session, "player") and session.player:
# only log accounts logging off
nsess = len(self.sessions_from_player(session.player)) - 1
string = "Logged out: {player} {address} ({nsessions} sessions(s) remaining)"
string = string.format(player=session.player, address=session.address, nsessions=nsess)
session.log(string)
session.at_disconnect()
sessid = session.sessid
if sessid in self and not hasattr(self, "_disconnect_all"):
del self[sessid]
if sync_portal:
# inform portal that session should be closed.
self.server.amp_protocol.send_AdminServer2Portal(session,
operation=SDISCONN,
reason=reason)
def all_sessions_portal_sync(self):
"""
This is called by the server when it reboots. It syncs all session data
to the portal. Returns a deferred!
"""
sessdata = self.get_all_sync_data()
return self.server.amp_protocol.send_AdminServer2Portal(DUMMYSESSION,
operation=SSYNC,
sessiondata=sessdata)
def session_portal_sync(self, session):
"""
This is called by the server when it wants to sync a single session
with the Portal for whatever reason. Returns a deferred!
"""
sessdata = {session.sessid: session.get_sync_data()}
return self.server.amp_protocol.send_AdminServer2Portal(DUMMYSESSION,
operation=SSYNC,
sessiondata=sessdata,
clean=False)
def disconnect_all_sessions(self, reason="You have been disconnected."):
"""
Cleanly disconnect all of the connected sessions.
Args:
reason (str, optional): The reason for the disconnection.
"""
for session in self:
del session
# tell portal to disconnect all sessions
self.server.amp_protocol.send_AdminServer2Portal(DUMMYSESSION,
operation=SDISCONNALL,
reason=reason)
def disconnect_duplicate_sessions(self, curr_session,
reason=_("Logged in from elsewhere. Disconnecting.")):
"""
Disconnects any existing sessions with the same user.
args:
curr_session (Session): Disconnect all Sessions matching this one.
reason (str, optional): A motivation for disconnecting.
"""
uid = curr_session.uid
doublet_sessions = [sess for sess in self.values()
if sess.logged_in
and sess.uid == uid
and sess != curr_session]
for session in doublet_sessions:
self.disconnect(session, reason)
def validate_sessions(self):
"""
Check all currently connected sessions (logged in and not) and
see if any are dead or idle.
"""
tcurr = time()
reason = _("Idle timeout exceeded, disconnecting.")
for session in (session for session in self.values()
if session.logged_in and _IDLE_TIMEOUT > 0
and (tcurr - session.cmd_last) > _IDLE_TIMEOUT):
self.disconnect(session, reason=reason)
def player_count(self):
"""
Get the number of connected players (not sessions since a
player may have more than one session depending on settings).
Only logged-in players are counted here.
Returns:
nplayer (int): Number of connected players
"""
return len(set(session.uid for session in self.values() if session.logged_in))
def all_connected_players(self):
"""
Get a unique list of connected and logged-in Players.
Returns:
            players (list): All connected Players (which may be fewer than the
amount of Sessions due to multi-playing).
"""
return list(set(session.player for session in self.values() if session.logged_in and session.player))
def session_from_sessid(self, sessid):
"""
Get session based on sessid, or None if not found
Args:
sessid (int or list): Session id(s).
Return:
sessions (Session or list): Session(s) found. This
is a list if input was a list.
"""
if is_iter(sessid):
return [self.get(sid) for sid in sessid if sid in self]
return self.get(sessid)
def session_from_player(self, player, sessid):
"""
Given a player and a session id, return the actual session
object.
Args:
player (Player): The Player to get the Session from.
sessid (int or list): Session id(s).
Returns:
sessions (Session or list): Session(s) found.
"""
sessions = [self[sid] for sid in make_iter(sessid)
if sid in self and self[sid].logged_in and player.uid == self[sid].uid]
return sessions[0] if len(sessions) == 1 else sessions
def sessions_from_player(self, player):
"""
Given a player, return all matching sessions.
Args:
player (Player): Player to get sessions from.
Returns:
sessions (list): All Sessions associated with this player.
"""
uid = player.uid
return [session for session in self.values() if session.logged_in and session.uid == uid]
def sessions_from_puppet(self, puppet):
"""
Given a puppeted object, return all controlling sessions.
Args:
puppet (Object): Object puppeted
        Returns:
            sessions (Session or list): Can be more than one if the Object is controlled by
more than one Session (MULTISESSION_MODE > 1).
"""
sessions = puppet.sessid.get()
return sessions[0] if len(sessions) == 1 else sessions
sessions_from_character = sessions_from_puppet
def sessions_from_csessid(self, csessid):
"""
        Given a client identification hash (for session types that offer them), return all
        sessions with a matching hash.
        Args:
csessid (str): The session hash
"""
return [session for session in self.values()
if session.csessid and session.csessid == csessid]
def announce_all(self, message):
"""
Send message to all connected sessions
Args:
message (str): Message to send.
"""
for session in self.values():
self.data_out(session, text=message)
def data_out(self, session, **kwargs):
"""
Sending data Server -> Portal
Args:
session (Session): Session to relay to.
text (str, optional): text data to return
Notes:
The outdata will be scrubbed for sending across
the wire here.
"""
# clean output for sending
kwargs = self.clean_senddata(session, kwargs)
# send across AMP
self.server.amp_protocol.send_MsgServer2Portal(session,
**kwargs)
def get_inputfuncs(self):
"""
Get all registered inputfuncs (access function)
Returns:
inputfuncs (dict): A dict of {key:inputfunc,...}
"""
return _INPUT_FUNCS
def data_in(self, session, **kwargs):
"""
Data Portal -> Server.
We also intercept OOB communication here.
Args:
sessions (Session): Session.
Kwargs:
kwargs (any): Other data from protocol.
"""
# distribute incoming data to the correct receiving methods.
if session:
input_debug = session.protocol_flags.get("INPUTDEBUG", False)
for cmdname, (cmdargs, cmdkwargs) in kwargs.iteritems():
cname = cmdname.strip().lower()
try:
cmdkwargs.pop("options", None)
if cname in _INPUT_FUNCS:
_INPUT_FUNCS[cname](session, *cmdargs, **cmdkwargs)
else:
_INPUT_FUNCS["default"](session, cname, *cmdargs, **cmdkwargs)
                except Exception as err:
if input_debug:
session.msg(err)
log_trace()
SESSION_HANDLER = ServerSessionHandler()
SESSIONS = SESSION_HANDLER # legacy
| bsd-3-clause | -5,545,512,027,563,597,000 | 35.405817 | 109 | 0.568347 | false |
AmritaLonkar/trunk | SU2_PY/SU2/run/adaptation.py | 2 | 1063 |
import os, sys, shutil, copy
from .. import io as su2io
from .. import mesh as su2mesh
from decompose import decompose as su2decomp
def adaptation ( config , kind='' ):
# local copy
konfig = copy.deepcopy(config)
# check kind
if kind: konfig['KIND_ADAPT'] = kind
kind = konfig.get('KIND_ADAPT','NONE')
if kind == 'NONE':
return {}
# check adapted?
# decompose
su2decomp(konfig)
# get adaptation function
adapt_function = su2mesh.adapt.name_map[kind]
# setup problem
suffix = 'adapt'
meshname_orig = konfig['MESH_FILENAME']
meshname_new = su2io.add_suffix( konfig['MESH_FILENAME'], suffix )
konfig['MESH_OUT_FILENAME'] = meshname_new
# Run Adaptation
info = adapt_function(konfig)
# update super config
config['MESH_FILENAME'] = meshname_new
config['KIND_ADAPT'] = kind
# files out
files = { 'MESH' : meshname_new }
# info out
append_nestdict( info, { 'FILES' : files } )
return info
| gpl-2.0 | -1,794,196,554,823,683,300 | 20.714286 | 71 | 0.597366 | false |
jmgc/myhdl-numeric | myhdl/_extractHierarchy.py | 1 | 15467 | # This file is part of the myhdl library, a Python package for using
# Python as a Hardware Description Language.
#
# Copyright (C) 2003-2008 Jan Decaluwe
#
# The myhdl library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
""" myhdl _extractHierarchy module.
"""
from __future__ import absolute_import
import inspect
import re
import string
import sys
import ast
from ._errors import ExtractHierarchyError, ToVerilogError, ToVHDLError
from ._enum import EnumItemType
from ._Signal import _Signal, _isListOfSigs
from ._compat import integer_types
from ._getcellvars import _getCellVars
from ._misc import _isGenSeq
from ._resolverefs import _resolveRefs
from ._util import _flatten, _genfunc, _isTupleOfInts, _isTupleOfFloats
_profileFunc = None
class _error:
pass
_error.NoInstances = "No instances found"
_error.InconsistentHierarchy = "Inconsistent hierarchy - are all" \
" instances returned ?"
_error.InconsistentToplevel = "Inconsistent top level %s for %s - should be 1"
class _Constant(object):
def __init__(self, orig_name, value):
self.name = None
self.orig_name = orig_name
self.instance = None
self.value = value
self.used = False
class _Instance(object):
__slots__ = ['level', 'obj', 'subs', 'constdict', 'sigdict', 'memdict',
'romdict', 'name', 'func', 'frame',
]
def __init__(self, level, obj, subs, constdict, sigdict, memdict,
romdict, func, frame):
self.level = level
self.obj = obj
self.subs = subs
self.constdict = constdict
self.sigdict = sigdict
self.memdict = memdict
self.romdict = romdict
self.func = func
self.frame = frame
self.name = None
_memInfoMap = {}
class _MemInfo(object):
__slots__ = ['mem', 'name', 'elObj', 'depth', 'type', '_used', '_driven',
'_read']
def __init__(self, mem):
self.mem = mem
self.name = None
self.depth = len(mem)
self.elObj = mem[0]
self.type = None
self._used = False
self._driven = None
self._read = False
@property
def used(self):
return self._used
@used.setter
def used(self, val):
self._used = bool(val)
for s in self.mem:
s._used = bool(val)
def _clear(self):
self._driven = None
self._read = False
for el in self.mem:
el._clear()
def _getMemInfo(mem):
return _memInfoMap[id(mem)]
def _makeMemInfo(mem):
key = id(mem)
if key not in _memInfoMap:
_memInfoMap[key] = _MemInfo(mem)
return _memInfoMap[key]
def _isMem(mem):
return id(mem) in _memInfoMap
_romInfoMap = {}
class _RomInfo(object):
__slots__ = ['mem', 'orig_name', 'name', 'elObj', 'depth', 'type', '_used']
def __init__(self, orig_name, mem):
self.mem = mem
self.orig_name = orig_name
self.name = None
self.depth = len(mem)
if (self.depth > 0):
if isinstance(mem[0], integer_types):
for elObj in mem:
if elObj < 0:
break
else:
elObj = mem[0]
self.elObj = elObj
else:
self.elObj = None
self.type = None
self._used = False
@property
def used(self):
return self._used
@used.setter
def used(self, val):
self._used = bool(val)
def _getRomInfo(mem):
return _romInfoMap[id(mem)]
def _makeRomInfo(n, mem):
key = id(mem)
if key not in _romInfoMap:
_romInfoMap[key] = _RomInfo(n, mem)
return _romInfoMap[key]
def _isRom(mem):
return id(mem) in _romInfoMap
_userCodeMap = {'verilog': {},
'vhdl': {}
}
class _UserCode(object):
__slots__ = ['code', 'namespace', 'funcname', 'func', 'sourcefile',
'sourceline']
def __init__(self, code, namespace, funcname, func, sourcefile,
sourceline):
self.code = code
self.namespace = namespace
self.sourcefile = sourcefile
self.func = func
self.funcname = funcname
self.sourceline = sourceline
def __str__(self):
try:
code = self._interpolate()
except:
tipe, value, _ = sys.exc_info()
info = "in file %s, function %s starting on line %s:\n " % \
(self.sourcefile, self.funcname, self.sourceline)
msg = "%s: %s" % (tipe, value)
self.raiseError(msg, info)
code = "\n%s\n" % code
return code
def _interpolate(self):
return string.Template(self.code).substitute(self.namespace)
class _UserCodeDepr(_UserCode):
def _interpolate(self):
return self.code % self.namespace
class _UserVerilogCode(_UserCode):
def raiseError(self, msg, info):
raise ToVerilogError("Error in user defined Verilog code", msg, info)
class _UserVhdlCode(_UserCode):
def raiseError(self, msg, info):
raise ToVHDLError("Error in user defined VHDL code", msg, info)
class _UserVerilogCodeDepr(_UserVerilogCode, _UserCodeDepr):
pass
class _UserVhdlCodeDepr(_UserVhdlCode, _UserCodeDepr):
pass
class _UserVerilogInstance(_UserVerilogCode):
def __str__(self):
args = inspect.getargspec(self.func)[0]
s = "%s %s(" % (self.funcname, self.code)
sep = ''
for arg in args:
if arg in self.namespace and isinstance(self.namespace[arg],
_Signal):
signame = self.namespace[arg]._name
s += sep
sep = ','
s += "\n .%s(%s)" % (arg, signame)
s += "\n);\n\n"
return s
class _UserVhdlInstance(_UserVhdlCode):
def __str__(self):
args = inspect.getargspec(self.func)[0]
s = "%s: entity work.%s(MyHDL)\n" % (self.code, self.funcname)
s += " port map ("
sep = ''
for arg in args:
if arg in self.namespace and isinstance(self.namespace[arg],
_Signal):
signame = self.namespace[arg]._name
s += sep
sep = ','
s += "\n %s=>%s" % (arg, signame)
s += "\n );\n\n"
return s
def _addUserCode(specs, arg, funcname, func, frame):
classMap = {
'__verilog__': _UserVerilogCodeDepr,
'__vhdl__': _UserVhdlCodeDepr,
'verilog_code': _UserVerilogCode,
'vhdl_code': _UserVhdlCode,
'verilog_instance': _UserVerilogInstance,
'vhdl_instance': _UserVhdlInstance,
}
namespace = frame.f_globals.copy()
namespace.update(frame.f_locals)
sourcefile = inspect.getsourcefile(frame)
sourceline = inspect.getsourcelines(frame)[1]
for hdl in _userCodeMap:
oldspec = "__%s__" % hdl
codespec = "%s_code" % hdl
instancespec = "%s_instance" % hdl
spec = None
# XXX add warning logic
if instancespec in specs:
spec = instancespec
elif codespec in specs:
spec = codespec
elif oldspec in specs:
spec = oldspec
if spec:
assert id(arg) not in _userCodeMap[hdl]
code = specs[spec]
_userCodeMap[hdl][id(arg)] = classMap[spec](code, namespace,
funcname, func,
sourcefile, sourceline)
class _CallFuncVisitor(object):
def __init__(self):
self.linemap = {}
def visitAssign(self, node):
if isinstance(node.expr, ast.Call):
self.lineno = None
self.visit(node.expr)
self.linemap[self.lineno] = node.lineno
def visitName(self, node):
self.lineno = node.lineno
class _HierExtr(object):
def __init__(self, name, dut, *args, **kwargs):
global _profileFunc
_memInfoMap.clear()
for hdl in _userCodeMap:
_userCodeMap[hdl].clear()
self.skipNames = ('always_comb', 'instance',
'always_seq', '_always_seq_decorator',
'always', '_always_decorator',
'instances',
'processes', 'posedge', 'negedge')
self.skip = 0
self.hierarchy = hierarchy = []
self.absnames = absnames = {}
self.level = 0
_profileFunc = self.extractor
sys.setprofile(_profileFunc)
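        # While dut() runs, the profile hook above sees every function call
        # and return; that is how extractor() records the design's instance
        # hierarchy without any explicit registration by the user.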
_top = dut(*args, **kwargs)
sys.setprofile(None)
if not hierarchy:
raise ExtractHierarchyError(_error.NoInstances)
self.top = _top
# streamline hierarchy
hierarchy.reverse()
# walk the hierarchy to define relative and absolute names
names = {}
top_inst = hierarchy[0]
obj, subs = top_inst.obj, top_inst.subs
names[id(obj)] = name
absnames[id(obj)] = name
if not top_inst.level == 1:
raise ExtractHierarchyError(_error.InconsistentToplevel %
(top_inst.level, name))
for inst in hierarchy:
obj, subs = inst.obj, inst.subs
if id(obj) not in names:
raise ExtractHierarchyError(_error.InconsistentHierarchy)
inst.name = names[id(obj)]
tn = absnames[id(obj)]
for sn, so in subs:
names[id(so)] = sn
absnames[id(so)] = "%s_%s" % (tn, sn)
if isinstance(so, (tuple, list)):
for i, soi in enumerate(so):
sni = "%s_%s" % (sn, i)
names[id(soi)] = sni
absnames[id(soi)] = "%s_%s_%s" % (tn, sn, i)
def extractor(self, frame, event, arg):
if event == "call":
funcname = frame.f_code.co_name
# skip certain functions
if funcname in self.skipNames:
self.skip += 1
if not self.skip:
self.level += 1
elif event == "return":
funcname = frame.f_code.co_name
func = frame.f_globals.get(funcname)
if func is None:
# Didn't find a func in the global space, try the local "self"
# argument and see if it has a method called *funcname*
obj = frame.f_locals.get('self')
if hasattr(obj, funcname):
func = getattr(obj, funcname)
if not self.skip:
isGenSeq = _isGenSeq(arg)
if isGenSeq:
specs = {}
for hdl in _userCodeMap:
spec = "__%s__" % hdl
if spec in frame.f_locals and frame.f_locals[spec]:
specs[spec] = frame.f_locals[spec]
spec = "%s_code" % hdl
if func and hasattr(func, spec) and \
getattr(func, spec):
specs[spec] = getattr(func, spec)
spec = "%s_instance" % hdl
if func and hasattr(func, spec) and \
getattr(func, spec):
specs[spec] = getattr(func, spec)
if specs:
_addUserCode(specs, arg, funcname, func, frame)
# building hierarchy only makes sense if there are generators
if isGenSeq and arg:
constdict = {}
sigdict = {}
memdict = {}
romdict = {}
symdict = frame.f_globals.copy()
symdict.update(frame.f_locals)
cellvars = []
# All nested functions will be in co_consts
if func:
local_gens = []
consts = func.__code__.co_consts
for item in _flatten(arg):
genfunc = _genfunc(item)
if genfunc.__code__ in consts:
local_gens.append(item)
if local_gens:
cellvarlist = _getCellVars(symdict, local_gens)
cellvars.extend(cellvarlist)
objlist = _resolveRefs(symdict, local_gens)
cellvars.extend(objlist)
for n, v in symdict.items():
# extract signals and memories
                        # also keep track of whether they are used in generators
                        # only include objects that are used in generators
if isinstance(v, _Signal):
sigdict[n] = v
if n in cellvars:
v._markUsed()
elif isinstance(v, (integer_types, float,
EnumItemType)):
constdict[n] = _Constant(n, v)
elif _isListOfSigs(v):
m = _makeMemInfo(v)
memdict[n] = m
if n in cellvars:
m._used = True
elif _isTupleOfInts(v):
m = _makeRomInfo(n, v)
romdict[n] = m
if n in cellvars:
m._used = True
elif _isTupleOfFloats(v):
m = _makeRomInfo(n, v)
romdict[n] = m
if n in cellvars:
m._used = True
subs = []
for n, sub in frame.f_locals.items():
for elt in _inferArgs(arg):
if elt is sub:
subs.append((n, sub))
inst = _Instance(self.level, arg, subs, constdict,
sigdict, memdict, romdict, func, frame)
self.hierarchy.append(inst)
self.level -= 1
if funcname in self.skipNames:
self.skip -= 1
def _inferArgs(arg):
c = [arg]
if isinstance(arg, (tuple, list)):
c += list(arg)
return c
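# Editor's sketch (not part of the original module): _HierExtr is normally
# driven by MyHDL's conversion and tracing front-ends rather than called
# directly. A hypothetical direct use, assuming a trivial design function,
# would look like:
#
#   from myhdl import Signal, always_comb
#
#   def top(dout, din):
#       @always_comb
#       def logic():
#           dout.next = din
#       return logic
#
#   dout, din = Signal(bool(0)), Signal(bool(0))
#   h = _HierExtr("top", top, dout, din)
#   # h.top is the elaborated instance; h.hierarchy and h.absnames describe
#   # the extracted structure.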
| lgpl-2.1 | 2,900,433,588,535,602,000 | 31.022774 | 79 | 0.498222 | false |
tseaver/google-cloud-python | bigquery/samples/query_script.py | 1 | 2249 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def query_script(client):
# [START bigquery_query_script]
# TODO(developer): Import the client library.
# from google.cloud import bigquery
# TODO(developer): Construct a BigQuery client object.
# client = bigquery.Client()
# Run a SQL script.
sql_script = """
-- Declare a variable to hold names as an array.
DECLARE top_names ARRAY<STRING>;
-- Build an array of the top 100 names from the year 2017.
SET top_names = (
SELECT ARRAY_AGG(name ORDER BY number DESC LIMIT 100)
FROM `bigquery-public-data.usa_names.usa_1910_2013`
WHERE year = 2000
);
-- Which names appear as words in Shakespeare's plays?
SELECT
name AS shakespeare_name
FROM UNNEST(top_names) AS name
WHERE name IN (
SELECT word
FROM `bigquery-public-data.samples.shakespeare`
);
"""
parent_job = client.query(sql_script)
# Wait for the whole script to finish.
rows_iterable = parent_job.result()
print("Script created {} child jobs.".format(parent_job.num_child_jobs))
# Fetch result rows for the final sub-job in the script.
rows = list(rows_iterable)
print(
"{} of the top 100 names from year 2000 also appear in Shakespeare's works.".format(
len(rows)
)
)
# Fetch jobs created by the SQL script.
child_jobs_iterable = client.list_jobs(parent_job=parent_job)
for child_job in child_jobs_iterable:
child_rows = list(child_job.result())
print(
"Child job with ID {} produced {} row(s).".format(
child_job.job_id, len(child_rows)
)
)
# [END bigquery_query_script]
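# Editor's addition (not part of the original sample): a minimal driver for the
# function above, assuming default application credentials are available to the
# google-cloud-bigquery client.
if __name__ == "__main__":
    from google.cloud import bigquery
    query_script(bigquery.Client())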
| apache-2.0 | -3,223,353,715,176,114,000 | 31.594203 | 92 | 0.665185 | false |
fhinkel/TweetOptimizer | python/flask_API.py | 1 | 3540 | # -*- coding: utf-8 -*-
'''
Created on Sep 29, 2014
@author: tim
'''
from flask import Flask
from flask import request
from flask import Response
from flask.ext.cors import CORS
from relation_calculator import Relation_Calculator
import sys
import json
import re
from crossdomain import crossdomain
http_regex = re.compile(r'''(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'".,<>?«»“”‘’]))''', re.DOTALL)
app = Flask(__name__)
cors = CORS(app, resources={r"/api/*": {"origins": "*"}})
base_path = sys.argv[0].replace('flask_API.py','')
rel = Relation_Calculator()
def replacePunctuation(text):
text = http_regex.sub('',text)
text = text.replace(',','').replace('.','').replace(';','').replace('(','').replace(')','')
text = text.replace(':','').replace('!','').replace('?','').replace('RT','')
return text
def getRelatedTerms(search_term, level):
keywords = rel.get_keywords(search_term.lower(), searchtype = level)
data = []
print keywords
for items in keywords:
dictValue = {}
dictValue['tag'] = items[0]
dictValue['ratio'] = items[1]
dictValue['confidence'] = items[2]
data.append(dictValue)
return data
@app.route('/relatedHashtags', methods=['OPTIONS', 'GET', 'POST'])
@crossdomain(origin='*')
def getRelatedHashtags():
global rel
jsondata = request.get_json(force=True)
hashtag = jsondata['term']
return Response(json.dumps(getRelatedTerms(hashtag, 0)), mimetype='application/json')
@app.route('/tweetToKeywordList', methods=['OPTIONS', 'GET', 'POST'])
@crossdomain(origin='*')
def tweetToRelatedWords():
global rel
jsondata = request.get_json(force=True)
tweet = jsondata['tweet']
tweet = replacePunctuation(tweet)
print tweet
keywordsList = []
for word in tweet.split(' '):
if any(x.isupper() for x in word):
keywordsList.append(rel.get_keywords(word.lower(), searchtype = 2)[0:10])
dictKeywords = {'keywordList' : keywordsList}
return Response(json.dumps(dictKeywords), mimetype='application/json')
@app.route('/relatedUsers', methods=['OPTIONS', 'GET', 'POST'])
@crossdomain(origin='*')
def getRelatedUser():
global rel
jsondata = request.get_json(force=True)
hashtag = jsondata['term']
return Response(json.dumps(getRelatedTerms(hashtag, 1)), mimetype='application/json')
@app.route('/relatedWords', methods=['OPTIONS', 'GET', 'POST'])
@crossdomain(origin='*')
def getRelatedWords():
global rel
jsondata = request.get_json(force=True)
hashtag = jsondata['term']
return Response(json.dumps(getRelatedTerms(hashtag, 3)), mimetype='application/json')
@app.route('/relatedAll', methods=['OPTIONS', 'GET', 'POST'])
@crossdomain(origin='*')
def getRelatedAll():
global rel
jsondata = request.get_json(force=True)
hashtag = jsondata['term']
return Response(json.dumps(getRelatedTerms(hashtag, 4)), mimetype='application/json')
@app.route('/wordCount', methods=['OPTIONS', 'GET', 'POST'])
@crossdomain(origin='*')
def getWordCount():
global rel
jsondata = request.get_json(force=True)
word = jsondata['term']
return Response(json.dumps({'count' : rel.get_word_count(replacePunctuation(word))}), mimetype='application/json')
if __name__ == "__main__":
app.debug = True
app.run(threaded=True)
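# Editor's note (not part of the original file): with the development server
# running on the default host/port, the endpoints above can be exercised with,
# e.g.:
#
#   curl -X POST http://127.0.0.1:5000/relatedHashtags \
#        -H "Content-Type: application/json" \
#        -d '{"term": "python"}'
#
# Each related-term endpoint returns a JSON list of objects with "tag",
# "ratio" and "confidence" keys, as built in getRelatedTerms().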
| mit | 6,966,130,194,898,723,000 | 30.247788 | 224 | 0.622096 | false |
TzuChieh/Photon-v2 | BlenderAddon/PhotonBlend/bmodule/material.py | 1 | 5390 | from ..utility import settings
from . import ui
from . import common
from . import node
import sys
import bpy
class PhMaterialHeaderPanel(bpy.types.Panel):
bl_label = ""
bl_context = "material"
bl_space_type = "PROPERTIES"
bl_region_type = "WINDOW"
bl_options = {"HIDE_HEADER"}
COMPATIBLE_ENGINES = {settings.renderer_id_name}
@classmethod
def poll(cls, b_context):
render_settings = b_context.scene.render
return (render_settings.engine in cls.COMPATIBLE_ENGINES and
(b_context.material or b_context.object))
def draw(self, b_context):
layout = self.layout
mat = b_context.material
obj = b_context.object
mat_slot = b_context.material_slot
space = b_context.space_data
if obj:
is_sortable = len(obj.material_slots) > 1
rows = 1
if is_sortable:
rows = 4
row = layout.row()
row.template_list("MATERIAL_UL_matslots", "", obj, "material_slots", obj, "active_material_index", rows = rows)
col = row.column(align = True)
col.operator("object.material_slot_add", icon = "ZOOMIN", text = "")
col.operator("object.material_slot_remove", icon = "ZOOMOUT", text = "")
col.menu("MATERIAL_MT_specials", icon = "DOWNARROW_HLT", text = "")
if is_sortable:
col.separator()
col.operator("object.material_slot_move", icon = "TRIA_UP", text = "").direction = "UP"
col.operator("object.material_slot_move", icon = "TRIA_DOWN", text = "").direction = "DOWN"
if obj.mode == 'EDIT':
row = layout.row(align = True)
row.operator("object.material_slot_assign", text = "Assign")
row.operator("object.material_slot_select", text = "Select")
row.operator("object.material_slot_deselect", text = "Deselect")
split = layout.split(percentage = 0.65)
if obj:
split.template_ID(obj, "active_material", new = "material.new")
row = split.row()
if mat_slot:
row.prop(mat_slot, "link", text = "")
else:
row.label()
elif mat:
split.template_ID(space, "pin_id")
split.separator()
class PhAddMaterialNodesOperator(bpy.types.Operator):
"""Adds a node tree for a material."""
bl_idname = "photon.add_material_nodes"
bl_label = "Add Material Nodes"
@classmethod
def poll(cls, b_context):
b_material = getattr(b_context, "material", None)
node_tree = cls.__get_node_tree(b_material)
return b_material is not None and node_tree is None
def execute(self, b_context):
b_material = b_context.material
node_tree_name = common.mangled_node_tree_name(b_material)
node_tree = bpy.data.node_groups.new(node_tree_name, type = "PH_MATERIAL_NODE_TREE")
		# Since we use the node tree's name to remember which node tree a material
		# uses, the node tree's use count is not increased, so its data may not be
		# saved in the .blend file. Marking it with a fake user is a workaround.
node_tree.use_fake_user = True
b_material.ph_node_tree_name = node_tree_name
return {"FINISHED"}
@classmethod
def __get_node_tree(cls, b_material):
if b_material is None:
return None
else:
return bpy.data.node_groups.get(b_material.ph_node_tree_name, None)
class PhMaterialPanel(bpy.types.Panel):
bl_space_type = "PROPERTIES"
bl_region_type = "WINDOW"
bl_context = "material"
COMPATIBLE_ENGINES = {settings.renderer_id_name,
settings.cycles_id_name}
@classmethod
def poll(cls, b_context):
render_settings = b_context.scene.render
return (render_settings.engine in cls.COMPATIBLE_ENGINES and
b_context.material)
class PhMainPropertyPanel(PhMaterialPanel):
"""
Setting up primary material properties.
"""
bl_label = "PR - Material"
def draw(self, context):
layout = self.layout
layout.operator(PhAddMaterialNodesOperator.bl_idname)
node_tree = node.find_node_tree(context.material)
output_node = node.find_output_node(node_tree)
if output_node is not None:
for input_socket in output_node.inputs:
layout.template_node_view(node_tree, output_node, input_socket)
# ui.material.display_blender_props(layout, material)
class PhOptionPanel(PhMaterialPanel):
"""
Additional options for tweaking the material.
"""
bl_label = "PR - Options"
bpy.types.Material.ph_is_emissive = bpy.props.BoolProperty(
name = "Emissive",
		description = "whether to consider the current material's emissivity or not",
default = False
)
bpy.types.Material.ph_emitted_radiance = bpy.props.FloatVectorProperty(
name = "Radiance",
description = "radiance emitted by the surface",
default = [0.0, 0.0, 0.0],
min = 0.0,
max = sys.float_info.max,
subtype = "COLOR",
size = 3
)
def draw(self, context):
material = context.material
layout = self.layout
row = layout.row()
row.prop(material, "ph_is_emissive")
row.prop(material, "ph_emitted_radiance")
MATERIAL_PANEL_TYPES = [
PhMaterialHeaderPanel,
PhMainPropertyPanel,
PhOptionPanel
]
MATERIAL_OPERATOR_TYPES = [
PhAddMaterialNodesOperator
]
def register():
ui.material.define_blender_props()
class_types = MATERIAL_PANEL_TYPES + MATERIAL_OPERATOR_TYPES
for class_type in class_types:
bpy.utils.register_class(class_type)
def unregister():
class_types = MATERIAL_PANEL_TYPES + MATERIAL_OPERATOR_TYPES
for class_type in class_types:
bpy.utils.unregister_class(class_type)
if __name__ == "__main__":
register()
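# Editor's note (not part of the original file): after register() has run inside
# Blender, the operator defined above is also reachable from the Python console
# as bpy.ops.photon.add_material_nodes(); its poll() requires an active material
# whose ph_node_tree_name does not yet point to an existing node group.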
| mit | 7,535,028,260,935,148,000 | 25.165049 | 114 | 0.682004 | false |
inyaka/catalog | context_processors.py | 1 | 1986 | from django.utils.text import capfirst
from django.db.models import get_models
from django.utils.safestring import mark_safe
from django.contrib.admin import ModelAdmin
# get_models returns all the models, but there are
# some which we would like to ignore
IGNORE_MODELS = (
"sites",
"sessions",
"admin",
"permission",
"contenttypes",
"thumbnail",
"products_image",
"auth_permission",
"static_precompiler",
)
def app_list(request):
'''
Get all models and add them to the context apps variable.
'''
user = request.user
app_dict = {}
admin_class = ModelAdmin
for model in get_models():
model_admin = admin_class(model, None)
app_label = model._meta.app_label
        db_table = model._meta.db_table
if (app_label in IGNORE_MODELS) or (db_table in IGNORE_MODELS):
continue
has_module_perms = user.has_module_perms(app_label)
if has_module_perms:
perms = model_admin.get_model_perms(request)
# Check whether user has any perm for this module.
# If so, add the module to the model_list.
if True in perms.values():
model_dict = {
'name': capfirst(model._meta.verbose_name_plural),
'admin_url': mark_safe('%s/%s/' % (app_label, model.__name__.lower())),
}
if app_label in app_dict:
app_dict[app_label]['models'].append(model_dict)
else:
app_dict[app_label] = {
'name': app_label.title(),
'app_url': app_label + '/',
'has_module_perms': has_module_perms,
'models': [model_dict],
}
app_list = app_dict.values()
app_list.sort(key=lambda x: x['name'])
for app in app_list:
app['models'].sort(key=lambda x: x['name'])
    return {'apps': app_list}
 | gpl-3.0 | 3,297,654,345,102,705,000 | 34.482143 | 91 | 0.549345 | false
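# Editor's note on the app_list context processor above (not part of the
# original file): it only takes effect once registered in settings; on the
# Django versions this code targets that is typically
#
#   TEMPLATE_CONTEXT_PROCESSORS += ("context_processors.app_list",)
#
# (the dotted path depends on where the module lives in the project), after
# which templates can iterate over {{ apps }} to build an admin-style index.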
comodojo/rpcserver | docs/source/conf.py | 1 | 5364 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = u'comodojo/rpcserver'
copyright = u'2018, Marco Giovinazzi'
author = u'Marco Giovinazzi'
# The short X.Y version
version = u''
# The full version, including alpha/beta/rc tags
release = u'2.0.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'comodojorpcserverdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'comodojorpcserver.tex', u'comodojo/rpcserver documentation',
u'Marco Giovinazzi', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'comodojorpcserver', u'comodojo/rpcserver documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'comodojorpcserver', u'comodojo/rpcserver documentation',
author, 'comodojorpcserver', 'Framework-independent XML and JSON(2.0) RPC server',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
epub_tocdepth = 2
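# Editor's note (not part of the original conf.py): with this configuration in
# docs/source, the HTML docs are typically built from the repository root with
#
#   sphinx-build -b html docs/source docs/build/html
#
# (the output directory name is the editor's choice; any writable path works).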
| mit | 525,443,802,569,360,100 | 28.96648 | 87 | 0.655108 | false |