repo_name (string, lengths 5-92) | path (string, lengths 4-232) | copies (string, 19 classes) | size (string, lengths 4-7) | content (string, lengths 721-1.04M) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
fatiherikli/komposto.org | sketches/views.py | 1 | 5951 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import json
import markdown
import base64
from uuid import uuid4
from PIL import Image
from django.shortcuts import render, get_object_or_404
from django.views.decorators.csrf import csrf_exempt
from django.views.generic.list import ListView
from django.http import JsonResponse
from django.views.generic.detail import DetailView
from django.views.generic import TemplateView, View
from django.views.generic import CreateView
from django.http import HttpResponse
from django.views.decorators.clickjacking import xframe_options_sameorigin
from django.core.files.base import ContentFile
from auth.mixins import LoginRequiredMixin
from sketches.models import Sketch
from sketches.forms import SketchCreationForm
from sketches.mixins import (
JSONResponseListMixin, JSONResponseDetailMixin,
PaginationMixin
)
class SketchListView(PaginationMixin, JSONResponseListMixin, ListView):
model = Sketch
def get_queryset(self):
return (
self.model.objects.all()[
self.get_offset():
self.get_limit()
]
)
class SketchDetailView(JSONResponseDetailMixin, DetailView):
model = Sketch
@csrf_exempt
def dispatch(self, *args, **kwargs):
return super(SketchDetailView, self).dispatch(*args, **kwargs)
def post(self, request, pk):
sketch = get_object_or_404(Sketch, pk=pk)
if sketch.user.pk != request.user.pk:
return HttpResponse(status=403)
payload = json.loads(request.body)
sketch.content = payload.get('content')
sketch.title = payload.get('title')
sketch.description = payload.get('description')
sketch.save()
if payload.get('snapshot'):
snapshot = payload.get('snapshot')
binary = base64.b64decode(snapshot)
content = ContentFile(
binary,
name='%s.png' % sketch.slug
)
sketch.snapshots.create(content=content)
return HttpResponse(status=202)
class SketchForkView(LoginRequiredMixin, View):
@csrf_exempt
def dispatch(self, *args, **kwargs):
return super(SketchForkView, self).dispatch(*args, **kwargs)
def post(self, request, pk):
fork_of = get_object_or_404(Sketch, pk=pk)
payload = json.loads(request.body)
sketch = Sketch.objects.create(
user=request.user,
title=payload.get('title'),
description=payload.get('description'),
content=payload.get('content'),
fork_of=fork_of
)
if payload.get('snapshot'):
snapshot = payload.get('snapshot')
binary = base64.b64decode(snapshot)
content = ContentFile(
binary,
name='%s.png' % sketch.slug
)
sketch.snapshots.create(content=content)
return JsonResponse(sketch.serialize(), status=201)
class HomeView(PaginationMixin, TemplateView):
template_name = 'sketches/index.html'
model = Sketch
def get_queryset(self):
return (
self.model.objects.filter(
is_featured=True
)[
self.get_offset():
self.get_limit()
]
)
def get_context_data(self, **kwargs):
return super(HomeView, self).get_context_data(
sketches=self.get_queryset(),
next_page_url=self.get_next_page_url(),
**kwargs
)
class HelpView(TemplateView):
def get_template_names(self):
if self.request.GET.get('only-content'):
return ['sketches/help-content.html']
return ['sketches/help.html']
def get_context_data(self, **kwargs):
path = os.path.join(os.path.dirname(__file__), '../docs/help.md')
content = markdown.markdown(open(path).read())
return super(HelpView, self).get_context_data(
content=content,
**kwargs
)
class AboutView(TemplateView):
template_name = "about.html"
def get_context_data(self, **kwargs):
path = os.path.join(os.path.dirname(__file__), '../docs/about.md')
content = markdown.markdown(open(path).read())
return super(AboutView, self).get_context_data(
content=content,
**kwargs
)
class PlayView(DetailView):
template_name = 'sketches/detail.html'
model = Sketch
def dispatch(self, *args, **kwargs):
nonce = uuid4()
self.request.nonce = nonce
response = super(PlayView, self).dispatch(*args, **kwargs)
response.set_cookie('nonce', nonce)
return response
def get_context_data(self, **kwargs):
return super(PlayView, self).get_context_data(
nonce=self.request.nonce,
**kwargs
)
class SandboxView(DetailView):
template_name = 'sketches/sandbox.html'
model = Sketch
@xframe_options_sameorigin
def dispatch(self, request, *args, **kwargs):
if request.COOKIES.get('nonce') != request.GET.get('nonce'):
return HttpResponse(status=403)
return super(SandboxView, self).dispatch(request, *args, **kwargs)
class NewSketchView(CreateView):
form_class = SketchCreationForm
template_name = "sketches/new.html"
def form_valid(self, form):
form.instance.user = self.request.user
return super(NewSketchView, self).form_valid(form)
class SnapshotView(DetailView):
model = Sketch
def render_to_response(self, context, **response_kwargs):
snapshot = self.object.snapshots.latest('id')
image = Image.new("RGBA", (360, 640))
image.putdata(snapshot.content)
response = HttpResponse(content_type="image/jpeg")
image.save(response, "JPEG")
return response
| mit | -3,792,996,439,892,349,400 | 28.315271 | 74 | 0.625945 | false |
dhuang/incubator-airflow | airflow/operators/jdbc_operator.py | 1 | 1847 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow.hooks.jdbc_hook import JdbcHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class JdbcOperator(BaseOperator):
"""
Executes sql code in a database using jdbc driver.
Requires jaydebeapi.
:param jdbc_conn_id: reference to a predefined database
:type jdbc_conn_id: string
:param sql: the sql code to be executed
:type sql: Can receive a str representing a sql statement,
a list of str (sql statements), or reference to a template file.
Template reference are recognized by str ending in '.sql'
"""
template_fields = ('sql',)
template_ext = ('.sql',)
ui_color = '#ededed'
@apply_defaults
def __init__(
self, sql,
jdbc_conn_id='jdbc_default', autocommit=False, parameters=None,
*args, **kwargs):
super(JdbcOperator, self).__init__(*args, **kwargs)
self.parameters = parameters
self.sql = sql
self.jdbc_conn_id = jdbc_conn_id
self.autocommit = autocommit
def execute(self, context):
self.log.info('Executing: %s', self.sql)
self.hook = JdbcHook(jdbc_conn_id=self.jdbc_conn_id)
self.hook.run(self.sql, self.autocommit, parameters=self.parameters)
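# A minimal, hypothetical usage sketch for JdbcOperator, following the
# docstring above. The DAG id, schedule, SQL and the 'jdbc_default'
# connection are assumptions for illustration, not taken from this module;
# an Airflow JDBC connection and jaydebeapi must already be configured.
def _example_jdbc_dag():
    from datetime import datetime
    from airflow import DAG
    dag = DAG('jdbc_example', start_date=datetime(2017, 1, 1), schedule_interval=None)
    JdbcOperator(
        task_id='run_report',
        jdbc_conn_id='jdbc_default',
        sql='SELECT COUNT(*) FROM some_table',
        autocommit=True,
        dag=dag)
    return dag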
| apache-2.0 | -3,436,255,019,614,622,700 | 34.519231 | 76 | 0.682729 | false |
moriyoshi/payjp-python | payjp/error.py | 1 | 1415 | # coding: utf-8
class PayjpException(Exception):
def __init__(self, message=None, http_body=None, http_status=None,
json_body=None):
super(PayjpException, self).__init__(message)
if http_body and hasattr(http_body, 'decode'):
try:
http_body = http_body.decode('utf-8')
except:
http_body = ('<Could not decode body as utf-8. '
'Please report to [email protected]>')
self.http_body = http_body
self.http_status = http_status
self.json_body = json_body
class APIError(PayjpException):
pass
class APIConnectionError(PayjpException):
pass
class CardError(PayjpException):
def __init__(self, message, param, code, http_body=None,
http_status=None, json_body=None):
super(CardError, self).__init__(message,
http_body, http_status, json_body)
self.param = param
self.code = code
class AuthenticationError(PayjpException):
pass
class InvalidRequestError(PayjpException):
def __init__(self, message, param, http_body=None,
http_status=None, json_body=None):
super(InvalidRequestError, self).__init__(
message, http_body, http_status, json_body)
self.param = param
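# Illustrative sketch only: how these exception classes are typically caught.
# The CardError raised below is fabricated purely to show the extra fields;
# it does not correspond to a real API response.
def _example_handle_card_error():
    try:
        raise CardError('card declined', param='number', code='card_declined',
                        http_status=402, json_body={'error': {}})
    except CardError as e:
        return (e.param, e.code, e.http_status)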
| mit | -915,718,673,730,715,300 | 25.698113 | 74 | 0.587279 | false |
wfpinedar/fpm_data_cube | src/installer/dc_tasks.py | 1 | 62143 | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 24 14:01:33 2015
@author: jdh
Tile task system for celery
"""
from datacube.api.query import SortType
from matplotlib.mlab import PCA
from datetime import datetime,timedelta
import logging
import os
from osgeo import gdal
import osr #added to export the PCA file (TODO: avoid this)
import numpy
import numpy as np
import numexpr as ne
import Image
import sklearn
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import cPickle as pickl
from sklearn.preprocessing import normalize
from datacube.api.model import DatasetType, Ls57Arg25Bands, Satellite, Ls8Arg25Bands
from datacube.api.utils import NDV, empty_array, get_dataset_metadata, get_dataset_data_with_pq, raster_create,get_dataset_data, PqaMask
from datacube.api.query import list_tiles
from datacube.api.model import DatasetType
from datacube.api.model import Ls57Arg25Bands, TciBands, NdviBands, EviBands
from datacube.api.query import list_tiles
from datacube.api.utils import get_mask_pqa, get_dataset_data_masked, OutputFormat
import time
from pprint import pprint
import itertools
import random
import string
from gdalconst import *
from datacube_worker import celery, cache, database
import Image
import math
from scipy.cluster.vq import kmeans,vq
#app = Celery('tasks',backend='redis://localhost',broker='amqp://')
satellites = {'ls7':Satellite.LS7,'ls8':Satellite.LS8}
FILE_EXT = {"png":".png","GTiff":".tif","VRT":".vrt","JPEG":".jpeg"}
@celery.task()
def get_tile_info(xa,ya,start,end,satellite,datasets,months=None):
"""
Get Tile Info
"""
tiles = list_tiles(x=xa,y=ya,acq_min=start,acq_max=end,satellites = satellite,dataset_types=datasets)
data = "{\"request\":\"DONE\",\"tiles\":["
data_arr = []
for tile in tiles:
if months:
print tile.start_datetime.month
if tile.start_datetime.month in months:
data_arr.append("{\"x\":"+str(tile.x)+",\"y\":"+str(tile.y)+",\"date\":\""+str(tile.start_datetime)+"\"}")
else:
data_arr.append("{\"x\":"+str(tile.x)+",\"y\":"+str(tile.y)+",\"date\":\""+str(tile.start_datetime)+"\"}")
data+=','.join(data_arr)+"]}"
return data
@celery.task()
def get_tile_listing(xa,ya,start,end,satellite,datasets,months=None):
"""
List tiles. Months will only show the requested months
"""
tiles = list_tiles(x=xa,y=ya,acq_min=start,acq_max=end,satellites = satellite,dataset_types=datasets)
data = "{\"request\":\"DONE\",\"tiles\":["
data_arr = []
for tile in tiles:
if months:
print tile.start_datetime.month
if tile.start_datetime.month in months:
data_arr.append("{\"x\":"+str(tile.x)+",\"y\":"+str(tile.y)+",\"date\":\""+str(tile.start_datetime)+"\"}")
else:
data_arr.append("{\"x\":"+str(tile.x)+",\"y\":"+str(tile.y)+",\"date\":\""+str(tile.start_datetime)+"\"}")
data+=','.join(data_arr)+"]}"
return data
@celery.task()
def obtain_cloudfree_mosaic(x,y,start,end, bands, satellite,iterations=0,xsize=4000,ysize=4000,file_format="GTiff",data_type=gdal.GDT_CInt16,months=None):
StartDate = start
EndDate = end
print "starting cloudfree mosaic"
best_data = {}
band_str = "+".join([band.name for band in bands])
sat_str = "+".join([sat.name for sat in satellite])
cache_id = [str(x),str(y),str(start),str(end),band_str,sat_str,str(xsize),str(ysize),file_format,str(iterations)]
f_name = "_".join(cache_id)
f_name = f_name.replace(" ","_")
c_name = f_name
cached_res = cache.get(c_name)
if cached_res:
return str(cached_res)
f_name = os.path.join("/tilestore/tile_cache",f_name)
tiles = list_tiles(x=[x], y=[y],acq_min=StartDate,acq_max=EndDate,satellites=satellite,dataset_types=[DatasetType.ARG25,DatasetType.PQ25], sort=SortType.ASC)
tile_metadata = None
tile_count = 0
tile_filled = False
stats_file = open(f_name+'.csv','w+')
for tile in tiles:
if tile_filled:
break
if months:
print tile.start_datetime.month
if not tile.start_datetime.month in months:
continue
#print "merging on tile "+str(tile.x)+", "+str(tile.y)
tile_count+=1
dataset = DatasetType.ARG25 in tile.datasets and tile.datasets[DatasetType.ARG25] or None
if dataset is None:
print "No dataset availible"
tile_count-=1
continue
tile_metadata = get_dataset_metadata(dataset)
if tile_metadata is None:
print "NO METADATA"
tile_count-=1
continue
pqa = DatasetType.PQ25 in tile.datasets and tile.datasets[DatasetType.PQ25] or None
mask = None
mask = get_mask_pqa(pqa,[PqaMask.PQ_MASK_CLEAR],mask=mask)
if tile.dataset.find('LC8') >= 0:
nbands = map(lambda x: Ls8Arg25Bands(x.value+1),bands)
else:
nbands = bands
band_data = get_dataset_data_masked(dataset, mask=mask,bands=nbands)
if tile.dataset.find('LC8') >= 0:
band_data = dict(map(lambda (k,v): (Ls57Arg25Bands(k.value-1),v), band_data.iteritems()))
swap_arr = None
best = None
for band in bands:
if not band in best_data:
#print "Adding "+band.name
#print band_data[band]
best_data[band]=band_data[band]
best = numpy.array(best_data[band])
swap_arr=numpy.in1d(best.ravel(),-999).reshape(best.shape)
else:
best = numpy.array(best_data[band])
swap_arr=numpy.in1d(best.ravel(),-999).reshape(best.shape)
b_data = numpy.array(band_data[band])
# extend array if source data is smaller than best data
while b_data.shape[1] < swap_arr.shape[1]:
col = numpy.zeros((b_data.shape[0],1))
col.fill(-999)
b_data = numpy.append(b_data,col,axis=1)
while b_data.shape[0] < swap_arr.shape[0]:
row = numpy.zeros((1,b_data.shape[1]))
row.fill(-999)
b_data = numpy.append(b_data,row,axis=0)
best[swap_arr]=b_data[swap_arr]
best_data[band]=numpy.copy(best)
del b_data
stats_file.write(str(tile.start_datetime.year)+','+str(tile.start_datetime.month)+','+str(len(best[swap_arr]))+"\n")
del swap_arr
del best
if iterations > 0:
if tile_count>iterations:
print "Exiting after "+str(iterations)+" iterations"
break
numberOfBands=len(bands)
if numberOfBands == 0:
return "None"
if bands[0] not in best_data:
print "No data was merged for "+str(x)+", "+str(y)
return "None"
print "mosaic created"
numberOfPixelsInXDirection=len(best_data[bands[0]])
print numberOfPixelsInXDirection
numberOfPixelsInYDirection=len(best_data[bands[0]][0])
print numberOfPixelsInYDirection
pixels = numberOfPixelsInXDirection
if numberOfPixelsInYDirection > numberOfPixelsInXDirection:
pixels = numberOfPixelsInYDirection
if tile_count <1:
print "No tiles found for "+str(x)+", "+str(y)
return "None"
driver = gdal.GetDriverByName(file_format)
if driver is None:
print "No driver found for "+file_format
return "None"
#print f_name+'.tif'
raster = driver.Create(f_name+'.tif', pixels, pixels, numberOfBands, data_type, options=["BIGTIFF=YES", "INTERLEAVE=BAND"])
raster.SetGeoTransform(tile_metadata.transform)
raster.SetProjection(tile_metadata.projection)
index = 1
stats_file.close()
for band in bands:
stack_band = raster.GetRasterBand(index)
stack_band.SetNoDataValue(-999)
stack_band.WriteArray(best_data[band])
stack_band.ComputeStatistics(True)
index+=1
stack_band.FlushCache()
del stack_band
raster.FlushCache()
del raster
cache.set(c_name,f_name+".tif")
return f_name+".tif"
@celery.task()
def matrix_obtain_mosaic(x,y,start,end, bands, satellite,iterations=0,xsize=4000,ysize=4000,file_format="GTiff",data_type=gdal.GDT_CInt16,months=None, normalized=False):
"""
Obtains a dict with the query results, one matrix per band
MATRIX OBTAIN MOSAIC
"""
StartDate = start
EndDate = end
print("____________________matriz_obtain_mosaic____________________")
tiles = list_tiles(x=[x], y=[y],acq_min=StartDate,acq_max=EndDate,satellites=satellite,dataset_types=[DatasetType.ARG25,DatasetType.PQ25], sort=SortType.ASC)
tile_metadata = None
tile_count = 0
tile_filled = False
total_ins = 0
all_bands={}
avgs_band={}
st_band={}
count_band={}
for tile in tiles:
if tile_filled:
break
if months:
print tile.start_datetime.month
if not tile.start_datetime.month in months:
continue
tile_count+=1
dataset = DatasetType.ARG25 in tile.datasets and tile.datasets[DatasetType.ARG25] or None
if dataset is None:
print "No dataset availible"
tile_count-=1
continue
tile_metadata = get_dataset_metadata(dataset)
if tile_metadata is None:
print "NO METADATA"
tile_count-=1
continue
pqa = DatasetType.PQ25 in tile.datasets and tile.datasets[DatasetType.PQ25] or None
mask = None
mask = get_mask_pqa(pqa,[PqaMask.PQ_MASK_CLEAR],mask=mask)
band_data = get_dataset_data_masked(dataset, mask=mask,bands=bands)
del mask
for band in band_data:
# print "Adding "+band.name
data = numpy.array(band_data[band]).astype(numpy.float32)
non_data=numpy.in1d(data.ravel(),-999).reshape(data.shape)
data[non_data]=numpy.NaN
if normalized:
m=np.nanmean(data)
st=np.nanstd(data)
if not np.isnan(m):
avgs_band[band.name]=avgs_band[band.name]+m if avgs_band.has_key(band.name) else m
st_band[band.name]=st_band[band.name]+st if st_band.has_key(band.name) else st
count_band[band.name] =(count_band[band.name]+1) if count_band.has_key(band.name) else 1
if not np.isnan(m):
# print ("Media: "+str(m)+" STD: "+str(st))
data=np.true_divide(np.subtract(data,m),st)
if not np.isnan(data).all():
if all_bands.has_key(band.name):
all_bands[band.name]=numpy.dstack((all_bands[band.name], data))
else:
all_bands[band.name]=data
if normalized:
for band in bands:
if count_band.has_key(band.name):
all_bands[band.name]=(all_bands[band.name]*(st_band[band.name]/count_band[band.name]))+(avgs_band[band.name]/count_band[band.name])
return all_bands,tile_metadata
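# Illustrative sketch (not called anywhere): the dict returned above maps band
# names to 2-D or 3-D (row, col, acquisition) float arrays with NaN as nodata,
# so a per-pixel composite can be reduced along axis 2, as obtain_median below
# does. The reduction shown here is an assumption, not the only option.
def _example_reduce_mosaic_dict(all_bands):
    composites = {}
    for name, stack in all_bands.items():
        if stack.ndim == 3:
            composites[name] = np.nanmedian(stack, axis=2)
        else:
            composites[name] = stack
    return composites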
@celery.task()
def obtain_median(validate_range,x,y,start,end, bands, satellite,iterations=0,xsize=4000,ysize=4000,file_format="GTiff",data_type=gdal.GDT_Float32,months=None):
median_bands,meta=matrix_obtain_mosaic(x,y,start,end, bands, satellite,iterations=0,xsize=4000,ysize=4000,file_format="GTiff",data_type=data_type,months=None,normalized=True)
print "OBTAIN MEDIAN"
print "Terminó consulta"
median_data=None
for bandCONST in bands:
#b =np.apply_along_axis(median_min,2,median_bands[band],validate_range)
band=bandCONST.name
print band
if not band in median_bands:
continue
print median_bands[band].shape
if len(median_bands[band].shape)>2:
b=np.nanmedian(median_bands[band],2)
allNan=~np.isnan(median_bands[band])
b[np.sum(allNan,2)<validate_range]=np.nan
del allNan
else:
b=median_bands[band]
if validate_range>1:
b[:]=np.nan
if median_data is None:
median_data=b
else:
median_data=np.dstack((median_data, b))
#print median_data.shape
del median_bands
return median_data,meta
@celery.task()
def obtain_median_mosaic(validate_range,x,y,start,end, bands, satellite,iterations=0,xsize=4000,ysize=4000,file_format="GTiff",data_type=gdal.GDT_CFloat32,months=None):
medians,meta=obtain_median(validate_range,x,y,start,end, bands, satellite,iterations,xsize,ysize,file_format,data_type,months)
if medians is None:
return "None"
pprint(medians.shape)
pprint(len(medians.shape))
nf=medians.shape[0]
nc=medians.shape[1]
if len(medians.shape)>=3:
nb=medians.shape[2]
else:
nb=1
band_str = "+".join([band.name for band in bands])
sat_str = "+".join([sat.name for sat in satellite])
cache_id = [str(x),str(y),str(start),str(end),band_str,sat_str,str(xsize),str(ysize),file_format,str(iterations)]
f_name = "_".join(cache_id)
f_name = "res_median_"+f_name.replace(" ","_")
c_name = f_name
f_name = os.path.join("/tilestore/tile_cache",f_name)
tile_metadata=meta
numberOfBands=nb
if numberOfBands == 0:
return "None"
numberOfPixelsInXDirection=nc
print numberOfPixelsInXDirection
numberOfPixelsInYDirection=nf
print numberOfPixelsInYDirection
pixels = numberOfPixelsInXDirection
if numberOfPixelsInYDirection > numberOfPixelsInXDirection:
pixels = numberOfPixelsInYDirection
driver = gdal.GetDriverByName(file_format)
if driver is None:
print "No driver found for "+file_format
return "None"
raster = driver.Create(f_name+'.tif', pixels, pixels, numberOfBands, data_type, options=["BIGTIFF=YES", "INTERLEAVE=BAND"])
raster.SetGeoTransform(tile_metadata.transform)
raster.SetProjection(tile_metadata.projection)
index = 1
#medians[np.isnan(medians)]=-999
for band in range (0,nb):
stack_band = raster.GetRasterBand(index)
stack_band.SetNoDataValue(-999)
if nb==1:
stack_band.WriteArray(medians)
else:
stack_band.WriteArray(medians[:,:,band])
stack_band.ComputeStatistics(True)
index+=1
stack_band.FlushCache()
del stack_band
raster.FlushCache()
del raster
cache.set(c_name,f_name+".tif")
return f_name+".tif"
def obtain_histogram_info(x,y,start,end, selectedBand, satellite):
median_bands,meta=matrix_obtain_mosaic(x,y,start,end, [selectedBand], satellite,iterations=0,xsize=4000,ysize=4000,file_format="GTiff",data_type=gdal.GDT_CInt16,months=None)
median_data=None
band=selectedBand.name
if not(median_bands.has_key(band)):
pprint('No data for period'+str(x)+' '+str(y)+' '+str(start)+' '+str(end))
return None,[],0,0,0
allNan=~np.isnan(median_bands[band])
tileSizeArray=allNan.shape
numberTiles=1
if len(tileSizeArray)>=3:
numberTiles=tileSizeArray[2]
if numberTiles>1:
matrixCount=np.sum(allNan,2)
else:
matrixCount=np.sum(allNan)
del allNan
histogram=np.histogram(np.ravel(matrixCount),density=False)
bincount=np.bincount(np.ravel(matrixCount))
min=np.min(matrixCount)
max=np.max(matrixCount)
return histogram,bincount,min,max,numberTiles
@celery.task()
def obtain_forest_noforest(x, y, start_date, end_date, satellite = [Satellite.LS7], months = None, min_ok = 1, vegetation_rate = 0.5, ndvi_threshold = 0.7, slice_size = 3):
period_ndvi,metadata = obtain_ndvi(x, y, start_date, end_date, satellite = satellite, months = months, min_ok = min_ok)
if period_ndvi is None:
return "None"
height = period_ndvi.shape[0]
width = period_ndvi.shape[1]
nan_mask=np.isnan(period_ndvi)
original_ndvi=period_ndvi.astype(float)
original_ndvi=np.clip(original_ndvi,-1,1)
for y1 in xrange(0, height, slice_size):
for x1 in xrange(0, width, slice_size):
x2 = x1 + slice_size
y2 = y1 + slice_size
if(x2 > width):
x2 = width
if(y2 > height):
y2 = height
submatrix = period_ndvi[y1:y2,x1:x2]
ok_pixels = np.count_nonzero(~np.isnan(submatrix))
submatrix[np.isnan(submatrix)]=-1
if ok_pixels==0:
period_ndvi[y1:y2,x1:x2] = 1
elif float(np.sum(submatrix>ndvi_threshold))/float(ok_pixels) >= vegetation_rate :
period_ndvi[y1:y2,x1:x2] = 2
else:
period_ndvi[y1:y2,x1:x2] = 1
period_ndvi[nan_mask] = np.nan
composite_all=np.dstack((period_ndvi,original_ndvi))
pprint("Max nvdi es:"+str(np.nanmax(original_ndvi)))
pprint("Min nvdi es:"+str(np.nanmin(original_ndvi)))
# Build the base name of the output files
bands = [ Ls57Arg25Bands.RED, Ls57Arg25Bands.NEAR_INFRARED ]
bands_str = '+'.join(each_band.name for each_band in bands)
satellites_str = '+'.join(each_satellite.name for each_satellite in satellite)
image_filename = ("_".join([str(x), str(y), str(start_date), str(end_date), bands_str, satellites_str])).replace(" ","_")
# generate_rgb_image(period_ndvi, period_ndvi, period_ndvi, temp_directory, output_name = "FOREST_NOFOREST_" + image_filename, width = width, height = height, scale = 0.3)
file=generate_geotiff_image(composite_all, width, height, "/tilestore/tile_cache/", metadata = metadata, output_name = "FOREST_NOFOREST_" + image_filename)
return file
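# Illustrative sketch of the block classification used above on a single
# 3x3 slice, with the default thresholds from the signature
# (vegetation_rate=0.5, ndvi_threshold=0.7); the NDVI values are made up.
def _example_block_classification():
    ndvi = np.array([[0.9, 0.8, 0.1], [0.85, 0.75, 0.2], [0.8, 0.9, 0.3]])
    ok_pixels = np.count_nonzero(~np.isnan(ndvi))
    forest_share = float(np.sum(ndvi > 0.7)) / float(ok_pixels)  # 6/9 ~ 0.67
    return 2 if forest_share >= 0.5 else 1  # 2 = forest, 1 = no forest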
def obtain_ndvi(x, y, start_date, end_date, satellite = [Satellite.LS7], months = None, min_ok = 2):
print "BEGIN NDVI PROCESS"
# List the bands needed to compute NDVI
bands = [ Ls57Arg25Bands.RED, Ls57Arg25Bands.NEAR_INFRARED ]
# Get the median composite for period 1
period, metadata = obtain_median(min_ok,x, y, start_date, end_date,bands, satellite,iterations=0,xsize=4000,ysize=4000,file_format="GTiff",data_type=gdal.GDT_Float32,months=None)
if period is None:
return None, metadata
mask_nan=np.any(np.isnan(period),axis=2)
# Split the red and near-infrared channels
period_red = period[:,:,0]
period_red[mask_nan]=0
period_nir = period[:,:,1]
period_nir[mask_nan]=0
# Compute NDVI for period 1
period_ndvi = np.true_divide( np.subtract(period_nir,period_red) , np.add(period_nir,period_red) )
period_nvdi2=np.copy(period_ndvi)
np.clip(period_ndvi,0,1,out=period_nvdi2)
period_nvdi2[mask_nan]=np.nan
return period_nvdi2, metadata
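# A worked example of the NDVI formula applied above,
# NDVI = (NIR - RED) / (NIR + RED), on made-up reflectance values.
def _example_ndvi_arithmetic():
    red = np.array([[500.0, 300.0]])
    nir = np.array([[2500.0, 2700.0]])
    # (2500-500)/(2500+500) = 0.667 and (2700-300)/(2700+300) = 0.8
    return np.true_divide(np.subtract(nir, red), np.add(nir, red))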
def obtain_bands_dict(x, y, start, end, bands, satellite, months=None):
"""
Obtains a dict with the query results, one matrix per band
"""
tiles = list_tiles(x=[x], y=[y],acq_min=start,acq_max=end,satellites=satellite,dataset_types=[DatasetType.ARG25,DatasetType.PQ25], sort=SortType.ASC)
tile_metadata = None
tile_count = 0
tile_filled = False
total_ins = 0
all_bands={}
for tile in tiles:
if tile_filled:
break
if months:
print tile.start_datetime.month
if not tile.start_datetime.month in months:
continue
tile_count+=1
dataset = DatasetType.ARG25 in tile.datasets and tile.datasets[DatasetType.ARG25] or None
if dataset is None:
print "No dataset availible"
tile_count-=1
continue
tile_metadata = get_dataset_metadata(dataset)
if tile_metadata is None:
print "NO METADATA"
tile_count-=1
continue
pqa = DatasetType.PQ25 in tile.datasets and tile.datasets[DatasetType.PQ25] or None
mask = None
mask = get_mask_pqa(pqa,[PqaMask.PQ_MASK_CLEAR],mask=mask)
band_data = get_dataset_data_masked(dataset, mask=mask,bands=bands)
for band in band_data:
data = np.array(band_data[band]).astype(np.float32)
non_data=np.in1d(data.ravel(),-999).reshape(data.shape)
data[non_data]=np.NaN
if all_bands.has_key(band.name):
all_bands[band.name]=np.dstack((all_bands[band.name], data))
else:
all_bands[band.name]=np.array(data)
return all_bands, tile_metadata
def ravel_compounds(compounds):
flattened_compounds = None
for compound in xrange(0, compounds.shape[2]):
flattened_compound = compounds[:,:,compound].ravel()
if flattened_compounds is None:
flattened_compounds = flattened_compound
else:
flattened_compounds = np.vstack((flattened_compounds, flattened_compound))
return flattened_compounds.T
def obtain_medians_compound(x, y, start, end, bands, satellite, months = None, validate_range = 2):
median_bands, metadata = obtain_bands_dict(x, y, start, end, bands, satellite, months)
print "Terminó consulta"
if median_bands is None:
return None, metadata
median_data=None
for bandCONST in bands:
#b =np.apply_along_axis(median_min,2,median_bands[band],validate_range)
band=bandCONST.name
print band
print median_bands[band].shape
if len(median_bands[band].shape)>2:
b=np.nanmedian(median_bands[band],2)
allNan=~np.isnan(median_bands[band])
b[np.sum(allNan,2)<validate_range]=np.nan
del allNan
else:
b=median_bands[band]
if validate_range>1:
b[:]=np.nan
if median_data is None:
median_data=b
else:
median_data=np.dstack((median_data, b))
#print median_data.shape
del median_bands
return median_data,metadata
@celery.task()
def obtain_convolution_nvdi(prueba,NDVI_result_final,percetage_ndvi=0.3,threshold_ndvi=0.7):
print ("_______________obtain_convolution_nvdiL____________")
[height,weight]=NDVI_result_final.shape
#print ("Alto",height)
#print ("Ancho",weight)
test=(prueba+" entered convolution")
nueva_matriz=None
for x1 in xrange(0,height,3):
for y1 in xrange(0,weight,3):
auxX=x1+3
auxY=y1+3
if(auxX>=height):
auxX=height-1
if(auxY>=weight):
auxY=weight-1
auxMatriz=NDVI_result_final[xrange(x1,auxX),:] [:,xrange(y1,auxY)]
#print auxMatriz.shape
count_pixel=auxMatriz.shape[0]*auxMatriz.shape[1]
pixel_nan=np.count_nonzero(np.isnan(auxMatriz))
pixel_forest=np.sum(np.where(auxMatriz>threshold_ndvi,1,0))
if(x1==0 and y1==0):
print("AUX_X______",auxX)
print("AUX_Y_______",auxY)
print("AUX_AUXM______",auxMatriz)
print("AUX_COUPIX______",count_pixel)
print("AUX_COU_NAN______",pixel_nan)
print("AUX_PIX_FOR______",pixel_forest)
if(count_pixel-pixel_nan>0):
auxResult=(pixel_forest)/(count_pixel-pixel_nan)
if(auxResult>percetage_ndvi):
#print ("ENTRO ERROR")
NDVI_result_final[x1:auxX, y1:auxY]=1
else:
NDVI_result_final[x1:auxX, y1:auxY]=0
else:
NDVI_result_final[x1:auxX, y1:auxY]=np.nan
if(x1==0 and y1==0):
print ("FINAL TEST",NDVI_result_final[xrange(x1,auxX),:] [:,xrange(y1,auxY)])
print NDVI_result_final
return test
def generate_geotiff_image(input_array, width, height, output_path, metadata, output_name = "oimage4", data_type = gdal.GDT_Float32 ):
n_bands=1
if len(input_array.shape)>=3:
n_bands = input_array.shape[2]
gtiff_driver = gdal.GetDriverByName('GTiff')
f_name=output_path + output_name
raster = gtiff_driver.Create( f_name+ '.tif', width, height, n_bands, eType = data_type, options=["BIGTIFF=YES", "INTERLEAVE=BAND"])
if metadata:
raster.SetGeoTransform(metadata.transform)
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS("WGS84")
raster.SetProjection(srs.ExportToWkt())
for band in xrange(0,n_bands):
raster_band = raster.GetRasterBand(band+1)
raster_band.SetNoDataValue(-999)
if n_bands==1:
raster_band.WriteArray(input_array)
else:
raster_band.WriteArray(input_array[:,:,band])
raster_band.ComputeStatistics(True)
raster_band.FlushCache()
raster.FlushCache()
return f_name+ '.tif'
def generate_rgb_image(r_array, g_array, b_array, output_path, output_name = "oimage", width = None, height = None, scale = 1, format = "jpg"):
input_array = np.zeros(((width*height),3))
input_array[:,0] = r_array
input_array[:,1] = g_array
input_array[:,2] = b_array
if len(input_array.shape) == 2:
input_array = input_array.reshape((height, width, 3))
max_value = np.nanmax(input_array)
input_array = (input_array/max_value)*255
output_img = Image.fromarray(np.uint8(input_array), 'RGB')
width = int(np.ceil(output_img.size[0]*scale))
height = int(np.ceil(output_img.size[1]*scale))
output_img = output_img.resize((width, height))
output_img.save(output_path + output_name + "." + format)
@celery.task()
def obtain_pca_png(validate_range,x,y,start1,end1,start2,end2, bands, satellite,iterations=0,xsize=4000,ysize=4000,file_format="GTiff",data_type=gdal.GDT_CInt16,months=None):
median_p1=obtain_median(validate_range,x,y,start1,end1, bands, satellite,iterations,xsize,ysize,file_format,data_type,months)
median_p2=obtain_median(validate_range,x,y,start2,end2, bands, satellite,iterations,xsize,ysize,file_format,data_type,months)
pickl.dump( median_p1, open( "median_p_1.p", "wb" ) )
pickl.dump( median_p2, open( "median_p_2.p", "wb" ) )
##SAVING MEDIAN DATA
component_p1=pre_process_ravel(median_p1)
component_p2=pre_process_ravel(median_p2)
#________________normalization loop______________________________
for x in xrange(0,component_p1.shape[1]):
component_p2[:,x]=normalize(component_p1[:,x],component_p2[:,x])
#________________mask loop______________________________
mask_comp = None
for x in xrange(0,component_p1.shape[1]):
if(mask_comp is None) :
mask_comp = combine_masks(np.zeros(len(component_p1[:,x])),component_p1[:,x])
mask_comp = combine_masks(mask_comp,component_p2[:,x])
else:
mask_comp = combine_masks(mask_comp,(combine_masks(component_p1[:,x],component_p2[:,x])))
#________________NaN replacement loop______________________________
pre_pca_bands=numpy.concatenate((component_p1,component_p2),1)
a= pre_pca_bands.flatten()
median_array_pre_pca=np.nanmedian(a)
print("MEDIANA PREPCA",median_array_pre_pca)
for x in xrange(0,pre_pca_bands.shape[1]):
pre_pca_bands[:,x]=convert_nan_to_median(pre_pca_bands[:,x],median_array_pre_pca)
print ("RESULTADO FINAL",pre_pca_bands.shape)
print("COMPUESTO SIN NAN",pre_pca_bands)
print ("RESULTADO MASCARA PARA COMPARAR DATOS ",mask_comp)
##GUARDANDO DATOS TEST
print ("GUARDE LOS DATOS")
f_pca=PCA(pre_pca_bands)
size_ma=f_pca.Y.T.shape
pickl.dump( f_pca, open( "f_pca2.p", "wb" ) )
pickl.dump( mask_comp, open( "mask_comp2.p", "wb" ) )
presult=f_pca.Y[:,0].reshape(3705,3705)
presult2=f_pca.Y[:,2].reshape(3705,3705)
#normalization
presult *= (255.0/presult.max())
im = Image.fromarray(np.uint8(cm.gist_earth(presult)*255))
im2 = Image.fromarray(np.uint8(cm.gist_earth(presult2)*255))
print ("MATRIX ok2",im)
im.save('test__TEST2.jpeg')
im2.save('test72.png')
return 0
@celery.task()
def obtain_median_png(validate_range,x,y,start1,end1,start2,end2, bands, satellite,iterations=0,xsize=4000,ysize=4000,file_format="GTiff",data_type=gdal.GDT_CInt16,months=None):
mediana= pickl.load( open( "median_p_1.p", "rb" ) )
print("MEDIAN",mediana)
print("MEDIAN SHAPE",mediana.shape)
print mediana
print mediana.shape
#rgbArray = np.zeros((512,512,3), 'uint8')
r=mediana[..., 0]
g=mediana[..., 1]
b=mediana[..., 1]
print("MEDIAN",mediana)
print("R",r)
print("G",g)
print("B",b)
return 0
def obtain_pca_all(validate_range,x,y,start1,end1,start2,end2, bands, satellite,iterations=0,xsize=4000,ysize=4000,file_format="GTiff",data_type=gdal.GDT_CInt16,months=None):
print("OBTAIN PCA_ALL")
raw_b1,meta=obtain_median(validate_range,x,y,start1,end1, bands, satellite,iterations=0,xsize=4000,ysize=4000,file_format="GTiff",data_type=gdal.GDT_CInt16,months=None)
median_p1=raw_b1
nf=raw_b1.shape[0]
nc=raw_b1.shape[1]
nb=raw_b1.shape[2]*2
median_p2,meta2=obtain_median(validate_range,x,y,start2,end2, bands, satellite,iterations,xsize,ysize,file_format,data_type,months)
pickl.dump( median_p1, open( "26_median_p_1_all_f.p", "wb" ) )
pickl.dump( median_p2, open( "26_median_p_2_all_f.p", "wb" ) )
##SAVING MEDIAN DATA
component_p1=pre_process_ravel(raw_b1)
component_p2=pre_process_ravel(median_p2)
#________________normalization loop______________________________
for x in xrange(0,component_p1.shape[1]):
component_p2[:,x]=normalize(component_p1[:,x],component_p2[:,x])
#________________mask loop______________________________
mask_comp = None
for x in xrange(0,component_p1.shape[1]):
if(mask_comp is None) :
mask_comp = component_p1[:,x]
mask_comp = combine_masks(mask_comp,component_p2[:,x])
else:
mask_comp = combine_masks(mask_comp,(combine_masks(component_p1[:,x],component_p2[:,x])))
#________________NaN replacement loop______________________________
pre_pca_bands=numpy.concatenate((component_p1,component_p2),1)
a= pre_pca_bands.flatten()
median_array_pre_pca=np.nanmedian(a)
print("MEDIANA PREPCA",median_array_pre_pca)
for x in xrange(0,pre_pca_bands.shape[1]):
pre_pca_bands[:,x]=convert_nan_to_median(pre_pca_bands[:,x],median_array_pre_pca)
print ("RESULTADO FINAL",pre_pca_bands.shape)
print("COMPUESTO SIN NAN",pre_pca_bands)
print ("RESULTADO MASCARA PARA COMPARAR DATOS ",mask_comp)
##GUARDANDO DATOS TEST
print ("GUARDE LOS DATOS")
f_pca=PCA(pre_pca_bands)
size_ma=f_pca.Y.T.shape
presult=f_pca.Y.T
pickl.dump( f_pca, open( "26_pca_final_25.p", "wb" ) )
pickl.dump( presult, open( "26_pca_final_trasn.p", "wb" ) )
presult1=f_pca.Y[:,0].reshape(3705,3705)
presult2=f_pca.Y[:,2].reshape(3705,3705)
#normalization
presult1 *= (255.0/presult1.max())
im = Image.fromarray(np.uint8(cm.gist_earth(presult1)*255))
im2 = Image.fromarray(np.uint8(cm.gist_earth(presult2)*255))
print ("MATRIX ok2",im)
im.save('26_presentacion.jpeg')
im2.save('26_presentacion_norma.jpeg')
#-_-------------------_-----------------------
km_centroids,_=kmeans(f_pca.Y, 2) #Generate the centroids
print km_centroids
"""
Guardar el archivo:
"""
band_str = "+".join([band.name for band in bands])
sat_str = "+".join([sat.name for sat in satellite])
cache_id = [str(x),str(y),str(start1),str(end1),str(start2),str(end2),band_str,sat_str,str(xsize),str(ysize),file_format,str(iterations)]
f_name = "_".join(cache_id)
f_name = "26_celery"+f_name.replace(" ","_")
c_name = f_name
driver = gdal.GetDriverByName(file_format)
if driver is None:
print "No driver found for "+file_format
return "None"
c_file=os.path.join("/tilestore/tile_cache","centroids_"+f_name+".csv")
print c_file
numpy.savetxt(c_file,km_centroids)
f_name = os.path.join("/tilestore/tile_cache",f_name)
raster = driver.Create(f_name+'.tif', nf, nc, nb, data_type, options=["BIGTIFF=YES", "INTERLEAVE=BAND"])
raster.SetGeoTransform((x-0.00025, 0.00027, 0.0, y+1.0002400000000002, 0.0, -0.00027)) #Should be taken from the original, or computed properly
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS("WGS84")
raster.SetProjection(srs.ExportToWkt())
index = 1
for bn in presult:
stack_band = raster.GetRasterBand(index)
stack_band.SetNoDataValue(-999)
bn[numpy.isnan(bn)]=-999
stack_band.WriteArray(bn.reshape(nf,nc))
stack_band.ComputeStatistics(False)
index+=1
stack_band.FlushCache()
del stack_band
raster.FlushCache()
del raster
cache.set(c_name,f_name+".tif")
return f_name+".tif"
#Function that applies the NaN elements of mask2 onto array1
@celery.task()
def apply_nan(array1,mask2):
if (len(array1)==len(mask2)):
i = 0
while i < len(array1):
if(np.isnan(mask2[i])):
array1[i] = np.nan
i+=1
return array1
else:
print("ERROR DE TAMANOS DE MASCARA DIFERENTES DIFERENTES")
def generate_component_kmean(km_centroids,pca_final_with_nan):
indices = [numpy.where(km_centroids<=x)[0][0] for x in pca_final_with_nan]
print indices
return 99
@celery.task()
def convert_nan_to_median(array1,median_array_pre_pca):
f_result=[]
i=0
media=median_array_pre_pca
#print ("media ",media)
while i<len(array1) :
if(np.isnan(array1[i])):
f_result.append(media)
else:
f_result.append(array1[i])
i+=1
return f_result
@celery.task()
def combine_masks(mask1, mask2):
if (len(mask1)==len(mask2)):
i = 0
while i < len(mask1):
if(np.isnan(mask2[i])):
mask1[i] = np.nan
i+=1
return mask1
else:
print("ERROR DE TAMANOS DE MASCARA DIFERENTES DIFERENTES")
@celery.task()
def normalize(final_composite1,final_composite2):
desv_final_mask2=np.nanstd(final_composite2)
mean_final_1=np.nanmean(final_composite1)
mean_final_2=np.nanmean(final_composite2)
temp_mask2=((final_composite2-mean_final_2)/desv_final_mask2)+mean_final_1
return temp_mask2
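# A small worked example of the normalisation above: composite 2 is expressed
# in units of its own standard deviation and shifted to the mean of
# composite 1. The values are made up for illustration.
def _example_normalize():
    a = np.array([10.0, 20.0, 30.0])      # mean 20
    b = np.array([110.0, 120.0, 130.0])   # mean 120, std ~8.16
    # result: roughly [18.78, 20.0, 21.22]
    return normalize(a, b)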
@celery.task()
def pre_process_ravel(pre_pca):
new_pca_input=None
for d in xrange(0,pre_pca.shape[2]):
b=pre_pca[:,:,d].ravel()
if new_pca_input is None:
new_pca_input=b
else:
new_pca_input=numpy.vstack((new_pca_input,b))
#print ("ENVIO_VSTACK",new_pca_input.T.shape)
return new_pca_input.T
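# Illustrative sketch of the reshaping done above: a (rows, cols, bands) cube
# becomes a (rows*cols, bands) matrix, which is the layout PCA() expects.
# The sizes are made up.
def _example_pre_process_ravel():
    cube = np.random.rand(4, 4, 3)      # 4x4 pixels, 3 bands
    flat = pre_process_ravel(cube)      # shape (16, 3)
    return flat.shape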
@celery.task()
def median_min(array_bands,validate_range):
count_no_nan=np.count_nonzero(np.isnan(array_bands))
len_data=len(array_bands)
if((len_data - count_no_nan)<=validate_range):
return np.nanmedian(array_bands)
else:
return np.nan
@celery.task()
def mask_range(array_bands,validate_range):
count_nan=np.count_nonzero(np.isnan(array_bands))
len_data=len(array_bands)
if((len_data - count_nan)>validate_range):
return True
else:
return False
@celery.task()
def validate_mask(array_bands):
count_nan=np.count_nonzero(np.isnan(array_bands))
len_data=len(array_bands)
if count_nan!=len_data :
return False
else:
return True
@celery.task()
def obtain_mask(validate_range,x,y,start,end, bands, satellite,iterations=0,xsize=4000,ysize=4000,file_format="GTiff",data_type=gdal.GDT_CInt16,months=None):
mosaic_bands,meta=matrix_obtain_mosaic(x,y,start,end, bands, satellite,iterations=0,xsize=4000,ysize=4000,file_format="GTiff",data_type=gdal.GDT_CInt16,months=None)
mask_data=None
for band in mosaic_bands:
b =np.apply_along_axis(mask_range,2,mosaic_bands[band],validate_range)
if mask_data is None:
mask_data=b
else:
mask_data=np.dstack((mask_data, b))
print mask_data.shape
return mask_data
@celery.task()
def assemble_mosaic(file_list):
print "Assembling mosaic"
print file_list
fl = None
try:
if type(file_list) is list:
fl = [f for f in file_list if f!="None"]
else:
fl = [file_list]
except:
fl = [file_list]
if len(fl) <1:
return "None"
c_name = hashlib.sha512("_".join(fl)).hexdigest()[0:32]
cmd = "gdalbuildvrt -hidenodata /tilestore/tile_cache/"+c_name+".vrt "+" ".join(fl)
print cmd
os.system(cmd)
if not os.path.exists("/tilestore/tile_cache/"+c_name+".vrt"):
return "None"
res = "/tilestore/tile_cache/"+c_name+".vrt"
ret_prod = []
ret_prod.append(res)
for fi in fl:
ret_prod.append(fi)
return ret_prod
@celery.task()
def get_bounds(input_file):
in_file = None
print input_file
if isinstance(input_file,(str)):
if input_file == "None":
return "None"
else:
in_file = input_file
else:
in_file = input_file[0]
ds = gdal.Open(in_file)
cols = ds.RasterXSize
rows = ds.RasterYSize
gt = ds.GetGeoTransform()
bb1 = originx = gt[0]
bb4 = originy = gt[3]
pixelWidth = gt[1]
pixelHeight = gt[5]
width = cols*pixelWidth
height = rows*pixelHeight
bb3 = originx+width
bb2 = originy+height
del ds
return str(bb2)+","+str(bb1)+","+str(bb4)+","+str(bb3)
@celery.task()
def translate_files(file_list,file_format,output_scale,output_size,output_datatype,output_bands,additional_arguments=None):
print file_list
fl = None
try:
if type(file_list) is list:
fl = [f for f in file_list if f!="None"]
else:
fl = [file_list]
except:
fl = [file_list]
addy = ""
b_arg= ""
if output_bands is not None:
b_arg = " ".join(["-b "+str(b) for b in output_bands])
res = []
if additional_arguments:
addy = " "+" ".join(additional_arguments)
for f in fl:
print "Translating "+f
ds = gdal.Open(f)
rc = ds.RasterCount
if output_bands is not None:
if rc < len(output_bands):
print "Less bands than requested!"
b_arg = "-b 1"
del ds
out_scale = ""
out_dt = ""
out_size = ""
b_l_arg = ""
if output_scale is not None and b_arg != "-b 1":
out_scale = " -scale "+output_scale
if output_datatype is not None:
out_dt = " -ot "+output_datatype
if output_size is not None:
out_size = " -outsize "+output_size
if output_bands is not None and b_arg != "-b 1":
b_l_arg = " "+b_arg
b_tmp = ""
if output_bands is not None:
b_tmp = "_".join([str(b) for b in output_bands])
c_arr = [f,str(file_format),str(output_scale),str(output_size),str(output_datatype),b_tmp,addy]
c_name = "_".join(c_arr)
c_name = hashlib.sha512(c_name).hexdigest()[0:32]
tar_img = os.path.join("/tilestore/tile_cache/",c_name+FILE_EXT[file_format])
tar_img_marked = os.path.join("/tilestore/tile_cache/",c_name+"_marked"+FILE_EXT[file_format])
cmd = "gdal_translate -of "+file_format+out_dt+out_scale+out_size+b_l_arg+addy+" "+f+" "+tar_img
print cmd
os.system(cmd)
if os.path.exists(tar_img):
if file_format == "png" or file_format == "PNG":
cmd = "convert -transparent \"#000000\" "+tar_img+" "+tar_img
os.system(cmd);
cmd = "convert "+tar_img+" -background red -alpha remove "+tar_img_marked
os.system(cmd)
res.append(tar_img)
res.append(tar_img_marked)
return res
@celery.task()
def apply_color_table_to_files(file_list,output_band,color_table):
print file_list
fl = None
try:
if type(file_list) is list:
fl = [f for f in file_list if f!="None"]
else:
fl = [file_list]
except:
fl = [file_list]
res = []
for f in fl:
print "Coloring "+f
c_arr = [f,str(output_band),color_table]
c_name = "_".join(c_arr)
c_name = hashlib.sha512(c_name).hexdigest()[0:32]
tar_img = os.path.join("/tilestore/tile_cache/",c_name+".tif")
tmp_img = os.path.join("/tilestore/tile_cache/",c_name)
cmd = "gdal_translate "+f+" "+tmp_img+"_"+str(output_band)+".tif"+" -b "+str(output_band)
os.system(cmd)
print "Applying color table"
cmd = "gdaldem color-relief -of GTiff "+tmp_img+"_"+str(output_band)+".tif"+" "+color_table+" "+tar_img
print cmd
os.system(cmd)
if os.path.exists(tar_img):
#cmd = "convert -transparent \"#000000\" "+tar_img+" "+tar_img
#os.system(cmd);
res.append(tar_img)
return res
@celery.task()
def preview_cloudfree_mosaic(x,y,start,end, bands, satellite,iterations=0,xsize=2000,ysize=2000,file_format="GTiff",data_type=gdal.GDT_CInt16):
def resize_array(arr,size):
r = numpy.array(arr).astype(numpy.int16)
i = Image.fromarray(r)
i2 = i.resize(size,Image.NEAREST)
r2 = numpy.array(i2)
del i2
del i
del r
return r2
StartDate = start
EndDate = end
best_data = {}
band_str = "+".join([band.name for band in bands])
sat_str = "+".join([sat.name for sat in satellite])
cache_id = ["preview",str(x),str(y),str(start),str(end),band_str,sat_str,str(xsize),str(ysize),file_format,str(iterations)]
f_name = "_".join(cache_id)
f_name = f_name.replace(" ","_")
c_name = f_name
cached_res = cache.get(c_name)
if cached_res:
return str(cached_res)
f_name = os.path.join("/tilestore/tile_cache",f_name)
tiles = list_tiles(x=[x], y=[y],acq_min=StartDate,acq_max=EndDate,satellites=satellite,dataset_types=[DatasetType.ARG25,DatasetType.PQ25], sort=SortType.ASC)
tile_metadata = None
tile_count = 0
tile_filled = False
for tile in tiles:
if tile_filled:
break
print "merging on tile "+str(tile.x)+", "+str(tile.y)
tile_count+=1
dataset = DatasetType.ARG25 in tile.datasets and tile.datasets[DatasetType.ARG25] or None
if dataset is None:
print "No dataset availible"
tile_count-=1
continue
tile_metadata = get_dataset_metadata(dataset)
if tile_metadata is None:
print "NO METADATA"
tile_count-=1
continue
pqa = DatasetType.PQ25 in tile.datasets and tile.datasets[DatasetType.PQ25] or None
mask = None
mask = get_mask_pqa(pqa,[PqaMask.PQ_MASK_CLEAR],mask=mask)
band_data = get_dataset_data_masked(dataset, mask=mask,bands=bands)
swap_arr = None
for band in band_data:
if not band in best_data:
print "Adding "+band.name
bd = resize_array(band_data[band],(2000,2000))
print bd
best_data[band]=bd
del bd
else:
best = resize_array(best_data[band],(2000,2000))
swap_arr=numpy.in1d(best.ravel(),-999).reshape(best.shape)
b_data = numpy.array(band_data[band])
best[swap_arr]=b_data[swap_arr]
best_data[band]=numpy.copy(best)
del b_data
del best
del swap_arr
if iterations > 0:
if tile_count>iterations:
print "Exiting after "+str(iterations)+" iterations"
break
numberOfBands=len(bands)
if numberOfBands == 0:
return "None"
if bands[0] not in best_data:
print "No data was merged for "+str(x)+", "+str(y)
return "None"
numberOfPixelsInXDirection=len(best_data[bands[0]])
numberOfPixelsInYDirection=len(best_data[bands[0]][0])
if tile_count <1:
print "No tiles found for "+str(x)+", "+str(y)
return "None"
driver = gdal.GetDriverByName(file_format)
if driver is None:
print "No driver found for "+file_format
return "None"
print f_name+'.tif'
raster = driver.Create(f_name+'.tif', numberOfPixelsInXDirection, numberOfPixelsInYDirection, numberOfBands, data_type, options=["BIGTIFF=YES", "INTERLEAVE=BAND"])
gt = tile_metadata.transform
gt2 = (gt[0],gt[1]*2.0,gt[2],gt[3],gt[4],gt[5]*2.0)
tile_metadata.transform = gt2
raster.SetGeoTransform(tile_metadata.transform)
print tile_metadata.transform
raster.SetProjection(tile_metadata.projection)
index = 1
for band in bands:
stack_band = raster.GetRasterBand(index)
stack_band.SetNoDataValue(-999)
stack_band.WriteArray(best_data[band])
stack_band.ComputeStatistics(True)
index+=1
stack_band.FlushCache()
del stack_band
raster.FlushCache()
del raster
cache.set(c_name,f_name+".tif")
return f_name+".tif"
import hashlib
#TODO: Implement proper masking support
@celery.task()
def obtain_file_from_math(input_file,expressions_list,file_format="GTiff",data_type=gdal.GDT_CFloat32,input_ndv=-999,output_ndv=-999):
"""
ex. band4,band3, (band4-band3)/(band4+band3) AKA NDVI
"""
"""
Read in file
"""
if input_file == "None":
return "None"
driver = gdal.GetDriverByName(file_format)
ds = gdal.Open(input_file,0)
if ds is None:
return "None"
arrays = []
band_count = ds.RasterCount
xsize = ds.RasterXSize
ysize = ds.RasterYSize
gt = ds.GetGeoTransform()
proj = ds.GetProjection()
exp_str = "_".join(expressions_list)
cache_id = [os.path.splitext(os.path.basename(input_file))[0],exp_str,str(xsize),str(ysize),file_format]
f_name = "_".join(cache_id)
f_name = hashlib.sha512(f_name).hexdigest()[0:32]
c_name = f_name
cached_res = cache.get(c_name)
if cached_res:
return cached_res
f_name = os.path.join("/tilestore/tile_cache",f_name)
for i in range(band_count):
RB = ds.GetRasterBand(i+1)
arrays.append(RB.ReadAsArray(0,0,xsize,ysize).astype(numpy.float32))
del RB
var_identifier = "A"+''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10))
#test if we've used this id in this scope
var_test = var_identifier+"_band1"
while var_test in globals():
var_identifier = "A"+''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10))
var_test = var_identifier+"_band1"
for band_num in range(len(arrays)):
globals()[var_identifier+'_band'+str(band_num+1)]=arrays[band_num]
results = []
expressions = [expression.replace("band",var_identifier+"_band") for expression in expressions_list]
for expression in expressions:
results.append(ne.evaluate(expression))
raster = driver.Create(f_name+'.tif', xsize, ysize, len(expressions_list), data_type, options=["BIGTIFF=YES", "INTERLEAVE=BAND"])
raster.SetGeoTransform(gt)
raster.SetProjection(proj)
index = 1
for band in results:
stack_band = raster.GetRasterBand(index)
stack_band.SetNoDataValue(output_ndv)
stack_band.WriteArray(band)
stack_band.ComputeStatistics(True)
index+=1
stack_band.FlushCache()
del stack_band
raster.FlushCache()
del raster
del ds
del results
cache.set(c_name,f_name+".tif")
return f_name+".tif"
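# A hedged usage sketch for the band-math task above, using the NDVI
# expression from its docstring. The input path is hypothetical; band1 and
# band2 stand for the first two bands of that GeoTIFF (assumed RED and NIR).
def _example_band_math():
    return obtain_file_from_math(
        "/tilestore/tile_cache/example_mosaic.tif",
        ["(band2-band1)/(band2+band1)"])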
@celery.task()
def shrink_raster_file(input_file,size=(2000,2000)):
if len(size)!=2:
return "None"
if input_file=="None":
return "None"
file_name = os.path.splitext(os.path.basename(input_file))[0]
if size[0] ==0 or size[1]==0:
return "None"
gdal.AllRegister()
c_arr = [file_name,str(size)]
c_name = "_".join(c_arr)
c_name = c_name.replace(" ","_")
c_name = c_name.replace(",","")
c_name = c_name.replace("(","")
c_name = c_name.replace(")","")
f_name = c_name+".tif"
f_name = os.path.join("/tilestore/tile_cache",f_name)
ds = gdal.Open(input_file,0)
band_count = ds.RasterCount
if band_count == 0:
return "None"
xsize = ds.RasterXSize
ysize = ds.RasterYSize
gt = ds.GetGeoTransform()
proj = ds.GetProjection()
ndv = ds.GetRasterBand(1).GetNoDataValue()
dt = ds.GetRasterBand(1).DataType
bands = []
for i in range(band_count):
RB = ds.GetRasterBand(i+1)
r = numpy.array(RB.ReadAsArray(0,0,xsize,ysize)).astype(numpy.float32)
print r
i = Image.fromarray(r)
i2 = i.resize(size,Image.NEAREST)
bands.append(numpy.array(i2))
del i2
del i
del r
driver = gdal.GetDriverByName("GTiff")
raster = driver.Create(f_name, size[0], size[1], band_count, dt, options=["BIGTIFF=YES", "INTERLEAVE=BAND"])
raster.SetGeoTransform(gt)
raster.SetProjection(proj)
index = 1
for band in bands:
stack_band = raster.GetRasterBand(index)
stack_band.SetNoDataValue(ndv)
stack_band.WriteArray(band)
stack_band.ComputeStatistics(True)
index+=1
stack_band.FlushCache()
del stack_band
raster.FlushCache()
del raster
return f_name
@celery.task()
def merge_files_on_value(input_files_list,merge_value=-999, input_ndv=-999,output_ndv=-999):
input_files = input_files_list
input_files = [fl for fl in input_files if fl != "None"]
if len(input_files)<2:
if len(input_files)==1:
return input_files[0]
else:
return "None"
file_name_list = [os.path.splitext(os.path.basename(in_file))[0] for in_file in input_files]
file_names_str = "_".join(file_name_list)
c_name_arr = [file_names_str,str(merge_value),str(input_ndv),str(output_ndv)]
c_name= "_".join(c_name_arr)
f_name = c_name+".tif"
f_name = os.path.join("/tilestore/tile_cache",f_name)
gdal.AllRegister()
arrays = []
ds = None
ndv_array = None
swap_array = None
xsize = 0
ysize = 0
gt = None
proj = None
band_count = 0
ds = gdal.Open(input_files[0],0)
gt = ds.GetGeoTransform()
proj = ds.GetProjection()
band_count = ds.RasterCount
xsize = ds.RasterXSize
ysize = ds.RasterYSize
"""
Load the first file
"""
for i in range(band_count):
RB = ds.GetRasterBand(i+1)
arrays.append(RB.ReadAsArray(0,0,xsize,ysize))
del RB
ds = None
for file_path in input_files[1:]:
ds = gdal.Open(file_path,0)
if ds.RasterCount == band_count:
for i in range(band_count):
RB = ds.GetRasterBand(i+1)
RA = RB.ReadAsArray(0,0,xsize,ysize)
ndv_array = numpy.in1d(arrays[0].ravel(),input_ndv).reshape(arrays[0].shape)
swap_array = numpy.in1d(arrays[0].ravel(),merge_value).reshape(arrays[0].shape)
arrays[i][swap_array]=RA[swap_array]
arrays[i][ndv_array]=output_ndv
del RB
del RA
ndv_array = None
swap_array = None
ds = None
"""
Write the merged file
"""
driver = gdal.GetDriverByName("GTiff")
raster = driver.Create(f_name, xsize, ysize, band_count, gdal.GDT_CFloat32, options=["BIGTIFF=YES", "INTERLEAVE=BAND"])
raster.SetGeoTransform(gt)
raster.SetProjection(proj)
index = 1
for band in arrays:
stack_band = raster.GetRasterBand(index)
stack_band.SetNoDataValue(output_ndv)
stack_band.WriteArray(band)
stack_band.ComputeStatistics(True)
index+=1
stack_band.FlushCache()
del stack_band
raster.FlushCache()
del raster
return f_name
@celery.task()
def merge_2files_on_value(input_file1, input_file2, merge_value=-999, input_ndv=-999,output_ndv=-999):
driver = gdal.GetDriverByName("GTiff")
ds1 = gdal.Open(input_file1,0)
if ds1 is None:
return "None"
ds2 = gdal.Open(input_file2,0)
if ds2 is None:
return "None"
arrays1 = []
arrays2 = []
band_count = ds1.RasterCount
xsize = ds1.RasterXSize
ysize = ds1.RasterYSize
gt = ds1.GetGeoTransform()
proj = ds1.GetProjection()
for i in range(band_count):
RB = ds1.GetRasterBand(i+1)
arrays1.append(RB.ReadAsArray(0,0,xsize,ysize))
del RB
for i in range(band_count):
RB = ds2.GetRasterBand(i+1)
arrays2.append(RB.ReadAsArray(0,0,xsize,ysize))
del RB
for i in range(band_count):
ndv_array = numpy.in1d(arrays1[0].ravel(),input_ndv).reshape(arrays1[0].shape)
swap_array = numpy.in1d(arrays1[0].ravel(),merge_value).reshape(arrays1[0].shape)
arrays1[i][swap_array]=arrays2[i][swap_array]
arrays1[i][ndv_array]=output_ndv
del ndv_array
del swap_array
del arrays2
cache_id = [os.path.splitext(os.path.basename(input_file1))[0],os.path.splitext(os.path.basename(input_file2))[0],str(merge_value),str(input_ndv),str(output_ndv)]
f_name = "_".join(cache_id)
f_name = hashlib.sha512(f_name).hexdigest()[0:32]
f_name = os.path.join("/tilestore/tile_cache",f_name)
raster = driver.Create(f_name+'.tif', xsize, ysize, band_count, gdal.GDT_CFloat32, options=["BIGTIFF=YES", "INTERLEAVE=BAND"])
raster.SetGeoTransform(gt)
raster.SetProjection(proj)
index = 1
for band in arrays1:
stack_band = raster.GetRasterBand(index)
stack_band.SetNoDataValue(output_ndv)
stack_band.WriteArray(band)
stack_band.ComputeStatistics(True)
index+=1
stack_band.FlushCache()
del stack_band
raster.FlushCache()
del raster
del ds1
del ds2
return f_name+".tif"
@celery.task()
def obtain_pca_test(validate_range,x,y,start1,end1,start2,end2, bands, satellite,iterations=0,xsize=4000,ysize=4000,file_format="GTiff",data_type=gdal.GDT_CFloat32,months=None):
print("OBTAIN PCA_ALL")
medians,meta=obtain_median(validate_range,x,y,start1,end1, bands, satellite,iterations=0,xsize=4000,ysize=4000,file_format="GTiff",data_type=gdal.GDT_CFloat32,months=None)
median_p2,meta2=obtain_median(validate_range,x,y,start2,end2, bands, satellite,iterations,xsize,ysize,file_format,data_type,months)
if medians is None or median_p2 is None:
return "None"
nf=medians.shape[0]
nc=medians.shape[1]
nb=medians.shape[2]*2
mask_nan=np.any(np.isnan(np.concatenate((medians, median_p2),axis=2)),axis=2)
##SAVING MEDIAN DATA (FLATTENING)
component_p1=pre_process_ravel(medians)
component_p2=pre_process_ravel(median_p2)
#________________normalization loop______________________________
for xat in xrange(0,component_p1.shape[1]):
component_p2[:,xat]=normalize(component_p1[:,xat],component_p2[:,xat])
pre_pca_bands=numpy.concatenate((component_p1,component_p2),1)
for xat in xrange(0,pre_pca_bands.shape[1]):
a=pre_pca_bands[:,xat]
a[np.isnan(a)]=np.nanmedian(a)
pre_pca_bands[:,xat]=a
f_pca=PCA(pre_pca_bands)
del medians
del median_p2
presult=f_pca.Y.T
#-_-------------------_-----------------------
"""
Guardar el archivo:
"""
band_str = "+".join([band.name for band in bands])
sat_str = "+".join([sat.name for sat in satellite])
cache_id = [str(x),str(y),str(start1),str(end1),str(start2),str(end2),band_str,sat_str,str(xsize),str(ysize),file_format,str(iterations)]
f_name = "_".join(cache_id)
f_name = "pca_"+f_name.replace(" ","_")
c_name = f_name
driver = gdal.GetDriverByName(file_format)
if driver is None:
print "No driver found for "+file_format
return "None"
f_name = os.path.join("/tilestore/tile_cache/",f_name)
t=max(nf,nc)
raster = driver.Create(f_name+'.tif', t, t, nb, data_type, options=["BIGTIFF=YES", "INTERLEAVE=BAND"])
#raster.SetGeoTransform((x-0.00025, 0.00027, 0.0, y+1.0002400000000002, 0.0, -0.00027)) #Should be taken from the original, or computed properly
srs = osr.SpatialReference()
raster.SetGeoTransform(meta.transform)
#raster.SetProjection(tile_metadata.projection)
srs.SetWellKnownGeogCS("WGS84")
raster.SetProjection(srs.ExportToWkt())
index = 1
for bn in presult:
stack_band = raster.GetRasterBand(index)
stack_band.SetNoDataValue(-999)
bn=bn.reshape(nf,nc)
bn[mask_nan]=np.nan
stack_band.WriteArray(bn)
stack_band.ComputeStatistics(True)
index+=1
stack_band.FlushCache()
del stack_band
raster.FlushCache()
del presult
del f_pca
cache.set(c_name,f_name+".tif")
return f_name+".tif"
@celery.task()
def obtain_pca_2002_2014L8(x,y):
validate_range=1
st = datetime.strptime('2002-01-01','%Y-%m-%d')
en = datetime.strptime('2002-12-31','%Y-%m-%d')
st2 = datetime.strptime('2014-01-01','%Y-%m-%d')
en2 = datetime.strptime('2014-12-31','%Y-%m-%d')
file_format="GTiff"
data_type=gdal.GDT_CFloat32
iterations=0
bands1=[ Ls57Arg25Bands.RED, Ls57Arg25Bands.NEAR_INFRARED, Ls57Arg25Bands.SHORT_WAVE_INFRARED_1,Ls57Arg25Bands.SHORT_WAVE_INFRARED_2]
satellite1=[Satellite.LS7]
medians,meta=obtain_median(validate_range,x,y,st,en, bands1, satellite1,iterations=0,xsize=4000,ysize=4000,file_format="GTiff",data_type=gdal.GDT_CFloat32,months=None)
print "consulta 1"
nf=medians.shape[0]
nc=medians.shape[1]
nb=medians.shape[2]*2
bands2=[Ls8Arg25Bands.RED, Ls8Arg25Bands.NEAR_INFRARED, Ls8Arg25Bands.SHORT_WAVE_INFRARED_1, Ls8Arg25Bands.SHORT_WAVE_INFRARED_2]
satellite2=[Satellite.LS8]
median_p2,meta2=obtain_median(validate_range,x,y,st2,en2, bands2, satellite2,iterations=0,xsize=4000,ysize=4000,file_format="GTiff",data_type=gdal.GDT_CFloat32,months=None)
print "consulta 2"
mask_nan=np.any(np.isnan(np.concatenate((medians, median_p2),axis=2)),axis=2)
    ## Flatten the median data before PCA
component_p1=pre_process_ravel(medians)
component_p2=pre_process_ravel(median_p2)
    #________________ Normalization loop ______________________________
for xat in xrange(0,component_p1.shape[1]):
component_p2[:,xat]=normalize(component_p1[:,xat],component_p2[:,xat])
pre_pca_bands=numpy.concatenate((component_p1,component_p2),1)
for xat in xrange(0,pre_pca_bands.shape[1]):
a=pre_pca_bands[:,xat]
a[np.isnan(a)]=np.nanmedian(a)
pre_pca_bands[:,xat]=a
f_pca=PCA(pre_pca_bands)
del medians
del median_p2
presult=f_pca.Y.T
#-_-------------------_-----------------------
"""
    Save the output file:
"""
band_str = "+".join([band.name for band in bands1])
sat_str = "+".join([sat.name for sat in satellite1])
cache_id = [str(x),str(y),str(st),str(en),str(st2),str(en2),band_str,sat_str,file_format,str(iterations)]
f_name = "_".join(cache_id)
f_name = "pca_"+f_name.replace(" ","_")
c_name = f_name
driver = gdal.GetDriverByName(file_format)
if driver is None:
print "No driver found for "+file_format
return "None"
f_name = os.path.join("/tilestore/tile_cache/",f_name)
t=max(nf,nc)
raster = driver.Create(f_name+'.tif', t, t, nb, data_type, options=["BIGTIFF=YES", "INTERLEAVE=BAND"])
srs = osr.SpatialReference()
raster.SetGeoTransform(meta.transform)
#raster.SetProjection(tile_metadata.projection)
srs.SetWellKnownGeogCS("WGS84")
raster.SetProjection(srs.ExportToWkt())
index = 1
for bn in presult:
stack_band = raster.GetRasterBand(index)
stack_band.SetNoDataValue(-999)
bn=bn.reshape(nf,nc)
bn[mask_nan]=np.nan
stack_band.WriteArray(bn)
stack_band.ComputeStatistics(True)
index+=1
stack_band.FlushCache()
del stack_band
raster.FlushCache()
del presult
del f_pca
cache.set(c_name,f_name+".tif")
return f_name+".tif" | agpl-3.0 | 1,160,426,004,678,091,800 | 35.57681 | 182 | 0.593422 | false |
vaginessa/inception | inception/argparsers/makers/submakers/submaker_property.py | 1 | 1852 | from .submaker import Submaker
import collections
import os
class PropertySubmaker(Submaker):
def make(self, workDir):
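        # Writes each configured property value to a persist.<key> file under
        # data/property in the work dir and registers that directory in the
        # update configuration.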
props = self.getValue(".", {})
if "__make__" in props:
del props["__make__"]
if "__depend__" in props:
del props["__depend__"]
propsFlat = self.flatten(props)
outDir = os.path.join("data", "property")
localOutDir = os.path.join(workDir, outDir)
if len(propsFlat):
os.makedirs(localOutDir)
self.setValue("update.files.add.data/property", {
"destination": "/data/property",
"uid": "0",
"gid": "0",
"mode": "0600",
"mode_dirs": "0700"
})
for fname, val in propsFlat.items():
if not val:
continue
if fname.endswith("__val__"):
fname = fname.replace(".__val__", "")
fname = "persist.%s" % fname
with open(os.path.join(localOutDir, fname), "w") as propFile:
propFile.write(val)
#escapedFname = fname.replace(".", "\.")
#self.setConfigValue("update.files.add.data/property/%s" % escapedFname, self._getPropFileData(fname))
def _getPropFileData(self, fname):
return {
"destination": "/data/property/%s" % fname,
"uid": "0",
"gid": "0",
"mode": "0600"
}
def flatten(self, d, parent_key='', sep='.'):
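        # Flatten a nested dict into a single level with dot-separated keys.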
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, collections.MutableMapping):
items.extend(self.flatten(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
| gpl-3.0 | -1,622,258,097,428,729,000 | 30.931034 | 114 | 0.49568 | false |
vmware/pyvmomi-community-samples | samples/sessions_list.py | 1 | 2110 | #!/usr/bin/env python
# VMware vSphere Python SDK
# Copyright (c) 2008-2021 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tools import cli, service_instance
# Demonstrates some simple actions for working with sessions. As you would
# expect, the session is None once you have logged out, and you lose the
# ability to see any session ID. It would be a massive
# security hole to allow people to see these when they were not authenticated
# since the session ID is all you need to spoof another user's login.
# Example output:
# > logged in to vcsa
# > current pyVmomi session id: 523ea3ee-865b-fc7e-3486-bd380c3ab4a2
# > Listing all sessions I can see:
# > session 5205c9e7-8f79-6597-f1d9-e06583cb5089
# > session 523ea3ee-865b-fc7e-3486-bd380c3ab4a2
# > session 52500401-b1e7-bb05-c6b1-05d903d32dcb
# > session 5284cc12-f15c-363a-4455-ae8dbeb8bc3b
# > logout
# > current pyVmomi session: None
parser = cli.Parser()
args = parser.get_args()
si = service_instance.connect(args)
print("logged in to %s" % args.host)
session_id = si.content.sessionManager.currentSession.key
print("current pyVmomi session id: %s" % session_id)
print("Listing all sessions I can see:")
for session in si.content.sessionManager.sessionList:
print(
"session key={0.key}, "
"username={0.userName}, "
"ip={0.ipAddress}".format(session)
)
print("logout")
si.content.sessionManager.Logout()
# The current session will be None after logout
session = si.content.sessionManager.currentSession
print("current pyVmomi session: %s" % session)
| apache-2.0 | 6,791,903,590,379,356,000 | 36.017544 | 77 | 0.746919 | false |
tallypokemap/PokeAlarm | PokeAlarm/Discord/DiscordAlarm.py | 1 | 8686 | # Standard Library Imports
import logging
import requests
# 3rd Party Imports
# Local Imports
from ..Alarm import Alarm
from ..Utils import parse_boolean, get_static_map_url, reject_leftover_parameters, require_and_remove_key
log = logging.getLogger('Discord')
try_sending = Alarm.try_sending
replace = Alarm.replace
##################################################### ATTENTION! #####################################################
# You DO NOT NEED to edit this file to customize messages for services! Please see the Wiki on the correct way to
# customize services. In fact, doing so will likely NOT work correctly with many features included in PokeAlarm.
# PLEASE ONLY EDIT IF YOU KNOW WHAT YOU ARE DOING!
##################################################### ATTENTION! #####################################################
class DiscordAlarm(Alarm):
_defaults = {
'pokemon': {
'username': "<pkmn>",
'content':"",
'icon_url': "https://raw.githubusercontent.com/RocketMap/PokeAlarm/master/icons/<pkmn_id>.png",
'avatar_url': "https://raw.githubusercontent.com/RocketMap/PokeAlarm/master/icons/<pkmn_id>.png",
'title': "A wild <pkmn> has appeared!",
'url': "<gmaps>",
'body': "Available until <24h_time> (<time_left>)."
},
'pokestop': {
'username': "Pokestop",
'content': "",
'icon_url': "https://raw.githubusercontent.com/RocketMap/PokeAlarm/master/icons/pokestop.png",
'avatar_url': "https://raw.githubusercontent.com/RocketMap/PokeAlarm/master/icons/pokestop.png",
'title': "Someone has placed a lure on a Pokestop!",
'url': "<gmaps>",
'body': "Lure will expire at <24h_time> (<time_left>)."
},
'gym': {
'username': "<new_team> Gym Alerts",
'content': "",
'icon_url': "https://raw.githubusercontent.com/RocketMap/PokeAlarm/master/icons/gym_<new_team_id>.png",
'avatar_url': "https://raw.githubusercontent.com/RocketMap/PokeAlarm/master/icons/gym_leader_<new_team_id>.png",
'title': "A Team <old_team> gym has fallen!",
'url': "<gmaps>",
'body': "It is now controlled by <new_team>."
},
'egg': {
'username': "Egg",
'content': "",
'icon_url': "https://raw.githubusercontent.com/RocketMap/PokeAlarm/master/icons/egg_<raid_level>.png",
'avatar_url': "https://raw.githubusercontent.com/RocketMap/PokeAlarm/master/icons/egg_<raid_level>.png",
'title': "Raid is incoming!",
'url': "<gmaps>",
'body': "A level <raid_level> raid will hatch <begin_24h_time> (<begin_time_left>)."
},
'raid': {
'username': "Raid",
'content': "",
'icon_url': "https://raw.githubusercontent.com/RocketMap/PokeAlarm/master/icons/<pkmn_id>.png",
'avatar_url': "https://raw.githubusercontent.com/RocketMap/PokeAlarm/master/icons/egg_<raid_level>.png",
'title': "Level <raid_level> Raid is available against <pkmn>!",
'url': "<gmaps>",
'body': "The raid is available until <24h_time> (<time_left>)."
}
}
# Gather settings and create alarm
def __init__(self, settings, max_attempts, static_map_key):
# Required Parameters
self.__webhook_url = require_and_remove_key('webhook_url', settings, "'Discord' type alarms.")
self.__max_attempts = max_attempts
# Optional Alarm Parameters
self.__startup_message = parse_boolean(settings.pop('startup_message', "True"))
self.__disable_embed = parse_boolean(settings.pop('disable_embed', "False"))
self.__avatar_url = settings.pop('avatar_url', "")
self.__map = settings.pop('map', {}) # default for the rest of the alerts
self.__static_map_key = static_map_key
# Set Alert Parameters
self.__pokemon = self.create_alert_settings(settings.pop('pokemon', {}), self._defaults['pokemon'])
self.__pokestop = self.create_alert_settings(settings.pop('pokestop', {}), self._defaults['pokestop'])
self.__gym = self.create_alert_settings(settings.pop('gym', {}), self._defaults['gym'])
self.__egg = self.create_alert_settings(settings.pop('egg', {}), self._defaults['egg'])
self.__raid = self.create_alert_settings(settings.pop('raid', {}), self._defaults['raid'])
# Warn user about leftover parameters
reject_leftover_parameters(settings, "'Alarm level in Discord alarm.")
log.info("Discord Alarm has been created!")
# (Re)connect with Discord
def connect(self):
pass
# Send a message letting the channel know that this alarm has started
def startup_message(self):
if self.__startup_message:
args = {
'url': self.__webhook_url,
'payload': {
'username': 'PokeAlarm',
'content': 'PokeAlarm activated!'
}
}
try_sending(log, self.connect, "Discord", self.send_webhook, args, self.__max_attempts)
log.info("Startup message sent!")
# Set the appropriate settings for each alert
def create_alert_settings(self, settings, default):
alert = {
'webhook_url': settings.pop('webhook_url', self.__webhook_url),
'username': settings.pop('username', default['username']),
'avatar_url': settings.pop('avatar_url', default['avatar_url']),
'disable_embed': parse_boolean(settings.pop('disable_embed', self.__disable_embed)),
'content': settings.pop('content', default['content']),
'icon_url': settings.pop('icon_url', default['icon_url']),
'title': settings.pop('title', default['title']),
'url': settings.pop('url', default['url']),
'body': settings.pop('body', default['body']),
'map': get_static_map_url(settings.pop('map', self.__map), self.__static_map_key)
}
reject_leftover_parameters(settings, "'Alert level in Discord alarm.")
return alert
# Send Alert to Discord
def send_alert(self, alert, info):
log.debug("Attempting to send notification to Discord.")
payload = {
'username': replace(alert['username'], info)[:32], # Username must be 32 characters or less
'content': replace(alert['content'], info),
'avatar_url': replace(alert['avatar_url'], info),
}
if alert['disable_embed'] is False:
payload['embeds'] = [{
'title': replace(alert['title'], info),
'url': replace(alert['url'], info),
'description': replace(alert['body'], info),
'thumbnail': {'url': replace(alert['icon_url'], info)}
}]
if alert['map'] is not None:
payload['embeds'][0]['image'] = {'url': replace(alert['map'], {'lat': info['lat'], 'lng': info['lng']})}
args = {
'url': alert['webhook_url'],
'payload': payload
}
try_sending(log, self.connect, "Discord", self.send_webhook, args, self.__max_attempts)
# Trigger an alert based on Pokemon info
def pokemon_alert(self, pokemon_info):
log.debug("Pokemon notification triggered.")
self.send_alert(self.__pokemon, pokemon_info)
# Trigger an alert based on Pokestop info
def pokestop_alert(self, pokestop_info):
log.debug("Pokestop notification triggered.")
self.send_alert(self.__pokestop, pokestop_info)
# Trigger an alert based on Pokestop info
def gym_alert(self, gym_info):
log.debug("Gym notification triggered.")
self.send_alert(self.__gym, gym_info)
# Trigger an alert when a raid egg has spawned (UPCOMING raid event)
def raid_egg_alert(self, raid_info):
self.send_alert(self.__egg, raid_info)
def raid_alert(self, raid_info):
self.send_alert(self.__raid, raid_info)
# Send a payload to the webhook url
def send_webhook(self, url, payload):
log.debug(payload)
resp = requests.post(url, json=payload, timeout=5)
if resp.ok is True:
log.debug("Notification successful (returned {})".format(resp.status_code))
else:
log.debug("Discord response was {}".format(resp.content))
raise requests.exceptions.RequestException(
"Response received {}, webhook not accepted.".format(resp.status_code))
| agpl-3.0 | -7,167,528,100,492,597,000 | 46.206522 | 124 | 0.575869 | false |
pfsmorigo/minecraft | overviewer_config.py | 1 | 12108 | #!/bin/python
world_name = 'SmoWorld'
worlds[world_name] = '/home/pfsmorigo/.minecraft/saves/'+world_name
outputdir = '/mnt/disk/beirut/minecraft/overviewer/'+world_name
rendermode = "smooth_lighting"
MC_IDS = {
0: "air",
1: "stone",
2: "grass",
3: "dirt",
4: "cobblestone",
5: "planks",
6: "sapling",
7: "bedrock",
8: "flowing water",
9: "water",
10: "flowing lava",
11: "lava",
12: "sand",
13: "gravel",
14: "gold ore",
15: "iron ore",
16: "coal ore",
17: "log",
18: "leaves",
19: "sponge",
20: "glass",
21: "lapis ore",
22: "lapis block",
23: "dispenser",
24: "sandstone",
25: "noteblock",
26: "bed",
27: "golden rail",
28: "detector rail",
29: "sticky piston",
30: "web",
31: "tallgrass",
32: "deadbush",
33: "piston",
34: "piston head",
35: "wool",
36: "piston extension",
37: "yellow flower",
38: "red flower",
39: "brown mushroom",
40: "red mushroom",
41: "gold block",
42: "iron block",
43: "double stone slab",
44: "stone slab",
45: "brick block",
46: "tnt",
47: "bookshelf",
48: "mossy cobblestone",
49: "obsidian",
50: "torch",
51: "fire",
52: "mob spawner",
53: "oak stairs",
54: "chest",
55: "redstone wire",
56: "diamond ore",
57: "diamond block",
58: "crafting table",
59: "wheat",
60: "farmland",
61: "furnace",
62: "lit furnace",
63: "standing sign",
64: "wooden door",
65: "ladder",
66: "rail",
67: "stone stairs",
68: "wall sign",
69: "lever",
70: "stone pressure plate",
71: "iron door",
72: "wooden pressure plate",
73: "redstone ore",
74: "lit redstone ore",
75: "unlit redstone torch",
76: "redstone torch",
77: "stone button",
78: "snow layer",
79: "ice",
80: "snow",
81: "cactus",
82: "clay",
83: "reeds",
84: "jukebox",
85: "fence",
86: "pumpkin",
87: "netherrack",
88: "soul sand",
89: "glowstone",
90: "portal",
91: "lit pumpkin",
92: "cake",
93: "unpowered repeater",
94: "powered repeater",
95: "stained glass",
96: "trapdoor",
97: "monster egg",
98: "stonebrick",
99: "brown mushroom block",
100: "red mushroom block",
101: "iron bars",
102: "glass pane",
103: "melon block",
104: "pumpkin stem",
105: "melon stem",
106: "vine",
107: "fence gate",
108: "brick stairs",
109: "stone brick stairs",
110: "mycelium",
111: "waterlily",
112: "nether brick",
113: "nether brick fence",
114: "nether brick stairs",
115: "nether wart",
116: "enchanting table",
117: "brewing stand",
118: "cauldron",
119: "end portal",
120: "end portal frame",
121: "end stone",
122: "dragon egg",
123: "redstone lamp",
124: "lit redstone lamp",
125: "double wooden slab",
126: "wooden slab",
127: "cocoa",
128: "sandstone stairs",
129: "emerald ore",
130: "ender chest",
131: "tripwire hook",
132: "tripwire",
133: "emerald block",
134: "spruce stairs",
135: "birch stairs",
136: "jungle stairs",
137: "command block",
138: "beacon",
139: "cobblestone wall",
140: "flower pot",
141: "carrots",
142: "potatoes",
143: "wooden button",
144: "skull",
145: "anvil",
146: "trapped chest",
147: "light weighted pressure plate",
148: "heavy weighted pressure plate",
149: "unpowered comparator",
150: "powered comparator",
151: "daylight detector",
152: "redstone block",
153: "quartz ore",
154: "hopper",
155: "quartz block",
156: "quartz stairs",
157: "activator rail",
158: "dropper",
159: "stained hardened clay",
160: "stained glass pane",
161: "leaves2",
162: "log2",
163: "acacia stairs",
164: "dark oak stairs",
165: "slime",
166: "barrier",
167: "iron trapdoor",
168: "prismarine",
169: "sea lantern",
170: "hay block",
171: "carpet",
172: "hardened clay",
173: "coal block",
174: "packed ice",
175: "double plant",
176: "standing banner",
177: "wall banner",
178: "daylight detector inverted",
179: "red sandstone",
180: "red sandstone stairs",
181: "double stone slab2",
182: "stone slab2",
183: "spruce fence gate",
184: "birch fence gate",
185: "jungle fence gate",
186: "dark oak fence gate",
187: "acacia fence gate",
188: "spruce fence",
189: "birch fence",
190: "jungle fence",
191: "dark oak fence",
192: "acacia fence",
193: "spruce door",
194: "birch door",
195: "jungle door",
196: "acacia door",
197: "dark oak door",
# ...and items.
256: "iron shovel",
257: "iron pickaxe",
258: "iron axe",
259: "flint and steel",
260: "apple",
261: "bow",
262: "arrow",
263: "coal",
264: "diamond",
265: "iron ingot",
266: "gold ingot",
267: "iron sword",
268: "wooden sword",
269: "wooden shovel",
270: "wooden pickaxe",
271: "wooden axe",
272: "stone sword",
273: "stone shovel",
274: "stone pickaxe",
275: "stone axe",
276: "diamond sword",
277: "diamond shovel",
278: "diamond pickaxe",
279: "diamond axe",
280: "stick",
281: "bowl",
282: "mushroom stew",
283: "golden sword",
284: "golden shovel",
285: "golden pickaxe",
286: "golden axe",
287: "string",
288: "feather",
289: "gunpowder",
290: "wooden hoe",
291: "stone hoe",
292: "iron hoe",
293: "diamond hoe",
294: "golden hoe",
295: "wheat seeds",
296: "wheat",
297: "bread",
298: "leather helmet",
299: "leather chestplate",
300: "leather leggings",
301: "leather boots",
302: "chainmail helmet",
303: "chainmail chestplate",
304: "chainmail leggings",
305: "chainmail boots",
306: "iron helmet",
307: "iron chestplate",
308: "iron leggings",
309: "iron boots",
310: "diamond helmet",
311: "diamond chestplate",
312: "diamond leggings",
313: "diamond boots",
314: "golden helmet",
315: "golden chestplate",
316: "golden leggings",
317: "golden boots",
318: "flint",
319: "porkchop",
320: "cooked porkchop",
321: "painting",
322: "golden apple",
323: "sign",
324: "wooden door",
325: "bucket",
326: "water bucket",
327: "lava bucket",
328: "minecart",
329: "saddle",
330: "iron door",
331: "redstone",
332: "snowball",
333: "boat",
334: "leather",
335: "milk bucket",
336: "brick",
337: "clay ball",
338: "reeds",
339: "paper",
340: "book",
341: "slime ball",
342: "chest minecart",
343: "furnace minecart",
344: "egg",
345: "compass",
346: "fishing rod",
347: "clock",
348: "glowstone dust",
349: "fish",
350: "cooked fish",
351: "dye",
352: "bone",
353: "sugar",
354: "cake",
355: "bed",
356: "repeater",
357: "cookie",
358: "filled map",
359: "shears",
360: "melon",
361: "pumpkin seeds",
362: "melon seeds",
363: "beef",
364: "cooked beef",
365: "chicken",
366: "cooked chicken",
367: "rotten flesh",
368: "ender pearl",
369: "blaze rod",
370: "ghast tear",
371: "gold nugget",
372: "nether wart",
373: "potion",
374: "glass bottle",
375: "spider eye",
376: "fermented spider eye",
377: "blaze powder",
378: "magma cream",
379: "brewing stand",
380: "cauldron",
381: "ender eye",
382: "speckled melon",
383: "spawn egg",
384: "experience bottle",
385: "fire charge",
386: "writable book",
387: "written book",
388: "emerald",
389: "item frame",
390: "flower pot",
391: "carrot",
392: "potato",
393: "baked potato",
394: "poisonous potato",
395: "map",
396: "golden carrot",
397: "skull",
398: "carrot on a stick",
399: "nether star",
400: "pumpkin pie",
401: "fireworks",
402: "firework charge",
403: "enchanted book",
404: "comparator",
405: "netherbrick",
406: "quartz",
407: "tnt minecart",
408: "hopper minecart",
409: "prismarine shard",
410: "prismarine crystals",
411: "rabbit",
412: "cooked rabbit",
413: "rabbit stew",
414: "rabbit foot",
415: "rabbit hide",
416: "armor stand",
417: "iron horse armor",
418: "golden horse armor",
419: "diamond horse armor",
420: "lead",
421: "name tag",
422: "command block minecart",
423: "mutton",
424: "cooked mutton",
425: "banner",
427: "spruce door",
428: "birch door",
429: "jungle door",
430: "acacia door",
431: "dark oak door",
2256: "record 13",
2257: "record cat",
2258: "record blocks",
2259: "record chirp",
2260: "record far",
2261: "record mall",
2262: "record mellohi",
2263: "record stal",
2264: "record strad",
2265: "record ward",
2266: "record 11",
2267: "record wait"
}
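# POI filters for the markers list below; townFilter is defined but not
# currently included in that list.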
def signFilter(poi):
if poi['id'] == 'Sign':
return "\n".join([poi['Text1'], poi['Text2'], poi['Text3'], poi['Text4']])
def chestFilter(poi):
global MC_IDS
if poi['id'] == 'Chest':
items = ''
for item in poi['Items']:
item_name = MC_IDS[item['id']]
items += '\n%s (%d)' % (item_name, item['Count'])
if 'diamond' in item_name:
print 'FOUND: %s at %d, %d, %d' % (item_name, poi['x'], poi['y'], poi['z'])
return ('Chest', 'Chest with %d items (%d, %d, %d):%s' % (len(poi['Items']), poi['x'], poi['y'], poi['z'], items))
def playerFilter(poi):
if poi['id'] == 'Player':
poi['icon'] = "http://overviewer.org/avatar/%s" % poi['EntityId']
return "Last known location for %s" % poi['EntityId']
def townFilter(poi):
if poi['id'] == 'Town':
try:
return (poi['name'], poi['description'])
except KeyError:
return poi['name'] + '\n'
def get_crop(radius = 0):
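    # Returns a (min_x, min_z, max_x, max_z) crop box centred on the castle,
    # optionally expanded by `radius` blocks on each side.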
center_x = -16
center_z = -76
castle_radius = 30
min_x = center_x-castle_radius-radius
min_z = center_z-castle_radius-radius
max_x = center_x+castle_radius+radius+1
max_z = center_z+castle_radius+radius+1
return (min_x, min_z, max_x, max_z)
filters = [dict(name = "Signs", filterFunction = signFilter),
dict(name = "Chests", filterFunction = chestFilter, checked = True),
dict(name = "Players", filterFunction = playerFilter)]
# Note: this block originally reused the 'overworld_daytime' key and was
# silently overwritten by the definition below; give the castle render its
# own key so both renders are generated.
renders['overworld_castle'] = {
    'world': world_name,
    'title': 'Castle',
    'dimension': 'overworld',
    'northdirection': 'upper-left',
    'crop': get_crop(100),
    'markers': filters
}
renders['overworld_daytime'] = {
'world': world_name,
'title': 'Day',
'dimension': 'overworld',
'northdirection': 'upper-left',
'crop': get_crop(100),
'markers': filters
}
renders['overworld_night'] = {
'world': world_name,
'title': 'Night',
'rendermode': 'smooth_night',
'dimension': 'overworld',
'northdirection': 'upper-left',
'crop': get_crop(100),
'markers': filters
}
for i in range(0, 15):
renders['overworld_level_'+str(i)] = {
'world': world_name,
'title': 'Level '+str(i),
'rendermode': [Base(), EdgeLines(), SmoothLighting(), Depth(max = (i*8)+3)],
'dimension': 'overworld',
'northdirection': 'upper-left',
'crop': get_crop(),
'markers': filters
}
renders['nether'] = {
'world': world_name,
'title': 'Nether',
'rendermode': 'nether',
'dimension': 'nether',
'northdirection': 'upper-left',
'crop': get_crop(),
'markers': filters
}
| gpl-2.0 | -4,839,259,828,219,592,000 | 23.811475 | 122 | 0.537248 | false |
richbrowne/f5-openstack-agent | f5_openstack_agent/lbaasv2/drivers/bigip/vlan_binding.py | 1 | 6094 | # coding=utf-8
# Copyright 2014-2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
class VLANBindingBase(object):
"""Base Class for device interface to port binding """
def __init__(self, conf, driver):
self.conf = conf
self.driver = driver
self.interface_binding_mappings = {}
self.__initialized__bigip_ports = False
LOG.debug('reading static device interface port bindings')
if self.conf.interface_port_static_mappings:
LOG.debug('bindings: %s '
% self.conf.interface_port_static_mappings)
interface_binding_static_mappings = \
json.loads(self.conf.interface_port_static_mappings)
if isinstance(interface_binding_static_mappings, dict):
                for device in interface_binding_static_mappings:
                    # the mapping value (not the device-name key) holds the
                    # interface-to-port dict
                    if isinstance(
                            interface_binding_static_mappings[device], dict):
                        self.interface_binding_mappings[device] = \
                            interface_binding_static_mappings[device]
else:
LOG.debug('interface_port_static_mappings not configured')
def register_bigip_interfaces(self):
# Delayed binding BIG-IP ports will be called
# after BIG-IP endpoints are registered.
if not self.__initialized__bigip_ports:
for bigip in self.driver.get_all_bigips():
LOG.debug('Request Port information for MACs: %s'
% bigip.device_interfaces)
if self.driver.plugin_rpc:
ports = self.driver.plugin_rpc.get_ports_for_mac_addresses(
mac_addresses=bigip.mac_addresses)
LOG.debug('Neutron returned Port Info: %s' % ports)
for port in ports:
for interface in bigip.device_interfaces:
if not interface == 'mgmt':
if bigip.device_interfaces[interface] == \
port['mac_address']:
mapping = {interface: port['id']}
self.interface_binding_mappings[
bigip.device_name] = mapping
LOG.debug('adding mapping information device'
'%s interface %s to port: %s'
% (bigip.device_name,
interface,
port['id']))
self.__initialized__bigip_ports = True
LOG.debug('interface bindings after initialization are: %s'
% self.interface_binding_mappings)
for bigip in self.driver.get_all_bigips():
if bigip.device_name not in self.interface_binding_mappings:
example = {bigip.device_name: {}}
for interface in bigip.device_interfaces:
example[bigip.device_name][interface] = \
"port_id_for_%s" % interface
                json_example = json.dumps(example)
                LOG.warning(
                    'The device %s at %s does not have interface bindings '
                    'even though VLAN binding has been requested'
                    % (bigip.device_name, bigip.hostname)
                )
LOG.warning(
'An example static mapping would be: %s' % json_example
)
def allow_vlan(self, device_name=None, interface=None, vlanid=0):
raise NotImplementedError(
"An VLAN binding class must implement allow_vlan"
)
def prune_vlan(self, device_name=None, interface=None, vlanid=0):
raise NotImplementedError(
"An VLAN binding class must implement prune_vlan"
)
class NullBinding(VLANBindingBase):
# Class for configuring VLAN lists on ports.
def __init__(self, conf, driver):
super(NullBinding, self).__init__(conf, driver)
def allow_vlan(self, device_name=None, interface=None, vlanid=0):
if not device_name:
return
if not interface:
return
if vlanid == 0:
return
LOG.debug('checking for port bindings '
'device_name: %s interface %s'
% (device_name, interface))
if device_name in self.interface_binding_mappings:
if interface in self.interface_binding_mappings[device_name]:
LOG.debug(
'allowing VLAN %s on port %s'
% (vlanid,
self.interface_binding_mappings[device_name][interface])
)
def prune_vlan(self, device_name=None, interface=None, vlanid=None):
if not device_name:
return
if not interface:
return
if vlanid == 0:
return
LOG.debug('checking for port bindings '
'device_name: %s interface %s'
% (device_name, interface))
if device_name in self.interface_binding_mappings:
if interface in self.interface_binding_mappings[device_name]:
LOG.debug(
'pruning VLAN %s from port %s'
% (vlanid,
self.interface_binding_mappings[device_name][interface])
)
| apache-2.0 | -6,098,282,063,651,757,000 | 42.528571 | 79 | 0.541516 | false |
xArm-Developer/xArm-Python-SDK | example/wrapper/common/3002-record_trajectory.py | 1 | 1366 | #!/usr/bin/env python3
# Software License Agreement (BSD License)
#
# Copyright (c) 2019, UFACTORY, Inc.
# All rights reserved.
#
# Author: Vinman <[email protected]> <[email protected]>
"""
Description: Record trajectory
    1. requires firmware 1.2.0 or later
"""
import os
import sys
import time
sys.path.append(os.path.join(os.path.dirname(__file__), '../../..'))
from xarm.wrapper import XArmAPI
#######################################################
"""
Just for test example
"""
if len(sys.argv) >= 2:
ip = sys.argv[1]
else:
try:
from configparser import ConfigParser
parser = ConfigParser()
parser.read('../robot.conf')
ip = parser.get('xArm', 'ip')
except:
ip = input('Please input the xArm ip address:')
if not ip:
print('input error, exit')
sys.exit(1)
########################################################
arm = XArmAPI(ip, is_radian=True)
arm.motion_enable(enable=True)
arm.set_mode(0)
arm.set_state(state=0)
# Turn on manual mode before recording
arm.set_mode(2)
arm.set_state(0)
arm.start_record_trajectory()
# Analog recording process, here with delay instead
time.sleep(20)
arm.stop_record_trajectory()
arm.save_record_trajectory('test.traj')
time.sleep(1)
# Turn off manual mode after recording
arm.set_mode(0)
arm.set_state(0)
| bsd-3-clause | -4,354,373,712,127,189,500 | 20.015385 | 68 | 0.60981 | false |
mtik00/pyclimenu | examples/nested.py | 1 | 2077 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
'''
This sample script shows a simple **nested** menu structure.
There are two `group` items: `build_menu` and `test_menu`. These two items
are shown on the main menu. Once selected, their respective sub-items will
be shown.
`test_menu` has a sub-menu itself, `sub_test_menu`. That item has a single
menu item.
Notice how the decorators change from `@climenu` to `@build_menu` and
`@test_menu`.
'''
from __future__ import print_function
import climenu
###############################################################################
# Create an empty function to serve as a menu group
@climenu.group()
def build_menu():
'''Build Functions'''
pass
# Add this function to the `build_menu` group
@build_menu.menu()
def test():
'''Build the package'''
print("!!!package build!!!")
return True
# Add this function to the `build_menu` group
@build_menu.menu()
def test2():
'''Build the release'''
print("!!!release build")
return True
###############################################################################
###############################################################################
# Create an empty function to serve as a menu group
@climenu.group()
def test_menu():
'''Test Functions'''
pass
# Add this function to the `test_menu` group
@test_menu.menu()
def test_one():
'''Run test #1'''
print("!!!test #1 run!!!")
return True
# Add this function to the `test_menu` group
@test_menu.menu()
def test_two():
'''Run test #2'''
print("!!!test #2 run!!!")
# Create a sub-group and add it to the `test_menu` group
@test_menu.group()
def sub_test_menu():
'''Another testing menu'''
pass
# Add this function to the `sub_test_menu` group
@sub_test_menu.menu()
def subsub_menu1():
'''Run test #3'''
print("!!!test #3 run!!!")
#################################################################################
if __name__ == '__main__':
climenu.run()
| mit | -8,232,005,244,455,419,000 | 23.329268 | 81 | 0.515648 | false |
beagles/neutron_hacking | neutron/tests/unit/test_dhcp_agent.py | 1 | 62972 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import os
import sys
import uuid
import eventlet
import mock
from oslo.config import cfg
from oslo import messaging
import testtools
from neutron.agent.common import config
from neutron.agent import dhcp_agent
from neutron.agent.dhcp_agent import DhcpAgentWithStateReport
from neutron.agent.linux import dhcp
from neutron.agent.linux import interface
from neutron.common import constants as const
from neutron.common import exceptions
from neutron.tests import base
ROOTDIR = os.path.dirname(os.path.dirname(__file__))
ETCDIR = os.path.join(ROOTDIR, 'etc')
HOSTNAME = 'hostname'
dev_man = dhcp.DeviceManager
rpc_api = dhcp_agent.DhcpPluginApi
DEVICE_MANAGER = '%s.%s' % (dev_man.__module__, dev_man.__name__)
DHCP_PLUGIN = '%s.%s' % (rpc_api.__module__, rpc_api.__name__)
def etcdir(*p):
return os.path.join(ETCDIR, *p)
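# Canned Neutron objects (subnets, ports, networks) reused as fixtures
# throughout the tests below.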
fake_tenant_id = 'aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa'
fake_subnet1_allocation_pools = dhcp.DictModel(dict(id='', start='172.9.9.2',
end='172.9.9.254'))
fake_subnet1 = dhcp.DictModel(dict(id='bbbbbbbb-bbbb-bbbb-bbbbbbbbbbbb',
network_id='12345678-1234-5678-1234567890ab',
cidr='172.9.9.0/24', enable_dhcp=True, name='',
tenant_id=fake_tenant_id,
gateway_ip='172.9.9.1', host_routes=[],
dns_nameservers=[], ip_version=4,
allocation_pools=fake_subnet1_allocation_pools))
fake_subnet2_allocation_pools = dhcp.DictModel(dict(id='', start='172.9.8.2',
end='172.9.8.254'))
fake_subnet2 = dhcp.DictModel(dict(id='dddddddd-dddd-dddd-dddddddddddd',
network_id='12345678-1234-5678-1234567890ab',
cidr='172.9.8.0/24', enable_dhcp=False, name='',
tenant_id=fake_tenant_id, gateway_ip='172.9.8.1',
host_routes=[], dns_nameservers=[], ip_version=4,
allocation_pools=fake_subnet2_allocation_pools))
fake_subnet3 = dhcp.DictModel(dict(id='bbbbbbbb-1111-2222-bbbbbbbbbbbb',
network_id='12345678-1234-5678-1234567890ab',
cidr='192.168.1.1/24', enable_dhcp=True))
fake_meta_subnet = dhcp.DictModel(dict(id='bbbbbbbb-1111-2222-bbbbbbbbbbbb',
network_id='12345678-1234-5678-1234567890ab',
cidr='169.254.169.252/30',
gateway_ip='169.254.169.253',
enable_dhcp=True))
fake_fixed_ip1 = dhcp.DictModel(dict(id='', subnet_id=fake_subnet1.id,
ip_address='172.9.9.9'))
fake_meta_fixed_ip = dhcp.DictModel(dict(id='', subnet=fake_meta_subnet,
ip_address='169.254.169.254'))
fake_allocation_pool_subnet1 = dhcp.DictModel(dict(id='', start='172.9.9.2',
end='172.9.9.254'))
fake_port1 = dhcp.DictModel(dict(id='12345678-1234-aaaa-1234567890ab',
device_id='dhcp-12345678-1234-aaaa-1234567890ab',
allocation_pools=fake_subnet1_allocation_pools,
mac_address='aa:bb:cc:dd:ee:ff',
network_id='12345678-1234-5678-1234567890ab',
fixed_ips=[fake_fixed_ip1]))
fake_port2 = dhcp.DictModel(dict(id='12345678-1234-aaaa-123456789000',
mac_address='aa:bb:cc:dd:ee:99',
network_id='12345678-1234-5678-1234567890ab',
fixed_ips=[]))
fake_meta_port = dhcp.DictModel(dict(id='12345678-1234-aaaa-1234567890ab',
mac_address='aa:bb:cc:dd:ee:ff',
network_id='12345678-1234-5678-1234567890ab',
device_owner=const.DEVICE_OWNER_ROUTER_INTF,
device_id='forzanapoli',
fixed_ips=[fake_meta_fixed_ip]))
fake_network = dhcp.NetModel(True, dict(id='12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
admin_state_up=True,
subnets=[fake_subnet1, fake_subnet2],
ports=[fake_port1]))
fake_meta_network = dhcp.NetModel(
True, dict(id='12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
admin_state_up=True,
subnets=[fake_meta_subnet],
ports=[fake_meta_port]))
fake_down_network = dhcp.NetModel(
True, dict(id='12345678-dddd-dddd-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
admin_state_up=False,
subnets=[],
ports=[]))
class TestDhcpAgent(base.BaseTestCase):
def setUp(self):
super(TestDhcpAgent, self).setUp()
dhcp_agent.register_options()
cfg.CONF.set_override('interface_driver',
'neutron.agent.linux.interface.NullDriver')
self.driver_cls_p = mock.patch(
'neutron.agent.dhcp_agent.importutils.import_class')
self.driver = mock.Mock(name='driver')
self.driver.existing_dhcp_networks.return_value = []
self.driver_cls = self.driver_cls_p.start()
self.driver_cls.return_value = self.driver
self.mock_makedirs_p = mock.patch("os.makedirs")
self.mock_makedirs = self.mock_makedirs_p.start()
self.addCleanup(mock.patch.stopall)
def test_dhcp_agent_manager(self):
state_rpc_str = 'neutron.agent.rpc.PluginReportStateAPI'
with mock.patch.object(DhcpAgentWithStateReport,
'sync_state',
autospec=True) as mock_sync_state:
with mock.patch.object(DhcpAgentWithStateReport,
'periodic_resync',
autospec=True) as mock_periodic_resync:
with mock.patch(state_rpc_str) as state_rpc:
with mock.patch.object(sys, 'argv') as sys_argv:
sys_argv.return_value = [
'dhcp', '--config-file',
etcdir('neutron.conf.test')]
cfg.CONF.register_opts(dhcp_agent.DhcpAgent.OPTS)
config.register_interface_driver_opts_helper(cfg.CONF)
config.register_agent_state_opts_helper(cfg.CONF)
config.register_root_helper(cfg.CONF)
cfg.CONF.register_opts(dhcp.OPTS)
cfg.CONF.register_opts(interface.OPTS)
cfg.CONF(project='neutron')
agent_mgr = DhcpAgentWithStateReport('testhost')
eventlet.greenthread.sleep(1)
agent_mgr.after_start()
mock_sync_state.assert_called_once_with(agent_mgr)
mock_periodic_resync.assert_called_once_with(agent_mgr)
state_rpc.assert_has_calls(
[mock.call(mock.ANY),
mock.call().report_state(mock.ANY, mock.ANY,
mock.ANY)])
def test_dhcp_agent_main_agent_manager(self):
logging_str = 'neutron.agent.common.config.setup_logging'
launcher_str = 'neutron.openstack.common.service.ServiceLauncher'
with mock.patch(logging_str):
with mock.patch.object(sys, 'argv') as sys_argv:
with mock.patch(launcher_str) as launcher:
sys_argv.return_value = ['dhcp', '--config-file',
etcdir('neutron.conf.test')]
dhcp_agent.main()
launcher.assert_has_calls(
[mock.call(), mock.call().launch_service(mock.ANY),
mock.call().wait()])
def test_run_completes_single_pass(self):
with mock.patch(DEVICE_MANAGER):
dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
attrs_to_mock = dict(
[(a, mock.DEFAULT) for a in
['sync_state', 'periodic_resync']])
with mock.patch.multiple(dhcp, **attrs_to_mock) as mocks:
dhcp.run()
mocks['sync_state'].assert_called_once_with()
mocks['periodic_resync'].assert_called_once_with()
def test_call_driver(self):
network = mock.Mock()
network.id = '1'
dhcp = dhcp_agent.DhcpAgent(cfg.CONF)
self.assertTrue(dhcp.call_driver('foo', network))
self.driver.assert_called_once_with(cfg.CONF,
mock.ANY,
'sudo',
mock.ANY,
mock.ANY)
def _test_call_driver_failure(self, exc=None,
trace_level='exception', expected_sync=True):
network = mock.Mock()
network.id = '1'
self.driver.return_value.foo.side_effect = exc or Exception
with mock.patch.object(dhcp_agent.LOG, trace_level) as log:
dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
self.assertIsNone(dhcp.call_driver('foo', network))
self.driver.assert_called_once_with(cfg.CONF,
mock.ANY,
'sudo',
mock.ANY,
mock.ANY)
self.assertEqual(log.call_count, 1)
self.assertEqual(expected_sync, dhcp.needs_resync)
def test_call_driver_failure(self):
self._test_call_driver_failure()
def test_call_driver_remote_error_net_not_found(self):
self._test_call_driver_failure(
exc=messaging.RemoteError(exc_type='NetworkNotFound'),
trace_level='warning')
def test_call_driver_network_not_found(self):
self._test_call_driver_failure(
exc=exceptions.NetworkNotFound(net_id='1'),
trace_level='warning')
def test_call_driver_conflict(self):
self._test_call_driver_failure(
exc=exceptions.Conflict(),
trace_level='warning',
expected_sync=False)
def _test_sync_state_helper(self, known_networks, active_networks):
with mock.patch(DHCP_PLUGIN) as plug:
mock_plugin = mock.Mock()
mock_plugin.get_active_networks_info.return_value = active_networks
plug.return_value = mock_plugin
dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
attrs_to_mock = dict(
[(a, mock.DEFAULT) for a in
['refresh_dhcp_helper', 'disable_dhcp_helper', 'cache']])
with mock.patch.multiple(dhcp, **attrs_to_mock) as mocks:
mocks['cache'].get_network_ids.return_value = known_networks
dhcp.sync_state()
exp_refresh = [
mock.call(net_id) for net_id in active_networks]
diff = set(known_networks) - set(active_networks)
exp_disable = [mock.call(net_id) for net_id in diff]
mocks['cache'].assert_has_calls([mock.call.get_network_ids()])
                mocks['refresh_dhcp_helper'].assert_has_calls(exp_refresh)
                mocks['disable_dhcp_helper'].assert_has_calls(exp_disable)
def test_sync_state_initial(self):
self._test_sync_state_helper([], ['a'])
def test_sync_state_same(self):
self._test_sync_state_helper(['a'], ['a'])
def test_sync_state_disabled_net(self):
self._test_sync_state_helper(['b'], ['a'])
def test_sync_state_waitall(self):
class mockNetwork():
id = '0'
admin_state_up = True
subnets = []
def __init__(self, id):
self.id = id
with mock.patch.object(dhcp_agent.eventlet.GreenPool, 'waitall') as w:
active_networks = [mockNetwork('1'), mockNetwork('2'),
mockNetwork('3'), mockNetwork('4'),
mockNetwork('5')]
known_networks = ['1', '2', '3', '4', '5']
self._test_sync_state_helper(known_networks, active_networks)
w.assert_called_once_with()
def test_sync_state_plugin_error(self):
with mock.patch(DHCP_PLUGIN) as plug:
mock_plugin = mock.Mock()
mock_plugin.get_active_networks_info.side_effect = Exception
plug.return_value = mock_plugin
with mock.patch.object(dhcp_agent.LOG, 'exception') as log:
dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
dhcp.sync_state()
self.assertTrue(log.called)
self.assertTrue(dhcp.needs_resync)
def test_periodic_resync(self):
dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
with mock.patch.object(dhcp_agent.eventlet, 'spawn') as spawn:
dhcp.periodic_resync()
spawn.assert_called_once_with(dhcp._periodic_resync_helper)
    def test_periodic_resync_helper(self):
with mock.patch.object(dhcp_agent.eventlet, 'sleep') as sleep:
dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
dhcp.needs_resync = True
with mock.patch.object(dhcp, 'sync_state') as sync_state:
sync_state.side_effect = RuntimeError
with testtools.ExpectedException(RuntimeError):
dhcp._periodic_resync_helper()
sync_state.assert_called_once_with()
sleep.assert_called_once_with(dhcp.conf.resync_interval)
self.assertFalse(dhcp.needs_resync)
def test_populate_cache_on_start_without_active_networks_support(self):
        # emulate a dhcp driver without support for retrieving active networks
self.driver.existing_dhcp_networks.side_effect = NotImplementedError
with mock.patch.object(dhcp_agent.LOG, 'debug') as log:
dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
self.driver.existing_dhcp_networks.assert_called_once_with(
dhcp.conf,
cfg.CONF.root_helper
)
self.assertFalse(dhcp.cache.get_network_ids())
self.assertTrue(log.called)
def test_populate_cache_on_start(self):
networks = ['aaa', 'bbb']
self.driver.existing_dhcp_networks.return_value = networks
dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
self.driver.existing_dhcp_networks.assert_called_once_with(
dhcp.conf,
cfg.CONF.root_helper
)
self.assertEqual(set(networks), set(dhcp.cache.get_network_ids()))
def test_none_interface_driver(self):
cfg.CONF.set_override('interface_driver', None)
with mock.patch.object(dhcp, 'LOG') as log:
self.assertRaises(SystemExit, dhcp.DeviceManager,
cfg.CONF, 'sudo', None)
msg = 'An interface driver must be specified'
log.error.assert_called_once_with(msg)
def test_nonexistent_interface_driver(self):
        # Temporarily turn off mock so we can use the real import_class
        # to import interface_driver.
self.driver_cls_p.stop()
self.addCleanup(self.driver_cls_p.start)
cfg.CONF.set_override('interface_driver', 'foo')
with mock.patch.object(dhcp, 'LOG') as log:
self.assertRaises(SystemExit, dhcp.DeviceManager,
cfg.CONF, 'sudo', None)
            self.assertTrue(log.error.called)
class TestLogArgs(base.BaseTestCase):
def test_log_args_without_log_dir_and_file(self):
conf_dict = {'debug': True,
'verbose': False,
'log_dir': None,
'log_file': None,
'use_syslog': True,
'syslog_log_facility': 'LOG_USER'}
conf = dhcp.DictModel(conf_dict)
expected_args = ['--debug',
'--use-syslog',
'--syslog-log-facility=LOG_USER']
args = config.get_log_args(conf, 'log_file_name')
self.assertEqual(expected_args, args)
def test_log_args_without_log_file(self):
conf_dict = {'debug': True,
'verbose': True,
'log_dir': '/etc/tests',
'log_file': None,
'use_syslog': False,
'syslog_log_facility': 'LOG_USER'}
conf = dhcp.DictModel(conf_dict)
expected_args = ['--debug',
'--verbose',
'--log-file=log_file_name',
'--log-dir=/etc/tests']
args = config.get_log_args(conf, 'log_file_name')
self.assertEqual(expected_args, args)
def test_log_args_with_log_dir_and_file(self):
conf_dict = {'debug': True,
'verbose': False,
'log_dir': '/etc/tests',
'log_file': 'tests/filelog',
'use_syslog': False,
'syslog_log_facility': 'LOG_USER'}
conf = dhcp.DictModel(conf_dict)
expected_args = ['--debug',
'--log-file=log_file_name',
'--log-dir=/etc/tests/tests']
args = config.get_log_args(conf, 'log_file_name')
self.assertEqual(expected_args, args)
def test_log_args_without_log_dir(self):
conf_dict = {'debug': True,
'verbose': False,
'log_file': 'tests/filelog',
'log_dir': None,
'use_syslog': False,
'syslog_log_facility': 'LOG_USER'}
conf = dhcp.DictModel(conf_dict)
expected_args = ['--debug',
'--log-file=log_file_name',
'--log-dir=tests']
args = config.get_log_args(conf, 'log_file_name')
self.assertEqual(expected_args, args)
def test_log_args_with_filelog_and_syslog(self):
conf_dict = {'debug': True,
'verbose': True,
'log_file': 'tests/filelog',
'log_dir': '/etc/tests',
'use_syslog': True,
'syslog_log_facility': 'LOG_USER'}
conf = dhcp.DictModel(conf_dict)
expected_args = ['--debug',
'--verbose',
'--log-file=log_file_name',
'--log-dir=/etc/tests/tests']
args = config.get_log_args(conf, 'log_file_name')
self.assertEqual(expected_args, args)
class TestDhcpAgentEventHandler(base.BaseTestCase):
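    # Exercises the notification handlers (network/subnet/port *_end events)
    # with the plugin RPC, network cache and dhcp driver calls mocked out.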
def setUp(self):
super(TestDhcpAgentEventHandler, self).setUp()
config.register_interface_driver_opts_helper(cfg.CONF)
cfg.CONF.register_opts(dhcp.OPTS)
cfg.CONF.set_override('interface_driver',
'neutron.agent.linux.interface.NullDriver')
config.register_root_helper(cfg.CONF)
cfg.CONF.register_opts(dhcp_agent.DhcpAgent.OPTS)
self.plugin_p = mock.patch(DHCP_PLUGIN)
plugin_cls = self.plugin_p.start()
self.plugin = mock.Mock()
plugin_cls.return_value = self.plugin
self.cache_p = mock.patch('neutron.agent.dhcp_agent.NetworkCache')
cache_cls = self.cache_p.start()
self.cache = mock.Mock()
cache_cls.return_value = self.cache
self.mock_makedirs_p = mock.patch("os.makedirs")
self.mock_makedirs = self.mock_makedirs_p.start()
self.mock_init_p = mock.patch('neutron.agent.dhcp_agent.'
'DhcpAgent._populate_networks_cache')
self.mock_init = self.mock_init_p.start()
with mock.patch.object(dhcp.Dnsmasq,
'check_version') as check_v:
check_v.return_value = dhcp.Dnsmasq.MINIMUM_VERSION
self.dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
self.call_driver_p = mock.patch.object(self.dhcp, 'call_driver')
self.call_driver = self.call_driver_p.start()
self.external_process_p = mock.patch(
'neutron.agent.linux.external_process.ProcessManager'
)
self.external_process = self.external_process_p.start()
def tearDown(self):
self.external_process_p.stop()
self.call_driver_p.stop()
self.cache_p.stop()
self.plugin_p.stop()
self.mock_makedirs_p.stop()
self.mock_init_p.stop()
cfg.CONF.reset()
super(TestDhcpAgentEventHandler, self).tearDown()
def _enable_dhcp_helper(self, isolated_metadata=False):
if isolated_metadata:
cfg.CONF.set_override('enable_isolated_metadata', True)
self.plugin.get_network_info.return_value = fake_network
self.dhcp.enable_dhcp_helper(fake_network.id)
self.plugin.assert_has_calls(
[mock.call.get_network_info(fake_network.id)])
self.call_driver.assert_called_once_with('enable', fake_network)
self.cache.assert_has_calls([mock.call.put(fake_network)])
if isolated_metadata:
self.external_process.assert_has_calls([
mock.call(
cfg.CONF,
'12345678-1234-5678-1234567890ab',
'sudo',
'qdhcp-12345678-1234-5678-1234567890ab'),
mock.call().enable(mock.ANY)
])
else:
self.assertFalse(self.external_process.call_count)
def test_enable_dhcp_helper_enable_isolated_metadata(self):
self._enable_dhcp_helper(isolated_metadata=True)
def test_enable_dhcp_helper(self):
self._enable_dhcp_helper()
def test_enable_dhcp_helper_down_network(self):
self.plugin.get_network_info.return_value = fake_down_network
self.dhcp.enable_dhcp_helper(fake_down_network.id)
self.plugin.assert_has_calls(
[mock.call.get_network_info(fake_down_network.id)])
self.assertFalse(self.call_driver.called)
self.assertFalse(self.cache.called)
self.assertFalse(self.external_process.called)
def test_enable_dhcp_helper_network_none(self):
self.plugin.get_network_info.return_value = None
with mock.patch.object(dhcp_agent.LOG, 'warn') as log:
self.dhcp.enable_dhcp_helper('fake_id')
self.plugin.assert_has_calls(
[mock.call.get_network_info('fake_id')])
self.assertFalse(self.call_driver.called)
self.assertTrue(log.called)
self.assertFalse(self.dhcp.needs_resync)
def test_enable_dhcp_helper_exception_during_rpc(self):
self.plugin.get_network_info.side_effect = Exception
with mock.patch.object(dhcp_agent.LOG, 'exception') as log:
self.dhcp.enable_dhcp_helper(fake_network.id)
self.plugin.assert_has_calls(
[mock.call.get_network_info(fake_network.id)])
self.assertFalse(self.call_driver.called)
self.assertTrue(log.called)
self.assertTrue(self.dhcp.needs_resync)
self.assertFalse(self.cache.called)
self.assertFalse(self.external_process.called)
def test_enable_dhcp_helper_driver_failure(self):
self.plugin.get_network_info.return_value = fake_network
self.call_driver.return_value = False
self.dhcp.enable_dhcp_helper(fake_network.id)
self.plugin.assert_has_calls(
[mock.call.get_network_info(fake_network.id)])
self.call_driver.assert_called_once_with('enable', fake_network)
self.assertFalse(self.cache.called)
self.assertFalse(self.external_process.called)
def _disable_dhcp_helper_known_network(self, isolated_metadata=False):
if isolated_metadata:
cfg.CONF.set_override('enable_isolated_metadata', True)
self.cache.get_network_by_id.return_value = fake_network
self.dhcp.disable_dhcp_helper(fake_network.id)
self.cache.assert_has_calls(
[mock.call.get_network_by_id(fake_network.id)])
self.call_driver.assert_called_once_with('disable', fake_network)
if isolated_metadata:
self.external_process.assert_has_calls([
mock.call(
cfg.CONF,
'12345678-1234-5678-1234567890ab',
'sudo',
'qdhcp-12345678-1234-5678-1234567890ab'),
mock.call().disable()
])
else:
self.assertFalse(self.external_process.call_count)
def test_disable_dhcp_helper_known_network_isolated_metadata(self):
self._disable_dhcp_helper_known_network(isolated_metadata=True)
def test_disable_dhcp_helper_known_network(self):
self._disable_dhcp_helper_known_network()
def test_disable_dhcp_helper_unknown_network(self):
self.cache.get_network_by_id.return_value = None
self.dhcp.disable_dhcp_helper('abcdef')
self.cache.assert_has_calls(
[mock.call.get_network_by_id('abcdef')])
self.assertEqual(0, self.call_driver.call_count)
self.assertFalse(self.external_process.called)
def _disable_dhcp_helper_driver_failure(self, isolated_metadata=False):
if isolated_metadata:
cfg.CONF.set_override('enable_isolated_metadata', True)
self.cache.get_network_by_id.return_value = fake_network
self.call_driver.return_value = False
self.dhcp.disable_dhcp_helper(fake_network.id)
self.cache.assert_has_calls(
[mock.call.get_network_by_id(fake_network.id)])
self.call_driver.assert_called_once_with('disable', fake_network)
self.cache.assert_has_calls(
[mock.call.get_network_by_id(fake_network.id)])
if isolated_metadata:
self.external_process.assert_has_calls([
mock.call(
cfg.CONF,
'12345678-1234-5678-1234567890ab',
'sudo',
'qdhcp-12345678-1234-5678-1234567890ab'),
mock.call().disable()
])
else:
self.assertFalse(self.external_process.call_count)
def test_disable_dhcp_helper_driver_failure_isolated_metadata(self):
self._disable_dhcp_helper_driver_failure(isolated_metadata=True)
def test_disable_dhcp_helper_driver_failure(self):
self._disable_dhcp_helper_driver_failure()
def test_enable_isolated_metadata_proxy(self):
class_path = 'neutron.agent.linux.external_process.ProcessManager'
with mock.patch(class_path) as ext_process:
self.dhcp.enable_isolated_metadata_proxy(fake_network)
ext_process.assert_has_calls([
mock.call(
cfg.CONF,
'12345678-1234-5678-1234567890ab',
'sudo',
'qdhcp-12345678-1234-5678-1234567890ab'),
mock.call().enable(mock.ANY)
])
def test_disable_isolated_metadata_proxy(self):
class_path = 'neutron.agent.linux.external_process.ProcessManager'
with mock.patch(class_path) as ext_process:
self.dhcp.disable_isolated_metadata_proxy(fake_network)
ext_process.assert_has_calls([
mock.call(
cfg.CONF,
'12345678-1234-5678-1234567890ab',
'sudo',
'qdhcp-12345678-1234-5678-1234567890ab'),
mock.call().disable()
])
def test_enable_isolated_metadata_proxy_with_metadata_network(self):
cfg.CONF.set_override('enable_metadata_network', True)
cfg.CONF.set_override('debug', True)
cfg.CONF.set_override('log_file', 'test.log')
class_path = 'neutron.agent.linux.ip_lib.IPWrapper'
self.external_process_p.stop()
        # Ensure the mock is restored if this test fails
try:
with mock.patch(class_path) as ip_wrapper:
self.dhcp.enable_isolated_metadata_proxy(fake_meta_network)
ip_wrapper.assert_has_calls([mock.call(
'sudo',
'qdhcp-12345678-1234-5678-1234567890ab'),
mock.call().netns.execute([
'neutron-ns-metadata-proxy',
mock.ANY,
mock.ANY,
'--router_id=forzanapoli',
mock.ANY,
mock.ANY,
'--debug',
('--log-file=neutron-ns-metadata-proxy-%s.log' %
fake_meta_network.id)])
])
finally:
self.external_process_p.start()
def test_network_create_end(self):
payload = dict(network=dict(id=fake_network.id))
with mock.patch.object(self.dhcp, 'enable_dhcp_helper') as enable:
self.dhcp.network_create_end(None, payload)
            enable.assert_called_once_with(fake_network.id)
def test_network_update_end_admin_state_up(self):
payload = dict(network=dict(id=fake_network.id, admin_state_up=True))
with mock.patch.object(self.dhcp, 'enable_dhcp_helper') as enable:
self.dhcp.network_update_end(None, payload)
            enable.assert_called_once_with(fake_network.id)
def test_network_update_end_admin_state_down(self):
payload = dict(network=dict(id=fake_network.id, admin_state_up=False))
with mock.patch.object(self.dhcp, 'disable_dhcp_helper') as disable:
self.dhcp.network_update_end(None, payload)
            disable.assert_called_once_with(fake_network.id)
def test_network_delete_end(self):
payload = dict(network_id=fake_network.id)
with mock.patch.object(self.dhcp, 'disable_dhcp_helper') as disable:
self.dhcp.network_delete_end(None, payload)
            disable.assert_called_once_with(fake_network.id)
def test_refresh_dhcp_helper_no_dhcp_enabled_networks(self):
network = dhcp.NetModel(True, dict(id='net-id',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
admin_state_up=True,
subnets=[],
ports=[]))
self.cache.get_network_by_id.return_value = network
self.plugin.get_network_info.return_value = network
with mock.patch.object(self.dhcp, 'disable_dhcp_helper') as disable:
self.dhcp.refresh_dhcp_helper(network.id)
            disable.assert_called_once_with(network.id)
self.assertFalse(self.cache.called)
self.assertFalse(self.call_driver.called)
self.cache.assert_has_calls(
[mock.call.get_network_by_id('net-id')])
def test_refresh_dhcp_helper_exception_during_rpc(self):
network = dhcp.NetModel(True, dict(id='net-id',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
admin_state_up=True,
subnets=[],
ports=[]))
self.cache.get_network_by_id.return_value = network
self.plugin.get_network_info.side_effect = Exception
with mock.patch.object(dhcp_agent.LOG, 'exception') as log:
self.dhcp.refresh_dhcp_helper(network.id)
self.assertFalse(self.call_driver.called)
self.cache.assert_has_calls(
[mock.call.get_network_by_id('net-id')])
self.assertTrue(log.called)
self.assertTrue(self.dhcp.needs_resync)
def test_subnet_update_end(self):
payload = dict(subnet=dict(network_id=fake_network.id))
self.cache.get_network_by_id.return_value = fake_network
self.plugin.get_network_info.return_value = fake_network
self.dhcp.subnet_update_end(None, payload)
self.cache.assert_has_calls([mock.call.put(fake_network)])
self.call_driver.assert_called_once_with('reload_allocations',
fake_network)
def test_subnet_update_end_restart(self):
new_state = dhcp.NetModel(True, dict(id=fake_network.id,
tenant_id=fake_network.tenant_id,
admin_state_up=True,
subnets=[fake_subnet1, fake_subnet3],
ports=[fake_port1]))
payload = dict(subnet=dict(network_id=fake_network.id))
self.cache.get_network_by_id.return_value = fake_network
self.plugin.get_network_info.return_value = new_state
self.dhcp.subnet_update_end(None, payload)
self.cache.assert_has_calls([mock.call.put(new_state)])
self.call_driver.assert_called_once_with('restart',
new_state)
def test_subnet_update_end_delete_payload(self):
prev_state = dhcp.NetModel(True, dict(id=fake_network.id,
tenant_id=fake_network.tenant_id,
admin_state_up=True,
subnets=[fake_subnet1, fake_subnet3],
ports=[fake_port1]))
payload = dict(subnet_id=fake_subnet1.id)
self.cache.get_network_by_subnet_id.return_value = prev_state
self.cache.get_network_by_id.return_value = prev_state
self.plugin.get_network_info.return_value = fake_network
self.dhcp.subnet_delete_end(None, payload)
self.cache.assert_has_calls([
mock.call.get_network_by_subnet_id(
'bbbbbbbb-bbbb-bbbb-bbbbbbbbbbbb'),
mock.call.get_network_by_id('12345678-1234-5678-1234567890ab'),
mock.call.put(fake_network)])
self.call_driver.assert_called_once_with('restart',
fake_network)
def test_port_update_end(self):
payload = dict(port=vars(fake_port2))
self.cache.get_network_by_id.return_value = fake_network
self.cache.get_port_by_id.return_value = fake_port2
self.dhcp.port_update_end(None, payload)
self.cache.assert_has_calls(
[mock.call.get_network_by_id(fake_port2.network_id),
mock.call.put_port(mock.ANY)])
self.call_driver.assert_called_once_with('reload_allocations',
fake_network)
def test_port_update_change_ip_on_port(self):
payload = dict(port=vars(fake_port1))
self.cache.get_network_by_id.return_value = fake_network
updated_fake_port1 = copy.deepcopy(fake_port1)
updated_fake_port1.fixed_ips[0].ip_address = '172.9.9.99'
self.cache.get_port_by_id.return_value = updated_fake_port1
self.dhcp.port_update_end(None, payload)
self.cache.assert_has_calls(
[mock.call.get_network_by_id(fake_port1.network_id),
mock.call.put_port(mock.ANY)])
self.call_driver.assert_has_calls(
[mock.call.call_driver('reload_allocations', fake_network)])
def test_port_delete_end(self):
payload = dict(port_id=fake_port2.id)
self.cache.get_network_by_id.return_value = fake_network
self.cache.get_port_by_id.return_value = fake_port2
self.dhcp.port_delete_end(None, payload)
self.cache.assert_has_calls(
[mock.call.get_port_by_id(fake_port2.id),
mock.call.get_network_by_id(fake_network.id),
mock.call.remove_port(fake_port2)])
self.call_driver.assert_has_calls(
[mock.call.call_driver('reload_allocations', fake_network)])
def test_port_delete_end_unknown_port(self):
payload = dict(port_id='unknown')
self.cache.get_port_by_id.return_value = None
self.dhcp.port_delete_end(None, payload)
self.cache.assert_has_calls([mock.call.get_port_by_id('unknown')])
self.assertEqual(self.call_driver.call_count, 0)
class TestDhcpPluginApiProxy(base.BaseTestCase):
def setUp(self):
super(TestDhcpPluginApiProxy, self).setUp()
self.proxy = dhcp_agent.DhcpPluginApi('foo', {}, None)
self.proxy.host = 'foo'
self.call_p = mock.patch.object(self.proxy.client, 'call')
self.call = self.call_p.start()
def tearDown(self):
self.call_p.stop()
super(TestDhcpPluginApiProxy, self).tearDown()
def test_get_network_info(self):
self.call.return_value = dict(a=1)
retval = self.proxy.get_network_info('netid')
self.assertEqual(retval.a, 1)
self.call.assert_called_once_with({}, 'get_network_info',
network_id='netid',
host='foo')
def test_get_dhcp_port(self):
self.call.return_value = dict(a=1)
retval = self.proxy.get_dhcp_port('netid', 'devid')
self.assertEqual(retval.a, 1)
self.call.assert_called_once_with({}, 'get_dhcp_port',
network_id='netid',
device_id='devid',
host='foo')
def test_get_dhcp_port_none(self):
self.call.return_value = None
self.assertIsNone(self.proxy.get_dhcp_port('netid', 'devid'))
def test_get_active_networks_info(self):
self.proxy.get_active_networks_info()
self.call.assert_called_once_with({}, 'get_active_networks_info',
host='foo')
def test_create_dhcp_port(self):
port_body = (
{'port':
{'name': '', 'admin_state_up': True,
'network_id': fake_network.id,
'tenant_id': fake_network.tenant_id,
'fixed_ips': [{'subnet_id': fake_fixed_ip1.subnet_id}],
'device_id': mock.ANY}})
self.proxy.create_dhcp_port(port_body)
self.call.assert_called_once_with({}, 'create_dhcp_port',
port=port_body,
host='foo')
def test_create_dhcp_port_none(self):
self.call.return_value = None
port_body = (
{'port':
{'name': '', 'admin_state_up': True,
'network_id': fake_network.id,
'tenant_id': fake_network.tenant_id,
'fixed_ips': [{'subnet_id': fake_fixed_ip1.subnet_id}],
'device_id': mock.ANY}})
self.assertIsNone(self.proxy.create_dhcp_port(port_body))
def test_update_dhcp_port_none(self):
self.call.return_value = None
port_body = {'port': {'fixed_ips':
[{'subnet_id': fake_fixed_ip1.subnet_id}]}}
self.assertIsNone(self.proxy.update_dhcp_port(fake_port1.id,
port_body))
def test_update_dhcp_port(self):
port_body = {'port': {'fixed_ips':
[{'subnet_id': fake_fixed_ip1.subnet_id}]}}
self.proxy.update_dhcp_port(fake_port1.id, port_body)
self.call.assert_called_once_with({}, 'update_dhcp_port',
port_id=fake_port1.id,
port=port_body,
host='foo')
def test_release_dhcp_port(self):
self.proxy.release_dhcp_port('netid', 'devid')
self.assertTrue(self.call.called)
self.call.assert_called_once_with({}, 'release_dhcp_port',
network_id='netid',
device_id='devid',
host='foo')
def test_release_port_fixed_ip(self):
self.proxy.release_port_fixed_ip('netid', 'devid', 'subid')
self.assertTrue(self.call.called)
self.call.assert_called_once_with({}, 'release_port_fixed_ip',
network_id='netid',
subnet_id='subid',
device_id='devid',
host='foo')
class TestNetworkCache(base.BaseTestCase):
def test_put_network(self):
nc = dhcp_agent.NetworkCache()
nc.put(fake_network)
self.assertEqual(nc.cache,
{fake_network.id: fake_network})
self.assertEqual(nc.subnet_lookup,
{fake_subnet1.id: fake_network.id,
fake_subnet2.id: fake_network.id})
self.assertEqual(nc.port_lookup,
{fake_port1.id: fake_network.id})
def test_put_network_existing(self):
prev_network_info = mock.Mock()
nc = dhcp_agent.NetworkCache()
with mock.patch.object(nc, 'remove') as remove:
nc.cache[fake_network.id] = prev_network_info
nc.put(fake_network)
remove.assert_called_once_with(prev_network_info)
self.assertEqual(nc.cache,
{fake_network.id: fake_network})
self.assertEqual(nc.subnet_lookup,
{fake_subnet1.id: fake_network.id,
fake_subnet2.id: fake_network.id})
self.assertEqual(nc.port_lookup,
{fake_port1.id: fake_network.id})
def test_remove_network(self):
nc = dhcp_agent.NetworkCache()
nc.cache = {fake_network.id: fake_network}
nc.subnet_lookup = {fake_subnet1.id: fake_network.id,
fake_subnet2.id: fake_network.id}
nc.port_lookup = {fake_port1.id: fake_network.id}
nc.remove(fake_network)
self.assertEqual(len(nc.cache), 0)
self.assertEqual(len(nc.subnet_lookup), 0)
self.assertEqual(len(nc.port_lookup), 0)
def test_get_network_by_id(self):
nc = dhcp_agent.NetworkCache()
nc.put(fake_network)
self.assertEqual(nc.get_network_by_id(fake_network.id), fake_network)
def test_get_network_ids(self):
nc = dhcp_agent.NetworkCache()
nc.put(fake_network)
self.assertEqual(nc.get_network_ids(), [fake_network.id])
def test_get_network_by_subnet_id(self):
nc = dhcp_agent.NetworkCache()
nc.put(fake_network)
self.assertEqual(nc.get_network_by_subnet_id(fake_subnet1.id),
fake_network)
def test_get_network_by_port_id(self):
nc = dhcp_agent.NetworkCache()
nc.put(fake_network)
self.assertEqual(nc.get_network_by_port_id(fake_port1.id),
fake_network)
def test_put_port(self):
fake_net = dhcp.NetModel(
True, dict(id='12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
subnets=[fake_subnet1],
ports=[fake_port1]))
nc = dhcp_agent.NetworkCache()
nc.put(fake_net)
nc.put_port(fake_port2)
self.assertEqual(len(nc.port_lookup), 2)
self.assertIn(fake_port2, fake_net.ports)
def test_put_port_existing(self):
fake_net = dhcp.NetModel(
True, dict(id='12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
subnets=[fake_subnet1],
ports=[fake_port1, fake_port2]))
nc = dhcp_agent.NetworkCache()
nc.put(fake_net)
nc.put_port(fake_port2)
self.assertEqual(len(nc.port_lookup), 2)
self.assertIn(fake_port2, fake_net.ports)
def test_remove_port_existing(self):
fake_net = dhcp.NetModel(
True, dict(id='12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
subnets=[fake_subnet1],
ports=[fake_port1, fake_port2]))
nc = dhcp_agent.NetworkCache()
nc.put(fake_net)
nc.remove_port(fake_port2)
self.assertEqual(len(nc.port_lookup), 1)
self.assertNotIn(fake_port2, fake_net.ports)
def test_get_port_by_id(self):
nc = dhcp_agent.NetworkCache()
nc.put(fake_network)
self.assertEqual(nc.get_port_by_id(fake_port1.id), fake_port1)
class FakePort1:
id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
class FakeV4Subnet:
id = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
ip_version = 4
cidr = '192.168.0.0/24'
gateway_ip = '192.168.0.1'
enable_dhcp = True
class FakeV4SubnetNoGateway:
id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
ip_version = 4
cidr = '192.168.1.0/24'
gateway_ip = None
enable_dhcp = True
class FakeV4Network:
id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
subnets = [FakeV4Subnet()]
ports = [FakePort1()]
class FakeV4NetworkNoSubnet:
id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
subnets = []
ports = []
class FakeV4NetworkNoGateway:
id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
subnets = [FakeV4SubnetNoGateway()]
ports = [FakePort1()]
class TestDeviceManager(base.BaseTestCase):
def setUp(self):
super(TestDeviceManager, self).setUp()
config.register_interface_driver_opts_helper(cfg.CONF)
config.register_use_namespaces_opts_helper(cfg.CONF)
cfg.CONF.register_opts(dhcp_agent.DhcpAgent.OPTS)
cfg.CONF.register_opts(dhcp.OPTS)
cfg.CONF.set_override('interface_driver',
'neutron.agent.linux.interface.NullDriver')
config.register_root_helper(cfg.CONF)
cfg.CONF.set_override('use_namespaces', True)
cfg.CONF.set_override('enable_isolated_metadata', True)
self.device_exists_p = mock.patch(
'neutron.agent.linux.ip_lib.device_exists')
self.device_exists = self.device_exists_p.start()
self.dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver')
self.iproute_cls_p = mock.patch('neutron.agent.linux.'
'ip_lib.IpRouteCommand')
driver_cls = self.dvr_cls_p.start()
iproute_cls = self.iproute_cls_p.start()
self.mock_driver = mock.MagicMock()
self.mock_driver.DEV_NAME_LEN = (
interface.LinuxInterfaceDriver.DEV_NAME_LEN)
self.mock_iproute = mock.MagicMock()
driver_cls.return_value = self.mock_driver
iproute_cls.return_value = self.mock_iproute
def tearDown(self):
self.dvr_cls_p.stop()
self.device_exists_p.stop()
self.iproute_cls_p.stop()
cfg.CONF.reset()
super(TestDeviceManager, self).tearDown()
def _test_setup_helper(self, device_exists, reuse_existing=False,
net=None, port=None):
net = net or fake_network
port = port or fake_port1
plugin = mock.Mock()
plugin.create_dhcp_port.return_value = port or fake_port1
plugin.get_dhcp_port.return_value = port or fake_port1
self.device_exists.return_value = device_exists
self.mock_driver.get_device_name.return_value = 'tap12345678-12'
dh = dhcp.DeviceManager(cfg.CONF, cfg.CONF.root_helper, plugin)
dh._set_default_route = mock.Mock()
interface_name = dh.setup(net, reuse_existing)
self.assertEqual(interface_name, 'tap12345678-12')
plugin.assert_has_calls([
mock.call.create_dhcp_port(
{'port': {'name': '', 'admin_state_up': True,
'network_id': net.id, 'tenant_id': net.tenant_id,
'fixed_ips':
[{'subnet_id': fake_fixed_ip1.subnet_id}],
'device_id': mock.ANY}})])
expected_ips = ['172.9.9.9/24', '169.254.169.254/16']
expected = [
mock.call.get_device_name(port),
mock.call.init_l3(
'tap12345678-12',
expected_ips,
namespace=net.namespace)]
if not reuse_existing:
expected.insert(1,
mock.call.plug(net.id,
port.id,
'tap12345678-12',
'aa:bb:cc:dd:ee:ff',
namespace=net.namespace))
self.mock_driver.assert_has_calls(expected)
dh._set_default_route.assert_called_once_with(net)
def test_setup(self):
cfg.CONF.set_override('enable_metadata_network', False)
self._test_setup_helper(False)
cfg.CONF.set_override('enable_metadata_network', True)
self._test_setup_helper(False)
def test_setup_device_exists(self):
with testtools.ExpectedException(exceptions.PreexistingDeviceFailure):
self._test_setup_helper(True)
def test_setup_device_exists_reuse(self):
self._test_setup_helper(True, True)
def test_create_dhcp_port_raise_conflict(self):
plugin = mock.Mock()
dh = dhcp.DeviceManager(cfg.CONF, cfg.CONF.root_helper, plugin)
plugin.create_dhcp_port.return_value = None
self.assertRaises(exceptions.Conflict,
dh.setup_dhcp_port,
fake_network)
def test_create_dhcp_port_create_new(self):
plugin = mock.Mock()
dh = dhcp.DeviceManager(cfg.CONF, cfg.CONF.root_helper, plugin)
plugin.create_dhcp_port.return_value = fake_network.ports[0]
dh.setup_dhcp_port(fake_network)
plugin.assert_has_calls([
mock.call.create_dhcp_port(
{'port': {'name': '', 'admin_state_up': True,
'network_id':
fake_network.id, 'tenant_id': fake_network.tenant_id,
'fixed_ips':
[{'subnet_id': fake_fixed_ip1.subnet_id}],
'device_id': mock.ANY}})])
def test_create_dhcp_port_update_add_subnet(self):
plugin = mock.Mock()
dh = dhcp.DeviceManager(cfg.CONF, cfg.CONF.root_helper, plugin)
fake_network_copy = copy.deepcopy(fake_network)
fake_network_copy.ports[0].device_id = dh.get_device_id(fake_network)
fake_network_copy.subnets[1].enable_dhcp = True
plugin.update_dhcp_port.return_value = fake_network.ports[0]
dh.setup_dhcp_port(fake_network_copy)
port_body = {'port': {
'fixed_ips': [{'subnet_id': fake_fixed_ip1.subnet_id,
'ip_address': fake_fixed_ip1.ip_address},
{'subnet_id': fake_subnet2.id}]}}
plugin.assert_has_calls([
mock.call.update_dhcp_port(fake_network_copy.ports[0].id,
port_body)])
def test_update_dhcp_port_raises_conflict(self):
plugin = mock.Mock()
dh = dhcp.DeviceManager(cfg.CONF, cfg.CONF.root_helper, plugin)
fake_network_copy = copy.deepcopy(fake_network)
fake_network_copy.ports[0].device_id = dh.get_device_id(fake_network)
fake_network_copy.subnets[1].enable_dhcp = True
plugin.update_dhcp_port.return_value = None
self.assertRaises(exceptions.Conflict,
dh.setup_dhcp_port,
fake_network_copy)
def test_create_dhcp_port_no_update_or_create(self):
plugin = mock.Mock()
dh = dhcp.DeviceManager(cfg.CONF, cfg.CONF.root_helper, plugin)
fake_network_copy = copy.deepcopy(fake_network)
fake_network_copy.ports[0].device_id = dh.get_device_id(fake_network)
dh.setup_dhcp_port(fake_network_copy)
self.assertFalse(plugin.setup_dhcp_port.called)
self.assertFalse(plugin.update_dhcp_port.called)
def test_destroy(self):
fake_net = dhcp.NetModel(
True, dict(id='12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa'))
fake_port = dhcp.DictModel(
dict(id='12345678-1234-aaaa-1234567890ab',
mac_address='aa:bb:cc:dd:ee:ff'))
with mock.patch('neutron.agent.linux.interface.NullDriver') as dvr_cls:
mock_driver = mock.MagicMock()
mock_driver.get_device_name.return_value = 'tap12345678-12'
dvr_cls.return_value = mock_driver
plugin = mock.Mock()
plugin.get_dhcp_port.return_value = fake_port
dh = dhcp.DeviceManager(cfg.CONF, cfg.CONF.root_helper, plugin)
dh.destroy(fake_net, 'tap12345678-12')
dvr_cls.assert_called_once_with(cfg.CONF)
mock_driver.assert_has_calls(
[mock.call.unplug('tap12345678-12',
namespace='qdhcp-' + fake_net.id)])
plugin.assert_has_calls(
[mock.call.release_dhcp_port(fake_net.id, mock.ANY)])
def test_get_interface_name(self):
fake_net = dhcp.NetModel(
True, dict(id='12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa'))
fake_port = dhcp.DictModel(
dict(id='12345678-1234-aaaa-1234567890ab',
mac_address='aa:bb:cc:dd:ee:ff'))
with mock.patch('neutron.agent.linux.interface.NullDriver') as dvr_cls:
mock_driver = mock.MagicMock()
mock_driver.get_device_name.return_value = 'tap12345678-12'
dvr_cls.return_value = mock_driver
plugin = mock.Mock()
plugin.get_dhcp_port.return_value = fake_port
dh = dhcp.DeviceManager(cfg.CONF, cfg.CONF.root_helper, plugin)
dh.get_interface_name(fake_net, fake_port)
dvr_cls.assert_called_once_with(cfg.CONF)
mock_driver.assert_has_calls(
[mock.call.get_device_name(fake_port)])
self.assertEqual(len(plugin.mock_calls), 0)
def test_get_device_id(self):
fake_net = dhcp.NetModel(
True, dict(id='12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa'))
expected = ('dhcp1ae5f96c-c527-5079-82ea-371a01645457-12345678-1234-'
'5678-1234567890ab')
with mock.patch('socket.gethostbyname') as get_host:
with mock.patch('uuid.uuid5') as uuid5:
uuid5.return_value = '1ae5f96c-c527-5079-82ea-371a01645457'
get_host.return_value = 'localhost'
dh = dhcp.DeviceManager(cfg.CONF, cfg.CONF.root_helper, None)
                self.assertEqual(dh.get_device_id(fake_net), expected)
                uuid5.assert_called_once_with(uuid.NAMESPACE_DNS, 'localhost')
def _get_device_manager_with_mock_device(self, conf, device):
dh = dhcp.DeviceManager(conf, cfg.CONF.root_helper, None)
dh._get_device = mock.Mock(return_value=device)
return dh
def test_update(self):
# Try with namespaces and no metadata network
cfg.CONF.set_override('use_namespaces', True)
cfg.CONF.set_override('enable_metadata_network', False)
dh = dhcp.DeviceManager(cfg.CONF, cfg.CONF.root_helper, None)
dh._set_default_route = mock.Mock()
dh.update(True)
dh._set_default_route.assert_called_once_with(True)
# No namespaces, shouldn't set default route.
cfg.CONF.set_override('use_namespaces', False)
cfg.CONF.set_override('enable_metadata_network', False)
dh = dhcp.DeviceManager(cfg.CONF, cfg.CONF.root_helper, None)
dh._set_default_route = mock.Mock()
dh.update(FakeV4Network())
self.assertFalse(dh._set_default_route.called)
# Meta data network enabled, don't interfere with its gateway.
cfg.CONF.set_override('use_namespaces', True)
cfg.CONF.set_override('enable_metadata_network', True)
dh = dhcp.DeviceManager(cfg.CONF, cfg.CONF.root_helper, None)
dh._set_default_route = mock.Mock()
dh.update(FakeV4Network())
self.assertTrue(dh._set_default_route.called)
# For completeness
cfg.CONF.set_override('use_namespaces', False)
cfg.CONF.set_override('enable_metadata_network', True)
dh = dhcp.DeviceManager(cfg.CONF, cfg.CONF.root_helper, None)
dh._set_default_route = mock.Mock()
dh.update(FakeV4Network())
self.assertFalse(dh._set_default_route.called)
def test_set_default_route(self):
device = mock.Mock()
device.route.get_gateway.return_value = None
# Basic one subnet with gateway.
dh = self._get_device_manager_with_mock_device(cfg.CONF, device)
network = FakeV4Network()
dh._set_default_route(network)
device.route.get_gateway.assert_called_once()
self.assertFalse(device.route.delete_gateway.called)
device.route.add_gateway.assert_called_once_with('192.168.0.1')
def test_set_default_route_no_subnet(self):
device = mock.Mock()
device.route.get_gateway.return_value = None
# Try a namespace but no subnet.
dh = self._get_device_manager_with_mock_device(cfg.CONF, device)
network = FakeV4NetworkNoSubnet()
dh._set_default_route(network)
device.route.get_gateway.assert_called_once()
self.assertFalse(device.route.delete_gateway.called)
self.assertFalse(device.route.add_gateway.called)
def test_set_default_route_no_subnet_delete_gateway(self):
device = mock.Mock()
device.route.get_gateway.return_value = dict(gateway='192.168.0.1')
# Try a namespace but no subnet where a gateway needs to be deleted.
dh = self._get_device_manager_with_mock_device(cfg.CONF, device)
network = FakeV4NetworkNoSubnet()
dh._set_default_route(network)
device.route.get_gateway.assert_called_once()
device.route.delete_gateway.assert_called_once_with('192.168.0.1')
self.assertFalse(device.route.add_gateway.called)
def test_set_default_route_no_gateway(self):
device = mock.Mock()
device.route.get_gateway.return_value = dict(gateway='192.168.0.1')
# Try a subnet with no gateway
dh = self._get_device_manager_with_mock_device(cfg.CONF, device)
network = FakeV4NetworkNoGateway()
dh._set_default_route(network)
device.route.get_gateway.assert_called_once()
device.route.delete_gateway.assert_called_once_with('192.168.0.1')
self.assertFalse(device.route.add_gateway.called)
def test_set_default_route_do_nothing(self):
device = mock.Mock()
device.route.get_gateway.return_value = dict(gateway='192.168.0.1')
# Try a subnet where the gateway doesn't change. Should do nothing.
dh = self._get_device_manager_with_mock_device(cfg.CONF, device)
network = FakeV4Network()
dh._set_default_route(network)
device.route.get_gateway.assert_called_once()
self.assertFalse(device.route.delete_gateway.called)
self.assertFalse(device.route.add_gateway.called)
def test_set_default_route_change_gateway(self):
device = mock.Mock()
device.route.get_gateway.return_value = dict(gateway='192.168.0.2')
# Try a subnet with a gateway this is different than the current.
dh = self._get_device_manager_with_mock_device(cfg.CONF, device)
network = FakeV4Network()
dh._set_default_route(network)
device.route.get_gateway.assert_called_once()
self.assertFalse(device.route.delete_gateway.called)
device.route.add_gateway.assert_called_once_with('192.168.0.1')
def test_set_default_route_two_subnets(self):
device = mock.Mock()
device.route.get_gateway.return_value = None
# Try two subnets. Should set gateway from the first.
dh = self._get_device_manager_with_mock_device(cfg.CONF, device)
network = FakeV4Network()
subnet2 = FakeV4Subnet()
subnet2.gateway_ip = '192.168.1.1'
network.subnets = [subnet2, FakeV4Subnet()]
dh._set_default_route(network)
device.route.get_gateway.assert_called_once()
self.assertFalse(device.route.delete_gateway.called)
device.route.add_gateway.assert_called_once_with('192.168.1.1')
class TestDictModel(base.BaseTestCase):
def test_basic_dict(self):
d = dict(a=1, b=2)
m = dhcp.DictModel(d)
self.assertEqual(m.a, 1)
self.assertEqual(m.b, 2)
def test_dict_has_sub_dict(self):
d = dict(a=dict(b=2))
m = dhcp.DictModel(d)
self.assertEqual(m.a.b, 2)
def test_dict_contains_list(self):
d = dict(a=[1, 2])
m = dhcp.DictModel(d)
self.assertEqual(m.a, [1, 2])
def test_dict_contains_list_of_dicts(self):
d = dict(a=[dict(b=2), dict(c=3)])
m = dhcp.DictModel(d)
self.assertEqual(m.a[0].b, 2)
self.assertEqual(m.a[1].c, 3)
class TestNetModel(base.BaseTestCase):
def test_ns_name(self):
network = dhcp.NetModel(True, {'id': 'foo'})
self.assertEqual(network.namespace, 'qdhcp-foo')
def test_ns_name_false_namespace(self):
network = dhcp.NetModel(False, {'id': 'foo'})
self.assertIsNone(network.namespace)
def test_ns_name_none_namespace(self):
network = dhcp.NetModel(None, {'id': 'foo'})
self.assertIsNone(network.namespace)
| apache-2.0 | 131,708,714,163,254,300 | 41.263087 | 79 | 0.571063 | false |
Jeff-Tian/mybnb | Python27/Lib/test/test_zipfile64.py | 1 | 6052 | # Tests of the full ZIP64 functionality of zipfile
# The test_support.requires call is the only reason for keeping this separate
# from test_zipfile
from test import test_support
# XXX(nnorwitz): disable this test by looking for extra largefile resource
# which doesn't exist. This test takes over 30 minutes to run in general
# and requires more disk space than most of the buildbots.
test_support.requires(
'extralargefile',
'test requires loads of disk-space bytes and a long time to run'
)
# We can test part of the module without zlib.
try:
import zlib
except ImportError:
zlib = None
import zipfile, os, unittest
import time
import sys
from tempfile import TemporaryFile
from test.test_support import TESTFN, run_unittest
TESTFN2 = TESTFN + "2"
# How much time in seconds can pass before we print a 'Still working' message.
_PRINT_WORKING_MSG_INTERVAL = 5 * 60
class TestsWithSourceFile(unittest.TestCase):
def setUp(self):
# Create test data.
# xrange() is important here -- don't want to create immortal space
# for a million ints.
line_gen = ("Test of zipfile line %d." % i for i in xrange(1000000))
self.data = '\n'.join(line_gen)
# And write it to a file.
fp = open(TESTFN, "wb")
fp.write(self.data)
fp.close()
def zipTest(self, f, compression):
# Create the ZIP archive.
zipfp = zipfile.ZipFile(f, "w", compression, allowZip64=True)
# It will contain enough copies of self.data to reach about 6GB of
# raw data to store.
filecount = 6*1024**3 // len(self.data)
next_time = time.time() + _PRINT_WORKING_MSG_INTERVAL
for num in range(filecount):
zipfp.writestr("testfn%d" % num, self.data)
# Print still working message since this test can be really slow
if next_time <= time.time():
next_time = time.time() + _PRINT_WORKING_MSG_INTERVAL
print >>sys.__stdout__, (
' zipTest still writing %d of %d, be patient...' %
(num, filecount))
sys.__stdout__.flush()
zipfp.close()
# Read the ZIP archive
zipfp = zipfile.ZipFile(f, "r", compression)
for num in range(filecount):
self.assertEqual(zipfp.read("testfn%d" % num), self.data)
# Print still working message since this test can be really slow
if next_time <= time.time():
next_time = time.time() + _PRINT_WORKING_MSG_INTERVAL
print >>sys.__stdout__, (
' zipTest still reading %d of %d, be patient...' %
(num, filecount))
sys.__stdout__.flush()
zipfp.close()
def testStored(self):
# Try the temp file first. If we do TESTFN2 first, then it hogs
# gigabytes of disk space for the duration of the test.
for f in TemporaryFile(), TESTFN2:
self.zipTest(f, zipfile.ZIP_STORED)
if zlib:
def testDeflated(self):
# Try the temp file first. If we do TESTFN2 first, then it hogs
# gigabytes of disk space for the duration of the test.
for f in TemporaryFile(), TESTFN2:
self.zipTest(f, zipfile.ZIP_DEFLATED)
def tearDown(self):
for fname in TESTFN, TESTFN2:
if os.path.exists(fname):
os.remove(fname)
class OtherTests(unittest.TestCase):
def testMoreThan64kFiles(self):
# This test checks that more than 64k files can be added to an archive,
# and that the resulting archive can be read properly by ZipFile
zipf = zipfile.ZipFile(TESTFN, mode="w", allowZip64=True)
zipf.debug = 100
numfiles = (1 << 16) * 3/2
for i in xrange(numfiles):
zipf.writestr("foo%08d" % i, "%d" % (i**3 % 57))
self.assertEqual(len(zipf.namelist()), numfiles)
zipf.close()
zipf2 = zipfile.ZipFile(TESTFN, mode="r")
self.assertEqual(len(zipf2.namelist()), numfiles)
for i in xrange(numfiles):
self.assertEqual(zipf2.read("foo%08d" % i), "%d" % (i**3 % 57))
zipf2.close()
def testMoreThan64kFilesAppend(self):
zipf = zipfile.ZipFile(TESTFN, mode="w", allowZip64=False)
zipf.debug = 100
numfiles = (1 << 16) - 1
for i in range(numfiles):
zipf.writestr("foo%08d" % i, "%d" % (i**3 % 57))
self.assertEqual(len(zipf.namelist()), numfiles)
with self.assertRaises(zipfile.LargeZipFile):
zipf.writestr("foo%08d" % numfiles, b'')
self.assertEqual(len(zipf.namelist()), numfiles)
zipf.close()
zipf = zipfile.ZipFile(TESTFN, mode="a", allowZip64=False)
zipf.debug = 100
self.assertEqual(len(zipf.namelist()), numfiles)
with self.assertRaises(zipfile.LargeZipFile):
zipf.writestr("foo%08d" % numfiles, b'')
self.assertEqual(len(zipf.namelist()), numfiles)
zipf.close()
zipf = zipfile.ZipFile(TESTFN, mode="a", allowZip64=True)
zipf.debug = 100
self.assertEqual(len(zipf.namelist()), numfiles)
numfiles2 = (1 << 16) * 3//2
for i in range(numfiles, numfiles2):
zipf.writestr("foo%08d" % i, "%d" % (i**3 % 57))
self.assertEqual(len(zipf.namelist()), numfiles2)
zipf.close()
zipf2 = zipfile.ZipFile(TESTFN, mode="r")
self.assertEqual(len(zipf2.namelist()), numfiles2)
for i in range(numfiles2):
self.assertEqual(zipf2.read("foo%08d" % i), "%d" % (i**3 % 57))
zipf2.close()
def tearDown(self):
test_support.unlink(TESTFN)
test_support.unlink(TESTFN2)
def test_main():
run_unittest(TestsWithSourceFile, OtherTests)
if __name__ == "__main__":
test_main()
| apache-2.0 | -6,163,582,844,137,052,000 | 36.062893 | 79 | 0.583609 | false |
dladd/pyFormex | pyformex/examples/TrussFrame.py | 1 | 2483 | # $Id$
##
## This file is part of pyFormex 0.8.9 (Fri Nov 9 10:49:51 CET 2012)
## pyFormex is a tool for generating, manipulating and transforming 3D
## geometrical models by sequences of mathematical operations.
## Home page: http://pyformex.org
## Project page: http://savannah.nongnu.org/projects/pyformex/
## Copyright 2004-2012 (C) Benedict Verhegghe ([email protected])
## Distributed under the GNU General Public License version 3 or later.
##
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see http://www.gnu.org/licenses/.
##
"""TrussFrame
"""
from __future__ import print_function
_status = 'checked'
_level = 'normal'
_topics = ['geometry']
_techniques = ['color']
from gui.draw import *
def run():
clear()
yf = [ 0.0, 0.2, 1.2, 2.2, 3.2, 4.2, 4.5 ] # y of nodes in frame columns
a = Formex([[[0.0,y]] for y in yf ])
b = connect([a,a],bias=[0,1]).translate([0.5,0.0,0.0])
b.setProp(3)
c = b.reflect(0)
d = connect([b,c],bias=[1,1])
d.setProp(2)
e = connect([b,c],bias=[1,2]).select([0,2]) + connect([b,c],bias=[2,1]).select([1,3])
e.setProp(1)
col = b+c+d+e
frame = col.translate([-4.0,0.0,0.0]) + col.translate([+4.0,0.0,0.0])
    # Roof girder ("Dakligger")
    h0 = 1.2 # height at mid-span
    h1 = 0.5 # height at the ends
    xd = [ 0, 0.6 ] + [ 0.6+i*1.2 for i in range(5)] # horizontal positions of the nodes
ko = Formex([[[x,0.0]] for x in xd])
ond = connect([ko,ko],bias=[0,1])
bov = ond.translate(1,h0).shear(1,0,(h1-h0)/xd[-1])
tss = connect([ond,bov],bias=[1,1])
ond.setProp(2)
bov.setProp(4)
tss.setProp(5)
dakligger = (ond+bov+tss)
dakligger += dakligger.reflect(0)
frame += dakligger.translate([0,yf[-1],0])
draw(frame)
structure = frame.replic2(2,6,12.,3.,0,2)
clear()
draw(structure)
view('top')
view('right')
view('iso')
if __name__ == 'draw':
run()
# End
| gpl-3.0 | -147,417,965,538,401,250 | 32.106667 | 89 | 0.631091 | false |
BigEgg/LeetCode | Python/LeetCode/_051_100/_068_TextJustification.py | 1 | 1097 | class Solution:
def fullJustify(self, words: [str], maxWidth: int) -> [str]:
        result = []
        start_index, current_length = 0, 0
        for i in range(len(words)):
            # current_length counts every packed word plus one trailing space,
            # so the next word no longer fits once this check trips.
            if current_length + len(words[i]) > maxWidth:
                # Total spaces to distribute over words[start_index:i].
                space = maxWidth - current_length + (i - start_index)
                new_line = ''
                for j in range(start_index, i):
                    new_line += words[j]
                    if j == i - 1:
                        # Last word of a full line: pad to the right edge
                        # (non-zero only when the line holds a single word).
                        space_count = maxWidth - len(new_line)
                    else:
                        # Spread spaces evenly; the leftmost gaps take the extras.
                        space_count = space // (i - start_index - 1) + \
                            (1 if (j - start_index < (space % (i - start_index - 1))) else 0)
                    new_line += ' ' * space_count
                result.append(new_line)
                current_length = 0
                start_index = i
            current_length += len(words[i]) + 1
        # The final line is left-justified: single spaces, then pad to maxWidth.
        new_line = ''
        for j in range(start_index, len(words)):
            new_line += words[j]
            space_count = 1 if j != len(words) - 1 else maxWidth - len(new_line)
            new_line += ' ' * space_count
        result.append(new_line)
        return result
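# Illustrative usage (not part of the original submission); the input and the
# expected output follow the classic LeetCode #68 example.
if __name__ == '__main__':
    demo_words = ["This", "is", "an", "example", "of", "text", "justification."]
    for line in Solution().fullJustify(demo_words, 16):
        print(repr(line))
    # Expected:
    # 'This    is    an'
    # 'example  of text'
    # 'justification.  '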
| mit | -1,006,641,651,837,325,300 | 35.566667 | 176 | 0.483136 | false |
ehuelsmann/openipam | openIPAM/openipam/web/access.py | 1 | 3170 | import cherrypy
from basepage import BasePage
import framework
from openipam.web.resource.submenu import submenu
from openipam.config import frontend
perms = frontend.perms
class Access(BasePage):
'''The access class. This includes all pages that are /access/*'''
def __init__(self):
BasePage.__init__(self)
# Object for wrapping HTML into the template
self.__template = framework.Basics("access")
#-----------------------------------------------------------------
# PUBLISHED FUNCTIONS
#-----------------------------------------------------------------
#-----------------------------------------------------------------
# EXPOSED FUNCTIONS
#-----------------------------------------------------------------
@cherrypy.expose
def index(self):
"""The user management page"""
# Confirm user authentication
self.check_session()
domains_text = []
networks_text = []
hosts_text = []
domains = self.webservice.get_domains( { 'additional_perms' : str(perms.ADD) } )
if not domains:
domains_text.append("<p>You do not have access to add hosts in any domains.</p>")
else:
domains_text.append("<p>You have access to add hosts in the following domains:</p>")
rows = []
# The template HTML for every item
item_template = '''<tr class="info">
<td>%(name)s</td>
<td>%(description)s</td>
</tr>
'''
# Go through the query and make the table HTML using the template
for domain in domains:
rows.append(item_template % (domain))
# Combine all the parts into the table
domains_text.append('''
<table class="infoTable">
<thead>
<tr>
<th>Name</th>
<th>Description</th>
</tr>
</thead>
<tbody>
%s
</tbody>
</table>
''' % ''.join(rows))
networks = self.webservice.get_networks( { 'additional_perms' : str(perms.ADD) } )
if not networks:
networks_text.append("<p>You do not have access to add static IP addresses to any networks.</p>")
else:
networks_text.append("<p>You have access to add static IP addresses to these networks:</p>")
rows = []
# The template HTML for every item
item_template = '''<tr class="info">
<td>%(network)s</td>
<td>%(name)s</td>
<td>%(gateway)s</td>
<td>%(description)s</td>
</tr>
'''
# Go through the query and make the table HTML using the template
for network in networks:
rows.append(item_template % (network))
# Combine all the parts into the table
networks_text.append('''
<table class="infoTable">
<thead>
<tr>
<th>Network (CIDR)</th>
<th>Name</th>
<th>Gateway</th>
<th>Description</th>
</tr>
</thead>
<tbody>
%s
</tbody>
</table>
''' % ''.join(rows))
maincontent = '''
<h1>My Access</h1>
%s
<h2>Domains</h2>
%s
<h2>Networks</h2>
%s
''' % (frontend.my_access_text, ''.join(domains_text), ''.join(networks_text))
return self.__template.wrap(maincontent)
#-----------------------------------------------------------------
| gpl-3.0 | 1,357,530,256,039,712,000 | 24.15873 | 100 | 0.529022 | false |
oliverlee/pydy | examples/simple_pendulum/run.py | 3 | 1046 | # This code requires sympy 1.0 to run
from sympy import *
from sympy.physics.mechanics import LagrangesMethod, Lagrangian
from sympy.physics.mechanics import ReferenceFrame, Particle, Point
from sympy.physics.mechanics import dynamicsymbols
# System state variables
theta = dynamicsymbols('theta')
thetad = dynamicsymbols('theta', 1)
# Other system variables
m, l, g = symbols('m l g')
# Set up the reference frames
# Reference frame A set up in the plane perpendicular to the page containing
# segment OP
N = ReferenceFrame('N')
A = N.orientnew('A', 'Axis', [theta, N.z])
# Set up the points and particles
O = Point('O')
P = O.locatenew('P', l * A.x)
Pa = Particle('Pa', P, m)
# Set up velocities
A.set_ang_vel(N, thetad * N.z)
O.set_vel(N, 0)
P.v2pt_theory(O, N, A)
# Set up the lagrangian
L = Lagrangian(N, Pa)
# Create the list of forces acting on the system
fl = [(P, g * m * N.x)]
# Create the equations of motion using lagranges method
l = LagrangesMethod(L, [theta], forcelist=fl, frame=N)
pprint(l.form_lagranges_equations())
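# For reference (not part of the original script): with the definitions above,
# the printed matrix should reduce to the familiar pendulum equation
#   m*l**2*theta'' + g*l*m*sin(theta) = 0,
# though the exact ordering and symbol formatting depend on the SymPy version.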
| bsd-3-clause | 2,658,679,534,334,844,000 | 24.512195 | 76 | 0.715105 | false |
vtsuperdarn/davitpy | davitpy/pydarn/sdio/radDataRead.py | 1 | 13124 | # Copyright (C) 2012 VT SuperDARN Lab
# Full license can be found in LICENSE.txt
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
.. module:: radDataRead
:synopsis: A module for reading radar data (iqdat, raw, fit)
.. moduleauthor:: AJ, 20130110
************************************
**Module**: pydarn.sdio.radDataRead
************************************
Functions
----------
:func:`pydarn.sdio.radDataRead.radDataOpen`
:func:`pydarn.sdio.radDataRead.radDataReadRec`
:func:`pydarn.sdio.radDataRead.radDataReadScan`
:func:`pydarn.sdio.radDataRead.radDataReadAll`
:func:`pydarn.sdio.radDataRead.radDataCreateIndex`
"""
import logging
def radDataOpen(sTime, radcode, eTime=None, channel=None, bmnum=None, cp=None,
fileType='fitex', filtered=False, src=None, fileName=None,
noCache=False, local_dirfmt=None, local_fnamefmt=None,
local_dict=None, remote_dirfmt=None, remote_fnamefmt=None,
remote_dict=None, remote_site=None, username=None,
password=None, port=None, tmpdir=None, remove=False,
try_file_types=True):
"""A function to establish a pipeline through which we can read radar data.
first it tries the mongodb, then it tries to find local files, and lastly
it sftp's over to the VT data server.
Parameters
-----------
sTime : (datetime)
The beginning time for which you want data
radcode : (str)
The 3-letter radar code with optional channel extension for which you
want data
eTime : (datetime/NoneType)
The last time that you want data for. If this is set to None, it will
be set to 1 day after sTime. (default=None)
channel : (str/NoneType)
The 1-letter code for what channel you want data from, eg 'a','b',...
if this is set to None, data from ALL channels will be read.
(default=None)
bmnum : (int/NoneType)
The beam number which you want data for. If this is set to None, data
from all beams will be read. (default=None)
cp : (int)
The control program which you want data for. If this is set to None,
data from all cp's will be read. (default=None)
fileType : (str)
The type of data you want to read. valid inputs are: 'fitex','fitacf',
'fitacf3','lmfit','rawacf','iqdat'. If you choose a fit file format
and the specified one isn't found, we will search for one of the
others. Beware: if you ask for rawacf/iq data, these files are large
and the data transfer might take a long time. (default='fitex')
filtered : (boolean)
A boolean specifying whether you want the fit data to be boxcar
filtered. ONLY VALID FOR FIT. (default=False)
src : (str/NoneType)
        The source of the data. Valid inputs are 'local' and 'sftp'. If this
        is set to None, it will try all possibilities sequentially.
        (default=None)
fileName : (str/NoneType)
The name of a specific file which you want to open. (default=None)
noCache : (boolean)
Flag to indicate that you do not want to check first for cached files.
(default=False)
remote_site : (str/NoneType)
The remote data server's address. If None, the rcParam value DB will be
used. (default=None)
port : (str/NoneType)
The port number to use for remote_site. If None, the rcParam value
DB_PORT will be used. (default=None)
username : (str/NoneType)
Username for remote_site. If None, the rcParam value DBREADUSER will
be used. (default=None)
password : (str/bool/NoneType)
Password for remote_site. If password is set to True, the user is
prompted for the remote_site password. If set to None, the rcParam
value DBREADPASS will be used (default=None)
remote_dirfmt : (str/NoneType)
The remote_site directory structure. Can include keywords to be
replaced by dictionary keys in remote_dict. If None, the rcParam value
DAVIT_REMOTE_DIRFORMAT will be used. (default=None)
Ex) remote_dirfmt='/{year}/{month}'
remote_fnamefmt : (str/list/NoneType)
The remote_site file naming format. Can include keywords to be replaced
by dictionary keys in remote_dict. If None, the rcParam value
DAVIT_REMOTE_FNAMEFMT will be used. (default=None)
Ex) remote_fnamefmt=['{date}.{radar}.{ftype}',
'{date}.{channel}.{radar}.{ftype}']
local_dirfmt : (str/None)
The local directory structure. Can include keywords to be replaced by
dictionary keys in remote_dict. If None, the rcParam value
DAVIT_LOCAL_DIRFORMAT will be used. (default=None)
Ex) local_dirfmt='/{year}/{month}'
local_fnamefmt : (str/list/NoneType)
The local file naming format. Can include keywords to be replaced by
dictionary keys in remote_dict. If None, the rcParam value
DAVIT_LOCAL_FNAMEFMT will be used. (default=None)
Ex) local_fnamefmt=['{date}.{radar}.{ftype}',
'{date}.{channel}.{radar}.{ftype}']
tmpdir : (str/NoneType)
The directory in which to store temporary files. If None, the rcParam
value DAVIT_TMPDIR will be used. (default=None)
remove : (bool)
Remove compressed file after uncompression (default=False)
try_file_types : (bool)
If desired file type could not be found, try to download others
(default=True)
Returns
--------
myPtr : (pydarn.sdio.radDataTypes.radDataPtr)
A radDataPtr object which contains a link to the data to be read.
This can then be passed to radDataReadRec in order to actually read the
data.
Notes
-------
The evironment variables are python dictionary capable formatted strings
appended encode radar name, channel, and/or date information. Currently
supported dictionary keys which can be used are:
"date" : datetime.datetime.strftime("%Y%m%d")
"year" : 0 padded 4 digit year
"month" : 0 padded 2 digit month
"day" : 0 padded 2 digit day
"hour" : 0 padded 2 digit day
"ftype" : filetype string
"radar" : 3-chr radarcode
"channel" : single character string, ex) 'a'
Example
----------
::
import datetime as dt
myPtr = pydarn.sdio.radDataOpen(dt.datetime(2011,1,1),'bks', \
eTime=dt.datetime(2011,1,1,2),channel=None, bmnum=7,cp=153, \
fileType='fitex',filtered=False, src=None)
Written by AJ 20130110
"""
from davitpy.pydarn.sdio import radDataPtr
from davitpy.pydarn.radar import network
myPtr = radDataPtr(sTime=sTime, radcode=radcode, eTime=eTime,
channel=channel, bmnum=bmnum, cp=cp, fileType=fileType,
filtered=filtered, src=src, fileName=fileName,
noCache=noCache, local_dirfmt=local_dirfmt,
local_fnamefmt=local_fnamefmt, local_dict=local_dict,
remote_dirfmt=remote_dirfmt, remote_dict=remote_dict,
remote_fnamefmt=remote_fnamefmt, remote_site=remote_site,
username=username, port=port, password=password,
stid=int(network().getRadarByCode(radcode).id),
tmpdir=tmpdir, remove=remove,
try_file_types=try_file_types)
return myPtr
def radDataReadRec(my_ptr):
"""A function to read a single record of radar data from a
:class:`pydarn.sdio.radDataTypes.radDataPtr` object
Parameters
------------
my_ptr : (pydarn.sdio.radDataTypes.radDataPtr)
Contains the pipeline to the data we are after.
Returns
---------
my_beam : (pydarn.sdio.radDataTypes.beamData/NoneType)
An object filled with the data we are after. Will return None when
finished reading.
Example
---------
::
import datetime as dt
my_ptr = radDataOpen(dt.datetime(2011,1,1),'bks', \
eTime=dt.datetime(2011,1,1,2),channel=None,bmnum=7,cp=153,
fileType='fitex',filtered=False,src=None)
my_beam = radDataReadRec(my_ptr)
Notes
------
To use this, you must first create a
:class:`pydarn.sdio.radDataTypes.radDataPtr` object with
:func:`radDataOpen`
Written by AJ 20130110
"""
from davitpy.pydarn.sdio import radDataPtr
assert isinstance(my_ptr, radDataPtr), \
logging.error('input must be of type radDataPtr')
return my_ptr.readRec()
def radDataReadScan(my_ptr):
"""A function to read a full scan of data from a
:class:`pydarn.sdio.radDataTypes.radDataPtr` object
Parameters
-----------
my_ptr : (pydarn.sdio.radDataTypes.radDataPtr)
Contains the pipeline to the data we are after
Returns
--------
my_scan : (pydarn.sdio.radDataTypes.scanData)
A class created to define a list of pydarn.sdio.radDataTypes.beamData
objects, filled with a scan (pattern of beams) of data from the
specified pipeline. The pointer will return None when finished reading.
Example
--------
::
import datetime as dt
my_ptr = radDataOpen(dt.datetime(2011,1,1),'bks', \
eTime=dt.datetime(2011,1,1,2),channel=None, bmnum=7,cp=153, \
fileType='fitex',filtered=False, src=None):
my_scan = radDataReadScan(my_ptr)
Notes
-------
To use this, you must first create a
:class:`pydarn.sdio.radDataTypes.radDataPtr` object with :func:`radDataOpen`
This will ignore any beam number (bmnum) request. Also, if no channel was
specified in radDataOpen, it will only read channel 'a'
Written by AJ 20130110
"""
from davitpy.pydarn.sdio import radDataPtr
# check input
assert isinstance(my_ptr, radDataPtr), \
logging.error('input must be of type radDataPtr')
return my_ptr.readScan()
def radDataCreateIndex(my_ptr):
"""A function to index radar data into dict from a
:class:`pydarn.sdio.radDataTypes.radDataPtr` object
Parameters
-----------
my_ptr : (pydarn.sdio.radDataTypes.radDataPtr)
Contains the pipeline to the data we are after
Returns
--------
my_index : (dict)
        A dictionary with keys recording the time of each beam in the specified
pointer and the value corresponding to the location for that record
in the data file (byte offsets in the file).
Example
---------
::
import datetime as dt
my_ptr = radDataOpen(dt.datetime(2011,1,1),'bks', \
eTime=dt.datetime(2011,1,1,2),channel=None, bmnum=7,cp=153, \
fileType='fitex',filtered=False, src=None)
my_index = radDataCreateIndex(my_ptr)
Notes
------
To use this, you must first create a
:class:`pydarn.sdio.radDataTypes.radDataPtr` object with :func:`radDataOpen`
Written by JDS 20140606
"""
from davitpy.pydarn.sdio.radDataTypes import radDataPtr
assert isinstance(my_ptr, radDataPtr), \
logging.error('input must be of type radDataPtr')
return my_ptr.createIndex()
def radDataReadAll(my_ptr):
"""A function to read a large amount (to the end of the request) of radar
data into a list from a :class:`pydarn.sdio.radDataTypes.radDataPtr` object
Parameters
-----------
my_ptr : (pydarn.sdio.radDataTypes.radDataPtr)
Contains the pipeline to the data we are after
Returns
----------
my_list : (list)
A list filled with pydarn.sdio.radDataTypes.scanData objects holding
the data we are after. The list will contain None if nothing is found.
Example
-----------
::
import datetime as dt
my_ptr = radDataOpen(dt.datetime(2011,1,1),'bks', \
eTime=dt.datetime(2011,1,1,2),channel=None, bmnum=7,cp=153, \
fileType='fitex',filtered=False, src=None)
my_list = radDataReadAll(my_ptr)
Notes
------
To use this, you must first create a
:class:`pydarn.sdio.radDataTypes.radDataPtr` object with :func:`radDataOpen`
Written by AJ 20130606
"""
from davitpy.pydarn.sdio import radDataPtr
# check input
assert isinstance(my_ptr, radDataPtr), \
logging.error('input must be of type radDataPtr')
my_list = [beam for beam in my_ptr]
return my_list
| gpl-3.0 | 7,147,466,849,340,686,000 | 37.262391 | 80 | 0.639973 | false |
mnr/rubberfish | unneeded_stuff/countSyllables.py | 1 | 1467 | def count_syllables(word):
# thanks to https://github.com/akkana
verbose = False #print debugging?
vowels = ['a', 'e', 'i', 'o', 'u']
on_vowel = False
in_diphthong = False
minsyl = 0
maxsyl = 0
lastchar = None
word = word.lower()
for c in word:
is_vowel = c in vowels
        if on_vowel is None:
on_vowel = is_vowel
# y is a special case
if c == 'y':
is_vowel = not on_vowel
if is_vowel:
if verbose: print (c, "is a vowel")
if not on_vowel:
# We weren't on a vowel before.
# Seeing a new vowel bumps the syllable count.
if verbose: print ("new syllable")
minsyl += 1
maxsyl += 1
elif on_vowel and not in_diphthong and c != lastchar:
# We were already in a vowel.
# Don't increment anything except the max count,
# and only do that once per diphthong.
if verbose: print (c, "is a diphthong")
in_diphthong = True
maxsyl += 1
elif verbose: print ("[consonant]")
on_vowel = is_vowel
lastchar = c
# Some special cases:
if word[-1] == 'e':
minsyl -= 1
# if it ended with a consonant followed by y, count that as a syllable.
if word[-1] == 'y' and not on_vowel:
maxsyl += 1
return minsyl, maxsyl
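# Minimal sanity check (illustrative; not part of the original script).
if __name__ == '__main__':
    # "hello" -> (2, 2): the minimum and maximum syllable estimates agree.
    print(count_syllables("hello"))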
| mit | 3,068,843,860,149,435,000 | 27.764706 | 75 | 0.503749 | false |
open-craft/opencraft | instance/tests/models/test_log_entry.py | 1 | 13897 | # -*- coding: utf-8 -*-
#
# OpenCraft -- tools to aid developing and hosting free software projects
# Copyright (C) 2015-2019 OpenCraft <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Logger models & mixins - Tests
"""
# Imports #####################################################################
from unittest.mock import patch
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.test import override_settings
from freezegun import freeze_time
from instance.models.log_entry import LogEntry
from instance.tests.base import TestCase
from instance.tests.models.factories.openedx_appserver import make_test_appserver
from instance.tests.models.factories.openedx_instance import OpenEdXInstanceFactory
from instance.tests.models.factories.server import OpenStackServerFactory
# Tests #######################################################################
@patch(
'instance.tests.models.factories.openedx_instance.OpenEdXInstance._write_metadata_to_consul',
return_value=(1, True)
)
class LoggingTestCase(TestCase):
"""
Test cases for logging
"""
def setUp(self):
"""
Set up an instance and server to use for testing.
"""
super().setUp()
with patch(
'instance.tests.models.factories.openedx_instance.OpenEdXInstance._write_metadata_to_consul',
return_value=(1, True)
):
self.instance = OpenEdXInstanceFactory(sub_domain='my.instance', name="Test Instance 1")
self.app_server = make_test_appserver(instance=self.instance)
self.server = self.app_server.server
# Override the VM names for consistency:
patcher = patch('instance.models.server.OpenStackServer.name', new='test-vm-name')
self.addCleanup(patcher.stop)
patcher.start()
# Expected log line prefixes based on the above:
self.instance_prefix = 'instance.models.instance | instance={} (Test Instance 1) | '.format(
self.instance.ref.pk
)
self.appserver_prefix = (
'instance.models.appserver | instance={} (Test Instance 1),app_server={} (AppServer 1) | '.format(
self.instance.ref.pk, self.app_server.pk
)
)
self.server_prefix = 'instance.models.server | server={} (test-vm-name) | '.format(self.app_server.server.pk)
def check_log_entries(self, entries, expected):
"""
Check that the given entries match the expected log output.
"""
for entry, (date, level, text) in zip(entries, expected):
self.assertEqual(entry.created.strftime("%Y-%m-%d %H:%M:%S"), date)
self.assertEqual(entry.level, level)
self.assertEqual(entry.text, text)
def test_default_log_level(self, mock_consul):
"""
Check that the default log level is INFO
"""
log_entry = LogEntry(text='OHAI')
self.assertEqual(log_entry.level, 'INFO')
def test_log_entries(self, mock_consul):
"""
Check `log_entries` output for combination of instance & server logs
"""
lines = [
("2015-08-05 18:07:00", self.instance.logger.info, 'Line #1, on instance'),
("2015-08-05 18:07:01", self.server.logger.info, 'Line #2, on server'),
("2015-08-05 18:07:02", self.instance.logger.debug,
'Line #3, on instance (debug, not published by default)'),
("2015-08-05 18:07:03", self.instance.logger.info, 'Line #4, on instance'),
("2015-08-05 18:07:04", self.instance.logger.warning, 'Line #5, on instance (warn)'),
("2015-08-05 18:07:05", self.server.logger.info, 'Line #6, on server'),
("2015-08-05 18:07:06", self.server.logger.critical, 'Line #7, exception'),
]
for date, log, text in lines:
with freeze_time(date):
log(text)
expected = [
("2015-08-05 18:07:00", 'INFO', self.instance_prefix + 'Line #1, on instance'),
("2015-08-05 18:07:03", 'INFO', self.instance_prefix + 'Line #4, on instance'),
("2015-08-05 18:07:04", 'WARNING', self.instance_prefix + 'Line #5, on instance (warn)'),
]
self.check_log_entries(self.instance.log_entries, expected)
expected = [
("2015-08-05 18:07:01", 'INFO', self.server_prefix + 'Line #2, on server'),
("2015-08-05 18:07:05", 'INFO', self.server_prefix + 'Line #6, on server'),
("2015-08-05 18:07:06", 'CRITICAL', self.server_prefix + 'Line #7, exception'),
]
self.check_log_entries(self.app_server.log_entries, expected)
# Check that the `LOG_LIMIT` setting is respected
with override_settings(LOG_LIMIT=2):
self.check_log_entries(self.app_server.log_entries, expected[-2:])
@patch('instance.logging.publish_data')
def test_log_publish(self, mock_publish_data, mock_consul):
"""
Logger sends an event to the client on each new entry added.
"""
with freeze_time('2019-10-07 00:41:00'):
self.instance.logger.info('Text the client should see')
mock_publish_data.assert_called_with({
'log_entry': {
'created': '2019-10-07T00:41:00Z',
'level': 'INFO',
'text': (
'instance.models.instance | instance={} (Test Instance 1) | Text the client should see'.format(
self.instance.ref.pk
)
),
},
'type': 'object_log_line',
'instance_id': self.instance.ref.pk,
'instance_type': 'OpenEdXInstance',
})
with freeze_time('2019-10-07 00:41:00'):
self.instance.logger.info('Text the client should see, with unicode «ταБЬℓσ»')
mock_publish_data.assert_called_with({
'log_entry': {
'created': '2019-10-07T00:41:00Z',
'level': 'INFO',
'text': (
'instance.models.instance | instance={} (Test Instance 1) | '
'Text the client should see, with unicode «ταБЬℓσ»'
).format(
self.instance.ref.pk
)
},
'type': 'object_log_line',
'instance_id': self.instance.ref.pk,
'instance_type': 'OpenEdXInstance',
})
def test_log_delete(self, mock_consul):
"""
Check `log_entries` output for combination of instance & server logs
"""
# Clear out existing log entries to make sure we're starting with a clean slate:
for log_entry in LogEntry.objects.all():
log_entry.delete()
server1 = self.server
server2 = OpenStackServerFactory(openstack_id='vm2_id')
self.instance.logger.info('Line #1, on instance')
server1.logger.info('Line #2, on server 1')
server2.logger.info('Line #3, on server 2')
self.assertEqual(LogEntry.objects.count(), 3)
# Delete server 1:
server1_id = server1.pk
server1.delete()
# Now its log entry should be deleted:
entries = LogEntry.objects.order_by('pk').all().values_list('text', flat=True)
for entry_text in entries:
self.assertNotIn('Line #2', entry_text)
self.assertIn('Line #1, on instance', entries[0])
self.assertIn('Line #3, on server 2', entries[1])
self.assertIn(
'Deleted 1 log entries for deleted OpenStack VM instance with ID {}'.format(server1_id),
entries[2]
)
def test_log_num_queries(self, mock_consul):
"""
Check that logging to the LogEntry table doesn't do more queries than necessary.
The expected queries upon inserting a log entry are:
1. SELECT (1) AS "a" FROM "django_content_type" WHERE "django_content_type"."id" = {content_type_id} LIMIT 1
2. SELECT (1) AS "a" FROM "instance_openstackserver" WHERE "instance_openstackserver"."id" = {object_id} LIMIT 1
3. INSERT INTO "instance_logentry" (...)
The first two are used to validate the foreign keys. 1. is added by django, and 2. is
added by us since the object_id foreign key constraint is not enforced by the database.
"""
with self.assertNumQueries(3):
self.server.logger.info('some log message')
def test_log_delete_num_queries(self, mock_consul):
"""
Check that the LogEntry.on_post_delete handler doesn't do more queries than necessary.
"""
server = OpenStackServerFactory() # Can't use self.server since deletion of it cascades to self.app_server
with self.assertNumQueries(3):
# We expect one query to check for a related appserver, one to delete the server, one to delete the LogEntry
server.delete()
log_entry = LogEntry.objects.create(text='blah')
with self.assertNumQueries(1):
log_entry.delete()
def test_str_repr(self, mock_consul):
"""
Test the string representation of a LogEntry object
"""
msg = 'We have entered a spectacular binary star system in the Kavis Alpha sector'
with freeze_time("2015-10-20 20:10:15"):
self.server.logger.info(msg)
log_entry = LogEntry.objects.order_by('-pk')[0]
self.assertEqual(
str(log_entry),
'2015-10-20 20:10:15 | INFO | instance.models.server | server={} ({}) | {}'.format(
self.server.pk, self.server.name, msg)
)
def test_invalid_content_type_object_id_combo(self, mock_consul):
"""
Test that content_type and object_id cannot be set on their own
"""
text = 'We are en route to Mintaka III.'
def check_exception(exc):
""" Check that the given exception contains the expected message """
self.assertEqual(
exc.messages,
['LogEntry content_type and object_id must both be set or both be None.'],
)
with self.assertRaises(ValidationError) as context:
LogEntry.objects.create(text=text, content_type_id=None, object_id=self.server.pk)
check_exception(context.exception)
content_type = ContentType.objects.get_for_model(self.server)
with self.assertRaises(ValidationError) as context:
LogEntry.objects.create(text=text, content_type_id=content_type.pk, object_id=None)
check_exception(context.exception)
def test_invalid_object_id(self, mock_consul):
"""
Test that object_id validity is enforced at the application level.
"""
content_type = ContentType.objects.get_for_model(self.server)
with self.assertRaises(ValidationError) as context:
LogEntry.objects.create(
text='We are departing the Rana system for Starbase 133.',
content_type=content_type,
object_id=987654321, # An invalid ID
)
self.assertEqual(
context.exception.messages,
['Object attached to LogEntry has bad content_type or primary key'],
)
def test_log_error_entries(self, mock_consul):
"""
Check `log_error_entries` output for combination of AppServer & server logs
"""
with freeze_time("2015-08-05 18:07:00"):
self.app_server.logger.info('Line #1, on app_server')
with freeze_time("2015-08-05 18:07:01"):
self.app_server.logger.error('Line #2, on app_server')
with freeze_time("2015-08-05 18:07:02"):
self.app_server.logger.debug('Line #3, on app_server (debug, not published by default)')
with freeze_time("2015-08-05 18:07:03"):
self.server.logger.critical('Line #4, on server')
with freeze_time("2015-08-05 18:07:04"):
self.app_server.logger.warning('Line #5, on app_server (warning)')
with freeze_time("2015-08-05 18:07:05"):
self.server.logger.info('Line #6, on server')
with freeze_time("2015-08-05 18:07:06"):
self.app_server.logger.critical('Line #7, exception')
self.instance.logger.critical("Instance-level errors should be excluded.")
entries = self.app_server.log_error_entries
self.assertEqual(len(entries), 3)
self.assertEqual(entries[0].level, "ERROR")
self.assertEqual(entries[0].created.strftime("%Y-%m-%d %H:%M:%S"), "2015-08-05 18:07:01")
self.assertEqual(entries[0].text, self.appserver_prefix + "Line #2, on app_server")
self.assertEqual(entries[1].level, "CRITICAL")
self.assertEqual(entries[1].created.strftime("%Y-%m-%d %H:%M:%S"), "2015-08-05 18:07:03")
self.assertEqual(entries[1].text, self.server_prefix + "Line #4, on server")
self.assertEqual(entries[2].level, "CRITICAL")
self.assertEqual(entries[2].created.strftime("%Y-%m-%d %H:%M:%S"), "2015-08-05 18:07:06")
self.assertEqual(entries[2].text, self.appserver_prefix + "Line #7, exception")
| agpl-3.0 | -1,055,283,871,556,104,400 | 42.102484 | 120 | 0.604006 | false |
mcoughli/root_of_trust | operational_os/plot_data.py | 1 | 4710 | #!/usr/bin/python
import pylab
import matplotlib
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
import numpy
import csv
import os
import sys
# TIMESTAMPS = ["2017-03-30T06:35:54.726002", "2017-03-30T09:45:20.551204"]
TIMESTAMPS = ["2017-04-02T03:35:32.431002"]
# TIMESTAMP_LABELS = {"2017-03-30T06:35:54.726002":"SDCARD",
# "2017-03-30T09:45:20.551204":"TMPFS"}
TIMESTAMP_LABELS = {"2017-04-02T03:35:32.431002":"TMPFS"}
SUB_EXPERIMENTS = ["python", "fpga"]
EXPERIMENT_LABELS = {"python":"Python FS",
"fpga":"FPGA FS"}
EXPERIMENT_MARKER = {"python":"o-",
"fpga":"D-"}
# TRIALS = range(1000, 30000, 1000)
TRIALS = [(1024*2**x) for x in range(1,13)]
def get_sub_experiment_data(sub_experiment, timestamp):
data = {}
averages = []
times_averages = []
stdevs = []
data_bytes = []
throughputs_averages = []
for trial in TRIALS:
filename = "data/{1}/{0}/{0}_{1}_{2}.csv".format(sub_experiment, timestamp, trial)
if not os.path.exists(filename):
print "Skipping file {}".format(filename)
continue
data[trial] = {}
with open(filename) as data_file:
reader = csv.DictReader(data_file)
data[trial]['data'] = list(reader)
throughputs = numpy.array([])
times = numpy.array([])
data_sum = 0
data_total = 0
for item in data[trial]['data']:
num_bytes = int(item["Bytes"])#/1024.0
num_time = float(item["Time"])
print "Num bytes: {}".format(num_bytes)
throughputs = numpy.append(throughputs, (num_bytes/num_time)/1024.0)
# print "{},{}".format(int(item["Bytes"]), float(item["Time"]))
data_sum = data_sum + num_bytes
data_total = data_total + 1
times = numpy.append(times, num_time)
data_average = data_sum/data_total
if data_average not in TRIALS:
print "Data average {} not in trials".format(data_average)
continue
data[trial]['average'] = numpy.average(throughputs)
data[trial]['std'] = numpy.std(throughputs)#numpy.std(times_averages)
data[trial]['time_average'] = numpy.average(times)
averages.append(data[trial]['average'])
stdevs.append(data[trial]['std'])
times_averages.append(data[trial]['time_average'])
throughput_average = numpy.average(throughputs)
throughputs_averages.append(throughput_average)
data_bytes.append(data_average)
data[trial]['throughputs'] = throughputs
print "Throughputs average for {} {},{}: {}".format(TIMESTAMP_LABELS[timestamp], sub_experiment, trial, throughput_average)
# print "Throughputs mean for {} {},{}: {}".format(TIMESTAMP_LABELS[timestamp], sub_experiment, trial, numpy.mean(throughputs))
# print "Throughputs stdev for {} {},{}: {}".format(TIMESTAMP_LABELS[timestamp], sub_experiment, trial, numpy.std(throughputs))
# pylab.figure()
# pylab.plot(TRIALS, averages)
# pylab.show()
# print "\n\n\n\n\n"
return times_averages, stdevs, data_bytes, throughputs_averages
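# Illustrative note (added for clarity; the exact layout is inferred from the reader
# above): each data/<timestamp>/<experiment>/<experiment>_<timestamp>_<trial>.csv file
# needs at least "Bytes" and "Time" columns, e.g.
#   Bytes,Time
#   2048,0.0031
#   2048,0.0029
# Per-row throughput is (Bytes / Time) / 1024.0 KB/s, then averaged per trial.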
def plot_experiment():
for timestamp in TIMESTAMPS:
throughputs = {}
# all_averages = []
        # legend = []
# if TIMESTAMP_LABELS[timestamp] != "TMPFS":
# continue
for sub_experiment in SUB_EXPERIMENTS:
# legend.append(sub_experiment)
time_averages, std, data, throughputs_averages = get_sub_experiment_data(sub_experiment, timestamp)
pylab.errorbar(data, throughputs_averages, yerr=std, fmt=EXPERIMENT_MARKER[sub_experiment], label=EXPERIMENT_LABELS[sub_experiment])
throughputs[sub_experiment] = list(throughputs_averages)
# pylab.bar(data, throughputs_averages, 1000, yerr=std)
overheads = []
for i in range(len(throughputs["python"])):
overheads.append(float(throughputs["fpga"][i])/float(throughputs["python"][i]))
overhead_sum = 0
for overhead in overheads:
overhead_sum = overhead_sum + overhead
overhead_average = overhead_sum/len(overheads)
print "Overhead average: {}".format((1-overhead_average)*100.0)
pylab.xscale("log", nonposx='clip')
pylab.xlabel("Data Processed (bytes)")
# pylab.ylabel("Time (s)")
pylab.ylabel("Throughput (KB/s)")
pylab.legend(loc=4)
pylab.savefig("{}.png".format(TIMESTAMP_LABELS[timestamp]))
pylab.savefig("{}.pdf".format(TIMESTAMP_LABELS[timestamp]))
pylab.show()
if __name__ == "__main__":
plot_experiment()
| gpl-3.0 | 8,308,318,929,500,350,000 | 40.681416 | 144 | 0.608068 | false |
adafruit/Adafruit_Python_BluefruitLE | setup.py | 1 | 1037 | from setuptools import setup, find_packages
import platform
platform_install_requires = []
if platform.system() == 'Darwin':
platform_install_requires += ['pyobjc-framework-CoreBluetooth']
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(name = 'Adafruit_BluefruitLE',
version = '0.9.10',
author = 'Tony DiCola',
author_email = '[email protected]',
description = 'Python library for interacting with Bluefruit LE (Bluetooth low energy) devices on Linux or OSX.',
long_description = long_description,
license = 'MIT',
url = 'https://github.com/adafruit/Adafruit_Python_BluefruitLE/',
install_requires = ['future'] + platform_install_requires,
packages = find_packages())
| mit | -1,287,049,547,496,601,900 | 36.035714 | 125 | 0.642237 | false |
frederica07/Dragon_Programming_Process | PyOpenGL-3.0.2/OpenGL/GL/SGIS/texture4D.py | 1 | 1657 | '''OpenGL extension SGIS.texture4D
This module customises the behaviour of the
OpenGL.raw.GL.SGIS.texture4D to provide a more
Python-friendly API
Overview (from the spec)
This extension defines 4-dimensional texture mapping. If EXT_subtexture
is supported, this extension also defines a mechanism to redefine a
portion of an existing 4-dimensional texture image. Because
EXT_texture3D is required, this extension utilizes the 3-dimensional
image support defined in by EXT_texture3D as a base for 4-dimensional
image manipulation.
The 4th dimension has an abstract, rather than physical, reference
and will be called "extent", since the definition of extent is "that which
specifies the range or magnitude of an area or volume."
Four-dimensional texture mapping is more constrained than its one, two,
and three-dimensional counterparts. Mipmapping is not supported, so
only the level-zero 4-dimensional texture image can be defined. Cubic
filtering is not supported, so the border width must be either zero or
one.
Four-dimensional textures are used primarily as color lookup tables for
color conversion.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/SGIS/texture4D.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.SGIS.texture4D import *
### END AUTOGENERATED SECTION
from OpenGL.GL import images as _i
_i.images.RANK_PACKINGS.setdefault(4,[]).extend([
(glPixelStorei,GL_PACK_SKIP_VOLUMES_SGIS, 0),
(glPixelStorei,GL_PACK_IMAGE_DEPTH_SGIS, 0),
])
| bsd-2-clause | -7,978,031,406,866,542,000 | 37.534884 | 75 | 0.786964 | false |
gtesei/fast-furious | competitions/tgs-salt-identification-challenge/unet_start_4_grad.py | 1 | 13087 | import os
import sys
import random
import matplotlib.image as mpimg
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from tqdm import tqdm, tnrange
from itertools import chain
from skimage.io import imread, imshow, concatenate_images
from skimage.transform import resize
from skimage.morphology import label
from keras.models import Model, load_model
from keras.layers import Input,Dropout,BatchNormalization,Activation,Add
from keras.layers.core import Lambda
from keras.layers.convolutional import Conv2D, Conv2DTranspose
from keras.layers.pooling import MaxPooling2D
from keras.layers.merge import concatenate
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras import backend as K
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
import cv2
import time
# functions
def cov_to_class(val):
for i in range(0, 11):
if val * 10 <= i :
return i
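# Illustrative examples (not part of the original script): cov_to_class() is
# effectively ceil(coverage * 10), bucketing coverage into classes 0..10, e.g.
# cov_to_class(0.0) -> 0, cov_to_class(0.34) -> 4, cov_to_class(1.0) -> 10;
# these classes are only used below to stratify the train/validation split.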
def get_iou(A, B):
intersection = np.logical_and(A, B)
union = np.logical_or(A, B)
iou = np.sum(intersection > 0) / np.sum(union > 0)
return iou
def gradmag(img, sobel_kernel=3, mag_thresh=(0, 255)):
# Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Take both Sobel x and y gradients
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
# Calculate the gradient magnitude
gradmag = np.sqrt(sobelx**2 + sobely**2)
# Rescale to 8 bit
scale_factor = np.max(gradmag)/255
gradmag = (gradmag/scale_factor).astype(np.uint8)
return ~gradmag
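# Usage sketch (illustrative; the image id is an assumption): gradmag() expects a
# 3-channel uint8 image and returns the *inverted* 8-bit Sobel gradient magnitude,
# so flat regions appear bright and edges dark. The mag_thresh argument is accepted
# but not used by the body above.
#   img = imread(path_train_images + "some_id.png")
#   grad = gradmag(img, sobel_kernel=3)   # uint8 array with the same height/width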
# Loading of training/testing ids and depths
def load_images(random_state=1234):
train_df = pd.read_csv("data/train.csv", index_col="id", usecols=[0])
depths_df = pd.read_csv("data/depths.csv", index_col="id")
train_df = train_df.join(depths_df)
test_df = depths_df[~depths_df.index.isin(train_df.index)]
print(">>> train_df:",train_df.shape)
print(train_df.head())
print(">>> test_df:", test_df.shape)
print(test_df.head())
train_df["images"] = [gradmag(np.array(imread(path_train_images+"{}.png".format(idx)))) for idx in tqdm(train_df.index)]
train_df["masks"] = [np.array(load_img(path_train_masks+"{}.png".format(idx),grayscale=True))/255 for idx in tqdm(train_df.index)]
train_df["coverage"] = train_df.masks.map(np.sum) / pow(img_size_ori, 2)
train_df["coverage_class"] = train_df.coverage.map(cov_to_class)
print("*** TRAIN ***")
print(train_df.head())
print("*** TEST ***")
print(test_df.head())
ids_train, ids_valid, x_train, x_valid, y_train, y_valid, cov_train, cov_test, depth_train, depth_test = train_test_split(
train_df.index.values,
np.array(train_df.images.tolist()).reshape(-1, img_size_target, img_size_target, 1),
np.array(train_df.masks.tolist()).reshape(-1, img_size_target, img_size_target, 1),
train_df.coverage.values,
train_df.z.values,
test_size=0.2,
stratify=train_df.coverage_class,
random_state=random_state)
#Data augmentation
x_train2 = np.append(x_train, [np.fliplr(x) for x in x_train], axis=0)
y_train2 = np.append(y_train, [np.fliplr(x) for x in y_train], axis=0)
print(x_train2.shape)
print(y_valid.shape)
x_test = np.array([gradmag(np.array(imread(path_test_images+"{}.png".format(idx)))) for idx in tqdm(test_df.index)]).reshape(-1, img_size_target, img_size_target, 1)
return x_train2, x_valid, y_train2, y_valid, x_test, test_df.index.values
def convolution_block(x, filters, size, strides=(1,1), padding='same', activation=True):
x = Conv2D(filters, size, strides=strides, padding=padding)(x)
x = BatchNormalization()(x)
if activation == True:
x = Activation('relu')(x)
return x
def residual_block(blockInput, num_filters=16):
x = Activation('relu')(blockInput)
x = BatchNormalization()(x)
x = convolution_block(x, num_filters, (3,3) )
x = convolution_block(x, num_filters, (3,3), activation=False)
x = Add()([x, blockInput])
return x
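# Illustrative composition (a sketch, not the original usage): convolution_block()
# is Conv2D + BatchNorm (+ optional ReLU), and residual_block() pre-activates its
# input, applies two 3x3 convolution blocks and adds the input back, so output and
# input shapes match as long as num_filters equals the incoming channel count.
#   x = Conv2D(16, (3, 3), padding="same")(some_tensor)   # hypothetical tensor
#   x = residual_block(x, 16)                             # same shape in and out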
# Build model
def build_model(input_layer, start_neurons, DropoutRatio = 0.5):
# 101 -> 50
conv1 = Conv2D(start_neurons * 1, (3, 3), activation=None, padding="same")(input_layer)
conv1 = residual_block(conv1,start_neurons * 1)
conv1 = residual_block(conv1,start_neurons * 1)
conv1 = Activation('relu')(conv1)
pool1 = MaxPooling2D((2, 2))(conv1)
pool1 = Dropout(DropoutRatio/2)(pool1)
# 50 -> 25
conv2 = Conv2D(start_neurons * 2, (3, 3), activation=None, padding="same")(pool1)
conv2 = residual_block(conv2,start_neurons * 2)
conv2 = residual_block(conv2,start_neurons * 2)
conv2 = Activation('relu')(conv2)
pool2 = MaxPooling2D((2, 2))(conv2)
pool2 = Dropout(DropoutRatio)(pool2)
# 25 -> 12
conv3 = Conv2D(start_neurons * 4, (3, 3), activation=None, padding="same")(pool2)
conv3 = residual_block(conv3,start_neurons * 4)
conv3 = residual_block(conv3,start_neurons * 4)
conv3 = Activation('relu')(conv3)
pool3 = MaxPooling2D((2, 2))(conv3)
pool3 = Dropout(DropoutRatio)(pool3)
# 12 -> 6
conv4 = Conv2D(start_neurons * 8, (3, 3), activation=None, padding="same")(pool3)
conv4 = residual_block(conv4,start_neurons * 8)
conv4 = residual_block(conv4,start_neurons * 8)
conv4 = Activation('relu')(conv4)
pool4 = MaxPooling2D((2, 2))(conv4)
pool4 = Dropout(DropoutRatio)(pool4)
# Middle
convm = Conv2D(start_neurons * 16, (3, 3), activation=None, padding="same")(pool4)
convm = residual_block(convm,start_neurons * 16)
convm = residual_block(convm,start_neurons * 16)
convm = Activation('relu')(convm)
# 6 -> 12
deconv4 = Conv2DTranspose(start_neurons * 8, (3, 3), strides=(2, 2), padding="same")(convm)
uconv4 = concatenate([deconv4, conv4])
uconv4 = Dropout(DropoutRatio)(uconv4)
uconv4 = Conv2D(start_neurons * 8, (3, 3), activation=None, padding="same")(uconv4)
uconv4 = residual_block(uconv4,start_neurons * 8)
uconv4 = residual_block(uconv4,start_neurons * 8)
uconv4 = Activation('relu')(uconv4)
# 12 -> 25
#deconv3 = Conv2DTranspose(start_neurons * 4, (3, 3), strides=(2, 2), padding="same")(uconv4)
deconv3 = Conv2DTranspose(start_neurons * 4, (3, 3), strides=(2, 2), padding="valid")(uconv4)
uconv3 = concatenate([deconv3, conv3])
uconv3 = Dropout(DropoutRatio)(uconv3)
uconv3 = Conv2D(start_neurons * 4, (3, 3), activation=None, padding="same")(uconv3)
uconv3 = residual_block(uconv3,start_neurons * 4)
uconv3 = residual_block(uconv3,start_neurons * 4)
uconv3 = Activation('relu')(uconv3)
    # 25 -> 50
deconv2 = Conv2DTranspose(start_neurons * 2, (3, 3), strides=(2, 2), padding="same")(uconv3)
uconv2 = concatenate([deconv2, conv2])
uconv2 = Dropout(DropoutRatio)(uconv2)
uconv2 = Conv2D(start_neurons * 2, (3, 3), activation=None, padding="same")(uconv2)
uconv2 = residual_block(uconv2,start_neurons * 2)
uconv2 = residual_block(uconv2,start_neurons * 2)
    uconv2 = Activation('relu')(uconv2)
    # 50 -> 101
#deconv1 = Conv2DTranspose(start_neurons * 1, (3, 3), strides=(2, 2), padding="same")(uconv2)
deconv1 = Conv2DTranspose(start_neurons * 1, (3, 3), strides=(2, 2), padding="valid")(uconv2)
uconv1 = concatenate([deconv1, conv1])
uconv1 = Dropout(DropoutRatio)(uconv1)
uconv1 = Conv2D(start_neurons * 1, (3, 3), activation=None, padding="same")(uconv1)
uconv1 = residual_block(uconv1,start_neurons * 1)
uconv1 = residual_block(uconv1,start_neurons * 1)
uconv1 = Activation('relu')(uconv1)
uconv1 = Dropout(DropoutRatio/2)(uconv1)
output_layer = Conv2D(1, (1,1), padding="same", activation="sigmoid")(uconv1)
return output_layer
# Define IoU metric
def castF(x):
return K.cast(x, K.floatx())
def castB(x):
return K.cast(x, bool)
def iou_loss_core(true,pred):
intersection = true * pred
notTrue = 1 - true
union = true + (notTrue * pred)
return (K.sum(intersection, axis=-1) + K.epsilon()) / (K.sum(union, axis=-1) + K.epsilon())
def competitionMetric2(true, pred): #any shape can go
tresholds = [0.5 + (i*.05) for i in range(10)]
#flattened images (batch, pixels)
true = K.batch_flatten(true)
pred = K.batch_flatten(pred)
pred = castF(K.greater(pred, 0.5))
#total white pixels - (batch,)
trueSum = K.sum(true, axis=-1)
predSum = K.sum(pred, axis=-1)
#has mask or not per image - (batch,)
true1 = castF(K.greater(trueSum, 1))
pred1 = castF(K.greater(predSum, 1))
#to get images that have mask in both true and pred
truePositiveMask = castB(true1 * pred1)
#separating only the possible true positives to check iou
testTrue = tf.boolean_mask(true, truePositiveMask)
testPred = tf.boolean_mask(pred, truePositiveMask)
#getting iou and threshold comparisons
iou = iou_loss_core(testTrue,testPred)
truePositives = [castF(K.greater(iou, tres)) for tres in tresholds]
#mean of thressholds for true positives and total sum
truePositives = K.mean(K.stack(truePositives, axis=-1), axis=-1)
truePositives = K.sum(truePositives)
#to get images that don't have mask in both true and pred
trueNegatives = (1-true1) * (1 - pred1) # = 1 -true1 - pred1 + true1*pred1
trueNegatives = K.sum(trueNegatives)
return (truePositives + trueNegatives) / castF(K.shape(true)[0])
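# Worked example (added for clarity): competitionMetric2 averages, over the IoU
# thresholds 0.50, 0.55, ..., 0.95, whether each predicted mask clears the
# threshold, with (near-)empty ground-truth/prediction pairs counted as correct.
# E.g. an image pair with IoU 0.73 clears 5 of the 10 thresholds and contributes
# 0.5 to the batch score.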
def predict_result(model,x_test,img_size_target): # predict both orginal and reflect x
x_test_reflect = np.array([np.fliplr(x) for x in x_test])
preds_test1 = model.predict(x_test).reshape(-1, img_size_target, img_size_target)
preds_test2_refect = model.predict(x_test_reflect).reshape(-1, img_size_target, img_size_target)
preds_test2 = np.array([ np.fliplr(x) for x in preds_test2_refect] )
preds_avg = (preds_test1 +preds_test2)/2
return preds_avg
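# Usage sketch (illustrative): predict_result() is simple test-time augmentation --
# it averages the model's prediction on the input and on its horizontal flip.
#   preds_valid = predict_result(model, x_valid, img_size_target)  # (N, 101, 101)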
def rle_encode(im):
'''
im: numpy array, 1 - mask, 0 - background
Returns run length as string formated
'''
pixels = im.flatten(order = 'F')
pixels = np.concatenate([[0], pixels, [0]])
runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
runs[1::2] -= runs[::2]
return ' '.join(str(x) for x in runs)
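# Worked example (added for clarity): for a 2x2 mask [[1, 0], [1, 1]], flattening
# in Fortran (column-major) order gives [1, 1, 0, 1], so rle_encode() returns
# '1 2 4 1' -- pairs of 1-based start position and run length.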
# Set some parameters
img_size_ori = 101
img_size_target = 101
im_width = 101
im_height = 101
im_chan = 1
basicpath = 'data/'
path_train = basicpath + 'train/'
path_test = basicpath + 'test/'
path_train_images = path_train + 'images/'
path_train_masks = path_train + 'masks/'
path_test_images = path_test + 'images/'
# load images
start_time = time.time()
x_train, x_valid, y_train, y_valid, x_test, x_test_ids = load_images()
# model
input_layer = Input((img_size_target, img_size_target, 1))
output_layer = build_model(input_layer, 16,0.5)
model = Model(input_layer, output_layer)
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=[competitionMetric2])
model.summary()
# train
earlystopper = EarlyStopping(patience=20, verbose=1,monitor='val_competitionMetric2',mode='max')
checkpointer = ModelCheckpoint('model-tgs-salt-1.h5', verbose=1, save_best_only=True,monitor='val_competitionMetric2',mode='max')
reduce_lr = ReduceLROnPlateau(factor=0.2, patience=5, min_lr=0.00001, verbose=1,monitor='val_competitionMetric2',mode='max')
results = model.fit(x_train, y_train,
validation_data=[x_valid,y_valid],
batch_size=32, epochs=200,
callbacks=[earlystopper, checkpointer,reduce_lr])
model = load_model('model-tgs-salt-1.h5' , custom_objects={'competitionMetric2': competitionMetric2 , 'iou_loss_core': iou_loss_core , 'castB': castB , 'castF': castF})
# threshold_best
preds_valid = predict_result(model,x_valid,img_size_target)
thresholds = np.linspace(0.3, 0.7, 31)
ious = np.array([get_iou(y_valid.reshape(-1,img_size_target, img_size_target),np.int32(preds_valid.reshape(-1,img_size_target, img_size_target) > threshold)) for threshold in tqdm(thresholds)])
threshold_best_index = np.argmax(ious)
threshold_best = thresholds[threshold_best_index]
print(">>> threshold_best:",threshold_best)
# test
preds_test = predict_result(model,x_test,img_size_target)
pred_dict = {idx: rle_encode(np.round(preds_test[i]) > threshold_best) for i, idx in enumerate(tqdm(x_test_ids))}
sub = pd.DataFrame.from_dict(pred_dict,orient='index')
sub.index.names = ['id']
sub.columns = ['rle_mask']
sub.to_csv('submission4.csv')
###
seconds = time.time() - start_time
mins = seconds / 60
hours = mins / 60
days = hours / 24
print("------>>>>>>> elapsed seconds:", seconds)
print("------>>>>>>> elapsed minutes:", mins)
print("------>>>>>>> elapsed hours:", hours)
print("------>>>>>>> elapsed days:", days)
| mit | -6,945,831,053,943,981,000 | 39.76947 | 218 | 0.662184 | false |
silenteddie/Landsat8LST_SWA | modis_water_vapor_interface.py | 1 | 5279 | # coding=utf-8
from PyQt4.QtGui import QApplication
import modis_extent_generator
from qgis.core import *
import l8_lst_swa_common_lib
import processing
import datetime
from urllib2 import urlopen
from ftplib import FTP
import shutil
from PyQt4.QtCore import QUrl
from PyQt4.QtNetwork import QNetworkAccessManager, QNetworkRequest
def getWaterVaporForGivenRaster (inputRaster, year, month, day, outputPath,tle1,tle2,processLabel, tempDir):
"""
    Find the MOD09 file matching the given raster (e.g. a Landsat scene) and download
    everything needed from the NASA FTP server.
    Then clip MOD09 by the input raster and fix the resolution.
    :param inputRaster: raster whose extent is used to search for MOD09
    :param year: year of acquisition
    :param month: month of acquisition
    :param day: day of acquisition
    :param outputPath: path where the final water vapor grid will be saved
    :param tle1: TLE line 1
    :param tle2: TLE line 2
    :param processLabel: Qt label from the interface used to show progress status
    :param tempDir: temporary directory where files will be downloaded
    :return: 1 on success, otherwise an error code
"""
processLabel.setText('Calculating TERRA track for day')
QApplication.processEvents()
scenesExtent = modis_extent_generator.generateScenesExtentLayerForDay(year,month,day,tle1,tle2,'Terra', True)
processLabel.setText('Searching suitable scene for raster')
QApplication.processEvents()
WGS84 = QgsCoordinateReferenceSystem(4326, QgsCoordinateReferenceSystem.PostgisCrsId)
rasterExtent = l8_lst_swa_common_lib.getRasterLayerExtent(inputRaster, WGS84)
rasterExtentGeom = [[QgsPoint(rasterExtent['xMin'],rasterExtent['yMin']),
QgsPoint(rasterExtent['xMin'],rasterExtent['yMax']),
QgsPoint(rasterExtent['xMax'],rasterExtent['yMax']),
QgsPoint(rasterExtent['xMax'],rasterExtent['yMin'])]]
rasterMaskLayer = QgsVectorLayer("Polygon", 'Raster mask', "memory")
rasterMaskLayerDP = rasterMaskLayer.dataProvider()
rasterMaskLayer.startEditing()
maskFeature = QgsFeature()
maskFeature.setGeometry(QgsGeometry.fromPolygon(rasterExtentGeom))
rasterMaskLayerDP.addFeatures([maskFeature])
rasterMaskLayer.commitChanges()
rasterMaskLayer.updateExtents()
QgsMapLayerRegistry.instance().addMapLayer(rasterMaskLayer)
QgsMapLayerRegistry.instance().addMapLayer(scenesExtent)
try:
processing.runalg('qgis:selectbylocation',scenesExtent,rasterMaskLayer,u'contains',0)
except:
raise
containingScene = scenesExtent.selectedFeatures()[0]
# Suitable scene time
containingSceneTime = str(containingScene[1]).split(':')[0]+str(containingScene[1]).split(':')[1]
processLabel.setText('Downloading MOD03...')
QApplication.processEvents()
MOD03 = downloadMODL2ForGivenDateAndTime(year,month,day,containingSceneTime,'MOD03',tempDir+'\\MOD03A.'+str(year)+str(month)+str(day)+'.'+str(containingSceneTime)+'.hdf')
if MOD03 != 1:
return MOD03
processLabel.setText('Downloading MOD09...')
QApplication.processEvents()
MOD09 = downloadMODL2ForGivenDateAndTime(year,month,day,containingSceneTime,'MOD09',tempDir+'\\MOD09A.'+str(year)+str(month)+str(day)+'.'+str(containingSceneTime)+'.hdf')
if MOD09 != 1:
return MOD09
QgsMapLayerRegistry.instance().removeMapLayer(rasterMaskLayer.id())
QgsMapLayerRegistry.instance().removeMapLayer(scenesExtent.id())
### TO BE CONTINUED
def downloadMODL2ForGivenDateAndTime(year, month, day, time, product, rasterFullPath):
"""
    Downloads a MODIS L2 product for the given date and time. Returns 1 on success,
    otherwise an error code from 2 to 6.
    :param year: year of acquisition
    :param month: month of acquisition
    :param day: day of acquisition
    :param time: time in hhmm format (e.g. '0845')
    :param product: product code: MOD09, MOD03, etc.
    :param rasterFullPath: path where the downloaded file will be saved
    :return: 1 on success, otherwise an error code
"""
currentDate = datetime.date(year,month,day)
currentDayOfYear = currentDate.timetuple().tm_yday
currentDayOfYear = '0'*(3-len(str(currentDayOfYear))) + str(currentDayOfYear)
try:
ftp = FTP('ladsweb.nascom.nasa.gov') # MODIS NASA FTP
ftp.login()
except:
return 2 # Bad connection
try:
ftp.cwd('allData/6/'+product+'/')
ftp.cwd(str(year))
ftp.cwd(str(currentDayOfYear))
except:
return 3 # Date is unavailable
pathString = 'ftp://ladsweb.nascom.nasa.gov/allData/6/' + product + '/' + str(year) + '/' +\
str(currentDayOfYear) + '/'
try:
files = ftp.nlst()
except:
return 4 # File list is not available
timestamp = str(year) + str(currentDayOfYear) + '.' + str(time)
fileFlag = False
for file in files:
if (file[-3:] == 'hdf') and (file.find(timestamp) != -1):
fileFlag = True
pathString += file
try:
req = urlopen(pathString)
dist = open(rasterFullPath, 'wb')
shutil.copyfileobj(req, dist)
dist.close()
except:
return 5 # Cannot download file
if not fileFlag:
return 6 # No needed file
return 1
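# Usage sketch (illustrative; the output path is an assumption): fetch the MOD09
# granule acquired on 2015-07-14 at 08:45 UTC and check the status code.
#   status = downloadMODL2ForGivenDateAndTime(2015, 7, 14, '0845', 'MOD09',
#                                             'C:/temp/MOD09A.hdf')
#   # 1 = ok, 2 = connection failed, 3 = date unavailable, 4 = file list
#   # unavailable, 5 = download failed, 6 = no granule matches the timestamp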
| gpl-2.0 | 4,448,369,078,451,487,000 | 36.707143 | 174 | 0.684599 | false |
w0rm/todo | code.py | 1 | 1195 | #!/usr/bin/env python
'''
This is the main executable file
that runs the application in a development server or in WSGI mode
'''
import os
import sys
import web
from config import config
from urls import urls
from template import render
# Add current directory to path
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
# App initialization
app = web.application(urls, globals())
# Database initialization
db = web.database(**config.database)
# Session initialization
if web.config.get('_session') is None:
# this is required to work with reloader
web.config._session = web.session.Session(
app,
web.session.DBStore(db, 'sessions'),
)
# Save session and db in web.ctx
def ctx_hook():
web.ctx.session = web.config._session
web.ctx.db = db
app.add_processor(web.loadhook(ctx_hook))
app.notfound = lambda: web.notfound(render.error(404, 'Not Found'))
# Custom error pages in production
if config.environment == 'production':
app.internalerror = lambda: web.internalerror(
render.error(500, 'Internal Server Error'))
if __name__ == '__main__':
if config.environment != 'test':
app.run()
else:
application = app.wsgifunc()
| mit | 8,926,043,363,984,605,000 | 21.980769 | 67 | 0.697908 | false |
campadrenalin/python-libdeje | deje/tests/test_dexter_commands_basic.py | 1 | 3043 | '''
This file is part of python-libdeje.
python-libdeje is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
python-libdeje is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with python-libdeje. If not, see <http://www.gnu.org/licenses/>.
'''
from __future__ import absolute_import
from deje.tests.dexter_commands import DexterCommandTester
class TestDexterBasicGroup(DexterCommandTester):
def test_help(self):
with self.io:
self.interface.do_command('help')
self.assertEqual(self.interface.view.contents, [
'msglog> help',
'Dexter is a low-level DEJE client.',
'It\'s perfect for low-level management of documents.',
'Type "commands" to see the list of available commands.',
'Type "help somecommand" to see more about a command.',
])
def test_help_with_args(self):
with self.io:
self.interface.do_command('help help commands blooby')
self.assertEqual(self.interface.view.contents, [
'msglog> help help commands blooby',
'help :: A simple little help message.',
'',
'You can also view full descriptions with "help commandname".',
'commands :: List all available commands.',
'blooby :: No such command.',
])
def test_commands(self):
with self.io:
self.interface.do_command('commands')
self.assertEqual(self.interface.view.contents, [
'msglog> commands',
'commands :: List all available commands.',
'demo :: No description available.',
'devent :: Propose a change to the document.',
'dexport :: Serialize the current document to disk.',
'dget_latest :: Get the latest version number of the doc.',
'dinit :: Initialize DEJE interactivity.',
'dvexport :: Serialize the current document to variable storage.',
'fread :: Read contents of a file as a series of commands.',
'fwrite :: Write contents of a view to a file.',
'help :: A simple little help message.',
'quit :: Exit the program.',
'vclone :: Copy variable data from one location to another.',
'vdel :: Delete a value from variable storage.',
'vget :: Print a value in variable storage.',
'view :: List views, or select one.',
'vload :: Load a variable value from disk.',
'vsave :: Save a variable value to disk.',
'vset :: Set a value in variable storage.',
])
| gpl-3.0 | 4,530,050,492,031,537,000 | 42.471429 | 78 | 0.628327 | false |
dwagon/pymoo | moo/moo/settings.py | 1 | 2358 | """
Django settings for moo project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'u@g=^v7)hrcffe-1p82f!q38v#1!w6(b!6p1=61m-$osx2w%!h'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ALLOWED_HOSTS = []
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'moo/templates',),
)
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'building',
'game',
'planet',
'race',
'ship',
'system',
'tech',
'player'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'moo.urls'
WSGI_APPLICATION = 'moo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
| gpl-2.0 | -6,647,317,063,238,338,000 | 22.58 | 71 | 0.704411 | false |
pymager/pymager | tests/pymagertests/resources/test_nestedpathgenerator.py | 1 | 1748 | """
Copyright 2010 Sami Dalouche
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os.path
import unittest
from pymager.resources.impl.nestedpathgenerator import NestedPathGenerator
from tests.pymagertests.resources.fake_image_format_mapper import FakeImageFormatMapper
from tests.pymagertests import objectmothers
from pymager import domain
class NestedPathGeneratorTestCase(unittest.TestCase):
def setUp(self):
self._path_generator = NestedPathGenerator(FakeImageFormatMapper(), os.path.abspath("/basedir"))
def test_should_return_original_image_path(self):
self.assertEquals(
os.path.abspath("/basedir/pictures/66/b1/fb/ce/ca/25/b9/95/78/0a/7a/6f/8f/ea/79/b6/97/ce/cc/66b1fbceca25b995780a7a6f8fea79b697ceccb0.jpg"),
self._path_generator.original_path(objectmothers.original_yemmagouraya_metadata()).absolute())
def test_should_return_derived_image_path(self):
self.assertEquals(
os.path.abspath("/basedir/cache/d8/ae/48/bd/0c/62/ea/68/2c/df/e5/26/ce/df/68/6a/48/04/5a/d8ae48bd0c62ea682cdfe526cedf686a48045a7d-100x100.jpg"),
self._path_generator.derived_path(objectmothers.derived_100x100_yemmagouraya_metadata()).absolute())
| apache-2.0 | 382,058,540,958,470,300 | 46.243243 | 156 | 0.747712 | false |
D-Vaillant/julius | setup.py | 1 | 5140 | """ setup.py:
Real simple utility.
No "install to command line" option, because nobody has requested one.
"""
from typing import Union
from io import TextIOBase
import string
import argparse
import julius
# Some utilities.
num_to_word = {'1': 'one', '2': 'two', '3': 'three', '4': 'four', '5': 'five',
'6': 'six', '7': 'seven', '8': 'eight', '9': 'nine', '0': 'zero'}
class MissingKeyError(Exception):
pass
# Compatibility, I guess?
try:
FileNotFoundError()
except NameError:
FileNotFoundError = IOError
def file_wrapper(file_loc: str) -> str:
"""
Tries to treat a string as a file location.
    If successful, return the contents of the file.
Otherwise, return the input string.
"""
try:
out = open(file_loc, 'r').read()
except FileNotFoundError:
out = file_loc
return out
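# Illustrative behaviour (the examples are assumptions, not from the original docs):
# file_wrapper() is deliberately forgiving -- if the argument names a readable
# file, its contents are returned; otherwise the argument itself is used.
#   file_wrapper('message.txt')   # contents of message.txt, when it exists
#   file_wrapper('attackatdawn')  # no such file, so the literal string comes back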
def safety_wrapper(key: str, safety_level = 0) -> str:
    """
    Strips a string of non-alphabetical characters, depending on the safety level
    (PUNCTUATION, CAPITALIZATION).
    Note: the stripping itself is not implemented yet; the key is currently
    returned unchanged.
    """
    if safety_level > 2:
        safety_level -= 2
    return key
# The argparse definition.
parser = argparse.ArgumentParser(prog='julius',
description="Implements a Vigenere cipher.\n"
"Sends the text to sys.out.")
# The text to be ciphered through.
parser.add_argument('plain_text',
nargs=1,
help="It really should be a file location, but you can "
"input strings as well.")
# The key that we're ciphering with!
parser.add_argument('key',
nargs='?',
default=None,
help="If omitted, looks to optional KEY arguments.\n"
"Please only use lowercase letters! "
"Can also open files.")
# Key arguments.
parser.add_argument('--key_length',
nargs='?',
default=0,
type=int,
help="If key is omitted, generate a random key of given "
"length and use that. This is a KEY argument.")
parser.add_argument('--caesar',
action='store_true',
help="If key is omitted, generate a random key of length 1 "
"and use that. This is a KEY argument.")
parser.add_argument('--otp',
action='store_true',
help="If key is omitted, generate a random key of length "
"equal to the length of the plain_text and save it to "
"the given file location.\nStores a file containing "
"key. This is a KEY argument.")
# Use-case arguments.
parser.add_argument('--decrypt',
action='store_true',
help="Key cannot be omitted. Decrypts a text encrypted "
"with the given key.")
'''
parser.add_argument(
'--unsafe',
nargs='?',
type=int,
default=0,
help="Allows for the preservation of non-alphanumeric characters.\n"
"Controls punctuation, capitalization, and spaces.\r\n"
"It's a binary notation: SPACES - CAPITALIZATION - PUNCTUATION.\r\n"
"001 -> 1 => strip spaces and capitalization, keep punctuation\r\n"
"111 -> 1 + 2 + 4 = 7 => keep all\r\n"
"101 -> 4 + 0 + 1 = 5 => strip capitalization"
)
'''
if __name__ == "__main__":
args = parser.parse_args()
# Some plain_text text mungling.
plain_text = file_wrapper(args.plain_text[0])
# Turn numerals into words.
for k, v in num_to_word.items():
plain_text = plain_text.replace(k, v)
# Forcefully remove all non-alphabetical characters, make 'em lowercase.
# TODO: Remove this in lieu of safety_wrapper.
plain_text = ''.join(char for char in plain_text
if char in string.ascii_letters).lower()
# This is the part that deals with keys.
if args.key is not None:
# strip the key of punctuation and capitalization
key = safety_wrapper(file_wrapper(args.key), 0)
else:
# Decryption requires a key to decrypt with, of course.
if args.decrypt:
raise MissingKeyError("Decryption requires a key!")
# One-time pad.
if args.otp:
key = julius.create_random_key(length=len(plain_text))
# Save the key to a keyfile of random name.
with open("key_{}.txt".format(julius.create_random_key(5)), 'w')\
as key_file:
key_file.write(key)
print("Saved key to {}.".format(key_file.name))
elif args.key_length > 0:
key = julius.create_random_key(length=args.key_length)
elif args.caesar:
key = julius.create_random_key(length=1)
else:
raise MissingKeyError("Either specify a key textfile location, a "
"key, or use one of the KEY flags.")
print(julius.vigenere(plain_text, key, decrypting=args.decrypt))
| gpl-3.0 | -5,746,411,298,161,874,000 | 31.948718 | 80 | 0.560506 | false |
pmeier82/BOTMpy | botmpy/common/spike_alignment.py | 1 | 7458 | # -*- coding: utf-8 -*-
#_____________________________________________________________________________
#
# Copyright (c) 2012 Berlin Institute of Technology
# All rights reserved.
#
# Developed by: Philipp Meier <[email protected]>
# Neural Information Processing Group (NI)
# School for Electrical Engineering and Computer Science
# Berlin Institute of Technology
# MAR 5-6, Marchstr. 23, 10587 Berlin, Germany
# http://www.ni.tu-berlin.de/
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal with the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimers.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimers in the documentation
# and/or other materials provided with the distribution.
# * Neither the names of Neural Information Processing Group (NI), Berlin
# Institute of Technology, nor the names of its contributors may be used to
# endorse or promote products derived from this Software without specific
# prior written permission.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# WITH THE SOFTWARE.
#_____________________________________________________________________________
#
# Acknowledgements:
# Philipp Meier <[email protected]>
#_____________________________________________________________________________
#
"""spikes alignment functions"""
__docformat__ = 'restructuredtext'
__all__ = ['sinc_interp1d', 'get_tau_for_alignment', 'get_tau_align_min',
'get_tau_align_max', 'get_tau_align_energy', 'get_aligned_spikes']
##--- IMPORTS
import scipy as sp
from scipy.signal import resample
from .util import INDEX_DTYPE
from .funcs_spike import epochs_from_spiketrain, get_cut, extract_spikes
##---FUNCTIONS
def sinc_interp1d(x, s, r):
"""Interpolates `x`, sampled at times `s`
Output `y` is sampled at times `r`
    inspired by Matlab code from:
http://phaseportrait.blogspot.com/2008/06/sinc-interpolation-in-matlab.html
:param ndarray x: input data time series
:param ndarray s: input sampling time series (regular sample interval)
:param ndarray r: output sampling time series
:return ndarray: output data time series (regular sample interval)
"""
# init
s = sp.asarray(s)
r = sp.asarray(r)
x = sp.asarray(x)
if x.ndim == 1:
x = sp.atleast_2d(x)
else:
if x.shape[0] == len(s):
x = x.T
else:
if x.shape[1] != s.shape[0]:
raise ValueError('x and s must be same temporal extend')
if sp.allclose(s, r):
return x.T
T = s[1] - s[0]
# resample
sincM = sp.tile(r, (len(s), 1)) - sp.tile(s[:, sp.newaxis], (1, len(r)))
return sp.vstack([sp.dot(xx, sp.sinc(sincM / T)) for xx in x]).T
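# Usage sketch (a rough example, not from the original test suite): upsample a
# regularly sampled waveform onto a 4x denser grid.
#   t = sp.arange(32.0)                       # original sample times
#   r = sp.linspace(0.0, 31.0, 125)           # 4x denser output grid
#   y = sinc_interp1d(sp.sin(0.3 * t), t, r)  # -> array of shape (125, 1)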
def get_tau_for_alignment(spikes, align_at):
"""return the per spike offset in samples (taus) of the maximum values to
the desired alignment sample within the spike waveform.
:type spikes: ndarray
:param spikes: stacked mc spike waveforms [ns, tf, nc]
:type align_at: int
:param align_at: sample to align the maximum at
:returns: ndarray - offset per spike
"""
# checks
ns, tf, nc = spikes.shape
if 0 < align_at >= tf:
return sp.zeros(ns)
# offsets
dchan = [spike.max(0).argmax() for spike in spikes]
tau = [spikes[i, :, dchan[i]].argmax() - align_at for i in xrange(ns)]
return sp.asarray(tau, dtype=INDEX_DTYPE)
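# Worked example (added for clarity): if a spike's dominant channel peaks at
# sample 12 while align_at is 10, its tau is +2; get_aligned_spikes() below adds
# these taus to the spike train and re-cuts the windows so the peak lands on the
# requested sample.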
get_tau_align_min = lambda spks, ali: get_tau_for_alignment(-spks, ali)
get_tau_align_max = lambda spks, ali: get_tau_for_alignment(spks, ali)
get_tau_align_energy = lambda spks, ali: get_tau_for_alignment(spks * spks, ali)
def get_aligned_spikes(data, spike_train, align_at=-1, tf=47, look_ahead=0, mc=True,
kind='none', rsf=1., sample_back=True):
"""return the set of aligned spikes waveforms and the aligned spike train
:type data: ndarray
:param data: data with channels in the columns
:type spike_train: ndarray or list
:param spike_train: spike train of events in data
:type align_at: int
:param align_at: align feature at this sample in the waveform
:type tf: int
:param tf: temporal extend of the waveform in samples
:type look_ahead: int
:param look_ahead: samples to look beyond the cut window for finding the align feature
:type mc: bool
:param mc: if True, return mc waveforms, else return concatenated waveforms.
Default=True
:type kind: str
:param kind: String giving the type of alignment to conduct. One of:
- "max" - align on maximum of the waveform
- "min" - align on minimum of the waveform
- "energy" - align on peak of energy
- "none" - no alignment
Default='none'
:type rsf: float
:param rsf: resampling factor (use integer values of powers of 2)
:param bool sample_back: if True, resample spikes to original length after resampling
:rtype: ndarray, ndarray
:returns: stacked spike events, spike train with events corrected for
alignment
"""
# resample?
if rsf != 1.0:
data = resample(data, rsf * data.shape[0])
tf *= rsf
align_at *= rsf
spike_train *= rsf
look_ahead *= rsf
# init
ep, st = epochs_from_spiketrain(
spike_train,
(align_at + look_ahead, tf - align_at + look_ahead),
end=data.shape[0],
with_corrected_st=True)
# align spikes
if ep.shape[0] > 0:
if kind in ['min', 'max', 'energy']:
spikes = extract_spikes(data, ep, mc=True)
if rsf != 1.0:
print spikes.shape
tau = {'min': get_tau_align_min,
'max': get_tau_align_max,
'energy': get_tau_align_energy}[kind](spikes, align_at)
st += tau
st -= look_ahead
ep, st = epochs_from_spiketrain(
st,
(align_at, tf - align_at),
end=data.shape[0],
with_corrected_st=True)
spikes = extract_spikes(data, ep, mc=mc)
else:
if mc is True:
size = 0, tf, data.shape[1]
else:
size = 0, tf * data.shape[1]
spikes = sp.zeros(size)
# re-resample?
if sample_back and rsf != 1.0:
spikes = resample(spikes, spikes.shape[1] * 1. / rsf, axis=1)
st *= 1. / rsf
# return
return spikes, st
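# Usage sketch (variable names are illustrative): cut 47-sample windows around the
# detected events and align each waveform's energy peak at sample 20.
#   spks, st_aligned = get_aligned_spikes(data, spike_train, align_at=20, tf=47,
#                                         mc=True, kind='energy')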
##--- MAIN
if __name__ == '__main__':
pass
| mit | -8,748,461,185,882,833,000 | 35.028986 | 90 | 0.61786 | false |
ryansturmer/cuttlebug | cuttlebug/ui/views/runtime_view.py | 1 | 29660 | import view
import wx
import wx.gizmos as gizmos
from cuttlebug.ui.controls import DictListCtrl
from cuttlebug.util import ArtListMixin, has_icon, bidict, KeyTree, str2int
from functools import partial
import cuttlebug.gdb as gdb
import os, threading
import cuttlebug.ui.menu as menu
import cuttlebug.settings as settings
import cuttlebug.project as project
import cuttlebug.ui.controls as controls
MNU_ENABLE_BKPT = 0
MNU_DISABLE_BKPT = 1
class RuntimeTree(gizmos.TreeListCtrl, ArtListMixin, KeyTree):
def __init__(self, parent):
self.parent = parent
gizmos.TreeListCtrl.__init__(self, id=-1, parent=parent, style=wx.TR_DEFAULT_STYLE | wx.TR_FULL_ROW_HIGHLIGHT | wx.TR_HIDE_ROOT | wx.TR_HAS_BUTTONS | wx.TR_LINES_AT_ROOT | wx.TR_EDIT_LABELS)
ArtListMixin.__init__(self)
KeyTree.__init__(self)
self.SetFont(wx.Font(8, wx.FONTFAMILY_MODERN, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
self.parent = parent
self.Bind(wx.EVT_TREE_ITEM_EXPANDING, self.on_expanding)
self.Bind(wx.EVT_TREE_ITEM_GETTOOLTIP, self.on_get_tooltip)
self.Bind(wx.EVT_TREE_BEGIN_LABEL_EDIT, self.on_begin_label_edit)
self.Bind(wx.EVT_TREE_END_LABEL_EDIT, self.on_end_label_edit)
self.Bind(wx.EVT_TREE_SEL_CHANGED, self.on_select_item)
#self.Bind(wx.EVT_LEFT_DOWN, self.on_left_down)
self.Bind(wx.EVT_LEFT_DCLICK, self.on_dclick)
self.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.on_dclick)
self.Bind(wx.EVT_TREE_ITEM_RIGHT_CLICK, self.on_item_right_click)
self.Bind(wx.EVT_LIST_COL_END_DRAG, self.on_col_resize)
self.model = None
self.AddColumn('Context')
self.AddColumn('Value')
self.SetColumnEditable(1, True)
self.SetColumnAlignment(1, wx.ALIGN_RIGHT)
self.lock = threading.RLock()
self.__var_idx = 0
self.create_popup_menus()
self.clear()
self.load_positions()
def on_col_resize(self, evt):
self.save_positions()
def save_positions(self):
cols = self.GetColumnCount()
widths = [self.GetColumnWidth(i) for i in range(cols)]
settings.session_set('runtime_view_col_widths', widths)
def load_positions(self):
try:
widths = settings.session_get('runtime_view_col_widths')
cols = self.GetColumnCount()
if len(widths) != cols:
raise Exception("Mismatch of stored column widths")
for i, width in enumerate(widths):
self.SetColumnWidth(i, width)
except:
pass
def create_popup_menus(self):
self.menu_manager = menu.MenuManager()
m = self.menu_manager.menu()
m.item("Enable", func=self.on_enable_breakpoint, icon='stop.png', show=MNU_ENABLE_BKPT, hide=MNU_DISABLE_BKPT)
m.item("Disable", func=self.on_disable_breakpoint, icon='stop_disabled.png', show=MNU_DISABLE_BKPT, hide=MNU_ENABLE_BKPT)
m.item("Remove", func=self.on_remove_breakpoint, icon='ex.png')
self.menu_breakpoint_item = m
m = self.menu_manager.menu()
m.item("Enable All Breakpoints", func=self.on_enable_all_breakpoints, icon='stop.png')
m.item("Disable All Breakpoints", func=self.on_disable_all_breakpoints, icon='stop_disabled.png')
m.item("Remove All Breakpoints", func=self.on_remove_all_breakpoints, icon='ex.png')
self.menu_breakpoints = m
m = self.menu_manager.menu()
m.item("Show this Frame in Source", func=self.on_show_frame, icon='find.png')
m.step_out = m.item("Step Out\tShift+F6", func=self.on_step_out, icon='control_play_blue.png')
self.menu_frame_item = m
m = self.menu_manager.menu()
m.item("Add Watch...", func=self.on_add_watch, icon='magnifier_zoom_in.png')
self.menu_watches = m
m = self.menu_manager.menu()
m.item("Remove Watch", func=self.on_remove_watch, icon='ex.png')
self.menu_watch_item = m
def set_model(self, model):
self.model = model
self.model.Bind(gdb.EVT_GDB_UPDATE_VARS, self.on_var_update)
self.model.Bind(gdb.EVT_GDB_UPDATE_STACK, self.on_stack_update)
self.model.Bind(gdb.EVT_GDB_UPDATE_BREAKPOINTS, self.on_breakpoint_update)
self.model.Bind(gdb.EVT_GDB_UPDATE_REGISTERS, self.on_register_update)
self.model.Bind(gdb.EVT_GDB_FINISHED, self.on_gdb_finished)
self.model.Bind(gdb.EVT_GDB_STOPPED, self.on_gdb_stopped)
wx.CallAfter(self.build_sfr_tree)
def get_var_name(self):
name = "rtv_%d" % self.__var_idx
self.__var_idx += 1
return name
def on_breakpoint_update(self, evt):
wx.CallAfter(self.update_breakpoints)
def on_register_update(self, evt):
wx.CallAfter(self.update_registers, evt.data)
self.save_positions()
def on_var_update(self, evt):
names = evt.data
for name in names:
if name in self.pending_var_additions:
self.lock.acquire()
parent = self.pending_var_additions.pop(name)
self.lock.release()
wx.CallAfter(self.add_var_item, parent, name, self.model.vars[name])
if parent == self.watch_item:
self.expand(self.watch_item)
elif name in self.pending_var_updates:
self.lock.acquire()
var_item = self.pending_var_updates.pop(name)
old_name= self.get_item_data(var_item)
if old_name in self.var_registry:
self.var_registry.pop(old_name)
self.lock.release()
wx.CallAfter(self.update_var_item, var_item, name, self.model.vars[name])
elif name in self.var_registry and name in self.model.vars:
var_item = self.var_registry[name]
wx.CallAfter(self.update_var_item, var_item, name, self.model.vars[name])
else:
pass
def on_stack_update(self, evt):
#print self.model.stack.pretty()
if self.model:
if self.__check_stack():
wx.CallAfter(self.update_stack)
else:
wx.CallAfter(self.rebuild_stack)
evt.Skip()
def on_gdb_finished(self, evt):
self.clear()
self.model = None
def on_item_right_click(self, evt):
item = self.__get_evt_item(evt)
if item.is_ok():
self.select_item(item)
if self.model:
if item == self.breakpoints_item and self.get_children_count(self.breakpoints_item) > 0:
self.PopupMenu(self.menu_breakpoints.build(self), evt.GetPoint())
elif self.is_descendent(item, self.breakpoints_item):
bkpt = self.get_item_data(item)
self.breakpoint = bkpt
self.menu_manager.publish(MNU_DISABLE_BKPT) if bkpt.enabled else self.menu_manager.publish(MNU_ENABLE_BKPT)
self.PopupMenu(self.menu_breakpoint_item.build(self), evt.GetPoint())
elif self.is_frame_item(item):
frame = self.get_item_data(item)
self.frame = frame
if frame.level == 0 and len(self.frames) > 1:
self.menu_frame_item.step_out.show()
else:
self.menu_frame_item.step_out.hide()
self.PopupMenu(self.menu_frame_item.build(self), evt.GetPoint())
elif item == self.watch_item:
self.PopupMenu(self.menu_watches.build(self), evt.GetPoint())
elif self.is_descendent(item, self.watch_item):
self.selected_item = item
self.PopupMenu(self.menu_watch_item.build(self), evt.GetPoint())
evt.Skip()
def on_dclick(self, evt):
id = self.__get_evt_item(evt)
if self.model and self.is_descendent(id, self.breakpoints_item):
bkpt = self.get_item_data(id)
if bkpt.enabled:
self.model.break_disable(bkpt)
else:
self.model.break_enable(bkpt)
elif self.model and self.is_descendent(id, self.sfr_item):
reg = self.get_item_data(id)
if reg:
                old_value = reg.value
                response = None
                try:
                    response = controls.RegisterEditDialog.show(self, reg)
                except Exception, e:
                    # Keep the UI alive if the dialog fails; response stays None
                    print e
if response == wx.ID_OK:
self.model.data_evaluate_expression("%s=%s" % (reg.expression, reg.value), callback=partial(self.on_sfr_data, id,True))
else:
reg.value = old_value
elif self.model and self.is_descendent(id, self.registers_item):
name = self.get_item_data(id)
target_model = self.parent.controller.project.target
reg = target_model.find_by_name(name)
if not reg:
reg = project.CPURegister(name, name, 4)
reg.add_field(project.Field(0, 32, name))
reg.value = str2int(self.register_registry[name])
response = controls.RegisterEditDialog.show(self, reg)
if response == wx.ID_OK:
self.model.data_evaluate_expression("%s=%s" % (reg.expression, reg.value),callback=self.on_register_data)
evt.Skip()
def on_register_data(self, evt):
self.model.update()
def on_begin_label_edit(self, evt):
item = self.get_event_item(evt)
name = self.get_item_data(item)
if name in self.var_registry:
if self.is_descendent(item, self.get_frame_items()[-1]):
evt.Skip()
return
if self.is_descendent(item, self.sfr_item) or self.is_descendent(item, self.watch_item):
evt.Skip()
return
evt.Veto()
def on_select_item(self, evt):
#item = self.get_event_item(evt)
#print self.get_item_data(item)
evt.Veto()
#evt.Skip()
def on_end_label_edit(self, evt):
item = self.get_event_item(evt)
name = self.get_item_data(item)
if name in self.var_registry and name in self.model.vars:
new_var_value = evt.GetLabel()
self.model.var_assign(name, new_var_value)
if self.is_descendent(item, self.sfr_item) or self.is_descendent(item, self.watch_item):
reg = self.get_item_data(item)
if hasattr(reg, 'expression'):
self.model.data_evaluate_expression('%s=%s' % (reg.expression, evt.GetLabel()), callback=partial(self.on_sfr_data, item,True))
evt.Veto()
def on_get_tooltip(self, evt):
item = self.get_event_item(evt)
if self.model and item:
if item == self.stack_item:
evt.SetToolTip(wx.ToolTip("Stack Depth: %d frames" % self.model.stack.depth))
data = self.get_item_data(item)
if hasattr(data, 'file'): # This is a stack frame
evt.SetToolTip(wx.ToolTip("Stack frame %s() at 0x%x %s" % (data.func, data.addr, "in file %s" % data.file if data.file else "")))
elif data in self.var_registry:
evt.SetToolTip(wx.ToolTip(self.model.vars[data].expression))
def on_expanding(self, evt):
item=self.get_event_item(evt)
item_data=self.get_item_data(item)
if self.is_descendent(item, self.sfr_item):
self.update_sfr_tree(item, force_root=True, colorize=False)
return
if hasattr(item_data, 'level') and self.get_children_count(item, False) == 0: #item_data is a stack frame, and we wish to list its locals
if not self.model.running:
self.model.stack_list_arguments(frame=item_data.level, callback=partial(self.__on_listed_arguments, item))
else:
evt.Veto()
elif item_data in self.var_registry and self.get_children_count(item, False) == 0:
if not self.model.running:
self.model.var_list_children(item_data, callback=partial(self.__on_listed_children, item))
else:
evt.Veto()
evt.Skip()
def __on_listed_children(self, parent, result):
names = []
if hasattr(result, 'children'):
for child in result.children:
varname= child['child']['name']
self.lock.acquire()
self.pending_var_additions[varname] = parent
self.lock.release()
names.append(varname)
class Dummy(object): pass
evt = Dummy()
evt.data = names
wx.CallAfter(self.on_var_update, evt)
def __on_listed_locals(self, frame_item, args, result):
if result.cls != 'error':
if hasattr(result, 'locals') and frame_item.is_ok():
frame = self.get_item_data(frame_item)
if self.get_children_count(frame_item, recursive=False) == 0:
for item in args + result.locals:
varname = self.get_var_name()
self.lock.acquire()
self.pending_var_additions[varname] = frame_item
self.lock.release()
self.model.var_create(item['name'], frame=frame.level, callback=self.__on_created_var, name=varname)
def __on_listed_arguments(self, frame_item, result):
if result.cls != 'error':
if 'stack-args' in result and frame_item.is_ok():
frame = self.get_item_data(frame_item)
f = result['stack-args'][frame.level]['frame']
if int(f['level']) != frame.level:
raise ValueError("Failed Sanity Check!")
args = f['args']
self.model.stack_list_locals(frame=frame.level, callback=partial(self.__on_listed_locals, frame_item, args))
def __on_created_var(self, result):
if hasattr(result, 'name'):
self.model.var_update(result.name)
def add_var_item(self, parent, name, var):
if parent.is_ok():
var_item = self.append_item(parent, var.expression.strip('"'))
self.update_var_item(var_item, name, var)
def update_var_item(self, var_item, name, var):
if var_item.is_ok():
self.set_item_data(var_item, name)
if var.children:
self.set_item_has_children(var_item, bool(var.children))
else:
self.set_item_has_children(var_item, False)
self.set_item_text(var_item, var.data, 1)
icon_name = var.type.icon_name
if has_icon(icon_name):
self.set_item_art(var_item, icon_name)
self.lock.acquire()
self.var_registry[name] = var_item
self.lock.release()
def add_watch(self, s):
vn = self.get_var_name()
self.lock.acquire()
self.pending_var_additions[vn] = self.watch_item
self.lock.release()
self.model.var_create(s, floating=True, callback=self.__on_created_var, name=vn)
def on_add_watch(self, evt):
dlg = wx.TextEntryDialog(self, "Watch Variable", self.last_watch)
if dlg.ShowModal() == wx.ID_OK:
var = dlg.GetValue().strip()
self.add_watch('"%s"' % var) # Quoting the watch allows spaces
def on_remove_watch(self, evt):
item = self.get_item_data(self.selected_item)
self.model.var_delete(item, callback=partial(self.on_watch_deleted, self.selected_item))
def on_watch_deleted(self, watch_item, evt):
self.delete(watch_item)
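    # scrub_vars() re-creates GDB variable objects for tree items that are not
    # descendants of the last frame item in the view (or for all of them when
    # all_vars is True), so each value is re-evaluated in the frame it belongs to.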
def scrub_vars(self, all_vars=False):
#TODO use a list
to_update = {}
if self.get_frame_count() > 0:
frame_items = self.get_frame_items()
for name, var_item in self.var_registry.iteritems():
if (not self.is_descendent(var_item, frame_items[-1]) or all_vars) and name in self.model.vars:
var = self.model.vars[name]
frame = self.get_var_frame(name)
if frame:
varname = self.get_var_name()
to_update[(name, varname)] = (frame, var)
self.pending_var_updates[varname] = var_item
        for (old_name, new_name), (frame, var) in to_update.iteritems():
self.model.var_delete(old_name)
self.model.var_create(var.expression, frame=frame.level, callback=self.__on_created_var, name=new_name)
def get_frame_items(self):
return list(self.children(self.stack_item)) if self.stack_item.is_ok() else []
def get_frames(self):
return [self.get_item_data(frame_item) for frame_item in self.get_frame_items()]
def get_frame_count(self):
if self.stack_item.is_ok():
return self.get_children_count(self.stack_item, recursive=False)
else:
return 0
def is_frame_item(self, item):
return item.is_ok() and isinstance(self.get_item_data(item), gdb.GDBStackFrame)
def add_frame_item(self, frame):
item = self.append_item(self.stack_item, frame.func + "( )")
self.update_frame_item(item, frame)
def update_frame_item(self, frame_item, frame):
self.set_item_data(frame_item, frame)
self.set_item_art(frame_item, 'frame.png' if frame.level != 0 else 'frame_active.png')
self.set_item_has_children(frame_item, True)
self.set_item_bold(frame_item, True)
self.set_item_data(frame_item, frame)
def on_show_frame(self, evt):
if self.model and self.frame:
self.GetParent().controller.goto(self.frame.file, self.frame.line)
self.frame = None
def __check_stack(self):
if self.model:
# Our list of frames is reversed from the models, because that's how we view it.
for model_frame, view_frame in zip(reversed(self.model.stack), self.get_frames()):
if model_frame.key != view_frame.key: return False
return True
def get_var_frame(self, name):
frame = None
item = self.var_registry[name]
frames = self.get_frames()
while frame not in frames:
item = self.get_parent(item)
if item.is_ok():
frame = self.get_item_data(item)
else:
return None
return frame
def on_step_out(self, evt):
self.parent.controller.step_out()
def clear_stack(self):
n = self.get_frame_count()
for i in range(n):
self.pop_stack_frame()
def rebuild_stack(self):
self.clear_stack()
self.update_stack()
def update_stack(self):
stack = self.model.stack
stack_changed=False
# If the frame count in the view is > the frame count in the model, pop off until they match (tossing frames that no longer exist)
n = self.get_frame_count()-len(stack)
if n > 0:
for i in range(n):
self.pop_stack_frame()
stack_changed = True
for frame_item, frame in zip(self.get_frame_items(), reversed(self.model.stack)):
self.update_frame_item(frame_item, frame)
# Otherwise add frames until we're all in sync
idx = self.get_frame_count()+1
while self.get_frame_count() < len(self.model.stack):
frame = stack[len(stack)-idx]
self.add_frame_item(frame)
idx += 1
self.scrub_vars(all_vars=stack_changed)
def pop_stack_frame(self):
frame_item = self.get_frame_items()[-1]
if frame_item.is_ok():
for child in self.walk(frame_item):
name = self.get_item_data(child)
if name in self.var_registry:
self.var_registry.pop(name)
self.model.var_delete(name)
self.delete(frame_item)
else:
print "Can't remove frame. Frame item is NOT ok."
def update_breakpoints(self):
if self.model and self.breakpoints_item.is_ok():
breakpoints = self.model.breakpoints
self.delete_children(self.breakpoints_item)
for bp in breakpoints:
if bp.fullname:
name = os.path.split(os.path.abspath(bp.fullname))[1]
else:
name = '0x%x' % bp.address
item = self.append_item(self.breakpoints_item, name)
self.set_item_data(item, bp)
self.set_item_text(item, str(bp.line), 1)
self.set_item_art(item, 'stop.png' if bp.enabled else 'stop_disabled.png')
def on_enable_breakpoint(self, evt):
if self.breakpoint and self.model:
self.model.break_enable(self.breakpoint)
self.breakpoint = None
def on_disable_breakpoint(self, evt):
if self.breakpoint and self.model:
self.model.break_disable(self.breakpoint)
self.breakpoint = None
def on_remove_breakpoint(self, evt):
if self.breakpoint and self.model:
self.model.break_delete(self.breakpoint)
self.breakpoint = None
def on_enable_all_breakpoints(self, evt):
if self.model:
for bkpt in self.model.breakpoints:
self.model.break_enable(bkpt)
def on_disable_all_breakpoints(self, evt):
if self.model:
for bkpt in self.model.breakpoints:
self.model.break_disable(bkpt)
def on_remove_all_breakpoints(self, evt):
if self.model:
for bkpt in self.model.breakpoints:
self.model.break_delete(bkpt)
def update_registers(self, names):
'''
if self.model and self.registers_item.is_ok():
registers = self.model.registers
if len(registers) != self.get_children_count(self.registers_item, recursive=False):
self.delete_children(self.registers_item)
for key, value in registers.iteritems():
item = self.append_item(self.registers_item, key)
self.set_item_text(item, value, 1)
self.set_item_data(item, key)
self.register_registry[key] = value
else:
for child in self.children(self.registers_item):
self.set_item_text_colour(child, wx.BLACK)
for name in names:
item = self.register_registry[name]
print item
self.set_item_text(item, registers[name], 1)
self.set_item_text_colour(item, wx.RED)
'''
def build_sfr_tree(self):
if not self.parent.controller.project:
return
self.delete_children(self.sfr_item)
target_model = self.parent.controller.project.target
def walk(self, tree_item, item):
if isinstance(item, project.Group):
group_item = self.append_item(tree_item, item.name)
for child in item.items:
walk(self, group_item, child)
elif isinstance(item, project.Peripheral):
peripheral_item = self.append_item(tree_item, item.name)
for child in item.registers:
walk(self, peripheral_item, child)
elif isinstance(item, project.SpecialFunctionRegister):
sfr_item = self.append_item(tree_item, item.fullname)
self.set_item_data(sfr_item, item)
tree_item = self.sfr_item
for item in target_model.items:
walk(self, tree_item, item)
def on_gdb_stopped(self, evt):
self.update_sfr_tree(self.sfr_item)
evt.Skip()
def update_sfr_tree(self, sfr_item, force_root=False, colorize=True):
if force_root:
items = self.children(sfr_item)
else:
items = [sfr_item]
for i in items:
for tree_item in self.walk_expanded(i, False):
item = self.get_item_data(tree_item)
if hasattr(item, 'expression'):
self.model.data_evaluate_expression(item.expression, callback=partial(self.on_sfr_data, tree_item, colorize))
def on_sfr_data(self, item, colorize, data):
if data.cls == "done" and hasattr(data, 'value'):
wx.CallAfter(self.update_sfr_value, item, data.value, colorize)
def update_sfr_value(self, item, value, colorize=True):
current_value = self.get_item_text(item, 1)
try:
reg = self.get_item_data(item)
reg.value = int(value)
text = "0x%08x" % int(value)
except:
text = value
self.set_item_text(item, text, 1)
if current_value != text and colorize:
self.set_item_text_colour(item, wx.RED)
else:
self.set_item_text_colour(item, wx.BLACK)
def update(self):
pass
def clear(self):
self.last_watch = ""
self.DeleteAllItems()
self.root_item = self.add_root('root')
self.stack_item = self.append_item(self.root_item,'Call Stack')
self.breakpoints_item = self.append_item(self.root_item, 'Breakpoints')
self.registers_item = self.append_item(self.root_item, 'CPU Registers')
self.watch_item = self.append_item(self.root_item, 'Watch')
self.sfr_item = self.append_item(self.root_item, 'HW Registers')
self.set_item_art(self.registers_item, 'chip.png')
self.set_item_art(self.stack_item, 'stack.png')
self.set_item_art(self.breakpoints_item, 'breakpoint.png')
self.set_item_art(self.watch_item, 'magnifier.png')
self.set_item_art(self.sfr_item, 'application_view_list.png')
self.lock.acquire()
self.frames = [] # Frame keys to tree items
self.var_registry = bidict() # Var names to tree items
self.pending_var_additions = {}
self.pending_var_updates = {}
self.register_registry = bidict()
self.lock.release()
self.breakpoint = None
def __get_evt_item(self, evt):
item = evt.GetItem()
if item and item.IsOk():
try:
return self.get_key(item)
except:
return None
pt = evt.GetPosition()
items = self.HitTest(pt)
try:
return self.get_key(items[0])
except:
return None
def set_item_art(self, item, name, style=wx.TreeItemIcon_Normal):
if name not in self.art:
self.add_art(name)
if item.is_ok():
self.set_item_image(item, self.art[name], style)
else:
print "Tried to set art for item that's NOT ok?"
class RuntimeView(view.View):
def __init__(self, *args, **kwargs):
super(RuntimeView, self).__init__(*args, **kwargs)
self.tree = RuntimeTree(self)
# self.tree.Bind(wx.EVT_KEY_DOWN, self.tree.on_key_down)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.tree, 1, wx.EXPAND)
self.SetSizer(sizer)
def set_model(self, model):
self.tree.set_model(model)
def update(self, stack):
self.tree.update()
def add_watch(self, s):
self.tree.add_watch(s)
class GDBDebugView(view.View):
def __init__(self, *args, **kwargs):
super(GDBDebugView, self).__init__(*args, **kwargs)
self.list = DictListCtrl(self, color_changes=False)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.list, 1, wx.EXPAND)
self.SetSizer(sizer)
def set_model(self, model):
self.model = model
print "Binding the var update"
self.model.Bind(gdb.EVT_GDB_UPDATE_VARS, self.on_var_update)
def on_var_update(self, evt):
for name in evt.data:
if name in self.model.vars:
self.list[name] = self.model.vars[name].data
else:
del self.list[name]
evt.Skip() | mit | 3,711,950,667,816,855,000 | 41.050798 | 199 | 0.54973 | false |
caveatemptors-2015/special-garbanzo | project/portfolioX/migrations/0001_initial.py | 1 | 2621 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-09 23:49
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Holding',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.IntegerField()),
('avg_price', models.DecimalField(decimal_places=2, max_digits=6)),
],
),
migrations.CreateModel(
name='Portfolio',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('portfolio_name', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Security',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ticker', models.CharField(max_length=100)),
('company_name', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Transaction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.IntegerField()),
('txn_date', models.DateTimeField(auto_now_add=True)),
('price', models.DecimalField(decimal_places=2, max_digits=6)),
('portfolio', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='portfolioX.Portfolio')),
('symbol', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='portfolioX.Security')),
],
),
migrations.AddField(
model_name='holding',
name='portfolio',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='portfolioX.Portfolio'),
),
migrations.AddField(
model_name='holding',
name='security',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='portfolioX.Security'),
),
]
| mit | -8,121,466,650,044,240,000 | 39.953125 | 121 | 0.581457 | false |
polyaxon/polyaxon | platform/coredb/tests/test_create_e2e/test_create_jobs_e2e.py | 1 | 3730 | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.test import TestCase
from coredb import operations
from coredb.factories.projects import ProjectFactory
from coredb.factories.users import UserFactory
from coredb.models.runs import Run
from polyaxon.polyaxonfile import CompiledOperationSpecification, OperationSpecification
from polyaxon.polyflow import V1RunKind
from polycommon.test_cases.fixtures import get_fxt_job, get_fxt_job_with_inputs
class TestCreateJobs(TestCase):
def setUp(self):
super().setUp()
self.user = UserFactory()
self.project = ProjectFactory()
def test_create_run_with_job_spec(self):
count = Run.objects.count()
config_dict = get_fxt_job()
spec = OperationSpecification.read(values=config_dict)
run = operations.init_and_save_run(
project_id=self.project.id, user_id=self.user.id, op_spec=spec
)
assert Run.objects.count() == count + 1
assert run.pending is None
assert run.kind == V1RunKind.JOB
assert run.name == "foo"
assert run.description == "a description"
assert set(run.tags) == {"tag1", "tag2"}
# Check compiled operation passes
compiled_operation = CompiledOperationSpecification.read(run.content)
compiled_operation = CompiledOperationSpecification.apply_params(
compiled_operation
)
CompiledOperationSpecification.apply_runtime_contexts(compiled_operation)
# Check job
job_spec = CompiledOperationSpecification.read(run.content)
assert job_spec.run.container.image == "test"
job_spec = CompiledOperationSpecification.apply_operation_contexts(job_spec)
assert job_spec.run.container.image == "test"
def test_create_run_with_templated_job_spec(self):
count = Run.objects.count()
config_dict = get_fxt_job_with_inputs()
spec = OperationSpecification.read(values=config_dict)
run = operations.init_and_save_run(
project_id=self.project.id, user_id=self.user.id, op_spec=spec
)
assert Run.objects.count() == count + 1
assert run.pending is None
assert run.kind == V1RunKind.JOB
assert run.name == "foo"
assert run.description == "a description"
assert set(run.tags) == {"tag1", "tag2"} # From template
compiled_operation = CompiledOperationSpecification.read(run.content)
compiled_operation = CompiledOperationSpecification.apply_params(
compiled_operation, params=spec.params
)
compiled_operation = CompiledOperationSpecification.apply_operation_contexts(
compiled_operation
)
CompiledOperationSpecification.apply_runtime_contexts(compiled_operation)
run.content = compiled_operation.to_dict(dump=True)
run.save(update_fields=["content"])
job_spec = CompiledOperationSpecification.read(run.content)
assert job_spec.run.container.image == "{{ image }}"
job_spec = CompiledOperationSpecification.apply_runtime_contexts(job_spec)
assert job_spec.run.container.image == "foo/bar"
| apache-2.0 | -2,776,431,568,413,844,500 | 42.882353 | 88 | 0.694102 | false |
ohsu-qin/qipipe | qipipe/helpers/logging.py | 1 | 5667 | # Absolute import (the default in a future Python release) resolves
# the logging import as the Python standard logging module rather
# than this module of the same name.
from __future__ import absolute_import
import os
import sys
import tempfile
from datetime import datetime
import logging
import qiutil
NIPYPE_LOG_DIR_ENV_VAR = 'NIPYPE_LOG_DIR'
"""The environment variable used by Nipype to set the log directory."""
def configure(**opts):
"""
Configures the logger as follows:
- If there is a *log* option,
then the logger is a conventional ``qiutil.logging`` logger
which writes to the given log file.
- Otherwise, the logger delegates to a mock logger that
writes to stdout.
.. Note:: In a cluster environment, Nipype kills the dispatched job
log config. Logging falls back to the default. For this reason,
the default mock logger level is ``DEBUG`` rather than ``INFO``.
The dispatched node's log is the stdout captured in the file
*work*\ ``/batch/``\ *node_name*\ ``.o``\ *node_id*, where
      *work* is the execution work directory.
:param opts: the ``qiutil.command.configure_log`` options
:return: the logger factory
"""
# The log file option.
log_file_opt = opts.get('log')
# Set the Nipype log directory environment variable before importing
# any nipype module. The code below works around the following Nipype
# bug:
# * Nipype requires a log directory. If the Nipype log directory is
# set to /dev/null, then Nipype raises an error. The work-around
# is to set the NIPYPE_LOG_DIR environment variable to a new temp
# directory.
log_dir = None
if log_file_opt:
# Configure the qiutil logger for the auxiliary qi* modules.
# The non-Nipype log messages will be ignored in a cluster
# job context since Nipype stomps on the Python logger, but
# we will go through the motions anyway.
qiutil.command.configure_log('qixnat', 'qidicom',
'qiutil', **opts)
log_file = os.path.abspath(log_file_opt)
if log_file == '/dev/null':
# Work around the Nipype bug described above.
log_dir = tempfile.mkdtemp(prefix='qipipe_')
else:
log_dir = os.path.dirname(log_file)
# Make the log file parent directory, if necessary.
if not os.path.exists(log_dir):
os.makedirs(log_dir)
# Nipype always needs a log directory to work around the
# following Nipype bug:
# * If the Nipype log directory is not set, then Nipype still
# logs to the default log file ./log/pypeline.log, but also
# logs to stdout, which stomps on the qipipe logging.
if not log_dir:
log_dir = '/'.join([os.getcwd(), 'log'])
# Set the Nipype log directory environment variable.
os.environ[NIPYPE_LOG_DIR_ENV_VAR] = log_dir
# Print qipipe log messages to stdout to work around the
# Nipype bug described in the logger method apidoc.
mock_log_opts = {}
level = opts.get('log_level')
if level:
mock_log_opts['level'] = level
factory = MockLoggerFactory(**mock_log_opts).logger
# Set the qipipe logger factory.
logger._factory = factory
# Print a log message.
log_dest = log_file_opt if log_file_opt else 'stdout'
log_level = opts.get('log_level', 'DEBUG')
factory(__name__).info("Logging qipipe to %s with level %s." %
(log_dest, log_level))
factory(__name__).info("Logging nipype to the %s directory." %
log_dir)
return factory
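# A minimal usage sketch (an assumption, not part of the original module):
#   factory = configure(log='/tmp/qipipe.log', log_level='INFO')
#   factory(__name__).info("logging is ready")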
def logger(name):
"""
This method overrides ``qiutil.logging.logger`` to work
around the following Nipype bug:
* Nipype stomps on any other application's logging.
The work-around is to mock a "logger" that writes
to stdout.
:param name: the caller's context ``__name__``
:return: the logger facade
"""
# Make a default logger factory on demand.
if not hasattr(logger, '_factory'):
logger._factory = configure()
return logger._factory(name)
class MockLoggerFactory(object):
def __init__(self, **opts):
self.writer = MockLogWriter(**opts)
def logger(self, name):
return MockLogger(self.writer, name)
class MockLogger(object):
def __init__(self, writer, name):
self.writer = writer
self.name = name
@property
def level(self):
return self.writer.level
def info(self, message):
self.writer.info(self.name, message)
def error(self, message):
self.writer.error(self.name, message)
def warn(self, message):
self.writer.warn(self.name, message)
def debug(self, message):
self.writer.debug(self.name, message)
class MockLogWriter(object):
def __init__(self, level=None):
if not level:
level = 'DEBUG'
self.level = getattr(logging, level)
def info(self, name, message):
if self.level <= logging.INFO:
self._write(name, 'INFO', message)
def debug(self, name, message):
if self.level <= logging.DEBUG:
self._write(name, 'DEBUG', message)
def warn(self, name, message):
if self.level <= logging.WARN:
self._write(name, 'WARN', message)
def error(self, name, message):
if self.level <= logging.ERROR:
self._write(name, 'ERROR', message)
def _write(self, name, level, message):
dt = datetime.now().strftime("%m/%d/%Y %H:%M:%S")
print "%s %s %s %s" % (dt, name, level, message)
sys.stdout.flush()
| bsd-2-clause | 5,169,502,066,354,706,000 | 32.934132 | 73 | 0.627845 | false |
FEniCS/dolfin | test/unit/python/function/test_constant.py | 1 | 3337 | #!/usr/bin/env py.test
"""Unit tests for the function library"""
# Copyright (C) 2007 Anders Logg
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# First added: 2007-05-24
# Last changed: 2011-01-28
import pytest
from numpy import array
from dolfin import *
from ufl import UFLException
def test_name_argument():
u = Constant(1.0)
v = Constant(1.0, name="v")
assert u.name() == "f_%d" % u.count()
assert v.name() == "v"
assert str(v) == "v"
def testConstantInit():
c0 = Constant(1.)
c1 = Constant([2, 3], interval)
c2 = Constant([[2, 3], [3, 4]], triangle)
c3 = Constant(array([2, 3]), tetrahedron)
c4 = Constant([[2, 3], [3, 4]], quadrilateral)
c5 = Constant(array([2, 3]), hexahedron)
# FIXME:
assert c0.cell() is None
assert c1.cell() == interval
assert c2.cell() == triangle
assert c3.cell() == tetrahedron
assert c4.cell() == quadrilateral
assert c5.cell() == hexahedron
assert c0.ufl_shape == ()
assert c1.ufl_shape == (2,)
assert c2.ufl_shape == (2, 2)
assert c3.ufl_shape == (2,)
assert c4.ufl_shape == (2, 2)
assert c5.ufl_shape == (2,)
def testGrad():
import ufl
zero = ufl.constantvalue.Zero((2, 3))
c0 = Constant(1.)
c3 = Constant(array([2, 3]), tetrahedron)
def gradient(c):
return grad(c)
with pytest.raises(UFLException):
grad(c0)
assert zero == gradient(c3)
@pytest.mark.parametrize('mesh_factory', [(UnitCubeMesh, (8, 8, 8)), (UnitHexMesh.create, (8, 8, 8))])
def test_compute_vertex_values(mesh_factory):
from numpy import zeros, all, array
func, args = mesh_factory
mesh = func(*args)
e0 = Constant(1)
e1 = Constant((1, 2, 3))
# e0_values = zeros(mesh.num_vertices(),dtype='d')
# e1_values = zeros(mesh.num_vertices()*3,dtype='d')
e0_values = e0.compute_vertex_values(mesh)
e1_values = e1.compute_vertex_values(mesh)
assert all(e0_values == 1)
assert all(e1_values[:mesh.num_vertices()] == 1)
assert all(e1_values[mesh.num_vertices():mesh.num_vertices()*2] == 2)
assert all(e1_values[mesh.num_vertices()*2:mesh.num_vertices()*3] == 3)
def test_values():
import numpy as np
c0 = Constant(1.)
c0_vals = c0.values()
assert np.all(c0_vals == np.array([1.], dtype=np.double))
c1 = Constant((1., 2.))
c1_vals = c1.values()
assert np.all(c1_vals == np.array([1., 2.], dtype=np.double))
c2 = Constant((1., 2., 3.))
c2_vals = c2.values()
assert np.all(c2_vals == np.array([1., 2., 3.], dtype=np.double))
def test_str():
c0 = Constant(1.)
c0.str(False)
c0.str(True)
c1 = Constant((1., 2., 3.))
c1.str(False)
c1.str(True)
| lgpl-3.0 | -405,382,258,535,716,400 | 26.808333 | 102 | 0.628409 | false |
briend/mypaint | lib/palette.py | 1 | 31230 | # This file is part of MyPaint.
# Copyright (C) 2013-2018 by the MyPaint Development Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Palette: user-defined lists of color swatches"""
# TODO: Make palettes part of the model, save as part of ORA documents.
## Imports
from __future__ import division, print_function
import re
from copy import copy
import logging
from lib.helpers import clamp
from lib.observable import event
from lib.color import RGBColor, CAM16Color, color_diff
from lib.color import YCbCrColor
from lib.pycompat import unicode
from lib.pycompat import xrange
from lib.pycompat import PY3
from io import open
logger = logging.getLogger(__name__)
## Class and function defs
class Palette (object):
"""A flat list of color swatches, compatible with the GIMP
As a (sideways-compatible) extension to the GIMP's format, MyPaint supports
empty slots in the palette. These slots are represented by pure black
swatches with the name ``__NONE__``.
Palette objects expose the position within the palette of a current color
match, which can be declared to be approximate or exact. This is used for
highlighting the user concept of the "current color" in the GUI.
Palette objects can be serialized in the GIMP's file format (the regular
`unicode()` function on a Palette will do this too), or converted to and
from a simpler JSON-ready representation for storing in the MyPaint prefs.
Support for loading and saving via modal dialogs is defined here too.
"""
## Class-level constants
_EMPTY_SLOT_ITEM = RGBColor(-1, -1, -1)
_EMPTY_SLOT_NAME = "__NONE__"
## Construction, loading and saving
def __init__(self, filehandle=None, filename=None, colors=None):
"""Instantiate, from a file or a sequence of colors
:param filehandle: Filehandle to load.
:param filename: Name of a file to load.
:param colors: Iterable sequence of colors (lib.color.UIColor).
The constructor arguments are mutually exclusive. With no args
specified, you get an empty palette.
>>> Palette()
<Palette colors=0, columns=0, name=None>
Palettes can be generated from interpolations, which is handy for
testing, at least.
>>> cols = RGBColor(1,1,0).interpolate(RGBColor(1,0,1), 10)
>>> Palette(colors=cols)
<Palette colors=10, columns=0, name=None>
"""
super(Palette, self).__init__()
#: Number of columns. 0 means "natural flow"
self._columns = 0
#: List of named colors
self._colors = []
#: Name of the palette as a Unicode string, or None
self._name = None
#: Current position in the palette. None=no match; integer=index.
self._match_position = None
#: True if the current match is approximate
self._match_is_approx = False
#: Set to true to keep position during palette shifting
self.keep_position = False
# Clear and initialize
self.clear(silent=True)
if colors:
for col in colors:
col = self._copy_color_in(col)
self._colors.append(col)
elif filehandle:
self.load(filehandle, silent=True)
elif filename:
with open(filename, "r", encoding="utf-8", errors="replace") as fp:
self.load(fp, silent=True)
def clear(self, silent=False):
"""Resets the palette to its initial state.
>>> grey16 = RGBColor(1,1,1).interpolate(RGBColor(0,0,0), 16)
>>> p = Palette(colors=grey16)
>>> p.name = "Greyscale"
>>> p.columns = 3
>>> p # doctest: +ELLIPSIS
<Palette colors=16, columns=3, name=...'Greyscale'>
>>> p.clear()
>>> p
<Palette colors=0, columns=0, name=None>
Fires the `info_changed()`, `sequence_changed()`, and `match_changed()`
events, unless the `silent` parameter tests true.
"""
self._colors = []
self._columns = 0
self._name = None
self._match_position = None
self._match_is_approx = False
if not silent:
self.info_changed()
self.sequence_changed()
self.match_changed()
def load(self, filehandle, silent=False):
"""Load contents from a file handle containing a GIMP palette.
:param filehandle: File-like object (.readline, line iteration)
:param bool silent: If true, don't emit any events.
>>> pal = Palette()
>>> with open("palettes/MyPaint_Default.gpl", "r") as fp:
... pal.load(fp)
>>> len(pal) > 1
True
If the file format is incorrect, a RuntimeError will be raised.
"""
comment_line_re = re.compile(r'^#')
field_line_re = re.compile(r'^(\w+)\s*:\s*(.*)$')
color_line_re = re.compile(r'^(\d+)\s+(\d+)\s+(\d+)\s*(?:\b(.*))$')
fp = filehandle
self.clear(silent=True) # method fires events itself
line = fp.readline()
if line.strip() != "GIMP Palette":
raise RuntimeError("Not a valid GIMP Palette")
header_done = False
line_num = 0
for line in fp:
line = line.strip()
line_num += 1
if line == '':
continue
if comment_line_re.match(line):
continue
if not header_done:
match = field_line_re.match(line)
if match:
key, value = match.groups()
key = key.lower()
if key == 'name':
self._name = value.strip()
elif key == 'columns':
self._columns = int(value)
else:
logger.warning("Unknown 'key:value' pair %r", line)
continue
else:
header_done = True
match = color_line_re.match(line)
if not match:
logger.warning("Expected 'R G B [Name]', not %r", line)
continue
r, g, b, col_name = match.groups()
col_name = col_name.strip()
r = clamp(int(r), 0, 0xff) / 0xff
g = clamp(int(g), 0, 0xff) / 0xff
b = clamp(int(b), 0, 0xff) / 0xff
if r == g == b == 0 and col_name == self._EMPTY_SLOT_NAME:
self.append(None)
else:
col = CAM16Color(color=RGBColor(r, g, b))
col.__name = col_name
self._colors.append(col)
if not silent:
self.info_changed()
self.sequence_changed()
self.match_changed()
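    # For reference, the .gpl layout handled by load()/save() looks like this
    # (example values assumed):
    #   GIMP Palette
    #   Name: Example
    #   Columns: 3
    #   #
    #   255 0 0 red
    #   0 0 0 __NONE__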
def save(self, filehandle):
"""Saves the palette to an open file handle.
:param filehandle: File-like object (.write suffices)
>>> from lib.pycompat import PY3
>>> if PY3:
... from io import StringIO
... else:
... from cStringIO import StringIO
>>> fp = StringIO()
>>> cols = RGBColor(1,.7,0).interpolate(RGBColor(.1,.1,.5), 16)
>>> pal = Palette(colors=cols)
>>> pal.save(fp)
>>> fp.getvalue() == unicode(pal)
True
The file handle is not flushed, and is left open after the
write.
>>> fp.flush()
>>> fp.close()
"""
filehandle.write(unicode(self))
def update(self, other):
"""Updates all details of this palette from another palette.
Fires the `info_changed()`, `sequence_changed()`, and `match_changed()`
events.
"""
self.clear(silent=True)
for col in other._colors:
col = self._copy_color_in(col)
self._colors.append(col)
self._name = other._name
self._columns = other._columns
self.info_changed()
self.sequence_changed()
self.match_changed()
## Palette size and metadata
def get_columns(self):
"""Get the number of columns (0 means unspecified)."""
return self._columns
def set_columns(self, n):
"""Set the number of columns (0 means unspecified)."""
self._columns = int(n)
self.info_changed()
def get_name(self):
"""Gets the palette's name."""
return self._name
def set_name(self, name):
"""Sets the palette's name."""
if name is not None:
name = unicode(name)
self._name = name
self.info_changed()
def __bool__(self):
"""Palettes never test false, regardless of their length.
>>> p = Palette()
>>> bool(p)
True
"""
return True
def __len__(self):
"""Palette length is the number of color slots within it."""
return len(self._colors)
## PY2/PY3 compat
__nonzero__ = __bool__
## Match position marker
def get_match_position(self):
"""Return the position of the current match (int or None)"""
return self._match_position
def set_match_position(self, i):
"""Sets the position of the current match (int or None)
Fires `match_changed()` if the value is changed."""
if i is not None:
i = int(i)
if i < 0 or i >= len(self):
i = None
if i != self._match_position:
self._match_position = i
self.match_changed()
def get_match_is_approx(self):
"""Returns whether the current match is approximate."""
return self._match_is_approx
def set_match_is_approx(self, approx):
"""Sets whether the current match is approximate
Fires match_changed() if the boolean value changes."""
approx = bool(approx)
if approx != self._match_is_approx:
self._match_is_approx = approx
self.match_changed()
def match_color(self, col, exact=False, order=None):
"""Moves the match position to the color closest to the argument.
:param col: The color to match.
:type col: lib.color.UIColor
:param exact: Only consider exact matches, and not near-exact or
approximate matches.
:type exact: bool
:param order: a search order to use. Default is outwards from the
match position, or in order if the match is unset.
:type order: sequence or iterator of integer color indices.
:returns: Whether the match succeeded.
:rtype: bool
By default, the matching algorithm favours exact or near-exact matches
which are close to the current position. If the current position is
unset, this search starts at 0. If there are no exact or near-exact
matches, a looser approximate match will be used, again favouring
matches with nearby positions.
>>> red2blue = RGBColor(1, 0, 0).interpolate(RGBColor(0, 1, 1), 5)
>>> p = Palette(colors=red2blue)
>>> p.match_color(RGBColor(0.45, 0.45, 0.45))
True
>>> p.match_position
2
>>> p.match_is_approx
True
>>> p[p.match_position]
<CAM16, v=53.0488, s=2.4757, h=209.5203, illuminant=95.0456, 100.0000, 108.9058>
>>> p.match_color(RGBColor(0.5, 0.5, 0.5))
True
>>> p.match_is_approx
False
>>> p.match_color(RGBColor(0.45, 0.45, 0.45), exact=True)
False
>>> p.match_color(RGBColor(0.5, 0.5, 0.5), exact=True)
True
Fires the ``match_changed()`` event when changes happen.
"""
if self.keep_position:
return False
if order is not None:
search_order = order
elif self.match_position is not None:
search_order = _outwards_from(len(self), self.match_position)
else:
search_order = xrange(len(self))
bestmatch_i = None
bestmatch_d = None
is_approx = True
if not isinstance(col, CAM16Color):
col = CAM16Color(color=col)
for i in search_order:
c = self._colors[i]
if c is self._EMPTY_SLOT_ITEM:
continue
# Closest exact or near-exact match by index distance (according to
# the search_order). Considering near-exact matches as equivalent
# to exact matches improves the feel of PaletteNext and
# PalettePrev.
if exact:
if c == col:
bestmatch_i = i
is_approx = False
break
else:
d = color_diff(col, c)
if c == col or d < 1.0:
bestmatch_i = i
is_approx = False
break
if bestmatch_d is None or d < bestmatch_d:
bestmatch_i = i
bestmatch_d = d
# If there are no exact or near-exact matches, choose the most similar
# color anywhere in the palette.
if bestmatch_i is not None:
self._match_position = bestmatch_i
self._match_is_approx = is_approx
self.match_changed()
return True
return False
def move_match_position(self, direction, refcol, group=False):
"""Move the match position in steps, matching first if needed.
:param direction: Direction for moving, positive or negative
:type direction: int:, ``1`` or ``-1``
:param refcol: Reference color, used for initial matching when needed.
:type refcol: lib.color.UIColor
        :param group: Whether to loop over groups separated by blank spaces
:type group: bool
:returns: the color newly matched, if the match position has changed
:rtype: lib.color.UIColor, or None
Invoking this method when there's no current match position will select
the color that's closest to the reference color, just like
`match_color()`
>>> greys = RGBColor(1,1,1).interpolate(RGBColor(0,0,0), 16)
>>> pal = Palette(colors=greys)
>>> refcol = RGBColor(0.5, 0.55, 0.45)
>>> pal.move_match_position(-1, refcol)
>>> pal.match_position
7
>>> pal.match_is_approx
True
When the current match is defined, but only an approximate match, this
method converts it to an exact match but does not change its position.
>>> pal.move_match_position(-1, refcol) is None
False
>>> pal.match_position
7
>>> pal.match_is_approx
False
When the match is initially exact, its position is stepped in the
direction indicated, either by +1 or -1. Blank palette entries are
skipped.
>>> pal.move_match_position(-1, refcol) is None
False
>>> pal.match_position
6
>>> pal.match_is_approx
False
        Fires ``match_changed()`` when the match position or its exactness
        changes. The return value is the newly matched color whenever this
method produces a new exact match.
"""
# Normalize direction
direction = int(direction)
if direction < 0:
direction = -1
elif direction > 0:
direction = 1
else:
return None
# If nothing is selected, pick the closest match without changing
# the managed color.
old_pos = self._match_position
if old_pos is None:
self.match_color(refcol)
return None
# Otherwise, refine the match, or step it in the requested direction.
new_pos = None
if self._match_is_approx:
# Make an existing approximate match concrete.
new_pos = old_pos
else:
# Index reflects a close or identical match.
# Seek in the requested direction, skipping empty entries.
# Loop back around if to other end of array if needed.
# If group=True, stop within a segment surrounded by blanks
pos = old_pos
assert direction != 0
pos += direction
if group is False:
looped = 0
while looped < 2:
if pos == len(self._colors) and direction == 1:
pos = 0
looped += 1
if pos == -1 and direction == -1:
pos = len(self._colors) - 1
looped += 1
if self._colors[pos] is not self._EMPTY_SLOT_ITEM:
new_pos = pos
break
pos += direction
else:
if ((pos == len(self._colors) and direction == 1)
or (pos == -1 and direction == -1)):
return None
elif self._colors[pos] is not self._EMPTY_SLOT_ITEM:
new_pos = pos
# Update the palette index and the managed color.
result = None
if new_pos is not None:
col = self._colors[new_pos]
if col is not self._EMPTY_SLOT_ITEM:
result = self._copy_color_out(col)
self.set_match_position(new_pos)
self.set_match_is_approx(False)
return result
## Property-style access for setters and getters
columns = property(get_columns, set_columns)
name = property(get_name, set_name)
match_position = property(get_match_position, set_match_position)
match_is_approx = property(get_match_is_approx, set_match_is_approx)
## Color access
def _copy_color_out(self, col):
if col is self._EMPTY_SLOT_ITEM:
return None
result = col
result.__name = col.__name
return result
def _copy_color_in(self, col, name=None):
if col is self._EMPTY_SLOT_ITEM or col is None:
result = self._EMPTY_SLOT_ITEM
else:
if name is None:
try:
name = col.__name
except AttributeError:
pass
if name is not None:
name = unicode(name)
if not isinstance(col, CAM16Color):
result = CAM16Color(color=col)
else:
result = col
result.__name = name
return result
def append(self, col, name=None, unique=False, match=False):
"""Appends a color, optionally setting a name for it.
:param col: The color to append.
:param name: Name of the color to insert.
:param unique: If true, don't append if the color already exists
in the palette. Only exact matches count.
:param match: If true, set the match position to the
appropriate palette entry.
"""
col = self._copy_color_in(col, name)
if unique:
# Find the final exact match, if one is present
for i in xrange(len(self._colors)-1, -1, -1):
if col == self._colors[i]:
if match:
self._match_position = i
self._match_is_approx = False
self.match_changed()
return
# Append new color, and select it if requested
end_i = len(self._colors)
self._colors.append(col)
if match:
self._match_position = end_i
self._match_is_approx = False
self.match_changed()
self.sequence_changed()
def insert(self, i, col, name=None):
"""Inserts a color, setting an optional name for it.
:param i: Target index. `None` indicates appending a color.
:param col: Color to insert. `None` indicates an empty slot.
:param name: Name of the color to insert.
>>> grey16 = RGBColor(1, 1, 1).interpolate(RGBColor(0, 0, 0), 16)
>>> p = Palette(colors=grey16)
>>> p.insert(5, RGBColor(1, 0, 0), name="red")
>>> p
<Palette colors=17, columns=0, name=None>
>>> p[5]
<CAM16, v=55.9620, s=104.0363, h=27.4858, illuminant=95.0456, 100.0000, 108.9058>
Fires the `sequence_changed()` event. If the match position changes as
a result, `match_changed()` is fired too.
"""
col = self._copy_color_in(col, name)
if i is None:
self._colors.append(col)
else:
self._colors.insert(i, col)
if self.match_position is not None:
if self.match_position >= i:
self.match_position += 1
self.sequence_changed()
def reposition(self, src_i, targ_i):
"""Moves a color, or copies it to empty slots, or moves it the end.
:param src_i: Source color index.
:param targ_i: Source color index, or None to indicate the end.
This operation performs a copy if the target is an empty slot, and a
remove followed by an insert if the target slot contains a color.
>>> grey16 = RGBColor(1, 1, 1).interpolate(RGBColor(0, 0, 0), 16)
>>> p = Palette(colors=grey16)
>>> p[5] = None # creates an empty slot
>>> p.match_position = 8
>>> p[5] == p[0]
False
>>> p.reposition(0, 5)
>>> p[5] == p[0]
True
>>> p.match_position
8
>>> p[5] = RGBColor(1, 0, 0)
>>> p.reposition(14, 5)
>>> p.match_position # continues pointing to the same color
9
>>> len(p) # repositioning doesn't change the length
16
Fires the `color_changed()` event for copies to empty slots, or
`sequence_changed()` for moves. If the match position changes as a
result, `match_changed()` is fired too.
"""
assert src_i is not None
if src_i == targ_i:
return
try:
col = self._colors[src_i]
assert col is not None # just in case we change the internal repr
except IndexError:
return
# Special case: just copy if the target is an empty slot
match_pos = self.match_position
if targ_i is not None:
targ = self._colors[targ_i]
if targ is self._EMPTY_SLOT_ITEM:
self._colors[targ_i] = self._copy_color_in(col)
self.color_changed(targ_i)
# Copying from the matched color moves the match position.
# Copying to the match position clears the match.
if match_pos == src_i:
self.match_position = targ_i
elif match_pos == targ_i:
self.match_position = None
return
# Normal case. Remove...
self._colors.pop(src_i)
moving_match = False
updated_match = False
if match_pos is not None:
# Moving rightwards. Adjust for the pop().
if targ_i is not None and targ_i > src_i:
targ_i -= 1
# Similar logic for the match position, but allow it to follow
# the move if it started at the src position.
if match_pos == src_i:
match_pos = None
moving_match = True
updated_match = True
elif match_pos > src_i:
match_pos -= 1
updated_match = True
# ... then append or insert.
if targ_i is None:
self._colors.append(col)
if moving_match:
match_pos = len(self._colors) - 1
updated_match = True
else:
self._colors.insert(targ_i, col)
if match_pos is not None:
if moving_match:
match_pos = targ_i
updated_match = True
elif match_pos >= targ_i:
match_pos += 1
updated_match = True
# Announce changes
self.sequence_changed()
if updated_match:
self.match_position = match_pos
self.match_changed()
def pop(self, i):
"""Removes a color, returning it.
Fires the `match_changed()` event if the match index changes as a
result of the removal, and `sequence_changed()` if a color was removed,
prior to its return.
"""
i = int(i)
try:
col = self._colors.pop(i)
except IndexError:
return
if self.match_position == i:
self.match_position = None
        elif self.match_position is not None and self.match_position > i:
self.match_position -= 1
self.sequence_changed()
return self._copy_color_out(col)
def get_color(self, i):
"""Looks up a color by its list index."""
if i is None:
return None
try:
col = self._colors[i]
return self._copy_color_out(col)
except IndexError:
return None
def __getitem__(self, i):
return self.get_color(i)
def __setitem__(self, i, col):
self._colors[i] = self._copy_color_in(col, None)
self.color_changed(i)
## Color name access
def get_color_name(self, i):
"""Looks up a color's name by its list index."""
try:
col = self._colors[i]
except IndexError:
return
if col is self._EMPTY_SLOT_ITEM:
return
return col.__name
def set_color_name(self, i, name):
"""Sets a color's name by its list index."""
try:
col = self._colors[i]
except IndexError:
return
if col is self._EMPTY_SLOT_ITEM:
return
col.__name = name
self.color_changed(i)
def get_color_by_name(self, name):
"""Looks up the first color with the given name.
>>> pltt = Palette()
>>> pltt.append(RGBColor(1,0,1), "Magenta")
>>> pltt.get_color_by_name("Magenta")
<CAM16, v=63.8320, s=96.7099, h=334.4049, illuminant=95.0456, 100.0000, 108.9058>
"""
for col in self:
if col.__name == name:
return col
def __iter__(self):
return self.iter_colors()
def iter_colors(self):
"""Iterates across the palette's colors."""
for col in self._colors:
if col is self._EMPTY_SLOT_ITEM:
yield None
else:
yield col
## Observable events
@event
def info_changed(self):
"""Event: palette name, or number of columns was changed."""
@event
def match_changed(self):
"""Event: either match position or match_is_approx was updated."""
@event
def sequence_changed(self):
"""Event: the color ordering or palette length was changed."""
@event
def color_changed(self, i):
"""Event: the color in the given slot, or its name, was modified."""
## Dumping and cloning
def __unicode__(self):
"""Py2-era serialization as a Unicode string.
Used by the Py3 __str__() while we are in transition.
"""
result = u"GIMP Palette\n"
if self._name is not None:
result += u"Name: %s\n" % self._name
if self._columns > 0:
result += u"Columns: %d\n" % self._columns
result += u"#\n"
for col in self._colors:
if col is self._EMPTY_SLOT_ITEM:
col_name = self._EMPTY_SLOT_NAME
r = g = b = 0
else:
col_name = col.__name
# get sRGB D65 RGB values
col.illuminant = None
col.limit_purity = None
col.cachedrgb = None
r, g, b = [clamp(int(c*0xff), 0, 0xff) for c in col.get_rgb()]
if col_name is None:
result += u"%d %d %d\n" % (r, g, b)
else:
result += u"%d %d %d %s\n" % (r, g, b, col_name)
return result
def __str__(self):
"""Py3: serialize as str (=Unicode). Py2: as bytes (lossy!)."""
s = self.__unicode__()
if not PY3:
s = s.encode("utf-8", errors="replace")
return s
def __copy__(self):
clone = Palette()
clone.set_name(self.get_name())
clone.set_columns(self.get_columns())
for col in self._colors:
if col is self._EMPTY_SLOT_ITEM:
clone.append(None)
else:
clone.append(copy(col), col.__name)
return clone
def __deepcopy__(self, memo):
return self.__copy__()
def __repr__(self):
return "<Palette colors=%d, columns=%d, name=%r>" % (
len(self._colors),
self._columns,
self._name,
)
## Conversion to/from simple dict representation
def to_simple_dict(self):
"""Converts the palette to a simple dict form used in the prefs."""
simple = {}
simple["name"] = self.get_name()
simple["columns"] = self.get_columns()
entries = []
for col in self.iter_colors():
if col is None:
entries.append(None)
else:
name = col.__name
entries.append(((col.v, col.s, col.h), name))
simple["entries"] = entries
return simple
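    # An illustrative simple-dict form (example values assumed):
    #   {"name": "Greyscale", "columns": 3,
    #    "entries": [((53.0, 2.5, 209.5), "grey"), None, ...]}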
@classmethod
def new_from_simple_dict(cls, simple):
"""Constructs and returns a palette from the simple dict form."""
pal = cls()
pal.set_name(simple.get("name", None))
pal.set_columns(simple.get("columns", None))
for entry in simple.get("entries", []):
if entry is None:
pal.append(None)
else:
s, name = entry
# convert old format to CAM16
if "#" in s:
col = CAM16Color(color=RGBColor.new_from_hex_str(s))
else:
col = CAM16Color(vsh=s)
pal.append(col, name)
return pal
## Helper functions
def _outwards_from(n, i):
"""Search order within the palette, outwards from a given index.
Defined for a sequence of len() `n`, outwards from index `i`.
"""
assert i < n and i >= 0
yield i
for j in xrange(n):
exhausted = True
if i - j >= 0:
yield i - j
exhausted = False
if i + j < n:
yield i + j
exhausted = False
if exhausted:
break
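# A quick trace of the generator above (derived by hand): _outwards_from(5, 2)
# yields 2, 2, 2, 1, 3, 0, 4 -- the start index first (repeated by the j == 0
# step), then its neighbours alternately further outwards.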
## Module testing
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
import doctest
doctest.testmod()
| gpl-2.0 | 846,482,513,551,410,600 | 33.093886 | 91 | 0.535767 | false |
enriquepablo/terms.server | setup.py | 1 | 2240 | # Copyright (c) 2007-2012 by Enrique Pérez Arnaud <[email protected]>
#
# This file is part of the terms project.
# https://github.com/enriquepablo/terms
#
# The terms project is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The terms project is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with any part of the terms project.
# If not, see <http://www.gnu.org/licenses/>.
from setuptools import setup, find_packages
VERSION = '0.1.0a2'
setup(
name = 'terms.server',
version = VERSION,
author = 'Enrique Pérez Arnaud',
author_email = '[email protected]',
url = 'http://pypi.python.org/terms.server',
license = 'GNU GENERAL PUBLIC LICENSE Version 3',
description = 'Terms services',
    long_description = (open('README.rst').read() +
                        '\n' + open('INSTALL.rst').read() +
                        '\n' + open('SUPPORT.rst').read()),
packages = find_packages(),
namespace_packages = ['terms'],
test_suite = 'nose.collector',
include_package_data = True,
entry_points = {
'console_scripts': [
'webserver = terms.server.scripts.webserver:serve',
'client = terms.server.scripts.client:client',
'initterms = terms.server.scripts.initialize:init_terms',
],
},
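    # The console_scripts above are installed as the ``webserver``, ``client``
    # and ``initterms`` commands, each pointing at the dotted callable listed.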
tests_require = [
'nose == 1.3.7',
'coverage == 4.3.4',
],
extras_require = {
'PG': ['psycopg2 == 2.7.1',],
},
install_requires = [
'setuptools==34.3.3',
'bottle==0.12.13',
'repoze.who==2.3',
'gevent==1.2.1',
'gevent-websocket==0.10.1',
'py-bcrypt==0.4',
'sqlalchemy==1.1.7',
'colander==1.3.2',
'colanderalchemy==0.3.3',
'deform==2.0.4',
'mako==1.0.6',
],
)
| gpl-3.0 | 3,798,885,400,614,978,000 | 31.434783 | 75 | 0.60992 | false |
dames57/multimarkdown_reader | mmd_reader.py | 1 | 1283 | import subprocess
from pelican import signals
from pelican.readers import BaseReader
from pelican.utils import pelican_open
class MmdReader(BaseReader):
enabled = True
file_extensions = ['md', 'markdown', 'mkd', 'mdown']
def read(self, filename):
with pelican_open(filename) as fp:
text = list(fp.splitlines())
metadata = {}
for i, line in enumerate(text):
kv = line.split(':', 1)
if len(kv) == 2:
name, value = kv[0].lower(), kv[1].strip()
metadata[name] = self.process_metadata(name, value)
else:
content = "\n".join(text[i:])
break
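        # Everything above the first non "key: value" line is treated as
        # metadata; the remainder is piped through the external ``multimarkdown``
        # executable (assumed to be available on PATH) to produce the HTML body.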
mmd_cmd = ["multimarkdown"]
proc = subprocess.Popen(mmd_cmd,
stdin = subprocess.PIPE,
stdout = subprocess.PIPE)
output = proc.communicate(content.encode('utf-8'))[0].decode('utf-8')
status = proc.wait()
if status:
raise subprocess.CalledProcessError(status, mmd_cmd)
return output, metadata
def add_reader(readers):
for ext in MmdReader.file_extensions:
readers.reader_classes[ext] = MmdReader
def register():
signals.readers_init.connect(add_reader)
| agpl-3.0 | -7,180,659,168,199,426,000 | 29.547619 | 77 | 0.56742 | false |
beproud/bpcommons | beproud/django/commons/templatetags/compat.py | 1 | 5776 | import string
import re
import json
import six
from django.utils.encoding import force_text
from django.core.serializers.json import DjangoJSONEncoder
try:
from django.utils.functional import keep_lazy # Django-1.8 doesn't have it.
except ImportError:
# allow_lazy has been deprecated at Django-1.10, will be removed at 2.0
from django.utils.functional import allow_lazy as keep_lazy
# copy from beproud.utils.strutils.abbrev
def abbrev(s, num=255, end="..."):
"""
    Abbreviate a piece of text.
    The length of the returned string is guaranteed not to exceed ``num``.
>>> abbrev('spamspamspam', 6)
'spa...'
>>> abbrev('spamspamspam', 12)
'spamspamspam'
>>> abbrev('eggseggseggs', 1)
'e'
>>> abbrev('eggseggseggs', 2)
'eg'
>>> abbrev('eggseggseggs', 3)
'egg'
>>> abbrev('eggseggseggs', 4)
'e...'
>>> abbrev('eggseggseggs', 2, '.')
'e.'
"""
index = num - len(end)
if len(s) > num:
s = (s[:index] + end) if index > 0 else s[:num]
return s
# copy from beproud.utils.html.urlize
def escape(html):
"""
Returns the given HTML with ampersands, quotes and angle brackets encoded.
"""
return (force_text(html).replace('&', '&')
.replace('<', '<')
.replace('>', '>')
.replace('"', '"')
.replace("'", '''))
# copy from beproud.utils.html.urlize
HTTP_SCHEME_RE = 'http[s]*'
# See: http://www.ietf.org/rfc/rfc1738.txt
URL_SAFE = "$-_.+"
URL_EXTRA = "!*'(),"
URL_PATH_RESERVED = ';?'
URL_QUERY_RESERVED = '#'
URL_OTHER_RESERVED = ':@&=/'
URL_RESERVED = URL_PATH_RESERVED + URL_QUERY_RESERVED + URL_OTHER_RESERVED
URL_ESCAPE = '%'
URL_ALNUM = string.ascii_letters + string.digits
URL_PATH_VALID_CHARS = URL_ALNUM + URL_SAFE + URL_EXTRA + URL_OTHER_RESERVED + URL_ESCAPE
URL_QUERY_VALID_CHARS = URL_ALNUM + URL_SAFE + URL_EXTRA + URL_OTHER_RESERVED + URL_PATH_RESERVED + URL_ESCAPE
URL_FRAGMENT_VALID_CHARS = URL_ALNUM + URL_SAFE + URL_EXTRA + URL_RESERVED + URL_ESCAPE
# 0-65535
# See: http://www.regular-expressions.info/numericranges.html
PORT_RE = "%s" % "|".join([
"6553[0-5]",
"655[0-2][0^9]",
"65[0-4][0-9][0-9]",
"6[0-4][0-9][0-9][0-9]",
"[1-5][0-9][0-9][0-9][0-9]",
"[1-9][0-9][0-9][0-9]",
"[1-9][0-9][0-9]",
"[1-9][0-9]",
"[1-9]",
])
# See: http://www.shauninman.com/archive/2006/05/08/validating_domain_names
# See: http://www.iana.org/domains/root/db/
DOMAIN_RE = '(?:[a-z0-9](?:[-a-z0-9]*[a-z0-9])?\\.)+(?:(?:aero|arpa|a[cdefgilmnoqrstuwxz])|(?:cat|com|coop|b[abdefghijmnorstvwyz]|biz)|(?:c[acdfghiklmnorsuvxyz])|d[ejkmoz]|(?:edu|e[ceghrstu])|f[ijkmor]|(?:gov|g[abdefghilmnpqrstuwy])|h[kmnrtu]|(?:info|int|i[delmnoqrst])|(?:jobs|j[emop])|k[eghimnprwyz]|l[abcikrstuvy]|(?:mil|mobi|museum|m[acdghklmnopqrstuvwxyz])|(?:name|net|n[acefgilopruz])|(?:om|org)|(?:pro|p[aefghklmnrstwy])|qa|r[eouw]|s[abcdeghijklmnortvyz]|(?:travel|t[cdfghjklmnoprtvwz])|u[agkmsyz]|v[aceginu]|w[fs]|y[etu]|z[amw])'
# See: http://www.regular-expressions.info/regexbuddy/ipaccurate.html
IP_ADDRESS_RE = '(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)'
# Domain or IP address
IP_DOMAIN_RE = '(%s)|(%s)' % (DOMAIN_RE, IP_ADDRESS_RE)
# Domain or IP address with port number
URL_DOMAIN_RE = '(?:%s)(?::(%s))?' % (IP_DOMAIN_RE, PORT_RE)
URL_RE = r'(%s)\:\/\/(%s)(/[%s]*)?(?:\?([%s]*))?(?:\#([%s]*))?' % (
HTTP_SCHEME_RE,
URL_DOMAIN_RE,
re.escape(URL_PATH_VALID_CHARS),
re.escape(URL_QUERY_VALID_CHARS),
re.escape(URL_FRAGMENT_VALID_CHARS),
)
URL_RE_CMP = re.compile(URL_RE)
URLIZE_TMPL = '<a href="%(link_url)s"%(attrs)s>%(link_text)s</a>'
# copy from beproud.utils.html.urlize
def urlize(text, trim_url_limit=None, attrs={}, url_re=URL_RE_CMP, autoescape=False):
"""text内URLを抽出してアンカータグで囲む
URLのデリミタは半角カンマ、<>(エスケープ済み含む)、\s、全角スペース、行末で、これらが末尾にマッチしない場合はURLとして認識しません。
URL部分は.+の最小マッチ、もしくはtrim_url_limitが指定された場合は{,trim_url_limit}の最小マッチとなります。
-args
text: urlize対象文字列
trim_url_limit: urlとして認識する文字数に上限を設ける場合は数値をセット
nofollow: Trueを与えるとタグにrel="nofollow"を付加
autoescape: Trueを与えるとタグエスケープを行います。
"""
if autoescape:
text = escape(text)
def _repl(m):
return URLIZE_TMPL % {
"link_url": m.group(),
"attrs": "".join(map(lambda x: ' %s="%s"' % x, attrs.items())),
"link_text": (abbrev(m.group(), trim_url_limit)
if trim_url_limit is not None else m.group()),
}
return url_re.sub(_repl, text)
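# Illustrative example (assumed, not from the original source): with the default
# URL_RE_CMP, urlize(u"see http://example.com for details") returns
# u'see <a href="http://example.com">http://example.com</a> for details'.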
# copy from django.utils.html.strip_entities(). it was removed
def strip_entities(value):
"""Returns the given HTML with all entities (&something;) stripped."""
return re.sub(r'&(?:\w+|#\d+);', '', force_text(value))
strip_entities = keep_lazy(strip_entities, six.text_type)
# copy from bputils: beproud.utils.javascript
JS_CONVERT_TYPES = {
'bool': bool,
'int': int,
'string': str,
'array': list,
}
# copy from bputils: beproud.utils.javascript
def force_js(value, typename=None, encoder=None):
"""
Changes a python value to javascript for use in templates
"""
if typename:
typename = typename.lower()
if typename in JS_CONVERT_TYPES:
value = JS_CONVERT_TYPES[typename](value)
return json.dumps(value, cls=(encoder or DjangoJSONEncoder))
| bsd-2-clause | -7,532,070,724,396,816,000 | 31.557576 | 537 | 0.609643 | false |
maartenbreddels/vaex | packages/vaex-ui/vaex/ui/plugin/dispersions.py | 1 | 11670 | __author__ = 'maartenbreddels'
import functools
import matplotlib.patches as patches
import numpy as np
import matplotlib.artist as artist
import vaex.ui.plugin
from vaex.ui.qt import *
import logging
logger = logging.getLogger("plugin.dispersions")
import matplotlib.transforms as transforms
from matplotlib.path import Path
class DispersionEllipse(patches.Patch):
"""
    This ellipse has its center in user coordinates, and its width and height
    in device coordinates, so that it is not deformed
"""
def __str__(self):
return "DispersionEllipse(%s,%s;%sx%s)" % (self.center[0], self.center[1],
self.width, self.height)
#@docstring.dedent_interpd
def __init__(self, xy, width, height, scale=1.0, angle=0.0, **kwargs):
"""
*xy*
center of ellipse
*width*
total length (diameter) of horizontal axis
*height*
total length (diameter) of vertical axis
*angle*
rotation in degrees (anti-clockwise)
Valid kwargs are:
%(Patch)s
"""
patches.Patch.__init__(self, **kwargs)
self.center = xy
self.width, self.height = width, height
self.scale = scale
self.angle = angle
self._path = Path.unit_circle()
# Note: This cannot be calculated until this is added to an Axes
self._patch_transform = transforms.IdentityTransform()
def _recompute_transform(self):
"""NOTE: This cannot be called until after this has been added
to an Axes, otherwise unit conversion will fail. This
        makes it very important to call the accessor method and
not directly access the transformation member variable.
"""
center = (self.convert_xunits(self.center[0]),
self.convert_yunits(self.center[1]))
width = self.width #self.convert_xunits(self.width)
height = self.height #self.convert_yunits(self.height)
trans = artist.Artist.get_transform(self)
self._patch_transform = transforms.Affine2D() \
.scale(width * 0.5 * self.scale, height * 0.5* self.scale) \
.rotate_deg(self.angle) \
.translate(*trans.transform(center))
def get_path(self):
"""
        Return the path of the ellipse (a unit circle; the patch transform applies the scaling)
"""
return self._path
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` applied
to the :class:`Patch`.
"""
return self.get_patch_transform()
def get_patch_transform(self):
self._recompute_transform()
return self._patch_transform
def contains(self, ev):
if ev.x is None or ev.y is None:
return False, {}
x, y = self.get_transform().inverted().transform_point((ev.x, ev.y))
return (x * x + y * y) <= 1.0, {}
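# Illustrative sketch (not in the original source): the ellipse is centred in
# data coordinates but sized in device (pixel) coordinates, so a typical use
# inside a matplotlib Axes looks roughly like this (numbers are made up):
#
#   ellipse = DispersionEllipse(xy=(x, y), width=30, height=12, angle=45,
#                               scale=1.0, alpha=0.4, facecolor="green",
#                               edgecolor="black")
#   axes.add_artist(ellipse)
#
# Changing ellipse.scale afterwards takes effect on the next canvas draw,
# which is how the scale slider below works.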
class DispersionPlugin(vaex.ui.plugin.PluginLayer):
name = "dispersion"
def __init__(self, parent, layer):
super(DispersionPlugin, self).__init__(parent, layer)
dialog.plug_page(self.plug_page, "Dispersions", 2.25, 1.0)
dialog.plug_grids(self.define_grids, self.draw_grids)
def define_grids(self, grids):
#grids.define_grid("counts_vector", self.dialog.gridsize_vector, "VZ*0+1")
# covariance matrix terms
# diagonals
for dimension in range(self.dialog.dimensions):
axis_name = self.dialog.axisnames[dimension].lower()
expression = self.expressions[dimension].strip()
if len(expression) > 0:
grids.define_grid(axis_name + "_mom1", self.dialog.vector_grid_size, expression)
grids.define_grid(axis_name + "_mom2", self.dialog.vector_grid_size, "(" + expression + ")**2")
else:
grids.define_grid(axis_name + "_mom1", self.dialog.vector_grid_size, None)
grids.define_grid(axis_name + "_mom2", self.dialog.vector_grid_size, None)
if 1:
for dimension1 in range(self.dialog.dimensions):
for dimension2 in range(dimension1+1, self.dialog.dimensions):
axis_name1 = self.dialog.axisnames[dimension1].lower()
axis_name2 = self.dialog.axisnames[dimension2].lower()
expression1 = self.expressions[dimension1].strip()
expression2 = self.expressions[dimension2].strip()
if len(expression1) > 0 and len(expression2) > 0:
grids.define_grid("cov_" + axis_name1 +"_" +axis_name2, self.dialog.vector_grid_size, "(" + expression1 + ")*(" + expression2 +")")
def draw_grids(self, axes, grid_map, grid_map_vector):
if not self.dispersions_draw:
return
self.ellipses = []
dispersions = []
counts = grid_map_vector["counts"]
#print "counts check", np.sum(counts), np.sum(grid_map["counts"])
#print counts
#print grid_map_vector.keys()
if self.dialog.dimensions == 2:
axis_name1 = self.dialog.axisnames[0].lower()
axis_name2 = self.dialog.axisnames[1].lower()
if len(self.expressions[0]) > 0 and len(self.expressions[1]) > 0:
meanx = grid_map_vector[axis_name1 + "_mom1"]/counts
meany = grid_map_vector[axis_name2 + "_mom1"]/counts
varx = grid_map_vector[axis_name1 + "_mom2"]/counts
vary = grid_map_vector[axis_name2 + "_mom2"]/counts
covxy = grid_map_vector["cov_" +axis_name1 + "_" +axis_name2]/counts - meanx*meany
sigmax = (varx-meanx**2)**0.5
sigmay = (vary-meany**2)**0.5
mask = counts > 0
x = grid_map_vector["x"]
y = grid_map_vector["y"]
x, y = np.meshgrid(x, y)
vmax = np.nanmax(np.sqrt(sigmax.reshape(-1)**2 + sigmay.reshape(-1)**2))
width, height = self.dialog.canvas.get_width_height()
#print "width,height", width, height
max_size = min(width, height) / float(self.dialog.vector_grid_size)# * 0.9
#print max_size
#identity_transform = matplotlib.transforms.IdentityTransform()
#deltax = self.dialog.ranges_show[0][1] - self.dialog.ranges_show[0][0]
#deltay = self.dialog.ranges_show[1][1] - self.dialog.ranges_show[1][0]
#aspect = deltay / float(height) / (deltax/float(width))
#for grid in [x, y, sigmax, sigmay, covxy, counts, mask]:
# print grid.shape
for x, y, sigmax, sigmay, covxy in zip(x[mask].reshape(-1), y[mask].reshape(-1), sigmax[mask].reshape(-1), sigmay[mask].reshape(-1), covxy[mask].reshape(-1)):
try:
covmatrix = [[sigmax**2, covxy], [covxy, sigmay**2]]
eigen_values, eigen_vectors = np.linalg.eig(covmatrix)
except:
pass
else:
scaling = 1./vmax * max_size
device_width = (np.sqrt(np.max(eigen_values)) * scaling)
device_height = (np.sqrt(np.min(eigen_values)) * scaling)
if self.dispersions_unit_length:
length = np.sqrt(device_width**2+device_height**2)
device_width /= float(length) / max_size
device_height /= float(length) / max_size
#ellipse_width = np.sqrt(np.max(eigen_values)) * scaling / width * deltax
#ellipse_height = np.sqrt(np.min(eigen_values)) * scaling / height * deltay
#ellipse_height /= aspect
if sigmax < sigmay: # if x was smaller, the largest eigenvalue corresponds to the y value
device_width, device_height = device_height, device_width
#ellipse_width, ellipse_height = ellipse_height, ellipse_width
#ellipse_height /= aspect
angle = np.arctan(2*covxy / (sigmax**2-sigmay**2))/2.
#angle2 = np.arctan(2*covxy / (sigmax**2-sigmay**2))/2.
#angle = angle2 = 0
#print aspect, sigmax, sigmay, sigmax/sigmay, covxy/(sigmax*sigmay), ellipse_width/ellipse_height
#aspect = 0.1
#m = [[np.cos(angle2), np.sin(angle2)*aspect], [-np.sin(angle2), np.cos(angle2)*aspect]]
#ellipse_width, ellipse_height = np.dot(m, [ellipse_width, ellipse_height])
#print covxy/(sigmax*sigmay), angle, sigmax, sigmay, covxy
#device_x, device_y = axes.transData.transform((x, y))
#print device_x, device_y, device_width, device_height
#ellipse = patches.Ellipse(xy=(device_x, device_y), width=device_width, height=device_height, angle=angle, transform=identity_transform,
# alpha=0.4, color="blue") #rand()*360
#ellipse = patches.Ellipse(xy=(x, y), width=ellipse_width, height=ellipse_height, angle=np.degrees(angle),
# alpha=0.4, color="blue") #rand()*360
ellipse = DispersionEllipse(xy=(x, y), width=device_width, height=device_height, angle=np.degrees(angle), scale=self.scale_dispersion,
alpha=0.4, facecolor="green", edgecolor="black") #rand()*360
axes.add_artist(ellipse)
self.ellipses.append(ellipse)
#axes.quiver()
#[Ellipse(xy=rand(2)*10, width=rand(), height=rand(), angle=rand()*360)
#@staticmethod
#def useon(dialog_class):
# return issubclass(dialog_class, vaex.plot_windows.VolumeRenderingPlotDialog)
def plug_page(self, page):
layout = self.layout = QtGui.QGridLayout()
page.setLayout(self.layout)
layout.setSpacing(0)
layout.setContentsMargins(0,0,0,0)
layout.setAlignment(QtCore.Qt.AlignTop)
row = 0
self.dispersions_draw = bool(eval(self.dialog.options.get("disp_draw", "True")))
def setter(value):
self.dispersions_draw = value
self.dialog.plot()
self.dispersions_draw_checkbox = self.dialog.create_checkbox(page, "Draw dispersion tensors", lambda : self.dispersions_draw, setter)
layout.addWidget(self.dispersions_draw_checkbox, row, 1)
row += 1
self.dispersions_unit_length = bool(eval(self.dialog.options.get("disp_unit", "False")))
def setter(value):
self.dispersions_unit_length = value
self.dialog.plot()
self.dispersions_unit_lengthcheckbox = self.dialog.create_checkbox(page, "Unit length", lambda : self.dispersions_unit_length, setter)
layout.addWidget(self.dispersions_unit_lengthcheckbox, row, 1)
row += 1
self.expressions = []
self.expression_boxes = []
for dimension in range(self.dialog.dimensions):
axis_name = self.dialog.axisnames[dimension]
expression_box = QtGui.QComboBox(page)
expression_box.setEditable(True)
expression_box.setMinimumContentsLength(10)
self.expression_boxes.append(expression_box)
self.layout.addWidget(QtGui.QLabel(axis_name + '-axis:', page), row, 0)
self.layout.addWidget(expression_box, row, 1, QtCore.Qt.AlignLeft)
expression = self.dialog.options.get("disp"+axis_name.lower(), "")
expression_box.lineEdit().setText(expression)
self.expressions.append(expression)
#self.onExpressionChangedPartials.append()
#expression_box.lineEdit().editingFinished.connect(self.onExpressionChangedPartials[axisIndex])
calllback = functools.partial(self.onExpressionChanged, axis_index=dimension)
expression_box.lineEdit().editingFinished.connect(calllback)
row += 1
self.scale_dispersion = eval(self.dialog.options.get("disp_scale", "1"))
def setter(value):
self.scale_dispersion = value
for ellipse in self.ellipses:
ellipse.scale = self.scale_dispersion
self.dialog.canvas.draw()
#self.dialog.plot()
self.scale_dispersion_label, self.scale_dispersion_slider, self.scale_dispersion_value_label =\
self.dialog.create_slider(page, "scale: ", 1./100, 100., lambda : self.scale_dispersion, setter, format=" {0:>05.2f}", transform=lambda x: 10**x, inverse=lambda x: np.log10(x))
layout.addWidget(self.scale_dispersion_label, row, 0)
layout.addWidget(self.scale_dispersion_slider, row, 1)
layout.addWidget(self.scale_dispersion_value_label, row, 2)
row += 1
def onExpressionChanged(self, _=None, axis_index=-1):
text = str(self.expression_boxes[axis_index].lineEdit().text())
logger.debug("text set for axis %i: %s" % (axis_index, text))
if text != self.expressions[axis_index]:
axis_name = self.dialog.axisnames[axis_index].lower()
self.expressions[axis_index] = text
if text == "": # check if we can replot without doing the whole calculation
self.dialog.plot()
else:
non_empty = [k for k in self.expressions if len(k) > 0]
if len(non_empty) == len(self.expressions):
self.dialog.compute()
self.dialog.jobsManager.execute()
else:
logger.debug("nothing changed")
| mit | 1,899,751,465,761,447,200 | 38.0301 | 180 | 0.68329 | false |
susurrant-audio/scdown | tests/test_neo.py | 1 | 3011 | from scdown.neo import (Neo,
NODE_USER,
NODE_TRACK,
NODE_COMMENT,
NODE_PROFILE,
REL_FOLLOWS,
REL_UPLOADED,
REL_FAVORITED,
REL_HAS_PROFILE,
REL_WROTE,
REL_REFERS_TO)
from py2neo import Graph
from itertools import product
from nose.tools import with_setup
TEST_DB = "http://127.0.0.1:8585/db/data/"
graph = Graph(TEST_DB)
neo = Neo(graph)
def setup_func():
pass
def teardown_func():
graph.delete_all()
datum = {"id": 1, "name": "Me"}
datum2 = dict(datum)
nested = {"new": {"data": True, "deeply": "nested"}}
datum2["novum"] = nested
def test_deflate():
flat = neo.deflate(datum2)
# adds keys due to nesting
assert len(flat) == len(datum2) + 1
# idempotent
assert flat == neo.deflate(flat)
@with_setup(setup_func, teardown_func)
def test_create_or_update_node():
datum = {"id": 1, "name": "Me"}
datum1 = dict(datum)
datum1["color"] = "red"
node = neo.create_or_update_node(NODE_USER, datum)
assert node.exists
assert NODE_USER in node.labels
node2 = neo.create_or_update_node(NODE_USER, datum1)
assert node.ref == node2.ref
assert node.properties == datum1
@with_setup(setup_func, teardown_func)
def test_node_types():
nodes = set()
for n in [NODE_USER, NODE_COMMENT, NODE_PROFILE, NODE_TRACK]:
node = neo.create_or_update_node(n, datum)
nodes.add(node)
assert len(nodes) == 4
@with_setup(setup_func, teardown_func)
def test_relation_types():
nodes = {}
acceptable = set(
[(NODE_USER, REL_HAS_PROFILE, NODE_PROFILE),
(NODE_USER, REL_FOLLOWS, NODE_USER),
(NODE_USER, REL_UPLOADED, NODE_TRACK),
(NODE_USER, REL_FAVORITED, NODE_TRACK),
(NODE_USER, REL_WROTE, NODE_COMMENT),
(NODE_COMMENT, REL_REFERS_TO, NODE_TRACK)])
accepted = set()
rel_types = [REL_FOLLOWS,
REL_UPLOADED,
REL_FAVORITED,
REL_HAS_PROFILE,
REL_WROTE,
REL_REFERS_TO]
for n in [NODE_USER, NODE_COMMENT, NODE_PROFILE, NODE_TRACK]:
node = neo.create_or_update_node(n, datum)
nodes[n] = node
combos = product(nodes.items(), repeat=2)
for c1, c2 in (tuple(prod) for prod in combos):
k1, n1 = c1
k2, n2 = c2
for r in rel_types:
try:
neo.mk_relation(n1, r, n2)
accepted.add((k1, r, k2))
except AssertionError:
pass
assert acceptable == accepted
@with_setup(setup_func, teardown_func)
def test_nested_properties():
node = neo.create_or_update_node(NODE_COMMENT, datum2)
assert node.exists
assert "novum" in node.properties
assert node.properties["__json_novum"]
assert neo.inflate(node.properties) == datum2
| mit | 1,209,110,973,062,349,000 | 27.951923 | 65 | 0.562936 | false |
edx/ecommerce | ecommerce/extensions/checkout/utils.py | 1 | 2905 |
import logging
from urllib import parse
from babel.numbers import format_currency as default_format_currency
from django.conf import settings
from django.urls import reverse
from django.utils.translation import get_language, to_locale
from requests.exceptions import ConnectionError as ReqConnectionError
from requests.exceptions import Timeout
from slumber.exceptions import SlumberHttpBaseException
logger = logging.getLogger(__name__)
def get_credit_provider_details(credit_provider_id, site_configuration):
""" Returns the credit provider details from LMS.
Args:
credit_provider_id (str): Identifier for the provider
site_configuration (SiteConfiguration): Ecommerce Site Configuration
Returns: dict
"""
try:
return site_configuration.credit_api_client.providers(credit_provider_id).get()
except (ReqConnectionError, SlumberHttpBaseException, Timeout):
logger.exception('Failed to retrieve credit provider details for provider [%s].', credit_provider_id)
return None
def get_receipt_page_url(site_configuration, order_number=None, override_url=None, disable_back_button=False):
""" Returns the receipt page URL.
Args:
order_number (str): Order number
site_configuration (SiteConfiguration): Site Configuration containing the flag for enabling Otto receipt page.
override_url (str): New receipt page to override the default one.
disable_back_button (bool): Whether to disable the back button from receipt page. Defaults to false as the
receipt page is referenced in emails/etc., and we only want to disable the back button from the receipt
page if the user has gone through the payment flow.
Returns:
str: Receipt page URL.
"""
if override_url:
return override_url
url_params = {}
if order_number:
url_params['order_number'] = order_number
if disable_back_button:
url_params['disable_back_button'] = int(disable_back_button)
base_url = site_configuration.build_ecommerce_url(reverse('checkout:receipt'))
params = parse.urlencode(url_params)
return '{base_url}{params}'.format(
base_url=base_url,
params='?{params}'.format(params=params) if params else ''
)
def format_currency(currency, amount, format=None, locale=None): # pylint: disable=redefined-builtin
locale = locale or to_locale(get_language())
format = format or getattr(settings, 'OSCAR_CURRENCY_FORMAT', None)
return default_format_currency(
amount,
currency,
format=format,
locale=locale
)
def add_currency(amount):
""" Adds currency to the price amount.
Args:
amount (Decimal): Price amount
Returns:
str: Formatted price with currency.
"""
return format_currency(settings.OSCAR_DEFAULT_CURRENCY, amount, u'#,##0.00')
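# Hedged usage sketch (illustrative only; the order number is made up and the
# parameter order produced by urlencode() may differ):
#
#   get_receipt_page_url(site_configuration, order_number='EDX-1001',
#                        disable_back_button=True)
#   # -> '<ecommerce base url>/checkout/receipt/?order_number=EDX-1001&disable_back_button=1'
#
#   add_currency(Decimal('10.5'))
#   # -> the amount rendered with the u'#,##0.00' pattern for OSCAR_DEFAULT_CURRENCY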
| agpl-3.0 | -5,248,878,143,881,827,000 | 33.176471 | 118 | 0.703959 | false |
marionleborgne/nupic | tests/swarming/nupic/swarming/experiments/dummyV2/description.py | 1 | 15299 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'/Users/ronmarianetti/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py'
"""
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.htmpredictionmodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "HTMPrediction",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'days': 0,
'fields': [ (u'timestamp', 'first'),
(u'gym', 'first'),
(u'consumption', 'mean'),
(u'address', 'first')],
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalNextStep',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
'address': { 'fieldname': u'address',
'n': 300,
'name': u'address',
'type': 'SDRCategoryEncoder',
'w': 21},
'consumption': { 'clipInput': True,
'fieldname': u'consumption',
'maxval': 200,
'minval': 0,
'n': 1500,
'name': u'consumption',
'type': 'ScalarEncoder',
'w': 21},
'gym': { 'fieldname': u'gym',
'n': 600,
'name': u'gym',
'type': 'SDRCategoryEncoder',
'w': 21},
'timestamp_dayOfWeek': { 'dayOfWeek': (7, 3),
'fieldname': u'timestamp',
'name': u'timestamp_dayOfWeek',
'type': 'DateEncoder'},
'timestamp_timeOfDay': { 'fieldname': u'timestamp',
'name': u'timestamp_timeOfDay',
'timeOfDay': (7, 8),
'type': 'DateEncoder'}},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
      # Valid keys are any combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the columns's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tmEnable' : True,
'tmParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 15,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
'regionName' : 'SDRClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'verbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
control = {
# The environment that the current model is being run in
"environment": 'nupic',
# Input stream specification per py/nupicengine/cluster/database/StreamDef.json.
#
'dataset' : {u'info': u'test_NoProviders',
u'streams': [ { u'columns': [u'*'],
u'info': "test data",
u'source': "file://swarming/test_data.csv"}],
u'version': 1},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
#'iterationCount' : ITERATION_COUNT,
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[
MetricSpec(field=u'consumption',inferenceElement=InferenceElement.prediction,
metric='rmse'),
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
}
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| agpl-3.0 | 8,030,776,921,436,739,000 | 37.928753 | 110 | 0.589973 | false |
ddico/odoo | addons/sale/tests/test_reinvoice.py | 2 | 14326 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.sale.tests.test_sale_common import TestCommonSaleNoChart
from odoo.tests import Form
class TestReInvoice(TestCommonSaleNoChart):
@classmethod
def setUpClass(cls):
super(TestReInvoice, cls).setUpClass()
cls.setUpExpenseProducts()
cls.setUpAdditionalAccounts()
cls.setUpAccountJournal()
cls.analytic_account = cls.env['account.analytic.account'].create({
'name': 'Test AA',
'code': 'TESTSALE_REINVOICE',
'company_id': cls.partner_customer_usd.company_id.id,
'partner_id': cls.partner_customer_usd.id
})
cls.sale_order = cls.env['sale.order'].with_context(mail_notrack=True, mail_create_nolog=True).create({
'partner_id': cls.partner_customer_usd.id,
'partner_invoice_id': cls.partner_customer_usd.id,
'partner_shipping_id': cls.partner_customer_usd.id,
'analytic_account_id': cls.analytic_account.id,
})
cls.AccountMove = cls.env['account.move'].with_context(default_move_type='in_invoice', mail_notrack=True, mail_create_nolog=True)
def test_at_cost(self):
""" Test vendor bill at cost for product based on ordered and delivered quantities. """
# create SO line and confirm SO (with only one line)
sale_order_line1 = self.env['sale.order.line'].create({
'name': self.product_ordered_cost.name,
'product_id': self.product_ordered_cost.id,
'product_uom_qty': 2,
'qty_delivered': 1,
'product_uom': self.product_ordered_cost.uom_id.id,
'price_unit': self.product_ordered_cost.list_price,
'order_id': self.sale_order.id,
})
sale_order_line1.product_id_change()
sale_order_line2 = self.env['sale.order.line'].create({
'name': self.product_deliver_cost.name,
'product_id': self.product_deliver_cost.id,
'product_uom_qty': 4,
'qty_delivered': 1,
'product_uom': self.product_deliver_cost.uom_id.id,
'price_unit': self.product_deliver_cost.list_price,
'order_id': self.sale_order.id,
})
sale_order_line2.product_id_change()
self.sale_order.onchange_partner_id()
self.sale_order._compute_tax_id()
self.sale_order.action_confirm()
# create invoice lines and validate it
move_form = Form(self.AccountMove)
move_form.partner_id = self.partner_customer_usd
with move_form.line_ids.new() as line_form:
line_form.product_id = self.product_ordered_cost
line_form.quantity = 3.0
line_form.analytic_account_id = self.analytic_account
with move_form.line_ids.new() as line_form:
line_form.product_id = self.product_deliver_cost
line_form.quantity = 3.0
line_form.analytic_account_id = self.analytic_account
invoice_a = move_form.save()
invoice_a.post()
sale_order_line3 = self.sale_order.order_line.filtered(lambda sol: sol != sale_order_line1 and sol.product_id == self.product_ordered_cost)
sale_order_line4 = self.sale_order.order_line.filtered(lambda sol: sol != sale_order_line2 and sol.product_id == self.product_deliver_cost)
self.assertTrue(sale_order_line3, "A new sale line should have been created with ordered product")
self.assertTrue(sale_order_line4, "A new sale line should have been created with delivered product")
self.assertEqual(len(self.sale_order.order_line), 4, "There should be 4 lines on the SO (2 vendor bill lines created)")
self.assertEqual(len(self.sale_order.order_line.filtered(lambda sol: sol.is_expense)), 2, "There should be 4 lines on the SO (2 vendor bill lines created)")
self.assertEqual(sale_order_line1.qty_delivered, 1, "Exising SO line 1 should not be impacted by reinvoicing product at cost")
self.assertEqual(sale_order_line2.qty_delivered, 1, "Exising SO line 2 should not be impacted by reinvoicing product at cost")
self.assertEqual((sale_order_line3.price_unit, sale_order_line3.qty_delivered, sale_order_line3.product_uom_qty, sale_order_line3.qty_invoiced), (self.product_ordered_cost.standard_price, 3, 0, 0), 'Sale line is wrong after confirming vendor invoice')
self.assertEqual((sale_order_line4.price_unit, sale_order_line4.qty_delivered, sale_order_line4.product_uom_qty, sale_order_line4.qty_invoiced), (self.product_deliver_cost.standard_price, 3, 0, 0), 'Sale line is wrong after confirming vendor invoice')
self.assertEqual(sale_order_line3.qty_delivered_method, 'analytic', "Delivered quantity of 'expense' SO line should be computed by analytic amount")
self.assertEqual(sale_order_line4.qty_delivered_method, 'analytic', "Delivered quantity of 'expense' SO line should be computed by analytic amount")
# create second invoice lines and validate it
move_form = Form(self.AccountMove)
move_form.partner_id = self.partner_customer_usd
with move_form.line_ids.new() as line_form:
line_form.product_id = self.product_ordered_cost
line_form.quantity = 2.0
line_form.analytic_account_id = self.analytic_account
with move_form.line_ids.new() as line_form:
line_form.product_id = self.product_deliver_cost
line_form.quantity = 2.0
line_form.analytic_account_id = self.analytic_account
invoice_b = move_form.save()
invoice_b.post()
sale_order_line5 = self.sale_order.order_line.filtered(lambda sol: sol != sale_order_line1 and sol != sale_order_line3 and sol.product_id == self.product_ordered_cost)
sale_order_line6 = self.sale_order.order_line.filtered(lambda sol: sol != sale_order_line2 and sol != sale_order_line4 and sol.product_id == self.product_deliver_cost)
self.assertTrue(sale_order_line5, "A new sale line should have been created with ordered product")
self.assertTrue(sale_order_line6, "A new sale line should have been created with delivered product")
self.assertEqual(len(self.sale_order.order_line), 6, "There should be still 4 lines on the SO, no new created")
self.assertEqual(len(self.sale_order.order_line.filtered(lambda sol: sol.is_expense)), 4, "There should be still 2 expenses lines on the SO")
self.assertEqual((sale_order_line5.price_unit, sale_order_line5.qty_delivered, sale_order_line5.product_uom_qty, sale_order_line5.qty_invoiced), (self.product_ordered_cost.standard_price, 2, 0, 0), 'Sale line 5 is wrong after confirming 2e vendor invoice')
self.assertEqual((sale_order_line6.price_unit, sale_order_line6.qty_delivered, sale_order_line6.product_uom_qty, sale_order_line6.qty_invoiced), (self.product_deliver_cost.standard_price, 2, 0, 0), 'Sale line 6 is wrong after confirming 2e vendor invoice')
def test_sales_price(self):
""" Test invoicing vendor bill at sales price for products based on delivered and ordered quantities. Check no existing SO line is incremented, but when invoicing a
            second time, increment only the delivered SO line.
"""
# create SO line and confirm SO (with only one line)
sale_order_line1 = self.env['sale.order.line'].create({
'name': self.product_deliver_sales_price.name,
'product_id': self.product_deliver_sales_price.id,
'product_uom_qty': 2,
'qty_delivered': 1,
'product_uom': self.product_deliver_sales_price.uom_id.id,
'price_unit': self.product_deliver_sales_price.list_price,
'order_id': self.sale_order.id,
})
sale_order_line1.product_id_change()
sale_order_line2 = self.env['sale.order.line'].create({
'name': self.product_order_sales_price.name,
'product_id': self.product_order_sales_price.id,
'product_uom_qty': 3,
'qty_delivered': 1,
'product_uom': self.product_order_sales_price.uom_id.id,
'price_unit': self.product_order_sales_price.list_price,
'order_id': self.sale_order.id,
})
sale_order_line2.product_id_change()
self.sale_order._compute_tax_id()
self.sale_order.action_confirm()
# create invoice lines and validate it
move_form = Form(self.AccountMove)
move_form.partner_id = self.partner_customer_usd
with move_form.line_ids.new() as line_form:
line_form.product_id = self.product_deliver_sales_price
line_form.quantity = 3.0
line_form.analytic_account_id = self.analytic_account
with move_form.line_ids.new() as line_form:
line_form.product_id = self.product_order_sales_price
line_form.quantity = 3.0
line_form.analytic_account_id = self.analytic_account
invoice_a = move_form.save()
invoice_a.post()
sale_order_line3 = self.sale_order.order_line.filtered(lambda sol: sol != sale_order_line1 and sol.product_id == self.product_deliver_sales_price)
sale_order_line4 = self.sale_order.order_line.filtered(lambda sol: sol != sale_order_line2 and sol.product_id == self.product_order_sales_price)
self.assertTrue(sale_order_line3, "A new sale line should have been created with ordered product")
self.assertTrue(sale_order_line4, "A new sale line should have been created with delivered product")
self.assertEqual(len(self.sale_order.order_line), 4, "There should be 4 lines on the SO (2 vendor bill lines created)")
self.assertEqual(len(self.sale_order.order_line.filtered(lambda sol: sol.is_expense)), 2, "There should be 4 lines on the SO (2 vendor bill lines created)")
self.assertEqual(sale_order_line1.qty_delivered, 1, "Exising SO line 1 should not be impacted by reinvoicing product at cost")
self.assertEqual(sale_order_line2.qty_delivered, 1, "Exising SO line 2 should not be impacted by reinvoicing product at cost")
self.assertEqual((sale_order_line3.price_unit, sale_order_line3.qty_delivered, sale_order_line3.product_uom_qty, sale_order_line3.qty_invoiced), (self.product_deliver_sales_price.list_price, 3, 0, 0), 'Sale line is wrong after confirming vendor invoice')
self.assertEqual((sale_order_line4.price_unit, sale_order_line4.qty_delivered, sale_order_line4.product_uom_qty, sale_order_line4.qty_invoiced), (self.product_order_sales_price.list_price, 3, 0, 0), 'Sale line is wrong after confirming vendor invoice')
self.assertEqual(sale_order_line3.qty_delivered_method, 'analytic', "Delivered quantity of 'expense' SO line 3 should be computed by analytic amount")
self.assertEqual(sale_order_line4.qty_delivered_method, 'analytic', "Delivered quantity of 'expense' SO line 4 should be computed by analytic amount")
# create second invoice lines and validate it
move_form = Form(self.AccountMove)
move_form.partner_id = self.partner_customer_usd
with move_form.line_ids.new() as line_form:
line_form.product_id = self.product_deliver_sales_price
line_form.quantity = 2.0
line_form.analytic_account_id = self.analytic_account
with move_form.line_ids.new() as line_form:
line_form.product_id = self.product_order_sales_price
line_form.quantity = 2.0
line_form.analytic_account_id = self.analytic_account
invoice_b = move_form.save()
invoice_b.post()
sale_order_line5 = self.sale_order.order_line.filtered(lambda sol: sol != sale_order_line1 and sol != sale_order_line3 and sol.product_id == self.product_deliver_sales_price)
sale_order_line6 = self.sale_order.order_line.filtered(lambda sol: sol != sale_order_line2 and sol != sale_order_line4 and sol.product_id == self.product_order_sales_price)
self.assertFalse(sale_order_line5, "No new sale line should have been created with delivered product !!")
self.assertTrue(sale_order_line6, "A new sale line should have been created with ordered product")
self.assertEqual(len(self.sale_order.order_line), 5, "There should be 5 lines on the SO, 1 new created and 1 incremented")
self.assertEqual(len(self.sale_order.order_line.filtered(lambda sol: sol.is_expense)), 3, "There should be 3 expenses lines on the SO")
self.assertEqual((sale_order_line6.price_unit, sale_order_line6.qty_delivered, sale_order_line4.product_uom_qty, sale_order_line6.qty_invoiced), (self.product_order_sales_price.list_price, 2, 0, 0), 'Sale line is wrong after confirming 2e vendor invoice')
def test_no_expense(self):
""" Test invoicing vendor bill with no policy. Check nothing happen. """
# confirm SO
sale_order_line = self.env['sale.order.line'].create({
'name': self.product_no_expense.name,
'product_id': self.product_no_expense.id,
'product_uom_qty': 2,
'qty_delivered': 1,
'product_uom': self.product_no_expense.uom_id.id,
'price_unit': self.product_no_expense.list_price,
'order_id': self.sale_order.id,
})
self.sale_order._compute_tax_id()
self.sale_order.action_confirm()
# create invoice lines and validate it
move_form = Form(self.AccountMove)
move_form.partner_id = self.partner_customer_usd
move_form.journal_id = self.journal_purchase
with move_form.line_ids.new() as line_form:
line_form.product_id = self.product_no_expense
line_form.quantity = 3.0
line_form.analytic_account_id = self.analytic_account
invoice_a = move_form.save()
invoice_a.post()
self.assertEqual(len(self.sale_order.order_line), 1, "No SO line should have been created (or removed) when validating vendor bill")
self.assertEqual(sale_order_line.qty_delivered, 1, "The delivered quantity of SO line should not have been incremented")
self.assertTrue(invoice_a.mapped('line_ids.analytic_line_ids'), "Analytic lines should be generated")
| agpl-3.0 | 7,580,846,607,921,912,000 | 61.017316 | 264 | 0.669691 | false |
mterzo/puppetboard | test/test_docker_settings.py | 1 | 3694 | import pytest
import os
from puppetboard import docker_settings
import puppetboard.core
try:
import future.utils
except ImportError:
pass
try:
from imp import reload as reload
except ImportError:
pass
@pytest.fixture(scope='function')
def cleanUpEnv(request):
for env_var in dir(docker_settings):
if (env_var.startswith('__') or env_var.startswith('_') or
env_var.islower()):
continue
if env_var in os.environ:
del os.environ[env_var]
reload(docker_settings)
return
def test_default_host_port(cleanUpEnv):
assert docker_settings.PUPPETDB_HOST == 'puppetdb'
assert docker_settings.PUPPETDB_PORT == 8080
def test_set_host_port(cleanUpEnv):
os.environ['PUPPETDB_HOST'] = 'puppetdb2'
os.environ['PUPPETDB_PORT'] = '9081'
reload(docker_settings)
assert docker_settings.PUPPETDB_HOST == 'puppetdb2'
assert docker_settings.PUPPETDB_PORT == 9081
def test_set_proto(cleanUpEnv):
os.environ['PUPPETDB_PROTO'] = 'https'
reload(docker_settings)
assert docker_settings.PUPPETDB_PROTO == 'https'
def test_cert_true_test(cleanUpEnv):
os.environ['PUPPETDB_SSL_VERIFY'] = 'True'
reload(docker_settings)
assert docker_settings.PUPPETDB_SSL_VERIFY is True
os.environ['PUPPETDB_SSL_VERIFY'] = 'true'
reload(docker_settings)
assert docker_settings.PUPPETDB_SSL_VERIFY is True
def test_cert_false_test(cleanUpEnv):
os.environ['PUPPETDB_SSL_VERIFY'] = 'False'
reload(docker_settings)
assert docker_settings.PUPPETDB_SSL_VERIFY is False
os.environ['PUPPETDB_SSL_VERIFY'] = 'false'
reload(docker_settings)
assert docker_settings.PUPPETDB_SSL_VERIFY is False
def test_cert_path(cleanUpEnv):
ca_file = '/usr/ssl/path/ca.pem'
os.environ['PUPPETDB_SSL_VERIFY'] = ca_file
reload(docker_settings)
assert docker_settings.PUPPETDB_SSL_VERIFY == ca_file
def validate_facts(facts):
assert isinstance(facts, list)
assert len(facts) > 0
for map in facts:
assert isinstance(map, tuple)
assert len(map) == 2
def test_inventory_facts_default(cleanUpEnv):
validate_facts(docker_settings.INVENTORY_FACTS)
def test_inventory_facts_custom(cleanUpEnv):
os.environ['INVENTORY_FACTS'] = "A, B, C, D"
reload(docker_settings)
validate_facts(docker_settings.INVENTORY_FACTS)
def test_graph_facts_default(cleanUpEnv):
facts = docker_settings.GRAPH_FACTS
assert isinstance(facts, list)
assert 'puppetversion' in facts
def test_graph_facts_custom(cleanUpEnv):
os.environ['GRAPH_FACTS'] = "architecture, puppetversion, extra"
reload(docker_settings)
facts = docker_settings.GRAPH_FACTS
assert isinstance(facts, list)
assert len(facts) == 3
assert 'puppetversion' in facts
assert 'architecture' in facts
assert 'extra' in facts
def test_bad_log_value(cleanUpEnv, mocker):
os.environ['LOGLEVEL'] = 'g'
os.environ['PUPPETBOARD_SETTINGS'] = '../puppetboard/docker_settings.py'
reload(docker_settings)
puppetboard.core.APP = None
with pytest.raises(ValueError) as error:
puppetboard.core.get_app()
def test_default_table_selector(cleanUpEnv):
assert [10, 20, 50, 100, 500] == docker_settings.TABLE_COUNT_SELECTOR
def test_env_table_selector(cleanUpEnv):
os.environ['TABLE_COUNT_SELECTOR'] = '5,15,25'
reload(docker_settings)
assert [5, 15, 25] == docker_settings.TABLE_COUNT_SELECTOR
def test_env_column_options(cleanUpEnv):
os.environ['DISPLAYED_METRICS'] = 'resources.total, events.failure'
reload(docker_settings)
assert ['resources.total',
'events.failure'] == docker_settings.DISPLAYED_METRICS
| apache-2.0 | -5,558,611,864,670,293,000 | 26.567164 | 76 | 0.695181 | false |
qgis/QGIS-Django | qgis-app/plugins/tests/versionfield.py | 1 | 1690 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
Tests for version comparison field
DESCRIPTION
@copyright: 2014 by Alessandro Pasotti - ItOpen (http://www.itopen.it) <[email protected]>
@license: GNU GPL, see COPYING for details.
"""
import re
VERSION_RE = r'(^|(?<=\.))0+(?!\.)|\.#+'
TEST_CASES = (
'1.0.0',
'1.0.1',
'0.0.0',
'1.0',
'1.10',
'1.2',
'1.9',
'1.0.a',
'a.0.a',
'b.a.c',
'a.b',
'0.a.0.1',
'1.0.rc1',
'1.1a',
'1.1b',
'1.9.0',
)
def vjust(str, level=4, delim='.', bitsize=4, fillchar=' ', force_zero=False):
"""
Normalize a dotted version string.
1.12 becomes : 1. 12
1.1 becomes : 1. 1
if force_zero=True and level=2:
1.12 becomes : 1. 12. 0
1.1 becomes : 1. 1. 0
"""
if not str:
return str
nb = str.count(delim)
if nb < level:
if force_zero:
str += (level-nb) * (delim+'0')
else:
str += (level-nb) * delim
parts = []
for v in str.split(delim)[:level+1]:
if not v:
parts.append(v.rjust(bitsize, '#'))
else:
parts.append(v.rjust(bitsize, fillchar))
return delim.join(parts)
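# Doctest-style sketch (illustrative): missing components are padded with '#'
# and present ones right-justified, so plain string comparison sorts versions:
#
#   vjust('1.12', level=5, fillchar='0')  # -> '0001.0012.####.####.####.####'
#   vjust('1.1',  level=5, fillchar='0')  # -> '0001.0001.####.####.####.####'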
def test():
transformed = []
for v in TEST_CASES:
vj = vjust(v, level=5, fillchar='0')
transformed.append(vj)
ck = re.sub(VERSION_RE, '', vj)
print ("Testing\t %s (%s)\t\t %s" % (v, ck, vj))
if v != ck:
print ("!!! failed !!!")
# Test sorting
transformed.sort()
print ("Sorted:")
for v in transformed:
print (v)
if __name__ == "__main__":
test()
| gpl-2.0 | 3,359,734,304,028,984,000 | 17.571429 | 95 | 0.477515 | false |
macbre/index-digest | indexdigest/test/linters/test_0092_select_star.py | 1 | 1853 | from __future__ import print_function
from unittest import TestCase
from indexdigest.linters.linter_0092_select_star import check_select_star, is_wildcard_query
from indexdigest.test import DatabaseTestMixin, read_queries_from_log
class TestLinter(TestCase, DatabaseTestMixin):
def test_is_wildcard_query(self):
assert is_wildcard_query('SELECT * FROM foo;')
assert is_wildcard_query('SELECT t.* FROM foo AS t;')
assert is_wildcard_query('SELECT * FROM `user` WHERE user_id = 34994913 LIMIT 1')
assert is_wildcard_query('/* User::loadFromDatabase */ SELECT * FROM `user` WHERE user_id = 34994913 LIMIT 1')
assert is_wildcard_query('SELECT /* User::loadFromDatabase */ * FROM `user` WHERE user_id = 34994913 LIMIT 1')
assert is_wildcard_query('SELECT id FROM foo') is False
assert is_wildcard_query('SELECT (id+2) * 2 FROM foo') is False
assert is_wildcard_query('SELECT 3 * 3') is False
assert is_wildcard_query('SELECT count(*) FROM foo') is False
assert is_wildcard_query('SELECT /* foo */ test FROM foo') is False
assert is_wildcard_query('INSERT * INTO foo') is False
# assert False
def test_check_select_star(self):
reports = list(check_select_star(self.connection, read_queries_from_log('0092-select-star-log')))
print(list(map(str, reports)))
assert len(reports) == 2
assert str(reports[0]) == 'foo: "SELECT * FROM foo" query uses SELECT *'
assert reports[0].table_name == 'foo'
assert reports[0].context['query'] == 'SELECT * FROM foo;'
assert str(reports[1]) == 'bar: "SELECT t.* FROM bar AS t" query uses SELECT *'
assert reports[1].table_name == 'bar'
assert reports[1].context['query'] == 'SELECT t.* FROM bar AS t;'
# assert False
| mit | -6,784,867,494,317,045,000 | 42.093023 | 122 | 0.650836 | false |
aleosd/sfpy | client/gamedata.py | 1 | 14314 | # -*- coding: UTF-8 -*-
import datetime
import logging
from .settings import LOGGER_NAME
from .gameapi import APIManager
class Resources:
def __init__(self):
self.wallet = {}
def add(self, data):
self.wallet = data.get('wallet', {})
def is_enough_for_mission(self, mission):
for currency_data in mission.price['currencies']:
currency_id = str(currency_data['id'])
if self.wallet.get(currency_id, 0) < currency_data['amount']:
return False
return True
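# Illustrative shapes (mirroring the keys used above, values made up):
#   resources.wallet == {'1': 100}
#   mission.price == {'currencies': [{'id': 1, 'amount': 50}], 'resources': []}
#   resources.is_enough_for_mission(mission)  # -> True, since 100 >= 50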
class Progress:
TYPE_MISSION = "FUSE"
TYPE_UPGRADE = "UPGRADE"
def __init__(self, **kwargs):
self.id = kwargs['id']
self.finished = kwargs['finished']
self.start_time = self.time_from_ms(kwargs['startTime'])
self.end_time = self.time_from_ms(kwargs['endTime'])
self.type = kwargs['type']
if self.is_mission():
self.mission_id = kwargs['fuseData']['missionId']
@staticmethod
def time_from_ms(ms):
return datetime.datetime.fromtimestamp(ms // 1000)
def is_finished(self):
return self.finished
def time_elapsed(self):
return self.end_time - datetime.datetime.now()
def time_elapsed_verbose(self):
eta = self.time_elapsed()
return "{:02d}:{:02d}:{:02d}".format(
eta.seconds // 3600,
(eta.seconds // 60) % 60,
eta.seconds % 60
)
def is_mission(self):
return self.type == self.TYPE_MISSION
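# Hedged example of the payload shape assumed by Progress (keys mirror the
# constructor above; the timestamps are made-up epoch milliseconds):
#
#   p = Progress(id=1, finished=False, startTime=1500000000000,
#                endTime=1500003600000, type=Progress.TYPE_MISSION,
#                fuseData={'missionId': 42})
#   p.time_elapsed_verbose()  # -> "HH:MM:SS" remaining until end_time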
class Mission:
def __init__(self, **kwargs):
self.id = kwargs['id']
self.name = kwargs['name']
self.in_progress = kwargs['inProgress']
self.is_success = kwargs['isSuccess']
self.difficulty = kwargs['difficulty']
self.duration = kwargs['duration']
self.experience = kwargs['experience']
self.price = kwargs['price']
self._professions = kwargs['professions']
self.slot_count = kwargs['slotCount']
self.quality_name = kwargs['missionQualityName']
self.mission_type = kwargs['missionType']
def is_free(self):
return not (self.price['currencies'] or self.price['resources'])
def is_available(self):
return not self.in_progress
def get_profession_ids(self):
return [i['id'] for i in self._professions]
def is_mining(self):
return self.quality_name == u"Добыча ресурсов" and self.is_free()
def is_battle(self):
return self.quality_name == u"Боевое задание"
def is_cult(self):
return self.quality_name == u"Развитие культа"
def is_invasion(self):
return self.quality_name == u"Вторжение"
def is_case(self):
return self.mission_type == "Case"
def result(self):
if self.is_success:
return u"успех"
return u"неудача"
class Follower:
def __init__(self, **kwargs):
self.id = kwargs['id']
self.efficiency = kwargs['efficiency']
self.in_progress = kwargs['inProgress']
self.profession = kwargs['profession']
def is_available(self):
return not self.in_progress
@property
def profession_id(self):
return self.profession['id']
class ProgressManager:
def __init__(self):
self.progresses = {}
self.logger = logging.getLogger(LOGGER_NAME)
def add_progress(self, data):
p = Progress(**data)
self.progresses[p.id] = p
self.logger.debug(u"Добавляем прогресс id {}".format(p.id))
return p
def remove_progress(self, progress):
del self.progresses[progress.id]
def add_many(self, data, clear=True):
self.logger.info(u"Добавляем информацию по прогрессам")
if clear:
self.clear()
for progress_data in data:
self.add_progress(progress_data)
def clear(self):
self.progresses = {}
def get_mission_progress_list(self):
return [p for p in self.progresses.values() if p.is_mission()]
class MissionManager:
def __init__(self):
self.missions = {}
self.logger = logging.getLogger(LOGGER_NAME)
def add_mission(self, data):
mission = Mission(**data)
self.missions[mission.id] = mission
self.logger.debug(u"Добавляем миссию id {}".format(mission.id))
def add_many(self, data, clear=True):
if clear:
self.clear()
self.logger.info(u"Добавляем миссии: {}".format(len(data)))
for mission_data in data:
self.add_mission(mission_data)
def clear(self):
self.missions = {}
def mining_missions(self):
u"""
        Return the missions that are available to run and that require no
        resources. The list is sorted by ascending mission duration and by
        the number of adepts needed to complete the mission.
:return: List of missions
"""
missions = [m for m in self.missions.values() if m.is_mining() and
m.is_available()]
return sorted(missions, key=lambda m: (m.duration, m.slot_count))
def invasion_missions(self):
missions = [m for m in self.missions.values() if m.is_invasion() and
m.is_available()]
return sorted(missions, key=lambda m: (m.duration, m.slot_count))
def case_missions(self):
return [m for m in self.missions.values() if m.is_case() and
m.is_available()]
def cult_missions(self):
return [m for m in self.missions.values() if m.is_cult() and
m.is_available()]
def get(self, id_):
return self.missions.get(id_)
class FollowerManager:
def __init__(self):
self.followers = {}
self.logger = logging.getLogger(LOGGER_NAME)
def add_follower(self, data):
follower = Follower(**data)
self.followers[follower.id] = follower
def add_many(self, data, clear=True):
if data and clear:
self.clear()
for follower in data:
self.add_follower(follower)
def clear(self):
self.followers = {}
def free_followers(self):
return {k: f for k, f in self.followers.items() if f.is_available()}
def get_for_profession(self, profession, free=False):
u"""
        Return the followers that have the given profession
        :param free: bool, only consider followers that are not busy
:param profession: int, profession id
:return: list
"""
if free:
followers = self.free_followers().values()
else:
followers = self.followers.values()
if isinstance(profession, (list, tuple)):
return [f for f in followers if f.profession_id in profession]
if isinstance(profession, int):
return [f for f in followers if f.profession_id == profession]
raise ValueError(u"Profession must be an int or list or tuple")
def get_efficient(self, count=None, free=False, exclude=None):
u"""
        Return the followers sorted by efficiency in descending order. The
        count argument limits the number of returned values.
        :param free: bool, only consider followers that are not busy
:param count: int
:param exclude: followers list to exclude from result
:return: list
"""
if free:
followers = self.free_followers().values()
else:
followers = self.followers.values()
if exclude:
followers = [f for f in followers if f not in exclude]
fs = sorted(followers, key=lambda k: k.efficiency, reverse=True)
return fs[0:count]
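# Hedged usage sketch (follower payload shape taken from the constructor
# above, ids/efficiencies made up):
#
#   fm = FollowerManager()
#   fm.add_many([{'id': 1, 'efficiency': 5, 'inProgress': False,
#                 'profession': {'id': 2}},
#                {'id': 2, 'efficiency': 9, 'inProgress': False,
#                 'profession': {'id': 3}}])
#   fm.get_for_profession(2, free=True)  # -> [follower 1]
#   fm.get_efficient(count=1)            # -> [follower 2] (highest efficiency)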
class Game:
def __init__(self):
self.logger = logging.getLogger(LOGGER_NAME)
self.progress_manager = ProgressManager()
self.mission_manager = MissionManager()
self.follower_manager = FollowerManager()
self.resources = Resources()
self.api = APIManager()
self.data_has_changed = False
def start(self, session):
start_data = self.api.start(session)
self.update_state(start_data)
self.process_state()
def turn(self):
data = self.api.get_game_data()
self.update_state(data)
self.process_state()
def update_state(self, data):
self.resources.add(data)
self.progress_manager.add_many(data.get('progresses', []))
self.mission_manager.add_many(data.get('missions', []))
self.follower_manager.add_many(data.get('followers', []))
def process_state(self):
self.process_progresses(
self.progress_manager.get_mission_progress_list())
case_missions = self.mission_manager.case_missions()
if case_missions:
self.process_case_missions(case_missions)
mining_missions = self.mission_manager.mining_missions()
if mining_missions:
self.process_missions(mining_missions)
invasion_missions = self.mission_manager.invasion_missions()
if invasion_missions:
self.process_missions(invasion_missions)
cult_missions = self.mission_manager.cult_missions()
if cult_missions:
self.process_missions(cult_missions)
if self.data_has_changed:
self.logger.info(u"Данные изменились, обрабатываем повторно")
self.data_has_changed = False
self.process_state()
def process_progresses(self, progresses):
u"""
        Check the state of the currently running progresses and send an API
        request for each one that has finished.
        :param progresses: list of progresses
"""
for p in progresses:
if self.data_has_changed:
break
mission = self.mission_manager.get(p.mission_id)
self.logger.info(u"Проверяем состояние прогресса {} по "
u"миссии \"{}\"".format(p.id, mission.name))
if p.is_finished():
self.logger.info(
u"Прогресс {} завершен, отправляем запрос".format(p.id))
status, result = self.api.finish_progress(p)
self._handle_call_result(status, result)
else:
self.logger.info(
u"До окончания прогресса {} еще {}, результат - {}".format(
p.id, p.time_elapsed_verbose(), mission.result()))
def process_missions(self, missions):
self.logger.info(u"Доступно миссий {}: {}".format(
missions[0].quality_name, len(missions)))
for mission in missions:
if self.data_has_changed:
break
status, result = self.process_mission(mission)
self._handle_call_result(status, result)
def process_mission(self, mission):
self.logger.info(u"Пробуем запустить миссию {}".format(mission.id))
followers = self.follower_manager.free_followers()
if mission.slot_count > len(followers):
return self.api.STATUS_ACTION_NOT_AVAILABLE, \
u"Недостаточно последователей"
if not self.resources.is_enough_for_mission(mission):
return self.api.STATUS_ACTION_NOT_AVAILABLE, \
u"Недостаточно ресурсов"
matched_followers = self.follower_manager.get_for_profession(
mission.get_profession_ids(), free=True)
if len(matched_followers) < mission.slot_count:
additional_followers = self.follower_manager.get_efficient(
mission.slot_count - len(matched_followers), free=True,
exclude=matched_followers
)
matched_followers = matched_followers + additional_followers
return self.api.start_mission(mission, matched_followers)
def process_case_missions(self, missions):
self.logger.info(u"Доступно ивентовых миссий: {}".format(len(missions)))
for mission in missions:
if self.data_has_changed:
break
status, result = self.process_case_mission(mission)
self._handle_call_result(status, result)
def process_case_mission(self, mission):
self.logger.info(u"Пробуем запустить миссию {}".format(mission.id))
followers = self.follower_manager.free_followers()
if mission.slot_count > len(followers):
return self.api.STATUS_ACTION_NOT_AVAILABLE, \
u"Недостаточно последователей"
followers = self.follower_manager.get_efficient(free=True)
return self.api.start_mission(mission, followers[-mission.slot_count:])
def _handle_call_result(self, status, result):
if status == self.api.STATUS_SUCCESS:
self.logger.info(u"Успешный запрос, сервер вернул \"{}\"".format(
result['operationResult']['actionFailCause']
))
self.update_state(result['updateData'])
self.data_has_changed = True
elif status == self.api.STATUS_ACTION_NOT_AVAILABLE:
self.logger.info(result)
elif status == self.api.STATUS_GAME_ERROR:
self.logger.error(u"Ошибка выполнения запроса: \"{}\"".format(
result['operationResult']['actionFailCause']
))
else:
self.logger.critical(result)
| mit | 4,507,464,201,464,738,000 | 33.430412 | 80 | 0.601916 | false |
UManPychron/pychron | pychron/dvc/tasks/actions.py | 1 | 7897 | # ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
import os
from pyface.confirmation_dialog import confirm
from pyface.constant import YES
from pyface.message_dialog import warning, information
from pyface.tasks.action.task_action import TaskAction
from traitsui.menu import Action
from pychron.core.ui.progress_dialog import myProgressDialog
from pychron.dvc import repository_path
from pychron.envisage.resources import icon
from pychron.envisage.tasks.actions import restart
from pychron.pychron_constants import DVC_PROTOCOL
class LocalRepositoryAction(TaskAction):
enabled_name = 'selected_local_repository_name'
class RemoteRepositoryAction(TaskAction):
enabled_name = 'selected_repository'
class CloneAction(RemoteRepositoryAction):
method = 'clone'
name = 'Clone'
image = icon('repo-clone')
tooltip = 'Clone repository from remote. e.g. git clone https://github.com...'
class AddBranchAction(LocalRepositoryAction):
name = 'Add Branch'
method = 'add_branch'
image = icon('git-branch')
tooltip = 'Add branch to selected repository'
class CheckoutBranchAction(LocalRepositoryAction):
name = 'Checkout Branch'
method = 'checkout_branch'
image = icon('check')
tooltip = 'Checkout branch. e.g. git checkout <branch_name>'
class PushAction(LocalRepositoryAction):
name = 'Push'
method = 'push'
image = icon('repo-push')
tooltip = 'Push changes to remote. git push'
class PullAction(LocalRepositoryAction):
name = 'Pull'
method = 'pull'
image = icon('repo-pull')
tooltip = 'Pull changes from remote. git pull'
class RebaseAction(LocalRepositoryAction):
name = 'Rebase'
method = 'rebase'
image = icon('git-merge')
tooltip = 'Rebase commits from [master] onto current branch. git rebase'
class FindChangesAction(LocalRepositoryAction):
name = 'Find Changes'
method = 'find_changes'
tooltip = 'Search all local repositories for changes. e.g. git log <remote>/branch..HEAD'
image = icon('search')
class DeleteLocalChangesAction(LocalRepositoryAction):
name = 'Delete Local Changes'
method = 'delete_local_changes'
image = icon('trashcan')
class DeleteChangesAction(LocalRepositoryAction):
name = 'Delete Commits'
method = 'delete_commits'
image = icon('trashcan')
class ArchiveRepositoryAction(LocalRepositoryAction):
name = 'Archive Repository'
method = 'archive_repository'
image = icon('squirrel')
class LoadOriginAction(TaskAction):
name = 'Load Origin'
method = 'load_origin'
image = icon('cloud-download')
tooltip = 'Update the list of available repositories'
class SyncSampleInfoAction(LocalRepositoryAction):
name = 'Sync Repo/DB Sample Info'
method = 'sync_sample_info'
tooltip = 'Copy information from Central Database to the selected repository'
image = icon('octicon-database')
class SyncRepoAction(LocalRepositoryAction):
name = 'Sync'
method = 'sync_repo'
tooltip = 'Sync to Origin. aka Pull then Push'
image = icon('sync')
class RepoStatusAction(LocalRepositoryAction):
name = 'Status'
method = 'status'
tooltip = 'Report the repository status. e.g. git status'
image = icon('pulse')
class BookmarkAction(LocalRepositoryAction):
name = 'Bookmark'
method = 'add_bookmark'
tooltip = 'Add a bookmark to the data reduction history. e.g. git tag -a <name> -m <message>'
image = icon('git-bookmark')
class SortLocalReposAction(TaskAction):
name = 'Sort Repos'
method = 'sort_repos'
tooltip = 'Sort repos by most recently analyzed'
# class SyncMetaDataAction(Action):
# name = 'Sync Repo/DB Metadata'
#
# def perform(self, event):
# app = event.task.window.application
# app.information_dialog('Sync Repo disabled')
# return
#
# dvc = app.get_service('pychron.dvc.dvc.DVC')
# if dvc:
# dvc.repository_db_sync('IR986', dry_run=False)
class ShareChangesAction(Action):
name = 'Share Changes'
def perform(self, event):
from git import Repo
from git.exc import InvalidGitRepositoryError
from pychron.paths import paths
remote = 'origin'
branch = 'master'
repos = []
for d in os.listdir(paths.repository_dataset_dir):
if d.startswith('.') or d.startswith('~'):
continue
try:
r = Repo(repository_path(d))
except InvalidGitRepositoryError:
continue
repos.append(r)
n = len(repos)
pd = myProgressDialog(max=n - 1,
can_cancel=True,
can_ok=False)
pd.open()
shared = False
for r in repos:
pd.change_message('Fetch {}'.format(os.path.basename(r.working_dir)))
c = r.git.log('{}/{}..HEAD'.format(remote, branch), '--oneline')
if c:
r.git.pull()
d = os.path.basename(r.working_dir)
if confirm(None, 'Share changes made to {}.\n\n{}'.format(d, c)) == YES:
r.git.push(remote, branch)
shared = True
msg = 'Changes successfully shared' if shared else 'No changes to share'
information(None, msg)
class GenerateCurrentsAction(Action):
name = 'Generate Currents'
def perform(self, event):
app = event.task.window.application
dvc = app.get_service(DVC_PROTOCOL)
dvc.generate_currents()
class MapRunIDsAction(Action):
name = 'Map RunIDs'
def perform(self, event):
app = event.task.window.application
dvc = app.get_service(DVC_PROTOCOL)
from pychron.dvc.map_runid import MapRunID
mr = MapRunID()
mr.map(dvc)
class ClearCacheAction(Action):
name = 'Clear Cache'
def perform(self, event):
app = event.task.window.application
dvc = app.get_service(DVC_PROTOCOL)
dvc.clear_cache()
class WorkOfflineAction(Action):
name = 'Work Offline'
def perform(self, event):
app = event.task.window.application
dvc = app.get_service(DVC_PROTOCOL)
if dvc.db.kind != 'mysql':
            warning(None, 'You are not using a centralized MySQL database')
else:
from pychron.dvc.work_offline import WorkOffline
wo = WorkOffline(dvc=dvc, application=app)
if wo.initialize():
wo.edit_traits()
class UseOfflineDatabase(Action):
name = 'Use Offline Database'
def perform(self, event):
from pychron.dvc.work_offline import switch_to_offline_database
app = event.task.window.application
switch_to_offline_database(app.preferences)
ret = confirm(None, 'You are now using the offline database. Restart now for changes to take effect')
if ret == YES:
restart()
# ============= EOF =============================================
| apache-2.0 | 155,290,666,617,389,280 | 29.141221 | 109 | 0.629859 | false |
thombashi/DataProperty | dataproperty/_container.py | 1 | 3704 | """
.. codeauthor:: Tsuyoshi Hombashi <[email protected]>
"""
import abc
from typepy import RealNumber
class AbstractContainer(metaclass=abc.ABCMeta):
@abc.abstractproperty
def min_value(self): # pragma: no cover
pass
@abc.abstractproperty
def max_value(self): # pragma: no cover
pass
@abc.abstractmethod
def mean(self): # pragma: no cover
pass
@abc.abstractmethod
def update(self, value): # pragma: no cover
pass
@abc.abstractmethod
def merge(self, value): # pragma: no cover
pass
def __repr__(self):
if not self.has_value():
return "None"
return ", ".join(["min={}".format(self.min_value), "max={}".format(self.max_value)])
def has_value(self):
return self.min_value is not None and self.max_value is not None
def is_same_value(self):
return self.has_value() and self.min_value == self.max_value
def is_zero(self):
return self.has_value() and self.min_value == 0 and self.max_value == 0
class ListContainer(AbstractContainer):
__slots__ = ("__value_list",)
@property
def min_value(self):
try:
return min(self.__value_list)
except ValueError:
return None
@property
def max_value(self):
try:
return max(self.__value_list)
except ValueError:
return None
@property
def value_list(self):
return self.__value_list
def __init__(self, value_list=None):
if value_list is None:
self.__value_list = []
else:
self.__value_list = value_list
def mean(self):
try:
return sum(self.__value_list) / len(self.__value_list)
except ZeroDivisionError:
return float("nan")
def update(self, value):
store_value = RealNumber(value).try_convert()
if store_value is None:
return
self.__value_list.append(store_value)
def merge(self, value):
try:
self.__value_list.extend(value)
except TypeError:
pass
class MinMaxContainer(AbstractContainer):
__slots__ = ("__min_value", "__max_value")
@property
def min_value(self):
return self.__min_value
@property
def max_value(self):
return self.__max_value
def __init__(self, value_list=None):
self.__min_value = None
self.__max_value = None
if value_list is None:
return
for value in value_list:
self.update(value)
def __eq__(self, other):
return all([self.min_value == other.min_value, self.max_value == other.max_value])
def __ne__(self, other):
return any([self.min_value != other.min_value, self.max_value != other.max_value])
def __contains__(self, x):
return self.min_value <= x <= self.max_value
def diff(self):
try:
return self.max_value - self.min_value
except TypeError:
return float("nan")
def mean(self):
try:
return (self.max_value + self.min_value) * 0.5
except TypeError:
return float("nan")
def update(self, value):
if value is None:
return
if self.__min_value is None:
self.__min_value = value
else:
self.__min_value = min(self.__min_value, value)
if self.__max_value is None:
self.__max_value = value
else:
self.__max_value = max(self.__max_value, value)
def merge(self, value):
self.update(value.min_value)
self.update(value.max_value)
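# ---------------------------------------------------------------------------
# Illustrative self-check (an addition for clarity, not part of the upstream
# module); it assumes typepy is installed, as the import above already
# requires, and only exercises the two public containers defined here.
if __name__ == "__main__":
    list_container = ListContainer()
    for value in (1.5, 2.5, 3.5):
        list_container.update(value)
    print(list_container, list_container.mean())   # min=1.5, max=3.5, mean 2.5

    minmax = MinMaxContainer([3, 7])
    print(5 in minmax, minmax.diff(), minmax.mean())   # True 4 5.0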
| mit | -3,326,360,890,995,376,000 | 23.368421 | 92 | 0.560475 | false |
CodyKochmann/generators | generators/inline_tools.py | 1 | 2039 | #!/usr/bin/env python
from __future__ import print_function
_print = print
del print_function
from inspect import getsource
from strict_functions import strict_globals, noglobals
__all__ = 'asserts', 'print', 'attempt'
@strict_globals(getsource=getsource)
def asserts(input_value, rule, message=''):
""" this function allows you to write asserts in generators since there are
moments where you actually want the program to halt when certain values
are seen.
"""
assert callable(rule) or type(rule)==bool, 'asserts needs rule to be a callable function or a test boolean'
assert isinstance(message, str), 'asserts needs message to be a string'
# if the message is empty and rule is callable, fill message with rule's source code
if len(message)==0 and callable(rule):
try:
s = getsource(rule).splitlines()[0].strip()
except:
s = repr(rule).strip()
message = 'illegal input of {} breaks - {}'.format(input_value, s)
if callable(rule):
# if rule is a function, run the function and assign it to rule
rule = rule(input_value)
# now, assert the rule and return the input value
assert rule, message
return input_value
del getsource
@strict_globals(_print=_print)
def print(*a):
""" print just one that returns what you give it instead of None """
try:
_print(*a)
return a[0] if len(a) == 1 else a
except:
_print(*a)
del _print
@noglobals
def attempt(fn, default_output=None):
''' attempt running a function in a try block without raising exceptions '''
assert callable(fn), 'generators.inline_tools.attempt needs fn to be a callable function'
try:
return fn()
except:
return default_output
del strict_globals, noglobals
if __name__ == '__main__':
print(print(attempt(lambda:1/0)))
print(print(attempt(lambda:1/2)))
print(print(attempt(lambda:asserts(1, lambda i:1/i))))
print(asserts(0, lambda i:1>i))
asserts(2, lambda i:1>i)
| mit | -1,161,471,738,659,893,500 | 30.859375 | 111 | 0.659637 | false |
njoubert/ardupilot | Tools/ardupilotwaf/px4.py | 1 | 11068 | #!/usr/bin/env python
# encoding: utf-8
"""
Waf tool for PX4 build
"""
from waflib import Errors, Logs, Task, Utils
from waflib.TaskGen import after_method, before_method, feature
import os
import shutil
import sys
_dynamic_env_data = {}
def _load_dynamic_env_data(bld):
bldnode = bld.bldnode.make_node('modules/PX4Firmware')
for name in ('cxx_flags', 'include_dirs', 'definitions'):
_dynamic_env_data[name] = bldnode.find_node(name).read().split(';')
@feature('px4_ap_library', 'px4_ap_program')
@before_method('process_source')
def px4_dynamic_env(self):
# The generated files from configuration possibly don't exist if it's just
# a list command (TODO: figure out a better way to address that).
if self.bld.cmd == 'list':
return
if not _dynamic_env_data:
_load_dynamic_env_data(self.bld)
self.env.append_value('INCLUDES', _dynamic_env_data['include_dirs'])
self.env.prepend_value('CXXFLAGS', _dynamic_env_data['cxx_flags'])
self.env.prepend_value('CXXFLAGS', _dynamic_env_data['definitions'])
# Single static library
# NOTE: This only works only for local static libraries dependencies - fake
# libraries aren't supported yet
@feature('px4_ap_program')
@after_method('apply_link')
@before_method('process_use')
def px4_import_objects_from_use(self):
queue = list(Utils.to_list(getattr(self, 'use', [])))
names = set()
while queue:
name = queue.pop(0)
if name in names:
continue
names.add(name)
try:
tg = self.bld.get_tgen_by_name(name)
except Errors.WafError:
continue
tg.post()
for t in getattr(tg, 'compiled_tasks', []):
self.link_task.set_inputs(t.outputs)
queue.extend(Utils.to_list(getattr(tg, 'use', [])))
class px4_copy(Task.Task):
color = 'CYAN'
def run(self):
shutil.copy2(self.inputs[0].abspath(), self.outputs[0].abspath())
def keyword(self):
return "PX4: Copying %s to" % self.inputs[0].name
def __str__(self):
return self.outputs[0].path_from(self.generator.bld.bldnode)
class px4_add_git_hashes(Task.Task):
run_str = '${PYTHON} ${PX4_ADD_GIT_HASHES} --ardupilot ${PX4_APM_ROOT} --px4 ${PX4_ROOT} --nuttx ${PX4_NUTTX_ROOT} ${SRC} ${TGT}'
color = 'CYAN'
def keyword(self):
return "PX4: Copying firmware and adding git hashes"
def __str__(self):
return self.outputs[0].path_from(self.outputs[0].ctx.launch_node())
def _update_firmware_sig(fw_task, firmware, elf):
original_post_run = fw_task.post_run
def post_run():
original_post_run()
firmware.sig = firmware.cache_sig = Utils.h_file(firmware.abspath())
elf.sig = elf.cache_sig = Utils.h_file(elf.abspath())
fw_task.post_run = post_run
_cp_px4io = None
_firmware_semaphorish_tasks = []
_upload_task = []
@feature('px4_ap_program')
@after_method('process_source')
def px4_firmware(self):
global _cp_px4io, _firmware_semaphorish_tasks, _upload_task
board_name = self.env.get_flat('PX4_BOARD_NAME')
px4 = self.bld.cmake('px4')
px4.vars['APM_PROGRAM_LIB'] = self.link_task.outputs[0].abspath()
if self.env.PX4_PX4IO_NAME and not _cp_px4io:
px4io_task = self.create_cmake_build_task('px4', 'fw_io')
px4io = px4io_task.cmake.bldnode.make_node(
'src/modules/px4iofirmware/%s.bin' % self.env.PX4_PX4IO_NAME,
)
px4io_elf = px4.bldnode.make_node(
'src/modules/px4iofirmware/%s' % self.env.PX4_PX4IO_NAME
)
px4io_task.set_outputs([px4io, px4io_elf])
romfs = self.bld.bldnode.make_node(self.env.PX4_ROMFS_BLD)
romfs_px4io = romfs.make_node('px4io/px4io.bin')
romfs_px4io.parent.mkdir()
_cp_px4io = self.create_task('px4_copy', px4io, romfs_px4io)
_cp_px4io.keyword = lambda: 'PX4: Copying PX4IO to ROMFS'
px4io_elf_dest = self.bld.bldnode.make_node(self.env.PX4IO_ELF_DEST)
cp_px4io_elf = self.create_task('px4_copy', px4io_elf, px4io_elf_dest)
fw_task = self.create_cmake_build_task(
'px4',
'build_firmware_%s' % board_name,
)
fw_task.set_run_after(self.link_task)
    # we need to synchronize in order to avoid the output expected by the
    # previous ap_program being overwritten before it is used
for t in _firmware_semaphorish_tasks:
fw_task.set_run_after(t)
_firmware_semaphorish_tasks = []
if self.env.PX4_PX4IO_NAME and _cp_px4io.generator is self:
fw_task.set_run_after(_cp_px4io)
firmware = px4.bldnode.make_node(
'src/firmware/nuttx/nuttx-%s-apm.px4' % board_name,
)
fw_elf = px4.bldnode.make_node(
'src/firmware/nuttx/firmware_nuttx',
)
_update_firmware_sig(fw_task, firmware, fw_elf)
fw_dest = self.bld.bldnode.make_node(
os.path.join(self.program_dir, '%s.px4' % self.program_name)
)
git_hashes = self.create_task('px4_add_git_hashes', firmware, fw_dest)
git_hashes.set_run_after(fw_task)
_firmware_semaphorish_tasks.append(git_hashes)
fw_elf_dest = self.bld.bldnode.make_node(
os.path.join(self.program_dir, self.program_name)
)
cp_elf = self.create_task('px4_copy', fw_elf, fw_elf_dest)
cp_elf.set_run_after(fw_task)
_firmware_semaphorish_tasks.append(cp_elf)
self.build_summary = dict(
target=self.name,
binary=fw_elf_dest.path_from(self.bld.bldnode),
)
if self.bld.options.upload:
if _upload_task:
Logs.warn('PX4: upload for %s ignored' % self.name)
return
_upload_task = self.create_cmake_build_task('px4', 'upload')
_upload_task.set_run_after(fw_task)
_firmware_semaphorish_tasks.append(_upload_task)
def _px4_taskgen(bld, **kw):
if 'cls_keyword' in kw and not callable(kw['cls_keyword']):
cls_keyword = str(kw['cls_keyword'])
kw['cls_keyword'] = lambda tsk: 'PX4: ' + cls_keyword
if 'cls_str' in kw and not callable(kw['cls_str']):
cls_str = str(kw['cls_str'])
kw['cls_str'] = lambda tsk: cls_str
kw['color'] = 'CYAN'
return bld(**kw)
@feature('_px4_romfs')
def _process_romfs(self):
bld = self.bld
file_list = [
'init.d/rc.APM',
'init.d/rc.error',
'init.d/rcS',
(bld.env.PX4_BOOTLOADER, 'bootloader/fmu_bl.bin'),
]
if bld.env.PX4_BOARD_RC:
board_rc = 'init.d/rc.%s' % bld.env.get_flat('PX4_BOARD_NAME')
file_list.append((board_rc, 'init.d/rc.board'))
romfs_src = bld.srcnode.find_dir(bld.env.PX4_ROMFS_SRC)
romfs_bld = bld.bldnode.make_node(bld.env.PX4_ROMFS_BLD)
for item in file_list:
if isinstance(item, str):
src = romfs_src.make_node(item)
dst = romfs_bld.make_node(item)
else:
src = romfs_src.make_node(item[0])
dst = romfs_bld.make_node(item[1])
bname = os.path.basename(str(src))
if bname in bld.env.ROMFS_EXCLUDE:
print("Excluding %s" % bname)
continue
dst.parent.mkdir()
self.create_task('px4_copy', src, dst)
def configure(cfg):
cfg.env.CMAKE_MIN_VERSION = '3.2'
cfg.load('cmake')
bldnode = cfg.bldnode.make_node(cfg.variant)
env = cfg.env
env.AP_PROGRAM_FEATURES += ['px4_ap_program']
kw = env.AP_LIBRARIES_OBJECTS_KW
kw['features'] = Utils.to_list(kw.get('features', [])) + ['px4_ap_library']
def srcpath(path):
return cfg.srcnode.make_node(path).abspath()
def bldpath(path):
return bldnode.make_node(path).abspath()
board_name = env.get_flat('PX4_BOARD_NAME')
# TODO: we should move stuff from mk/PX4 to Tools/ardupilotwaf/px4 after
# stop using the make-based build system
env.PX4_ROMFS_SRC = 'mk/PX4/ROMFS'
env.PX4_ROMFS_BLD = 'px4-extra-files/ROMFS'
env.PX4_BOOTLOADER = '/../bootloader/%s' % env.PX4_BOOTLOADER_NAME
env.PX4_ADD_GIT_HASHES = srcpath('Tools/scripts/add_git_hashes.py')
env.PX4_APM_ROOT = srcpath('')
env.PX4_ROOT = srcpath('modules/PX4Firmware')
env.PX4_NUTTX_ROOT = srcpath('modules/PX4NuttX')
if env.PX4_PX4IO_NAME:
env.PX4IO_ELF_DEST = 'px4-extra-files/px4io'
nuttx_config='nuttx_%s_apm' % board_name
env.PX4_CMAKE_VARS = dict(
CONFIG=nuttx_config,
CMAKE_MODULE_PATH=srcpath('Tools/ardupilotwaf/px4/cmake'),
NUTTX_SRC=env.PX4_NUTTX_ROOT,
PX4_NUTTX_ROMFS=bldpath(env.PX4_ROMFS_BLD),
ARDUPILOT_BUILD='YES',
EXTRA_CXX_FLAGS=' '.join((
# NOTE: these "-Wno-error=*" flags should be removed as we update
# the submodule
'-Wno-error=double-promotion',
'-Wno-error=reorder',
# NOTE: *Temporarily* using this definition so that both
# PX4Firmware build systems (cmake and legacy make-based) can live
# together
'-DCMAKE_BUILD',
'-DARDUPILOT_BUILD',
'-I%s' % bldpath('libraries/GCS_MAVLink'),
'-I%s' % bldpath('libraries/GCS_MAVLink/include/mavlink'),
'-Wl,--gc-sections',
)),
EXTRA_C_FLAGS=' '.join((
# NOTE: *Temporarily* using this definition so that both
# PX4Firmware build systems (cmake and legacy make-based) can live
# together
'-DCMAKE_BUILD',
)),
)
def build(bld):
board_name = bld.env.get_flat('PX4_BOARD_NAME')
px4 = bld.cmake(
name='px4',
cmake_src=bld.srcnode.find_dir('modules/PX4Firmware'),
cmake_vars=bld.env.PX4_CMAKE_VARS,
cmake_flags=['-Wno-deprecated'],
)
px4.build(
'msg_gen',
group='dynamic_sources',
cmake_output_patterns='src/modules/uORB/topics/*.h',
)
px4.build(
'prebuild_targets',
group='dynamic_sources',
cmake_output_patterns='%s/NuttX/nuttx-export/**/*.h' % board_name,
)
bld(
name='px4_romfs_static_files',
group='dynamic_sources',
features='_px4_romfs',
)
bld.extra_build_summary = _extra_build_summary
def _extra_build_summary(bld, build_summary):
build_summary.text('')
build_summary.text('PX4')
build_summary.text('', '''
The ELF files are pointed by the path in the "%s" column. The .px4 files are in
the same directory of their corresponding ELF files.
''' % build_summary.header_text['target'])
if not bld.options.upload:
build_summary.text('')
build_summary.text('', '''
You can use the option --upload to upload the firmware to the PX4 board if you
have one connected.''')
if bld.env.PX4_PX4IO_NAME:
build_summary.text('')
build_summary.text('PX4IO')
summary_data_list = bld.size_summary([bld.env.PX4IO_ELF_DEST])
header = bld.env.BUILD_SUMMARY_HEADER[:]
try:
header.remove('target')
except ValueError:
pass
header.insert(0, 'binary_path')
build_summary.print_table(summary_data_list, header)
| gpl-3.0 | -3,635,688,299,489,346,600 | 31.84273 | 133 | 0.615287 | false |
nicproulx/mne-python | mne/simulation/evoked.py | 2 | 5502 | # Authors: Alexandre Gramfort <[email protected]>
# Daniel Strohmeier <[email protected]>
# Martin Luessi <[email protected]>
#
# License: BSD (3-clause)
import copy
import warnings
import numpy as np
from ..io.pick import pick_channels_cov
from ..forward import apply_forward
from ..utils import check_random_state, verbose, _time_mask
@verbose
def simulate_evoked(fwd, stc, info, cov, snr=3., tmin=None, tmax=None,
iir_filter=None, random_state=None, verbose=None):
"""Generate noisy evoked data.
.. note:: No projections from ``info`` will be present in the
output ``evoked``. You can use e.g.
:func:`evoked.add_proj <mne.Evoked.add_proj>` or
:func:`evoked.set_eeg_reference <mne.Evoked.set_eeg_reference>`
to add them afterward as necessary.
Parameters
----------
fwd : Forward
a forward solution.
stc : SourceEstimate object
The source time courses.
info : dict
Measurement info to generate the evoked.
cov : Covariance object
The noise covariance.
snr : float
signal to noise ratio in dB. It corresponds to
10 * log10( var(signal) / var(noise) ).
tmin : float | None
start of time interval to estimate SNR. If None first time point
is used.
tmax : float | None
        end of time interval to estimate SNR. If None the last time point
is used.
iir_filter : None | array
IIR filter coefficients (denominator) e.g. [1, -1, 0.2].
random_state : None | int | np.random.RandomState
To specify the random generator state.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
evoked : Evoked object
The simulated evoked data
See Also
--------
simulate_raw
simulate_stc
simulate_sparse_stc
Notes
-----
.. versionadded:: 0.10.0
"""
evoked = apply_forward(fwd, stc, info)
if snr < np.inf:
noise = simulate_noise_evoked(evoked, cov, iir_filter, random_state)
evoked_noise = add_noise_evoked(evoked, noise, snr, tmin=tmin,
tmax=tmax)
else:
evoked_noise = evoked
return evoked_noise
def simulate_noise_evoked(evoked, cov, iir_filter=None, random_state=None):
"""Create noise as a multivariate Gaussian.
The spatial covariance of the noise is given from the cov matrix.
Parameters
----------
evoked : evoked object
an instance of evoked used as template
cov : Covariance object
The noise covariance
iir_filter : None | array
IIR filter coefficients (denominator)
random_state : None | int | np.random.RandomState
To specify the random generator state.
Returns
-------
noise : evoked object
an instance of evoked
Notes
-----
.. versionadded:: 0.10.0
"""
noise = evoked.copy()
noise.data = _generate_noise(evoked.info, cov, iir_filter, random_state,
evoked.data.shape[1])[0]
return noise
def _generate_noise(info, cov, iir_filter, random_state, n_samples, zi=None):
"""Create spatially colored and temporally IIR-filtered noise."""
from scipy.signal import lfilter
noise_cov = pick_channels_cov(cov, include=info['ch_names'], exclude=[])
if set(info['ch_names']) != set(noise_cov.ch_names):
raise ValueError('Evoked and covariance channel names are not '
'identical. Cannot generate the noise matrix. '
'Channels missing in covariance %s.' %
np.setdiff1d(info['ch_names'], noise_cov.ch_names))
rng = check_random_state(random_state)
c = np.diag(noise_cov.data) if noise_cov['diag'] else noise_cov.data
mu_channels = np.zeros(len(c))
# we almost always get a positive semidefinite warning here, so squash it
with warnings.catch_warnings(record=True):
noise = rng.multivariate_normal(mu_channels, c, n_samples).T
if iir_filter is not None:
if zi is None:
zi = np.zeros((len(c), len(iir_filter) - 1))
noise, zf = lfilter([1], iir_filter, noise, axis=-1, zi=zi)
else:
zf = None
return noise, zf
def add_noise_evoked(evoked, noise, snr, tmin=None, tmax=None):
"""Add noise to evoked object with specified SNR.
SNR is computed in the interval from tmin to tmax.
Parameters
----------
evoked : Evoked object
An instance of evoked with signal
noise : Evoked object
An instance of evoked with noise
snr : float
signal to noise ratio in dB. It corresponds to
10 * log10( var(signal) / var(noise) )
tmin : float
start time before event
tmax : float
end time after event
Returns
-------
evoked_noise : Evoked object
An instance of evoked corrupted by noise
"""
evoked = copy.deepcopy(evoked)
tmask = _time_mask(evoked.times, tmin, tmax, sfreq=evoked.info['sfreq'])
tmp = 10 * np.log10(np.mean((evoked.data[:, tmask] ** 2).ravel()) /
np.mean((noise.data ** 2).ravel()))
noise.data = 10 ** ((tmp - float(snr)) / 20) * noise.data
evoked.data += noise.data
return evoked
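# ----------------------------------------------------------------------------
# Usage outline (illustrative addition, not part of the original module). The
# file names below are placeholders; a typical call chain with real data is:
#
#     fwd = mne.read_forward_solution('sample-fwd.fif')
#     cov = mne.read_cov('sample-cov.fif')
#     info = mne.io.read_raw_fif('sample_raw.fif').info
#     stc = ...  # e.g. from mne.simulation.simulate_sparse_stc
#     evoked = simulate_evoked(fwd, stc, info, cov, snr=6.)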
| bsd-3-clause | -6,964,635,638,887,342,000 | 32.345455 | 77 | 0.613413 | false |
mfrasca/UNAS-unit_testing | test/testglobals.py | 1 | 2135 | # -*- coding: utf-8 -*-
#
# copyright by its authors
#
# This file is part of the didactic code used at the UNAS
#
# UNAS didactic code is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# UNAS didactic code is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with UNAS didactic code. If not, see <http://www.gnu.org/licenses/>.
# This file contains the unit tests for the global functions
# defined in the modules.
import unittest
#from unittest import SkipTest
from unas.globfunc import mergevalues
class GlobalTests(unittest.TestCase):
def test_mergevalues_equal(self):
'if the values are equal, return it'
self.assertEquals(mergevalues(1, 1), '1')
self.assertEquals(mergevalues('1', '1'), '1')
self.assertEquals(mergevalues('1', 1), '1')
self.assertEquals(mergevalues(1, '1'), '1')
def test_mergevalues_conflict(self):
'if they conflict, return both'
self.assertEquals(mergevalues(25, 48), '25|48')
self.assertEquals(mergevalues(25, 48, "%s::%s"), '25::48')
def test_mergevalues_one_empty(self):
'if one is empty, return the non empty one'
self.assertEquals(mergevalues('', 1), '1')
self.assertEquals(mergevalues(1, ''), '1')
self.assertEquals(mergevalues('33', ''), '33')
def test_mergevalues_both_empty(self):
'if both are empty, return the empty string'
self.assertEquals(mergevalues('', ''), '')
def test_mergevalues_none_as_empty_string(self):
'the function does not distinguish between None and the empty string'
self.assertEquals(mergevalues('', None), '')
self.assertEquals(mergevalues(None, ''), '')
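# Convenience entry point (an assumption; the upstream project may rely on a
# separate test runner instead) so the module can be executed directly.
if __name__ == '__main__':
    unittest.main()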
| gpl-3.0 | -8,720,695,417,989,454,000 | 34.566667 | 77 | 0.686504 | false |
okrt/horspool-string-matching | main.py | 1 | 7710 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'main.ui'
#
# Created: Wed Apr 08 10:31:45 2015
# by: PyQt4 UI code generator 4.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(647, 735)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
MainWindow.setStyleSheet(_fromUtf8(""))
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.teMetin = QtGui.QTextEdit(self.centralwidget)
self.teMetin.setGeometry(QtCore.QRect(10, 30, 381, 151))
self.teMetin.setObjectName(_fromUtf8("teMetin"))
self.lePattern = QtGui.QLineEdit(self.centralwidget)
self.lePattern.setGeometry(QtCore.QRect(10, 210, 381, 20))
self.lePattern.setObjectName(_fromUtf8("lePattern"))
self.teAdimlar = QtGui.QTextEdit(self.centralwidget)
self.teAdimlar.setGeometry(QtCore.QRect(10, 310, 621, 151))
self.teAdimlar.setReadOnly(True)
self.teAdimlar.setObjectName(_fromUtf8("teAdimlar"))
self.btnBul = QtGui.QPushButton(self.centralwidget)
self.btnBul.setGeometry(QtCore.QRect(10, 240, 611, 41))
self.btnBul.setObjectName(_fromUtf8("btnBul"))
self.btnDosya = QtGui.QPushButton(self.centralwidget)
self.btnDosya.setGeometry(QtCore.QRect(220, 0, 121, 23))
self.btnDosya.setObjectName(_fromUtf8("btnDosya"))
self.lblMetin = QtGui.QLabel(self.centralwidget)
self.lblMetin.setGeometry(QtCore.QRect(10, 10, 121, 16))
self.lblMetin.setObjectName(_fromUtf8("lblMetin"))
self.lblPattern = QtGui.QLabel(self.centralwidget)
self.lblPattern.setGeometry(QtCore.QRect(10, 190, 121, 16))
self.lblPattern.setObjectName(_fromUtf8("lblPattern"))
self.lblAdimlar = QtGui.QLabel(self.centralwidget)
self.lblAdimlar.setGeometry(QtCore.QRect(10, 290, 121, 16))
self.lblAdimlar.setObjectName(_fromUtf8("lblAdimlar"))
self.lblHakkinda = QtGui.QLabel(self.centralwidget)
self.lblHakkinda.setGeometry(QtCore.QRect(440, 480, 181, 71))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lblHakkinda.sizePolicy().hasHeightForWidth())
self.lblHakkinda.setSizePolicy(sizePolicy)
self.lblHakkinda.setObjectName(_fromUtf8("lblHakkinda"))
self.lblPozisyon = QtGui.QLabel(self.centralwidget)
self.lblPozisyon.setGeometry(QtCore.QRect(410, 10, 101, 20))
self.lblPozisyon.setObjectName(_fromUtf8("lblPozisyon"))
self.lblDurum = QtGui.QLabel(self.centralwidget)
self.lblDurum.setGeometry(QtCore.QRect(10, 560, 531, 20))
self.lblDurum.setObjectName(_fromUtf8("lblDurum"))
self.lblIslem = QtGui.QLabel(self.centralwidget)
self.lblIslem.setGeometry(QtCore.QRect(10, 590, 621, 71))
self.lblIslem.setAutoFillBackground(False)
self.lblIslem.setStyleSheet(_fromUtf8("background-color:rgb(255, 255, 255)"))
self.lblIslem.setTextFormat(QtCore.Qt.RichText)
self.lblIslem.setScaledContents(False)
self.lblIslem.setObjectName(_fromUtf8("lblIslem"))
self.cbAdimlar = QtGui.QCheckBox(self.centralwidget)
self.cbAdimlar.setGeometry(QtCore.QRect(20, 510, 401, 21))
self.cbAdimlar.setObjectName(_fromUtf8("cbAdimlar"))
self.cbThDurumAl = QtGui.QCheckBox(self.centralwidget)
self.cbThDurumAl.setGeometry(QtCore.QRect(20, 490, 421, 21))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.cbThDurumAl.sizePolicy().hasHeightForWidth())
self.cbThDurumAl.setSizePolicy(sizePolicy)
self.cbThDurumAl.setChecked(True)
self.cbThDurumAl.setTristate(False)
self.cbThDurumAl.setObjectName(_fromUtf8("cbThDurumAl"))
self.lblDurum_2 = QtGui.QLabel(self.centralwidget)
self.lblDurum_2.setGeometry(QtCore.QRect(10, 470, 531, 20))
self.lblDurum_2.setObjectName(_fromUtf8("lblDurum_2"))
self.lvKonumlar = QtGui.QListView(self.centralwidget)
self.lvKonumlar.setGeometry(QtCore.QRect(410, 30, 211, 201))
self.lvKonumlar.setObjectName(_fromUtf8("lvKonumlar"))
self.cbRenklendir = QtGui.QCheckBox(self.centralwidget)
self.cbRenklendir.setGeometry(QtCore.QRect(20, 530, 391, 21))
self.cbRenklendir.setObjectName(_fromUtf8("cbRenklendir"))
self.label = QtGui.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(10, 670, 601, 16))
self.label.setObjectName(_fromUtf8("label"))
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 647, 21))
self.menubar.setObjectName(_fromUtf8("menubar"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "Horspool String Matching @NKU", None))
self.btnBul.setText(_translate("MainWindow", "Find", None))
self.btnDosya.setText(_translate("MainWindow", "Import File", None))
self.lblMetin.setText(_translate("MainWindow", "Text:", None))
self.lblPattern.setText(_translate("MainWindow", "Pattern:", None))
self.lblAdimlar.setText(_translate("MainWindow", "Steps:", None))
self.lblHakkinda.setText(_translate("MainWindow", "<html><head/><body><p>Ferhat Yeşiltarla</p><p>Gökmen Güreşçi</p><p>Oğuz Kırat</p></body></html>", None))
self.lblPozisyon.setText(_translate("MainWindow", "Positions Found", None))
self.lblDurum.setText(_translate("MainWindow", "Status", None))
self.lblIslem.setText(_translate("MainWindow", "Ready", None))
self.cbAdimlar.setText(_translate("MainWindow", "Show steps (Not recommended on long texts)", None))
self.cbThDurumAl.setText(_translate("MainWindow", "Get info from string matching thread while processing.", None))
self.lblDurum_2.setText(_translate("MainWindow", "Options", None))
self.cbRenklendir.setText(_translate("MainWindow", "Colorize patterns found. (Not recommended on long texts)", None))
self.label.setText(_translate("MainWindow", "Quickly developed for \"Pattern Matching in Texts\" course assignment @ nku.edu.tr", None))
| gpl-2.0 | -656,853,989,499,909,800 | 55.226277 | 163 | 0.704401 | false |
repotvsupertuga/tvsupertuga.repository | script.module.streamtvsupertuga/lib/resources/lib/sources/it/ddown.py | 1 | 2923 | import re
import requests
import xbmc,xbmcaddon,time
import urllib
from ..common import get_rd_domains, filter_host,send_log,error_log
from ..scraper import Scraper
dev_log = xbmcaddon.Addon('script.module.universalscrapers').getSetting("dev_log")
s = requests.session()
User_Agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'
class ddown(Scraper):
domains = ['https://directdownload.tv/']
name = "Direct Download"
sources = []
def __init__(self):
self.base_link = 'https://directdownload.tv/'
self.sources = []
if dev_log=='true':
self.start_time = time.time()
def scrape_episode(self,title, show_year, year, season, episode, imdb, tvdb, debrid = False):
try:
if not debrid:
return []
season_url = "0%s"%season if len(season)<2 else season
episode_url = "0%s"%episode if len(episode)<2 else episode
start_url = 'https://directdownload.tv/api?key=4B0BB862F24C8A29&qualities/disk-480p,disk-1080p-x265,tv-480p,tv-720p,web-480p,web-720p,web-1080p,web-1080p-x265,movie-480p-x265,movie-1080p-x265&limit=50&keyword=%s+s%se%s' %(title.lower(),season_url,episode_url)
start_url=start_url.replace(' ','%20')
#SEND2LOG(start_url)
content = requests.get(start_url).content
#print 'content >> ' +content
links=re.compile('"http(.+?)"',re.DOTALL).findall(content)
count = 0
for url in links:
url = 'http' + url.replace('\/', '/')
if '720p' in url:
res = '720p'
elif '1080p' in url:
res = '1080p'
else:
res='480p'
host = url.split('//')[1].replace('www.','')
host = host.split('/')[0].lower()
rd_domains = get_rd_domains()
if host in rd_domains:
if 'k2s.cc' not in url:
count +=1
self.sources.append({'source': host,'quality': res,'scraper': self.name,'url': url,'direct': False, 'debridonly': True})
if dev_log=='true':
end_time = time.time() - self.start_time
send_log(self.name,end_time,count)
return self.sources
except Exception, argument:
if dev_log == 'true':
error_log(self.name,'Check Search')
return self.sources
# def resolve(self, url):
# return url
def SEND2LOG(Txt):
print ':::::::::::::::::::::::::::::::::::::::::::::::::'
print ':'
print ': LOG string: ' + (str(Txt))
print ':'
print ':::::::::::::::::::::::::::::::::::::::::::::::::'
return | gpl-2.0 | -5,524,192,560,469,390,000 | 36.974026 | 271 | 0.509408 | false |
R2pChyou/starcheat | starcheat/assets/techs.py | 1 | 1741 | import os
import logging
from io import BytesIO
from PIL import Image
class Techs():
def __init__(self, assets):
self.assets = assets
self.starbound_folder = assets.starbound_folder
def is_tech(self, key):
return key.endswith(".tech")
def index_data(self, asset):
key = asset[0]
path = asset[1]
offset = asset[2]
length = asset[3]
name = os.path.basename(asset[0]).split(".")[0]
asset_data = self.assets.read(key, path, False, offset, length)
if asset_data is None:
return
# TODO: Switch over to new tech system
# item = self.assets.read(asset[0]+"item", asset[1])
# if item is None or "itemName" not in item:
# logging.warning("No techitem for %s in %s" % asset[0], asset[1])
# return
return (key, path, offset, length, "tech", "", name, "")
def all(self):
"""Return a list of all techs."""
c = self.assets.db.cursor()
c.execute("select desc from assets where type = 'tech' order by desc")
return [x[0] for x in c.fetchall()]
def get_tech(self, name):
q = "select key, path from assets where type = 'tech' and (name = ? or desc = ?)"
c = self.assets.db.cursor()
c.execute(q, (name, name))
tech = c.fetchone()
if tech is None:
return
asset = self.assets.read(tech[0], tech[1])
info = self.assets.read(tech[0]+"item", tech[1])
icon = self.assets.read(info["inventoryIcon"], tech[1], image=True)
if icon is None:
icon = self.assets.items().missing_icon()
return info, Image.open(BytesIO(icon)).convert("RGBA"), tech[0], asset
| mit | 2,986,662,716,974,814,000 | 30.089286 | 89 | 0.563469 | false |
h-hwang/octodns | tests/helpers.py | 1 | 1328 | #
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from shutil import rmtree
from tempfile import mkdtemp
class SimpleSource(object):
def __init__(self, id='test'):
pass
class SimpleProvider(object):
SUPPORTS_GEO = False
SUPPORTS = set(('A',))
def __init__(self, id='test'):
pass
def populate(self, zone, source=False, lenient=False):
pass
def supports(self, record):
return True
def __repr__(self):
return self.__class__.__name__
class GeoProvider(object):
SUPPORTS_GEO = True
def __init__(self, id='test'):
pass
def populate(self, zone, source=False, lenient=False):
pass
def supports(self, record):
return True
def __repr__(self):
return self.__class__.__name__
class NoSshFpProvider(SimpleProvider):
def supports(self, record):
return record._type != 'SSHFP'
class TemporaryDirectory(object):
def __init__(self, delete_on_exit=True):
self.delete_on_exit = delete_on_exit
def __enter__(self):
self.dirname = mkdtemp()
return self
def __exit__(self, *args, **kwargs):
if self.delete_on_exit:
rmtree(self.dirname)
else:
raise Exception(self.dirname)
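# Small illustrative check (not part of the upstream helpers): shows the
# intended use of TemporaryDirectory as a context manager.
if __name__ == '__main__':
    import os
    with TemporaryDirectory() as tmp:
        print(os.path.isdir(tmp.dirname))   # True while inside the block
    print(os.path.isdir(tmp.dirname))       # False: removed on exit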
| mit | -6,261,348,014,156,640,000 | 17.971429 | 67 | 0.593373 | false |
jonasjberg/autonameow | autonameow/util/text/regexbatch.py | 1 | 3842 | # -*- coding: utf-8 -*-
# Copyright(c) 2016-2020 Jonas Sjöberg <[email protected]>
# Source repository: https://github.com/jonasjberg/autonameow
#
# This file is part of autonameow.
#
# autonameow is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# autonameow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with autonameow. If not, see <http://www.gnu.org/licenses/>.
import re
from collections import defaultdict
def replace(regex_replacement_tuples, strng, ignore_case=False):
assert isinstance(strng, str)
if not strng:
return strng
re_flags = 0
if ignore_case:
re_flags |= re.IGNORECASE
matches = list()
for regex, replacement in regex_replacement_tuples:
match = re.search(regex, strng, re_flags)
if match:
matches.append((regex, replacement))
sorted_by_longest_replacement = sorted(
matches, key=lambda x: len(x[1]), reverse=True
)
for regex, replacement in sorted_by_longest_replacement:
strng = re.sub(regex, replacement, strng, flags=re_flags)
return strng
def find_longest_match(regexes, strng, ignore_case=False):
"""
Searches a string with a list of regular expressions for the longest match.
NOTE: Does not handle groups!
Args:
regexes: List or set of regular expressions as Unicode strings or
compiled regular expressions.
strng (str): The string to search.
ignore_case: Whether to ignore letter case.
Returns:
The longest match found when searching the string with all given
regular expressions, as a Unicode string.
"""
assert isinstance(strng, str)
if not strng:
return None
re_flags = 0
if ignore_case:
re_flags |= re.IGNORECASE
matches = list()
for regex in regexes:
matches.extend(re.findall(regex, strng, re_flags))
if matches:
sorted_by_longest_match = sorted(
matches, key=lambda x: len(x), reverse=True
)
return sorted_by_longest_match[0]
return None
def find_replacement_value(value_regexes, strng, flags=0):
"""
Returns a value associated with one or more regular expressions.
The value whose associated regular expressions produced the longest total
substring match is returned.
    NOTE: Do not pass 'flags' if the regular expressions are already compiled.
Args:
value_regexes (dict): Dictionary keyed by any values, each storing
lists/tuples of regular expression patterns.
strng (str): The text to search.
flags: Regular expression flags applied to all regular expressions.
Returns:
The "best" matched key in the "value_regexes" dict, or None.
"""
assert isinstance(strng, str)
if not strng:
return strng
# Use canonical form with longest total length of matched substrings.
value_match_lengths = defaultdict(int)
for value, regexes in value_regexes.items():
for regex in regexes:
matches = re.finditer(regex, strng, flags)
for match in matches:
value_match_lengths[value] += len(match.group(0))
if value_match_lengths:
value_associated_with_longest_match = max(value_match_lengths,
key=value_match_lengths.get)
return value_associated_with_longest_match
return None
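# -----------------------------------------------------------------------------
# Minimal self-test (illustrative addition, not part of the upstream module).
# The patterns below are arbitrary examples chosen to show each helper.
if __name__ == '__main__':
    # replace() rewrites every matching pattern, longest replacement first.
    print(replace([(r'colou?r', 'color')], 'Colour scheme', ignore_case=True))

    # find_longest_match() returns the single longest matched substring.
    print(find_longest_match([r'\d+', r'[a-z]+'], 'abc 12345'))

    # find_replacement_value() returns the key whose regexes matched most text.
    print(find_replacement_value({'date': [r'\d{4}-\d{2}-\d{2}'],
                                  'number': [r'\d+']},
                                 '2019-05-01'))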
| gpl-2.0 | -8,631,422,745,035,632,000 | 30.227642 | 79 | 0.659984 | false |
gios-asu/text-geolocator | docs/source/conf.py | 1 | 9841 | # -*- coding: utf-8 -*-
#
# Text-Geolocator documentation build configuration file, created by
# sphinx-quickstart on Mon Mar 2 18:40:28 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../geolocator'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Text-Geolocator'
copyright = (u'2015, Adam McNabb, Weston Neal, Samantha Juntiff, '
'Christopher Silvia, Jack Workman, Jang Won')
author = (u'Adam McNabb, Weston Neal, Samantha Juntiff, Christopher Silvia, '
'Jack Workman, Jang Won')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.5'
# The full version, including alpha/beta/rc tags.
release = '0.5'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'classic'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Text-Geolocatordoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Text-Geolocator.tex', u'Text-Geolocator Documentation',
u'Adam McNabb, Weston Neal, Samantha Juntiff, Christopher Silvia, '
'Jack Workman, Jang Won', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'text-geolocator', u'Text-Geolocator Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Text-Geolocator', u'Text-Geolocator Documentation',
author, 'Text-Geolocator', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| mit | -798,293,896,022,486,400 | 32.13468 | 79 | 0.700335 | false |
geoserver/wps-remote | src/wpsremote/ConfigParser.py | 1 | 28188 | # (c) 2016 Open Source Geospatial Foundation - all rights reserved
# (c) 2014 - 2015 Centre for Maritime Research and Experimentation (CMRE)
# (c) 2013 - 2014 German Aerospace Center (DLR)
# This code is licensed under the GPL 2.0 license, available at the root
# application directory.
import re
import UserDict as _UserDict
__author__ = "Alessio Fabiani"
__copyright__ = "Copyright 2016 Open Source Geospatial Foundation - all rights reserved"
__license__ = "GPL"
"""Configuration file parser.
A setup file consists of sections, led by a "[section]" header,
and followed by "name: value" entries, with continuations and such in
the style of RFC 822.
The option values can contain format strings which refer to other values in
the same section, or values in a special [DEFAULT] section.
For example:
something: %(dir)s/whatever
would resolve the "%(dir)s" to the value of dir. All reference
expansions are done late, on demand.
Intrinsic defaults can be specified by passing them into the
ConfigParser constructor as a dictionary.
class:
ConfigParser -- responsible for parsing a list of
configuration files, and managing the parsed database.
methods:
__init__(defaults=None)
create the parser and specify a dictionary of intrinsic defaults. The
keys must be strings, the values must be appropriate for %()s string
interpolation. Note that `__name__' is always an intrinsic default;
its value is the section's name.
sections()
return all the configuration section names, sans DEFAULT
has_section(section)
return whether the given section exists
has_option(section, option)
return whether the given option exists in the given section
options(section)
return list of configuration options for the named section
read(filenames)
read and parse the list of named configuration files, given by
name. A single filename is also allowed. Non-existing files
are ignored. Return list of successfully read files.
readfp(fp, filename=None)
read and parse one configuration file, given as a file object.
The filename defaults to fp.name; it is only used in error
messages (if fp has no `name' attribute, the string `<???>' is used).
get(section, option, raw=False, vars=None)
return a string value for the named option. All % interpolations are
expanded in the return values, based on the defaults passed into the
constructor and the DEFAULT section. Additional substitutions may be
provided using the `vars' argument, which must be a dictionary whose
contents override any pre-existing defaults.
getint(section, options)
like get(), but convert value to an integer
getfloat(section, options)
like get(), but convert value to a float
getboolean(section, options)
like get(), but convert value to a boolean (currently case
insensitively defined as 0, false, no, off for False, and 1, true,
yes, on for True). Returns False or True.
items(section, raw=False, vars=None)
return a list of tuples with (name, value) for each option
in the section.
remove_section(section)
remove the given file section and all its options
remove_option(section, option)
remove the given option from the given section
set(section, option, value)
set the given option
write(fp)
write the configuration state in .ini format
"""
try:
from collections import OrderedDict as _default_dict
except ImportError:
# fallback for setup.py which hasn't yet built _collections
_default_dict = dict
__all__ = ["NoSectionError", "DuplicateSectionError", "NoOptionError",
"InterpolationError", "InterpolationDepthError",
"InterpolationSyntaxError", "ParsingError",
"MissingSectionHeaderError",
"ConfigParser", "SafeConfigParser", "RawConfigParser",
"DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"]
DEFAULTSECT = "DEFAULT"
MAX_INTERPOLATION_DEPTH = 10
# exception classes
class Error(Exception):
"""Base class for ConfigParser exceptions."""
def _get_message(self):
"""Getter for 'message'; needed only to override deprecation in
BaseException."""
return self.__message
def _set_message(self, value):
"""Setter for 'message'; needed only to override deprecation in
BaseException."""
self.__message = value
# BaseException.message has been deprecated since Python 2.6. To prevent
# DeprecationWarning from popping up over this pre-existing attribute, use
# a new property that takes lookup precedence.
message = property(_get_message, _set_message)
def __init__(self, msg=''):
self.message = msg
Exception.__init__(self, msg)
def __repr__(self):
return self.message
__str__ = __repr__
class NoSectionError(Error):
"""Raised when no section matches a requested option."""
def __init__(self, section):
Error.__init__(self, 'No section: %r' % (section,))
self.section = section
self.args = (section, )
class DuplicateSectionError(Error):
"""Raised when a section is multiply-created."""
def __init__(self, section):
Error.__init__(self, "Section %r already exists" % section)
self.section = section
self.args = (section, )
class NoOptionError(Error):
"""A requested option was not found."""
def __init__(self, option, section):
Error.__init__(self, "No option %r in section: %r" %
(option, section))
self.option = option
self.section = section
self.args = (option, section)
class InterpolationError(Error):
"""Base class for interpolation-related exceptions."""
def __init__(self, option, section, msg):
Error.__init__(self, msg)
self.option = option
self.section = section
self.args = (option, section, msg)
class InterpolationMissingOptionError(InterpolationError):
"""A string substitution required a setting which was not available."""
def __init__(self, option, section, rawval, reference):
msg = ("Bad value substitution:\n"
"\tsection: [%s]\n"
"\toption : %s\n"
"\tkey : %s\n"
"\trawval : %s\n"
% (section, option, reference, rawval))
InterpolationError.__init__(self, option, section, msg)
self.reference = reference
self.args = (option, section, rawval, reference)
class InterpolationSyntaxError(InterpolationError):
"""Raised when the source text into which substitutions are made
does not conform to the required syntax."""
class InterpolationDepthError(InterpolationError):
"""Raised when substitutions are nested too deeply."""
def __init__(self, option, section, rawval):
msg = ("Value interpolation too deeply recursive:\n"
"\tsection: [%s]\n"
"\toption : %s\n"
"\trawval : %s\n"
% (section, option, rawval))
InterpolationError.__init__(self, option, section, msg)
self.args = (option, section, rawval)
class ParsingError(Error):
"""Raised when a configuration file does not follow legal syntax."""
def __init__(self, filename):
Error.__init__(self, 'File contains parsing errors: %s' % filename)
self.filename = filename
self.errors = []
self.args = (filename, )
def append(self, lineno, line):
self.errors.append((lineno, line))
self.message += '\n\t[line %2d]: %s' % (lineno, line)
class MissingSectionHeaderError(ParsingError):
"""Raised when a key-value pair is found before any section header."""
def __init__(self, filename, lineno, line):
Error.__init__(
self,
'File contains no section headers.\nfile: %s, line: %d\n%r' %
(filename, lineno, line))
self.filename = filename
self.lineno = lineno
self.line = line
self.args = (filename, lineno, line)
class RawConfigParser:
def __init__(self, defaults=None, dict_type=_default_dict,
allow_no_value=False):
self._dict = dict_type
self._sections = self._dict()
self._defaults = self._dict()
if allow_no_value:
self._optcre = self.OPTCRE_NV
else:
self._optcre = self.OPTCRE
if defaults:
for key, value in defaults.items():
self._defaults[self.optionxform(key)] = value
def defaults(self):
return self._defaults
def sections(self):
"""Return a list of section names, excluding [DEFAULT]"""
# self._sections will never have [DEFAULT] in it
return self._sections.keys()
def add_section(self, section):
"""Create a new section in the configuration.
Raise DuplicateSectionError if a section by the specified name
already exists. Raise ValueError if name is DEFAULT or any of it's
case-insensitive variants.
"""
if section.lower() == "default":
raise ValueError('Invalid section name: %s' % section)
if section in self._sections:
raise DuplicateSectionError(section)
self._sections[section] = self._dict()
def has_section(self, section):
"""Indicate whether the named section is present in the configuration.
The DEFAULT section is not acknowledged.
"""
return section in self._sections
def options(self, section):
"""Return a list of option names for the given section name."""
try:
opts = self._sections[section].copy()
except KeyError:
raise NoSectionError(section)
opts.update(self._defaults)
if '__name__' in opts:
del opts['__name__']
return opts.keys()
def read(self, filenames):
"""Read and parse a filename or a list of filenames.
Files that cannot be opened are silently ignored; this is
designed so that you can specify a list of potential
configuration file locations (e.g. current directory, user's
home directory, systemwide directory), and all existing
configuration files in the list will be read. A single
filename may also be given.
Return list of successfully read files.
"""
if isinstance(filenames, str):
filenames = [filenames]
read_ok = []
for filename in filenames:
try:
fp = open(filename)
except IOError:
continue
self._read(fp, filename)
fp.close()
read_ok.append(filename)
return read_ok
def readfp(self, fp, filename=None):
"""Like read() but the argument must be a file-like object.
The `fp' argument must have a `readline' method. Optional
second argument is the `filename', which if not given, is
taken from fp.name. If fp has no `name' attribute, `<???>' is
used.
"""
if filename is None:
try:
filename = fp.name
except AttributeError:
filename = '<???>'
self._read(fp, filename)
def get(self, section, option):
opt = self.optionxform(option)
if section not in self._sections:
if section != DEFAULTSECT:
raise NoSectionError(section)
if opt in self._defaults:
return self._defaults[opt]
else:
raise NoOptionError(option, section)
elif opt in self._sections[section]:
return self._sections[section][opt]
elif opt in self._defaults:
return self._defaults[opt]
else:
raise NoOptionError(option, section)
def items(self, section):
try:
d2 = self._sections[section]
except KeyError:
if section != DEFAULTSECT:
raise NoSectionError(section)
d2 = self._dict()
d = self._defaults.copy()
d.update(d2)
if "__name__" in d:
del d["__name__"]
return d.items()
def _get(self, section, conv, option):
return conv(self.get(section, option))
def getint(self, section, option):
return self._get(section, int, option)
def getfloat(self, section, option):
return self._get(section, float, option)
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False}
def getboolean(self, section, option):
v = self.get(section, option)
if v.lower() not in self._boolean_states:
raise ValueError('Not a boolean: %s' % v)
return self._boolean_states[v.lower()]
def optionxform(self, optionstr):
return optionstr.lower()
def has_option(self, section, option):
"""Check for the existence of a given option in a given section."""
if not section or section == DEFAULTSECT:
option = self.optionxform(option)
return option in self._defaults
elif section not in self._sections:
return False
else:
option = self.optionxform(option)
return (option in self._sections[section] or
option in self._defaults)
def set(self, section, option, value=None):
"""Set an option."""
if not section or section == DEFAULTSECT:
sectdict = self._defaults
else:
try:
sectdict = self._sections[section]
except KeyError:
raise NoSectionError(section)
sectdict[self.optionxform(option)] = value
def write(self, fp):
"""Write an .ini-format representation of the configuration state."""
if self._defaults:
fp.write("[%s]\n" % DEFAULTSECT)
for (key, value) in self._defaults.items():
fp.write("%s = %s\n" % (key, str(value).replace('\n', '\n\t')))
fp.write("\n")
for section in self._sections:
fp.write("[%s]\n" % section)
for (key, value) in self._sections[section].items():
if key == "__name__":
continue
if (value is not None) or (self._optcre == self.OPTCRE):
key = " = ".join((key, str(value).replace('\n', '\n\t')))
fp.write("%s\n" % (key))
fp.write("\n")
def remove_option(self, section, option):
"""Remove an option."""
if not section or section == DEFAULTSECT:
sectdict = self._defaults
else:
try:
sectdict = self._sections[section]
except KeyError:
raise NoSectionError(section)
option = self.optionxform(option)
existed = option in sectdict
if existed:
del sectdict[option]
return existed
def remove_section(self, section):
"""Remove a file section."""
existed = section in self._sections
if existed:
del self._sections[section]
return existed
#
# Regular expressions for parsing section headers and options.
#
SECTCRE = re.compile(
r'\[' # [
r'(?P<header>[^]]+)' # very permissive!
r'\]' # ]
)
OPTCRE = re.compile(
r'(?P<option>[^:=\s][^:=]*)' # very permissive!
r'\s*(?P<vi>[:=])\s*' # any number of space/tab,
# followed by separator
# (either : or =), followed
# by any # space/tab
r'(?P<value>.*)$' # everything up to eol
)
OPTCRE_NV = re.compile(
r'(?P<option>[^:=\s][^:=]*)' # very permissive!
r'\s*(?:' # any number of space/tab,
r'(?P<vi>[:=])\s*' # optionally followed by
# separator (either : or
# =), followed by any #
# space/tab
r'(?P<value>.*))?$' # everything up to eol
)
def _read(self, fp, fpname):
"""Parse a sectioned setup file.
The sections in setup file contains a title line at the top,
indicated by a name in square brackets (`[]'), plus key/value
options lines, indicated by `name: value' format lines.
Continuations are represented by an embedded newline then
leading whitespace. Blank lines, lines beginning with a '#',
and just about everything else are ignored.
"""
cursect = None # None, or a dictionary
optname = None
lineno = 0
e = None # None, or an exception
while True:
line = fp.readline()
if not line:
break
lineno = lineno + 1
# comment or blank line?
if line.strip() == '' or line[0] in '#;':
continue
if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR":
# no leading whitespace
continue
# continuation line?
if line[0].isspace() and cursect is not None and optname:
value = line.strip()
if value:
cursect[optname].append(value)
# a section header or option header?
else:
# is it a section header?
mo = self.SECTCRE.match(line)
if mo:
sectname = mo.group('header')
if sectname in self._sections:
cursect = self._sections[sectname]
elif sectname == DEFAULTSECT:
cursect = self._defaults
else:
cursect = self._dict()
cursect['__name__'] = sectname
self._sections[sectname] = cursect
# So sections can't start with a continuation line
optname = None
# no section header in the file?
elif cursect is None:
raise MissingSectionHeaderError(fpname, lineno, line)
# an option line?
else:
mo = self._optcre.match(line)
if mo:
optname, vi, optval = mo.group('option', 'vi', 'value')
optname = self.optionxform(optname.rstrip())
# This check is fine because the OPTCRE cannot
# match if it would set optval to None
if optval is not None:
if vi in ('=', ':') and ';' in optval:
# ';' is a comment delimiter only if it follows
# a spacing character
pos = optval.find(';')
if pos != -1 and optval[pos-1].isspace():
optval = optval[:pos]
optval = optval.strip()
# allow empty values
if optval == '""':
optval = ''
cursect[optname] = [optval]
else:
# valueless option handling
cursect[optname] = optval
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
if not e:
e = ParsingError(fpname)
e.append(lineno, repr(line))
# if any parsing errors occurred, raise an exception
if e:
raise e
# join the multi-line values collected while reading
all_sections = [self._defaults]
all_sections.extend(self._sections.values())
for options in all_sections:
for name, val in options.items():
if isinstance(val, list):
options[name] = '\n'.join(val)
class _Chainmap(_UserDict.DictMixin):
"""Combine multiple mappings for successive lookups.
For example, to emulate Python's normal lookup sequence:
import __builtin__
pylookup = _Chainmap(locals(), globals(), vars(__builtin__))
"""
def __init__(self, *maps):
self._maps = maps
def __getitem__(self, key):
for mapping in self._maps:
try:
return mapping[key]
except KeyError:
pass
raise KeyError(key)
def keys(self):
result = []
seen = set()
for mapping in self._maps:
for key in mapping:
if key not in seen:
result.append(key)
seen.add(key)
return result
class ConfigParser(RawConfigParser):
def get(self, section, option, raw=False, vars=None):
"""Get an option value for a given section.
If `vars' is provided, it must be a dictionary. The option is looked up
in `vars' (if provided), `section', and in `defaults' in that order.
All % interpolations are expanded in the return values, unless the
optional argument `raw' is true. Values for interpolation keys are
looked up in the same manner as the option.
The section DEFAULT is special.
"""
sectiondict = {}
try:
sectiondict = self._sections[section]
except KeyError:
if section != DEFAULTSECT:
raise NoSectionError(section)
# Update with the entry specific variables
vardict = {}
if vars:
for key, value in vars.items():
vardict[self.optionxform(key)] = value
d = _Chainmap(vardict, sectiondict, self._defaults)
option = self.optionxform(option)
try:
value = d[option]
except KeyError:
raise NoOptionError(option, section)
if raw or value is None:
return value
else:
return self._interpolate(section, option, value, d)
def items(self, section, raw=False, vars=None):
"""Return a list of tuples with (name, value) for each option
in the section.
All % interpolations are expanded in the return values, based on the
defaults passed into the constructor, unless the optional argument
`raw' is true. Additional substitutions may be provided using the
        `vars' argument, which must be a dictionary whose contents override
any pre-existing defaults.
The section DEFAULT is special.
"""
d = self._defaults.copy()
try:
d.update(self._sections[section])
except KeyError:
if section != DEFAULTSECT:
raise NoSectionError(section)
# Update with the entry specific variables
if vars:
for key, value in vars.items():
d[self.optionxform(key)] = value
options = d.keys()
if "__name__" in options:
options.remove("__name__")
if raw:
return [(option, d[option])
for option in options]
else:
return [(option, self._interpolate(section, option, d[option], d))
for option in options]
def _interpolate(self, section, option, rawval, vars):
# do the string interpolation
value = rawval
depth = MAX_INTERPOLATION_DEPTH
while depth: # Loop through this until it's done
depth -= 1
if value and "%(" in value:
value = self._KEYCRE.sub(self._interpolation_replace, value)
try:
value = value % vars
except KeyError as e:
raise InterpolationMissingOptionError(
option, section, rawval, e.args[0])
else:
break
if value and "%(" in value:
raise InterpolationDepthError(option, section, rawval)
return value
_KEYCRE = re.compile(r"%\(([^)]*)\)s|.")
def _interpolation_replace(self, match):
s = match.group(1)
if s is None:
return match.group()
else:
return "%%(%s)s" % self.optionxform(s)
class SafeConfigParser(ConfigParser):
def _interpolate(self, section, option, rawval, vars):
# do the string interpolation
L = []
self._interpolate_some(option, L, rawval, section, vars, 1)
return ''.join(L)
_interpvar_re = re.compile(r"%\(([^)]+)\)s")
def _interpolate_some(self, option, accum, rest, section, map, depth):
if depth > MAX_INTERPOLATION_DEPTH:
raise InterpolationDepthError(option, section, rest)
while rest:
p = rest.find("%")
if p < 0:
accum.append(rest)
return
if p > 0:
accum.append(rest[:p])
rest = rest[p:]
# p is no longer used
c = rest[1:2]
if c == "%":
accum.append("%")
rest = rest[2:]
elif c == "(":
m = self._interpvar_re.match(rest)
if m is None:
raise InterpolationSyntaxError(option, section,
"bad interpolation variable reference %r" % rest)
var = self.optionxform(m.group(1))
rest = rest[m.end():]
try:
v = map[var]
except KeyError:
raise InterpolationMissingOptionError(
option, section, rest, var)
if "%" in v:
self._interpolate_some(option, accum, v,
section, map, depth + 1)
else:
accum.append(v)
else:
raise InterpolationSyntaxError(
option, section,
"'%%' must be followed by '%%' or '(', found: %r" % (rest,))
def set(self, section, option, value=None):
"""Set an option. Extend ConfigParser.set: check for string values."""
# The only legal non-string value if we allow valueless
# options is None, so we need to check if the value is a
# string if:
# - we do not allow valueless options, or
# - we allow valueless options but the value is not None
if self._optcre is self.OPTCRE or value:
if not isinstance(value, str):
raise TypeError("option values must be strings")
if value is not None:
# check for bad percent signs:
# first, replace all "good" interpolations
tmp_value = value.replace('%%', '')
tmp_value = self._interpvar_re.sub('', tmp_value)
# then, check if there's a lone percent sign left
if '%' in tmp_value:
raise ValueError("invalid interpolation syntax in %r at "
"position %d" % (value, tmp_value.find('%')))
ConfigParser.set(self, section, option, value)
| gpl-2.0 | 5,430,769,433,577,599,000 | 35.512953 | 100 | 0.547573 | false |
wsmith323/frozenordereddict | frozenordereddict/__init__.py | 1 | 1193 |
from collections import Mapping
try:
reduce
except NameError:
from functools import reduce
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
import operator
import os
with open(os.path.join(os.path.dirname(__file__), 'VERSION.txt')) as f:
__version__ = f.read().strip()
class FrozenOrderedDict(Mapping):
"""
Frozen OrderedDict.
"""
def __init__(self, *args, **kwargs):
self.__dict = OrderedDict(*args, **kwargs)
self.__hash = None
def __getitem__(self, item):
return self.__dict[item]
def __iter__(self):
return iter(self.__dict)
def __len__(self):
return len(self.__dict)
def __hash__(self):
if self.__hash is None:
self.__hash = reduce(operator.xor, map(hash, self.__dict.items()), 0)
return self.__hash
def __repr__(self):
return '{}({!r})'.format(self.__class__.__name__, self.__dict.items())
def copy(self, *args, **kwargs):
new_dict = self.__dict.copy()
if args or kwargs:
new_dict.update(OrderedDict(*args, **kwargs))
return self.__class__(new_dict)
| mit | -6,693,519,827,597,702,000 | 21.092593 | 81 | 0.582565 | false |
EDITD/jsond | jsond/tests/test_simple.py | 1 | 2499 | import datetime
import json
import unittest
import jsond
import six
class TestSimple(unittest.TestCase):
def test_same_as_json(self):
"""If no date objects exist, then we should be the same as json.
"""
to_encode = {
"a": 1,
"b": 2,
"c": 3
}
self.assertEqual(
json.dumps(to_encode),
jsond.dumps(to_encode)
)
def test_process_date(self):
"""Test that we can process a date.
"""
relevant_date = datetime.datetime(2011, 3, 15, 0, 0, 0)
# We should be able to encode this structure.
#
to_encode = {
"some_value": 345,
"my_list": [1, 2, 3, 4],
"relevant_date": relevant_date
}
encoded = jsond.dumps(to_encode)
# And get the same date back when we decode.
#
decoded = jsond.loads(encoded)
self.assertEqual(decoded["relevant_date"], relevant_date)
def test_safe_for_json(self):
"""When we jsond.dumps something, it should be safe to decode in json.
"""
relevant_date = datetime.datetime(2011, 3, 15, 0, 0, 0)
to_encode = {
"relevant_date": relevant_date
}
encoded = jsond.dumps(to_encode)
# jsond should encode this such that it is safe to decode in json.
# The only difference is that json will treat the date as a string.
#
json_decoded = json.loads(encoded)
self.assertIsInstance(json_decoded["relevant_date"], six.string_types)
self.assertEqual(
json_decoded["relevant_date"],
"datetime:2011-03-15T00:00:00"
)
def test_json_serialisable_object(self):
"""Ensure that we can prepare an object to be json serialised (despite
it having `datetime` values!) and vice-versa.
"""
dangerous_object = {"date": datetime.datetime(2011, 3, 15),
"name": "some string key"}
with self.assertRaises(TypeError):
# We can't json serialise this!
json.dumps(dangerous_object)
safe_object = jsond.to_json_serialisable_object(dangerous_object)
# But now we can jsond serialise it.
json.dumps(safe_object)
# And we should be able to get the original object back.
final_object = jsond.from_json_serialisable_object(safe_object)
self.assertEqual(dangerous_object, final_object)
| mit | 1,675,455,936,935,370,200 | 30.2375 | 78 | 0.57383 | false |
opinkerfi/okconfig | tests/test_group.py | 1 | 1924 | #!/usr/bin/env python
"""Test adding objects"""
from __future__ import absolute_import
import os.path
import sys
# Make sure we import from working tree
okconfig_base = os.path.dirname(os.path.realpath(__file__ + "/.."))
sys.path.insert(0, okconfig_base)
import unittest2 as unittest
import okconfig
from pynag import Model
import tests
class Group(tests.OKConfigTest):
"""Template additions tests"""
def setUp(self):
super(Group, self).setUp()
okconfig.addhost("www.okconfig.org")
okconfig.addhost("okconfig.org")
okconfig.addhost("aliased.okconfig.org",
address="192.168.1.1",
group_name="testgroup")
def test_basic(self):
"""Add a group"""
okconfig.addgroup("testgroup1")
contacts = Model.Contactgroup.objects.filter(
contactgroup_name='testgroup1'
)
self.assertEqual(1, len(contacts), 'There can be only one')
hostgroups = Model.Hostgroup.objects.filter(
hostgroup_name='testgroup1'
)
self.assertEqual(1, len(hostgroups), 'There can be only one')
def test_alias(self):
"""Add a group with an alias"""
okconfig.addgroup("testgroup1", alias="the first testgroup")
contacts = Model.Contactgroup.objects.filter(
contactgroup_name='testgroup1',
alias='the first testgroup')
self.assertEqual(1, len(contacts))
def test_conflict(self):
"""Test adding a conflicting group"""
okconfig.addgroup("testgroup1")
self.assertRaises(okconfig.OKConfigError,
okconfig.addgroup,
"testgroup1")
def test_force(self):
"""Test force adding a group"""
okconfig.addgroup("testgroup1")
okconfig.addgroup("testgroup1", force=True)
if __name__ == "__main__":
unittest.main() | gpl-3.0 | 6,050,673,122,055,601,000 | 26.5 | 69 | 0.608628 | false |
alfa-jor/addon | plugin.video.alfa/channels/canalpelis.py | 1 | 19469 | # -*- coding: utf-8 -*-
# -*- Channel CanalPelis -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import sys
import urllib
import urlparse, base64
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from core import channeltools
from core import tmdb
from platformcode import config, logger
from channelselector import get_thumb
__channel__ = "canalpelis"
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
host = "https://cinexin.net/"
parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
thumbnail = "https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/%s.png"
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Peliculas", action="peliculas", thumbnail=get_thumb('movies', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + 'peliculas/', viewmode="movie_with_plot"))
itemlist.append(item.clone(title="Géneros", action="generos", thumbnail=get_thumb('genres', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + 'peliculas/', viewmode="movie_with_plot"))
itemlist.append(item.clone(title="Año de Estreno", action="year_release", thumbnail=get_thumb('year', auto=True),
text_blod=True, page=0, viewcontent='movies', url=host + 'peliculas/',
viewmode="movie_with_plot"))
itemlist.append(item.clone(title="Series", action="series", extra='serie', url=host + 'series/',
viewmode="movie_with_plot", text_blod=True, viewcontent='movies',
thumbnail=get_thumb('tvshows', auto=True), page=0))
itemlist.append(item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True),
text_blod=True, url=host, page=0))
return itemlist
# color based on the rating (evaluation)
def color_rating(rating):
try:
rating_f = float(rating)
if rating_f < 5: color = "tomato"
elif rating_f >= 7: color = "palegreen"
else: color = "grey"
except:
color = "grey"
return color
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = urlparse.urljoin(item.url, "?s={0}".format(texto))
try:
return sub_search(item)
    # Catch the exception so a failing channel does not break the global search
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
def sub_search(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t| |<br>", "", data)
# logger.info(data)
patron = '<div class="thumbnail animation-2"><a href="([^"]+)">.*?' # url
patron += '<img src="([^"]+)" alt="([^"]+)" />.*?' # img and title
patron += '<span class="([^"]+)".*?' # tipo
patron += '<span class="rating">IMDb (.*?)</span>' #rating
patron += '<span class="year">([^<]+)</span>' # year
matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle, tipo, rating, year in matches[item.page:item.page + 30]:
        # use the full-size image
        scrapedthumbnail = scrapedthumbnail.replace("-150x150", "")
        title = scrapedtitle
        if not config.get_setting('unify'):
            # color-coded rating
            rcolor = color_rating(rating)
            title += " [COLOR blue](%s)[/COLOR] [COLOR %s](%s)[/COLOR]" % (
                year, rcolor, rating)
        new_item = item.clone(title=title, url=scrapedurl, page=0,
                              infoLabels={"year": year},
                              thumbnail=scrapedthumbnail)
        # tell series and movies apart
        if tipo != "movies":
            new_item.action = "temporadas"
            new_item.contentSerieName = scrapedtitle
            if not config.get_setting('unify'):
                new_item.title += " [COLOR khaki](Serie)[/COLOR]"
        else:
            new_item.action = "findvideos"
            new_item.contentTitle = scrapedtitle
        itemlist.append(new_item)
    # genre-matching searches can return hundreds of results
    if item.page + 30 < len(matches):
        itemlist.append(item.clone(page=item.page + 30, action="sub_search",
                                   title="» Siguiente »"))
else:
next_page = scrapertools.find_single_match(
data, '<a class=\'arrow_pag\' href="([^"]+)">')
if next_page:
itemlist.append(item.clone(url=next_page, page=0,
title="» Siguiente »"))
tmdb.set_infoLabels(itemlist)
return itemlist
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'peliculas':
item.url = host + 'peliculas/'
elif categoria == 'infantiles':
item.url = host + "genero/animacion/"
elif categoria == 'terror':
item.url = host + "genero/terror/"
else:
return []
itemlist = peliculas(item)
if itemlist[-1].title == "» Siguiente »":
itemlist.pop()
    # Catch the exception so a failing channel does not break the "newest" listing
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def peliculas(item):
logger.info()
itemlist = []
action = "findvideos"
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}| ", "", data)
patron = 'class="item movies">.*?<img src="([^"]+)" alt="([^"]+)">.*?' # img, title.strip() movies
patron += '<span class="icon-star2"></span> (.*?)</div>.*?' # rating
patron += '<span class="quality">([^<]+)</span>.*?' # calidad
patron += '<a href="([^"]+)"><div class="see"></div>.*?' # url
patron += '<span>(\d+)</span>' # year
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedthumbnail, scrapedtitle, rating, quality, scrapedurl, year in matches[item.page:item.page + 30]:
if 'Próximamente' not in quality and '-XXX.jpg' not in scrapedthumbnail:
            # use the full-size image
scrapedthumbnail = scrapedthumbnail.replace("-185x278", "")
scrapedtitle = scrapedtitle.replace('Ver ', '').strip()
contentTitle = scrapedtitle.partition(':')[0].partition(',')[0]
title = scrapedtitle
if not config.get_setting('unify'):
                # color-coded rating
rcolor = color_rating(rating)
title += " [COLOR blue](%s)[/COLOR] [COLOR %s](%s)[/COLOR] [COLOR yellow][%s][/COLOR]" % (
year, rcolor, rating, quality)
itemlist.append(item.clone(channel=__channel__, action="findvideos",
url=scrapedurl, infoLabels={'year': year},
contentTitle=contentTitle, thumbnail=scrapedthumbnail,
title=title, context="buscar_trailer", quality=quality))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
if item.page + 30 < len(matches):
itemlist.append(item.clone(page=item.page + 30,
title="» Siguiente »"))
else:
next_page = scrapertools.find_single_match(
data, "<span class=\"current\">\d+</span><a href='([^']+)'")
if next_page:
itemlist.append(item.clone(url=next_page, page=0,
title="» Siguiente »"))
for item in itemlist:
if item.infoLabels['plot'] == '':
datas = httptools.downloadpage(item.url).data
datas = re.sub(r"\n|\r|\t|\s{2}| ", "", datas)
item.fanart = scrapertools.find_single_match(
datas, "<meta property='og:image' content='([^']+)' />")
item.fanart = item.fanart.replace('w780', 'original')
item.plot = scrapertools.find_single_match(datas, '</h4><p>(.*?)</p>')
item.plot = scrapertools.htmlclean(item.plot)
item.infoLabels['director'] = scrapertools.find_single_match(
datas, '<div class="name"><a href="[^"]+">([^<]+)</a>')
item.infoLabels['genre'] = scrapertools.find_single_match(
datas, 'rel="tag">[^<]+</a><a href="[^"]+" rel="tag">([^<]+)</a>')
return itemlist
def generos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)">([^<]+)</a> <i>([^<]+)</i></li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, cantidad in matches:
if cantidad != '0':# and not '♦' in scrapedtitle:
title = "%s (%s)" % (scrapedtitle, cantidad)
itemlist.append(item.clone(channel=item.channel, action="peliculas", title=title, page=0,
url=scrapedurl, viewmode="movie_with_plot"))
return itemlist
def year_release(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
# logger.info(data)
patron = '<li><a href="([^"]+)">([^<]+)</a></li>' # url, title
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
itemlist.append(item.clone(channel=item.channel, action="peliculas", title=scrapedtitle, page=0,
url=scrapedurl, viewmode="movie_with_plot", extra='next'))
return itemlist
def series(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)| |<br>", "", data)
patron = '<div class="poster">.*?<img src="([^"]+)" alt="([^"]+)">'
patron += '.*?<a href="([^"]+)">.*?IMDb: (.*?)</span>\s*'
patron += '<span>(\d+)</span>.*?<div class="texto">([^<]+)</div>'
matches = scrapertools.find_multiple_matches(data, patron)
for thumbnail, stitle, url, rating, year, plot in matches[item.page:item.page + 30]:
if plot == '':
plot = scrapertools.find_single_match(data, '<div class="texto">([^<]+)</div>')
stitle = stitle.strip()
thumbnail = thumbnail.replace("-185x278", "")
filter_list = {"first_air_date": year}
filter_list = filter_list.items()
rcolor = color_rating(rating)
title = stitle
if not config.get_setting('unify'):
title += " [COLOR blue](%s)[/COLOR] [COLOR %s](%s)[/COLOR]" % (year, rcolor, rating)
itemlist.append(item.clone(title=title, url=url, action="temporadas",
contentSerieName=stitle, plot=plot,
thumbnail=thumbnail, contentType='tvshow',
infoLabels={'filtro': filter_list}))
tmdb.set_infoLabels(itemlist, __modo_grafico__)
if item.page + 30 < len(matches):
itemlist.append(item.clone(page=item.page + 30,
title="» Siguiente »"))
else:
next_page = scrapertools.find_single_match(
data, '<link rel="next" href="([^"]+)" />')
if next_page:
itemlist.append(item.clone(url=next_page, page=0,
title="» Siguiente »"))
return itemlist
def temporadas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
datas = re.sub(r"\n|\r|\t| |<br>", "", data)
patron = "<span class='title'>([^<]+)<i>.*?" # numeros de temporadas
patron += "<img src='([^']+)'>" # capitulos
# logger.info(datas)
matches = scrapertools.find_multiple_matches(datas, patron)
if len(matches) > 1:
for scrapedseason, scrapedthumbnail in matches:
scrapedseason = " ".join(scrapedseason.split())
temporada = scrapertools.find_single_match(scrapedseason, '(\d+)')
new_item = item.clone(action="episodios", season=temporada, thumbnail=scrapedthumbnail, extra='temporadas')
new_item.infoLabels['season'] = temporada
new_item.extra = ""
itemlist.append(new_item)
tmdb.set_infoLabels(itemlist, __modo_grafico__)
if not config.get_setting('unify'):
for i in itemlist:
i.title = "%s. %s" % (i.infoLabels['season'], i.infoLabels['tvshowtitle'])
if i.infoLabels['title']:
                        # If the season has its own name, append it to the item title
i.title += " - %s" % (i.infoLabels['title'])
if i.infoLabels.has_key('poster_path'):
                        # If the season has its own poster, use it instead of the series poster
i.thumbnail = i.infoLabels['poster_path']
itemlist.sort(key=lambda it: it.title)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName,
thumbnail=thumbnail_host, fanart=fanart_host))
return itemlist
else:
return episodios(item)
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
datas = re.sub(r"\n|\r|\t| |<br>", "", data)
patron = "<div class='imagen'>.*?"
patron += "<div class='numerando'>(.*?)</div>.*?"
patron += "<a href='([^']+)'>([^<]+)</a>"
matches = scrapertools.find_multiple_matches(datas, patron)
for scrapedtitle, scrapedurl, scrapedname in matches:
scrapedtitle = scrapedtitle.replace('--', '0')
patron = '(\d+) - (\d+)'
match = re.compile(patron, re.DOTALL).findall(scrapedtitle)
season, episode = match[0]
if 'season' in item.infoLabels and int(item.infoLabels['season']) != int(season):
continue
title = "%sx%s: %s" % (season, episode.zfill(2), scrapertools.unescape(scrapedname))
new_item = item.clone(title=title, url=scrapedurl, action="findvideos",
contentTitle=title, contentType="episode")
if 'infoLabels' not in new_item:
new_item.infoLabels = {}
new_item.infoLabels['season'] = season
new_item.infoLabels['episode'] = episode.zfill(2)
itemlist.append(new_item)
    # TODO: skip this when adding to the video library
if not item.extra:
        # Fetch the data for every episode of the season using multiple threads
tmdb.set_infoLabels(itemlist, __modo_grafico__)
if not config.get_setting('unify'):
for i in itemlist:
if i.infoLabels['title']:
                    # If the episode has its own name, append it to the item title
i.title = "%sx%s %s" % (i.infoLabels['season'], i.infoLabels[
'episode'], i.infoLabels['title'])
if i.infoLabels.has_key('poster_path'):
                    # If the episode has its own image, use it as the poster
i.thumbnail = i.infoLabels['poster_path']
itemlist.sort(key=lambda it: int(it.infoLabels['episode']),
reverse=config.get_setting('orden_episodios', __channel__))
    # "Add this series to the video library" option
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca",
url=item.url, action="add_serie_to_library", extra="episodios",
contentSerieName=item.contentSerieName,
thumbnail=thumbnail_host, fanart=fanart_host))
return itemlist
def findvideos(item):
logger.info()
import base64
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}| ", "", data)
patron = "data-post='(\d+)' data-nume='(\d+)'.*?img src='([^']+)'>"
matches = re.compile(patron, re.DOTALL).findall(data)
for id, option, lang in matches:
lang = scrapertools.find_single_match(lang, '.*?/flags/(.*?).png')
lang = lang.lower().strip()
idioma = {'mx': '[COLOR cornflowerblue](LAT)[/COLOR]',
'es': '[COLOR green](CAST)[/COLOR]',
'en': '[COLOR red](VOSE)[/COLOR]',
'gb': '[COLOR red](VOSE)[/COLOR]'}
if lang in idioma:
lang = idioma[lang]
else:
lang = idioma['en']
post = {'action': 'doo_player_ajax', 'post': id, 'nume': option, 'type': 'movie'}
post = urllib.urlencode(post)
test_url = '%swp-admin/admin-ajax.php' % host
new_data = httptools.downloadpage(test_url, post=post, headers={'Referer': item.url}).data
url = scrapertools.find_single_match(new_data, "src='([^']+)'")
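        # some embeds appear to wrap the real URL base64-encoded in the "y" parameter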
b64_url = scrapertools.find_single_match(url, "y=(.*?)&")
if b64_url:
url = base64.b64decode(b64_url)
if url != '':
itemlist.append(
Item(channel=item.channel, action='play', language=lang, infoLabels=item.infoLabels,
url=url, title='Ver en: ' + '[COLOR yellowgreen]%s [/COLOR]' + lang))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
itemlist.sort(key=lambda it: it.language, reverse=False)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos",
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
thumbnail=thumbnail_host, contentTitle=item.contentTitle))
return itemlist
| gpl-3.0 | 7,756,523,842,567,727,000 | 39.012658 | 119 | 0.547171 | false |
TaliesinSkye/evennia | wintersoasis-master/objects/examples/object.py | 1 | 7393 | """
Template for Objects
Copy this module up one level and name it as you like, then
use it as a template to create your own Objects.
To make the default commands default to creating objects of your new
type (and also change the "fallback" object used when typeclass
creation fails), change settings.BASE_OBJECT_TYPECLASS to point to
your new class, e.g.
settings.BASE_OBJECT_TYPECLASS = "game.gamesrc.objects.myobj.MyObj"
Note that objects already created in the database will not notice
this change, you have to convert them manually e.g. with the
@typeclass command.
"""
from ev import Object
class ExampleObject(Object):
"""
This is the root typeclass object, implementing an in-game Evennia
game object, such as having a location, being able to be
manipulated or looked at, etc. If you create a new typeclass, it
must always inherit from this object (or any of the other objects
in this file, since they all actually inherit from BaseObject, as
seen in src.object.objects).
The BaseObject class implements several hooks tying into the game
engine. By re-implementing these hooks you can control the
system. You should never need to re-implement special Python
methods, such as __init__ and especially never __getattribute__ and
__setattr__ since these are used heavily by the typeclass system
of Evennia and messing with them might well break things for you.
* Base properties defined/available on all Objects
key (string) - name of object
name (string)- same as key
aliases (list of strings) - aliases to the object. Will be saved to database as AliasDB entries but returned as strings.
dbref (int, read-only) - unique #id-number. Also "id" can be used.
dbobj (Object, read-only) - link to database model. dbobj.typeclass points back to this class
typeclass (Object, read-only) - this links back to this class as an identified only. Use self.swap_typeclass() to switch.
date_created (string) - time stamp of object creation
permissions (list of strings) - list of permission strings
player (Player) - controlling player (will also return offline player)
location (Object) - current location. Is None if this is a room
home (Object) - safety start-location
sessions (list of Sessions, read-only) - returns all sessions connected to this object
has_player (bool, read-only)- will only return *connected* players
contents (list of Objects, read-only) - returns all objects inside this object (including exits)
exits (list of Objects, read-only) - returns all exits from this object, if any
destination (Object) - only set if this object is an exit.
is_superuser (bool, read-only) - True/False if this user is a superuser
* Handlers available
locks - lock-handler: use locks.add() to add new lock strings
db - attribute-handler: store/retrieve database attributes on this self.db.myattr=val, val=self.db.myattr
ndb - non-persistent attribute handler: same as db but does not create a database entry when storing data
scripts - script-handler. Add new scripts to object with scripts.add()
cmdset - cmdset-handler. Use cmdset.add() to add new cmdsets to object
nicks - nick-handler. New nicks with nicks.add().
* Helper methods (see src.objects.objects.py for full headers)
search(ostring, global_search=False, attribute_name=None, use_nicks=False, location=None, ignore_errors=False, player=False)
execute_cmd(raw_string)
msg(message, from_obj=None, data=None)
msg_contents(message, exclude=None, from_obj=None, data=None)
move_to(destination, quiet=False, emit_to_obj=None, use_destination=True)
copy(new_key=None)
delete()
is_typeclass(typeclass, exact=False)
swap_typeclass(new_typeclass, clean_attributes=False, no_default=True)
access(accessing_obj, access_type='read', default=False)
check_permstring(permstring)
* Hooks (these are class methods, so their arguments should also start with self):
basetype_setup() - only called once, used for behind-the-scenes setup. Normally not modified.
basetype_posthook_setup() - customization in basetype, after the object has been created; Normally not modified.
at_object_creation() - only called once, when object is first created. Object customizations go here.
at_object_delete() - called just before deleting an object. If returning False, deletion is aborted. Note that all objects
inside a deleted object are automatically moved to their <home>, they don't need to be removed here.
at_init() - called whenever typeclass is cached from memory, at least once every server restart/reload
at_cmdset_get() - this is called just before the command handler requests a cmdset from this object
at_first_login() - (player-controlled objects only) called once, the very first time user logs in.
at_pre_login() - (player-controlled objects only) called every time the user connects, after they have identified, before other setup
at_post_login() - (player-controlled objects only) called at the end of login, just before setting the player loose in the world.
at_disconnect() - (player-controlled objects only) called just before the user disconnects (or goes linkless)
at_server_reload() - called before server is reloaded
at_server_shutdown() - called just before server is fully shut down
at_before_move(destination) - called just before moving object to the destination. If returns False, move is cancelled.
announce_move_from(destination) - called in old location, just before move, if obj.move_to() has quiet=False
announce_move_to(source_location) - called in new location, just after move, if obj.move_to() has quiet=False
at_after_move(source_location) - always called after a move has been successfully performed.
at_object_leave(obj, target_location) - called when an object leaves this object in any fashion
at_object_receive(obj, source_location) - called when this object receives another object
at_before_traverse(traversing_object) - (exit-objects only) called just before an object traverses this object
at_after_traverse(traversing_object, source_location) - (exit-objects only) called just after a traversal has happened.
at_failed_traverse(traversing_object) - (exit-objects only) called if traversal fails and property err_traverse is not defined.
at_msg_receive(self, msg, from_obj=None, data=None) - called when a message (via self.msg()) is sent to this obj.
If returns false, aborts send.
at_msg_send(self, msg, to_obj=None, data=None) - called when this objects sends a message to someone via self.msg().
return_appearance(looker) - describes this object. Used by "look" command by default
at_desc(looker=None) - called by 'look' whenever the appearance is requested.
at_get(getter) - called after object has been picked up. Does not stop pickup.
at_drop(dropper) - called when this object has been dropped.
at_say(speaker, message) - by default, called if an object inside this object speaks
"""
pass
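# Minimal sketch of how the hooks documented above are typically overridden.
# The class name, description text and lock string are illustrative only; the
# hooks and handlers used (at_object_creation, db, locks, return_appearance)
# are the ones listed in the docstring.
class HeavyStatue(ExampleObject):
    """Example object that sets up persistent attributes when first created."""
    def at_object_creation(self):
        # called only once, when the object is first created
        self.db.desc = "A weathered stone statue."
        self.db.weight = 500
        # lock string: nobody can pick this up
        self.locks.add("get:false()")
    def return_appearance(self, looker):
        # start from the default appearance and append a custom line
        text = super(HeavyStatue, self).return_appearance(looker)
        return "%s\nIt looks far too heavy to lift." % text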
| bsd-3-clause | 5,246,995,477,435,459,000 | 59.105691 | 144 | 0.714324 | false |
pyarchinit/pipinstaller-qgis-experimental | pipinstaller/resources_rc.py | 1 | 6549 | # -*- coding: utf-8 -*-
# Resource object code
#
# Created: ven 7. mar 16:51:28 2014
# by: The Resource Compiler for PyQt (Qt v4.7.1)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = "\
\x00\x00\x05\x23\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x17\x00\x00\x00\x18\x08\x06\x00\x00\x00\x11\x7c\x66\x75\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xde\x03\x07\x0f\x31\x2c\xc1\x4e\xb9\xfa\x00\x00\x04\xb0\x49\x44\
\x41\x54\x48\xc7\xa5\x95\x59\x6c\x55\x55\x14\x86\xbf\x7d\xce\xb9\
\x53\x6f\x7b\x6f\x07\x2a\x1d\x28\x60\x27\x0a\x96\x16\xd1\x80\x65\
\x1e\xa2\x31\xe9\x03\xe0\x83\x9a\x18\x83\x91\x48\xc4\x60\x02\x09\
\x0f\xd5\xf0\x50\xc1\x80\xe1\x81\x08\x09\x89\xf8\x60\x24\x21\x51\
\x13\x13\x8d\x86\x80\x31\x10\x81\x52\x8a\x32\x16\x9a\x42\x62\x41\
\x4a\xb1\x33\x1d\xee\x74\xee\x19\xf6\xf6\xe1\xb6\xa5\x57\x28\x6a\
\x5c\xc9\x4e\x76\xce\x59\xf9\xf6\x5a\xff\x5a\x6b\x6f\x01\x30\xdc\
\x5a\x5f\x0b\x34\x02\xeb\xf9\xff\xf6\x3d\xd0\x98\x5d\x73\xec\x9a\
\x18\x03\xff\x02\x64\x4f\xe5\x2d\xa5\xc2\xb2\x25\x01\x9f\x81\xab\
\xc0\x75\x1d\x3c\x86\xfe\xa4\x03\x86\x81\x55\xda\x58\xc4\x53\x82\
\x95\x52\xc4\xe2\x0e\xfd\xdd\x26\xce\x88\x17\x5b\xe6\x32\x32\xea\
\xa2\x94\x9a\x58\x8f\xb1\x6c\xa0\xd1\x00\xd6\x4c\x7c\xa9\x39\xf6\
\x88\x57\xeb\x89\xd5\x48\x1b\x8e\x5e\xe9\x62\x49\x8d\x9f\x93\x6d\
\x37\xd9\xe0\xcf\x21\x37\x3b\xcc\xae\x83\xb7\xf8\xf4\x8b\x8e\x47\
\xc3\x6e\xad\x07\x58\xaf\x01\xa1\xa9\xa2\xbe\x72\x6c\x15\x05\xf9\
\x7e\x7a\xfb\x92\xb4\xb4\x0d\xf2\xca\xd5\x13\x1c\x6e\x6f\xe7\xfe\
\x1f\x49\x84\x10\x7c\xb4\xbd\x8a\x1d\x9b\xcb\xa7\xd4\x46\x7b\x12\
\x78\x46\x41\x80\x48\xd4\xa1\x6b\x30\xc1\x90\x63\x43\x06\xc8\x20\
\x74\xc7\x13\xb8\x52\xa2\x14\xec\xdc\x3a\x87\x86\x2d\x15\xff\x1e\
\x7e\xf1\x87\x95\x3c\x5d\x12\x44\x29\x70\x5c\x49\x6f\x6f\x92\xe1\
\x4e\x0b\x24\xe8\x09\x41\x5f\xb7\x85\x40\x00\x20\x04\x7c\xf0\x5e\
\x25\x3b\xb7\x56\xfe\x33\xbc\xe5\xbb\x15\x94\xcf\xce\x04\xc0\x95\
\x0a\x29\x15\xa3\xfd\x16\x91\x3b\x16\x24\xc0\xee\x92\x5c\xbb\x3a\
\x82\x02\x74\x4d\xa0\x14\xa0\x60\xc7\xe6\x0a\x1a\xb7\x55\xa5\xb1\
\x8c\xf1\x4d\xc2\x74\x39\xf7\xed\x72\xaa\xca\xb2\xc6\xba\x04\x6c\
\x47\x72\xa3\x2d\xc2\xe5\xfb\xa3\x24\x5f\xd7\x20\x20\x09\xce\x84\
\x15\xae\x8f\xee\xee\x24\xc5\x45\x3e\x0c\x43\x43\xca\x54\xc7\x6c\
\xdf\x54\x86\xa1\x8b\x09\xb8\x18\x6e\xad\x57\x49\xcb\xc5\xe7\xd5\
\xd3\xfa\x3a\x96\x70\xf9\xb5\xe5\x01\xdf\x9c\xe8\xe2\xf8\xbc\x7e\
\x22\xb9\x0e\x38\x90\x31\x04\xd3\x6e\xc3\x4b\x2b\x67\xb3\x21\x30\
\x9d\xba\xa5\x79\x18\x9e\x87\x07\xc0\x58\x2a\x68\x18\xb6\x2d\xd3\
\xc0\x4a\x29\x6c\x47\x72\xe1\xea\x03\xbe\xfe\xb1\x93\x9f\x16\x0e\
\x12\x09\xa7\xc0\x00\x56\x08\xa2\x73\x75\xce\x98\xfd\x64\xb9\x1a\
\x79\x37\x7d\x54\xce\xcd\x44\xd3\x00\x65\x23\x8c\x5c\x40\x22\xb4\
\x00\x86\xc7\xf3\x50\x76\x01\x38\xae\xa2\xa7\x3f\xc9\xe9\xf3\x03\
\xd0\x19\x23\xb2\xe2\x21\x18\x40\xea\x60\xf9\x21\x21\x25\xc7\x65\
\x1f\xe1\x61\x0f\xd5\x46\x26\xd2\x53\x02\xca\x41\x78\xa6\x03\x0a\
\x21\xb4\xf4\x82\xba\x52\x61\x5a\x92\xbb\xf7\x13\xdc\xba\x14\xa1\
\x7d\xba\x35\xa9\x2a\x63\x99\xb9\x20\x1d\x85\x9d\x94\x68\xa6\xa0\
\xa3\x23\xca\x85\x36\x0f\xde\x40\x01\xc2\x93\x3f\x21\x8b\x52\xee\
\xdf\xe0\xae\xc2\x34\x5d\x4e\x9d\xea\xa3\xd9\x3f\x48\x5b\xbd\x95\
\x8a\x7a\xf2\x84\x5b\xe0\xc4\x15\xc9\xb8\x24\x6a\x39\x74\x06\x15\
\xd7\xee\x99\x18\xba\x3b\x49\xf3\xb4\x56\x54\x13\xf0\xa1\x51\x07\
\xc7\x91\xe8\x36\xe0\x01\x92\x63\x2b\x90\xd2\x4d\x1f\x01\x37\xae\
\x70\x87\x14\x66\x1c\xcc\x1e\xb8\x7e\xa9\x97\x48\xd4\x62\xf2\x35\
\x23\xad\xee\xf1\xa4\x45\xea\x12\x4a\xed\xa8\x59\x10\xa6\xea\x62\
\x88\xcb\x7b\x46\x28\x34\x0a\xf1\x28\xe8\x89\x76\x53\x12\xf6\x23\
\x07\x1c\xee\xe9\x0e\x72\x9a\x8e\x77\x95\x87\xb9\x66\x2e\xef\x6e\
\x5c\x48\x38\xe4\x23\x16\xb7\xc7\xc0\x3d\x28\x7b\x00\xbd\x61\x4b\
\x65\x63\x6a\xd2\x04\x7f\xf6\x98\x64\x06\x0d\x10\x82\xea\x9a\x30\
\xa3\x43\x8a\xfc\xca\xc5\x4c\x9b\x5d\x42\x69\x41\x84\x57\x37\x94\
\x70\x7b\xc4\x64\xdd\xa6\x6d\xcc\x2b\x5f\x80\xdb\x76\x87\xf7\xdf\
\xae\xe3\x85\xc5\x33\x48\x98\xa9\xaa\xef\xde\x7f\x86\xe5\xcf\xca\
\x47\x27\x74\xcd\x1b\xe7\x18\x8d\xd8\x14\x17\xf8\x09\x67\x19\xd8\
\xc9\x24\xcb\x96\x2e\x62\x49\xdd\x73\x04\x0c\x97\x67\x2a\x82\x84\
\xb3\x3c\xf4\xdd\xed\x66\x74\x78\x88\xba\xe7\x66\xb2\x76\x6d\x29\
\x66\x32\x05\xde\x73\xa0\x89\x7d\x87\x9a\xd3\x35\xd7\x83\xf3\xd1\
\x83\xf3\x19\x1c\xb2\x78\xf1\xcd\x66\x7a\xfb\x93\xe4\x84\xbc\x94\
\xce\xca\xe1\xd6\x8d\xdf\xb9\x7e\xa9\x8d\xea\xaa\x6c\xc2\xd9\xf9\
\x78\xfd\x79\xcc\xa9\x28\x67\x46\x51\x31\x52\x2a\xcc\xb8\x8d\x10\
\x82\x7d\x87\x9a\xd9\x7b\xb0\x89\x71\x1e\x80\x36\xbe\x19\xb7\xa1\
\x11\x9b\x75\xef\x5c\xe0\xf6\xbd\x18\x33\x8b\x34\x54\xf4\x37\x16\
\x55\x76\x52\x5b\x3d\x13\x4f\xa0\x90\x97\xd7\x94\xf2\xe5\xe1\x03\
\x9c\xfd\xf9\x2b\xde\x7a\xad\x16\x25\x15\xfb\x3f\x6b\x61\xf7\xfe\
\x33\x69\x1c\x3d\x38\x1f\x11\xe9\xf8\x30\x06\x32\x03\x20\xab\xec\
\x93\x89\x9f\x01\xbf\xce\xa1\x5d\xb5\x14\xe4\x7b\x79\x2a\xcf\xc7\
\xac\x8a\xe7\x19\x18\x4c\xd0\xd3\x17\x25\x91\xb0\x29\x29\x0a\x53\
\x5c\x98\xc5\xe7\x47\x2f\xd3\xf0\xf1\xc9\x34\x70\xa4\xa3\x21\xd5\
\x26\xb1\xbb\x7b\xcf\x4a\x67\x64\xd9\xe3\xdf\x4e\x97\xe8\x83\x76\
\xfc\x3e\x81\x91\x51\x89\xe5\x08\x62\x71\x0b\xc3\xd0\xc8\x08\x78\
\xf1\x18\xda\xd4\x0f\x85\x11\x6e\xd2\x8c\x50\xdd\x11\x21\xf4\xe8\
\x63\x1d\x34\x9d\x50\xde\x3c\xbc\x99\x73\xd0\x0d\x3f\x7e\x9f\x41\
\x5e\x4e\x80\xac\xa0\x0f\x8f\x21\xa6\x04\x0b\xa1\x47\x8d\x50\xdd\
\x11\xa1\x94\x5a\x6d\x0d\x9f\x2e\x73\x46\xcf\x6f\x9c\x2a\x83\xff\
\x62\x9a\x11\x6e\x32\x42\x75\x47\xbc\xd9\x2b\x3b\xfe\x02\xd3\x9f\
\x1f\x05\x53\xff\xf1\x77\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\
\x60\x82\
"
qt_resource_name = "\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x0c\
\x04\x48\xd2\x62\
\x00\x70\
\x00\x69\x00\x70\x00\x69\x00\x6e\x00\x73\x00\x74\x00\x61\x00\x6c\x00\x6c\x00\x65\x00\x72\
\x00\x08\
\x0a\x61\x5a\xa7\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct = "\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x32\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| gpl-3.0 | 3,940,596,658,675,446,000 | 50.164063 | 96 | 0.723164 | false |
NTUTVisualScript/Visual_Script | GeometrA/src/ADB/robot.py | 1 | 1526 | class Robot:
def key_press(self, keycode):
raise NotImplementedError("Subclasses should implement this!")
def key_release(self, keycode):
raise NotImplementedError("Subclasses should implement this!")
def send_keys(self, keys):
raise NotImplementedError("Subclasses should implement this!")
def drag_and_drop(self, start_x, start_y, end_x, end_y, duration=None):
raise NotImplementedError("Subclasses should implement this!")
def capture_screen(self):
raise NotImplementedError("Subclasses should implement this!")
def tap(self, x, y, duration):
raise NotImplementedError("Subclasses should implement this!")
def swipe(self, start_x, start_y, end_x, end_y, duration):
raise NotImplementedError("Subclasses should implement this!")
def pinch(self, x, y, w, h, percent, steps):
"""Pinch on an element a certain amount
:Args:
- x, y, w, h - the rect to pinch
- percent - (optional) amount to pinch.
- steps - (optional) number of steps in the pinch action
"""
raise NotImplementedError("Subclasses should implement this!")
def zoom(self, x, y, w, h, percent, steps):
"""Zoom on an element a certain amount
:Args:
- x, y, w, h - the rect to zoom
- percent - (optional) amount to zoom.
- steps - (optional) number of steps in the zoom action
"""
raise NotImplementedError("Subclasses should implement this!")
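# Hedged sketch of one possible concrete implementation backed by the standard
# "adb shell input" commands.  Everything here (the AdbRobot name, the use of
# subprocess, adb being on PATH) is an assumption for illustration; only a few
# of the abstract methods are covered.
import subprocess
class AdbRobot(Robot):
    def __init__(self, adb="adb"):
        self.adb = adb
    def _shell(self, *args):
        # run "adb shell <args...>" and return the exit status
        return subprocess.call([self.adb, "shell"] + [str(a) for a in args])
    def tap(self, x, y, duration=None):
        # "input tap" has no duration; the argument is accepted and ignored
        return self._shell("input", "tap", x, y)
    def swipe(self, start_x, start_y, end_x, end_y, duration=300):
        return self._shell("input", "swipe", start_x, start_y, end_x, end_y, duration)
    def send_keys(self, keys):
        # simple text only; spaces/special characters would need escaping
        return self._shell("input", "text", keys)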
| mit | 8,194,011,252,543,002,000 | 34.488372 | 75 | 0.640236 | false |
dennishuo/dataproc-initialization-actions | kafka/test_kafka.py | 1 | 1443 | import os
import unittest
from parameterized import parameterized
from integration_tests.dataproc_test_case import DataprocTestCase
class KafkaTestCase(DataprocTestCase):
COMPONENT = 'kafka'
INIT_ACTIONS = ['kafka/kafka.sh']
TEST_SCRIPT_FILE_NAME = 'validate.sh'
def verify_instance(self, name):
self.upload_test_file(
os.path.join(os.path.dirname(os.path.abspath(__file__)),
self.TEST_SCRIPT_FILE_NAME), name)
self.__run_test_script(name)
self.remove_test_script(self.TEST_SCRIPT_FILE_NAME, name)
def __run_test_script(self, name):
self.assert_instance_command(
name, "bash {}".format(self.TEST_SCRIPT_FILE_NAME))
@parameterized.expand(
[
("HA", "1.2", ["m-0", "m-1", "m-2"]),
("HA", "1.3", ["m-0", "m-1", "m-2"]),
],
testcase_func_name=DataprocTestCase.generate_verbose_test_name)
def test_kafka(self, configuration, dataproc_version, machine_suffixes):
self.createCluster(configuration,
self.INIT_ACTIONS,
dataproc_version,
machine_type="n1-standard-2")
for machine_suffix in machine_suffixes:
self.verify_instance("{}-{}".format(self.getClusterName(),
machine_suffix))
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 4,321,424,766,428,474,400 | 33.357143 | 76 | 0.568954 | false |
rafaelvieiras/PseudoTV_Live | plugin.video.pseudotv.live/resources/lib/parsers/MKVParser.py | 1 | 5919 | # Copyright (C) 2020 Jason Anderson, Lunatixz
#
#
# This file is part of PseudoTV Live.
#
# PseudoTV Live is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PseudoTV Live is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PseudoTV Live. If not, see <http://www.gnu.org/licenses/>.
from resources.lib.globals import *
class MKVParser:
def determineLength(self, filename):
log("MKVParser: determineLength " + filename)
try:
self.File = xbmcvfs.File(filename, "r")
except:
log("MKVParser: Unable to open the file")
log(traceback.format_exc(), xbmc.LOGERROR)
return
size = self.findHeader()
if size == 0:
log('MKVParser: Unable to find the segment info')
dur = 0
else:
dur = self.parseHeader(size)
log("MKVParser: Duration is " + str(dur))
return dur
def parseHeader(self, size):
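        # Inside the SegmentInfo element: 0x2ad7b1 is the TimecodeScale (ns per
        # timestamp tick) and 0x4489 is the Duration (in ticks), so the length
        # in seconds works out to Duration * TimecodeScale / 1e9 below.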
duration = 0
timecode = 0
fileend = self.File.tell() + size
datasize = 1
data = 1
while self.File.tell() < fileend and datasize > 0 and data > 0:
data = self.getEBMLId()
datasize = self.getDataSize()
if data == 0x2ad7b1:
timecode = 0
try:
for x in range(datasize):
timecode = (timecode << 8) + struct.unpack('B', self.getData(1))[0]
except:
timecode = 0
if duration != 0 and timecode != 0:
break
elif data == 0x4489:
try:
if datasize == 4:
duration = int(struct.unpack('>f', self.getData(datasize))[0])
else:
duration = int(struct.unpack('>d', self.getData(datasize))[0])
except:
log("MKVParser: Error getting duration in header, size is " + str(datasize))
duration = 0
if timecode != 0 and duration != 0:
break
else:
try:
self.File.seek(datasize, 1)
except:
log('MKVParser: Error while seeking')
return 0
if duration > 0 and timecode > 0:
dur = (duration * timecode) / 1000000000
return dur
return 0
def findHeader(self):
log("MKVParser: findHeader")
filesize = self.getFileSize()
if filesize == 0:
log("MKVParser: Empty file")
return 0
data = self.getEBMLId()
# Check for 1A 45 DF A3
if data != 0x1A45DFA3:
log("MKVParser: Not a proper MKV")
return 0
datasize = self.getDataSize()
try:
self.File.seek(datasize, 1)
except:
log('MKVParser: Error while seeking')
return 0
data = self.getEBMLId()
# Look for the segment header
while data != 0x18538067 and self.File.tell() < filesize and data > 0 and datasize > 0:
datasize = self.getDataSize()
try:
self.File.seek(datasize, 1)
except:
log('MKVParser: Error while seeking')
return 0
data = self.getEBMLId()
datasize = self.getDataSize()
data = self.getEBMLId()
# Find segment info
while data != 0x1549A966 and self.File.tell() < filesize and data > 0 and datasize > 0:
datasize = self.getDataSize()
try:
self.File.seek(datasize, 1)
except:
log('MKVParser: Error while seeking')
return 0
data = self.getEBMLId()
datasize = self.getDataSize()
if self.File.tell() < filesize:
return datasize
return 0
def getFileSize(self):
size = 0
try:
pos = self.File.tell()
self.File.seek(0, 2)
size = self.File.tell()
self.File.seek(pos, 0)
except:
pass
return size
def getData(self, datasize):
data = self.File.readBytes(datasize)
return data
def getDataSize(self):
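        # EBML data sizes are variable-length integers: the position of the first set bit in the leading byte tells how many bytes follow, and that marker bit is masked off the value.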
data = self.File.readBytes(1)
try:
firstbyte = struct.unpack('>B', data)[0]
datasize = firstbyte
mask = 0xFFFF
for i in range(8):
if datasize >> (7 - i) == 1:
mask = mask ^ (1 << (7 - i))
break
datasize = datasize & mask
if firstbyte >> 7 != 1:
for i in range(1, 8):
datasize = (datasize << 8) + struct.unpack('>B', self.File.readBytes(1))[0]
if firstbyte >> (7 - i) == 1:
break
except:
datasize = 0
return datasize
def getEBMLId(self):
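        # EBML element IDs use the same length-prefix scheme, but the marker bit is kept as part of the ID (hence values like 0x1A45DFA3 and 0x18538067 above).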
data = self.File.readBytes(1)
try:
firstbyte = struct.unpack('>B', data)[0]
ID = firstbyte
if firstbyte >> 7 != 1:
for i in range(1, 4):
ID = (ID << 8) + struct.unpack('>B', self.File.readBytes(1))[0]
if firstbyte >> (7 - i) == 1:
break
except:
ID = 0
return ID
| gpl-3.0 | 41,527,159,163,878,820 | 27.320574 | 96 | 0.497043 | false |
koreyou/word_embedding_loader | setup.py | 1 | 3965 | import os
from setuptools import setup
from setuptools.extension import Extension
from setuptools.command.sdist import sdist as _sdist
cython_modules = [
["word_embedding_loader", "loader", "word2vec_bin"],
["word_embedding_loader", "saver", "word2vec_bin"]
]
def _cythonize(extensions, apply_cythonize):
import numpy
import six
ext = '.pyx' if apply_cythonize else '.cpp'
extensions = [
Extension(
'.'.join(mod), ['/'.join(mod) + ext],
language="c++"
) for mod in extensions
]
for i in six.moves.xrange(len(extensions)):
extensions[i].include_dirs.append(numpy.get_include())
        # Add signature for Sphinx
extensions[i].cython_directives = {"embedsignature": True}
if apply_cythonize:
from Cython.Build import cythonize
extensions = cythonize(extensions)
return extensions
class sdist(_sdist):
def run(self):
# Force cythonize for sdist
_cythonize(cython_modules, True)
_sdist.run(self)
class lazy_cythonize(list):
# Adopted from https://stackoverflow.com/a/26698408/7820599
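    # Defers calling _cythonize() (and therefore importing numpy/Cython) until setuptools actually iterates ext_modules.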
def _cythonize(self):
self._list = _cythonize(self._list, self._apply_cythonize)
self._is_cythonized = True
def __init__(self, extensions, apply_cythonize=False):
super(lazy_cythonize, self).__init__()
self._list = extensions
self._apply_cythonize = apply_cythonize
self._is_cythonized = False
def c_list(self):
if not self._is_cythonized:
self._cythonize()
return self._list
def __iter__(self):
for e in self.c_list():
yield e
def __getitem__(self, ii):
return self.c_list()[ii]
def __len__(self):
return len(self.c_list())
try:
with open('README.rst') as f:
readme = f.read()
except IOError:
readme = ''
name = 'WordEmbeddingLoader'
exec(open('word_embedding_loader/_version.py').read())
release = __version__
version = '.'.join(release.split('.')[:2])
setup(
name=name,
author='Yuta Koreeda',
author_email='[email protected]',
maintainer='Yuta Koreeda',
maintainer_email='[email protected]',
version=release,
description='Loaders and savers for different implentations of word embedding.',
long_description=readme,
url='https://github.com/koreyou/word_embedding_loader',
packages=['word_embedding_loader',
'word_embedding_loader.loader',
'word_embedding_loader.saver'
],
ext_modules=lazy_cythonize(
cython_modules,
os.environ.get('DEVELOP_WE', os.environ.get('READTHEDOCS')) is not None
),
license='MIT',
cmdclass = {'sdist': sdist},
install_requires=[
'Click',
'numpy>=1.10',
'six'
],
entry_points = {
'console_scripts': ['word-embedding-loader=word_embedding_loader.cli:cli'],
},
command_options={
'build_sphinx': {
'project': ('setup.py', name),
'version': ('setup.py', version),
'release': ('setup.py', release)}},
setup_requires = ['Cython',
'numpy>=1.10',
'six'
],
classifiers=[
"Environment :: Console",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Cython",
"Topic :: Documentation :: Sphinx",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Information Analysis"
]
)
| mit | 8,672,868,639,906,950,000 | 28.589552 | 84 | 0.590668 | false |
umeboshi2/vignewton | vignewton/views/admin/sitetext.py | 1 | 7950 | from cStringIO import StringIO
from datetime import datetime
import transaction
from pyramid.httpexceptions import HTTPFound, HTTPNotFound
from pyramid.security import authenticated_userid
from pyramid.renderers import render
from pyramid.response import Response
from trumpet.models.sitecontent import SiteText
from trumpet.resources import MemoryTmpStore
from trumpet.managers.admin.images import ImageManager
from trumpet.views.base import NotFound
from trumpet.views.menus import BaseMenu
from vignewton.views.base import AdminViewer, make_main_menu
from vignewton.managers.wiki import WikiArchiver
import colander
import deform
tmpstore = MemoryTmpStore()
def prepare_main_data(request):
layout = request.layout_manager.layout
menu = layout.ctx_menu
imgroute = 'admin_images'
url = request.route_url(imgroute, context='list', id=None)
menu.append_new_entry('List Images', url)
url = request.route_url(imgroute, context='add', id=None)
menu.append_new_entry('Add Image', url)
main_menu = make_main_menu(request)
layout.title = 'Manage Images'
layout.header = 'Manage Images'
layout.main_menu = main_menu.render()
layout.ctx_menu = menu
class EditSiteTextSchema(colander.Schema):
name = colander.SchemaNode(
colander.String(),
title='Name')
content = colander.SchemaNode(
colander.String(),
title='Content',
widget=deform.widget.TextAreaWidget(rows=10, cols=60))
class SiteTextViewer(AdminViewer):
def __init__(self, request):
super(SiteTextViewer, self).__init__(request)
#prepare_main_data(self.request)
self.images = ImageManager(self.request.db)
self._dispatch_table = dict(
list=self.list_site_text,
add=self.create_site_text,
delete=self.main,
confirmdelete=self.main,
viewentry=self.view_site_text,
editentry=self.edit_site_text,
create=self.create_site_text,
download_wiki_archive=self.download_wiki_archive,)
self.context = self.request.matchdict['context']
self._view = self.context
self.dispatch()
def _set_menu(self):
menu = self.layout.ctx_menu
menu.set_header('Site Text Menu')
url = self.url(context='list', id='all')
menu.append_new_entry('List Entries', url)
url = self.url(context='create', id='new')
menu.append_new_entry('Create New Entry', url)
url = self.url(context='download_wiki_archive', id='all')
menu.append_new_entry('Download Wiki Archive', url)
def main(self):
self._set_menu()
content = '<h1>Here is where we manage site text.</h1>'
self.layout.content = content
def manage_site_text(self):
self._set_menu()
action = None
if 'action' in self.request.GET:
action = self.request.GET['action']
return self._manage_site_text_action_map[action]()
def view_site_text(self):
self._set_menu()
id = int(self.request.matchdict['id'])
self.layout.footer = str(type(id))
entry = self.request.db.query(SiteText).get(id)
self.layout.subheader = entry.name
self.layout.content = '<pre width="80">%s</pre>' % entry.content
def list_site_text(self):
self._set_menu()
template = 'vignewton:templates/list-site-text.mako'
entries = self.request.db.query(SiteText).all()
env = dict(viewer=self, entries=entries)
self.layout.content = self.render(template, env)
def list_site_text_orig(self):
self._set_menu()
content = '<h1>Here is where we <b>list</b> site text.</h1>'
self.layout.content = content
anchors = []
edit_anchors = []
entries = self.request.db.query(SiteText).all()
for entry in entries:
getdata = dict(action='viewentry', id=entry.id)
href = self.url(context='viewentry', id=entry.id)
anchors.append('<a href="%s">%s</a>' % (href, entry.name))
getdata['action'] = 'editentry'
href = self.url(context='editentry', id=entry.id)
edit_anchors.append('<a href="%s">edit</a>' % href)
list_items = []
for index in range(len(anchors)):
list_item = '<li>%s(%s)</li>'
list_item = list_item % (anchors[index], edit_anchors[index])
list_items.append(list_item)
ul = '<ul>%s</ul>' % '\n'.join(list_items)
self.layout.content = ul
def _edit_site_text_form(self):
schema = EditSiteTextSchema()
submit_button = deform.form.Button(name='submit_site_text',
title='Update Content')
form = deform.Form(schema, buttons=(submit_button,))
self.layout.resources.deform_auto_need(form)
return form
def _validate_site_text(self, form, create=False):
controls = self.request.POST.items()
try:
data = form.validate(controls)
except deform.ValidationFailure, e:
self.layout.content = e.render()
return {}
if create:
db = self.request.db
query = db.query(SiteText).filter_by(name=data['name'])
rows = query.all()
if rows:
h1 = '<h1>Site Text "%s" already exists.</h1>'
h1 = h1 % data['name']
self.layout.content = h1 + form.render(data)
return {}
else:
self.layout.subheader = str(rows)
return data
def _submit_site_text(self, form, data={}):
rendered = form.render(data)
if 'submit_site_text' in self.request.params:
if not self._validate_site_text(form):
return
else:
self.layout.content = rendered
self.layout.subheader = 'Please edit content'
def create_site_text(self):
self._set_menu()
form = self._edit_site_text_form()
# check submission
if 'submit_site_text' in self.request.params:
valid = self._validate_site_text(form, create=True)
if not valid:
return
transaction.begin()
entry = SiteText(valid['name'], valid['content'])
self.request.db.add(entry)
transaction.commit()
self.layout.content = 'Submitted for approval.'
else:
self.layout.content = form.render()
self.layout.subheader = 'Please edit content'
def edit_site_text(self):
self._set_menu()
form = self._edit_site_text_form()
rendered = form.render()
id = int(self.request.matchdict['id'])
entry = self.request.db.query(SiteText).get(id)
data = dict(name=entry.name, content=entry.content)
if 'submit_site_text' in self.request.params:
valid = self._validate_site_text(form)
if not valid:
return
transaction.begin()
entry.content = valid['content']
self.request.db.add(entry)
transaction.commit()
self.layout.content = 'Submitted for approval.'
else:
self.layout.content = form.render(data)
self.layout.subheader = 'Please edit content'
def download_wiki_archive(self):
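        # Build a zip archive of every wiki page and return it as a file attachment.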
self._set_menu()
archiver = WikiArchiver(self.request.db)
archiver.create_new_zipfile()
archive = archiver.archive_pages()
content_type = 'application/zip'
r = Response(content_type=content_type, body=archive)
r.content_disposition = 'attachment; filename="tutwiki-archive.zip"'
self.response = r
| unlicense | -3,346,914,358,330,187,300 | 33.415584 | 76 | 0.588176 | false |
silly-wacky-3-town-toon/SOURCE-COD | toontown/golf/GolfRewardDialog.py | 1 | 13295 | from panda3d.core import *
from panda3d.direct import *
from toontown.toonbase.ToonBaseGlobal import *
from direct.interval.IntervalGlobal import *
from direct.task import Task
from direct.directnotify import DirectNotifyGlobal
from math import *
from direct.distributed.ClockDelta import *
from toontown.golf import GolfGlobals
from toontown.shtiker.GolfPage import GolfTrophy
class GolfRewardDialog:
notify = directNotify.newCategory('GolfRewardDialog')
def __init__(self, avIdList, trophyList, rankingsList, holeBestList, courseBestList, cupList, localAvId, tieBreakWinner, aimTimesList, endMovieCallback = None):
self.avIdList = avIdList
self.trophyList = trophyList
self.rankingsList = rankingsList
self.holeBestList = holeBestList
self.courseBestList = courseBestList
self.cupList = cupList
self.tieBreakWinner = tieBreakWinner
self.movie = None
self.myPlace = 0
self.victory = None
self.endMovieCallback = endMovieCallback
self.aimTimesList = aimTimesList
self.setup(localAvId)
return
def calcTrophyTextListForOnePlayer(self, avId):
retval = []
av = base.cr.doId2do.get(avId)
if av and avId in self.avIdList:
playerIndex = self.avIdList.index(avId)
name = av.getName()
for trophyIndex in xrange(len(self.trophyList[playerIndex])):
wonTrophy = self.trophyList[playerIndex][trophyIndex]
if wonTrophy:
trophyName = TTLocalizer.GolfTrophyDescriptions[trophyIndex]
text = TTLocalizer.GolfAvReceivesTrophy % {'name': name,
'award': trophyName}
retval.append(text)
return retval
def calcCupTextListForAllPlayers(self, localAvId):
retval = []
for cupPlayerIndex in xrange(len(self.avIdList)):
if self.avIdList[cupPlayerIndex] != localAvId:
av = base.cr.doId2do.get(self.avIdList[cupPlayerIndex])
name = ''
if av:
name = av.getName()
cupIndex = 0
for cupIndex in xrange(len(self.cupList[cupPlayerIndex])):
if self.cupList[cupPlayerIndex][cupIndex]:
cupName = TTLocalizer.GolfCupDescriptions[cupIndex]
text = TTLocalizer.GolfAvReceivesCup % {'name': name,
'cup': cupName}
retval.append(text)
for cupPlayerIndex in xrange(len(self.avIdList)):
if self.avIdList[cupPlayerIndex] == localAvId:
av = base.cr.doId2do.get(self.avIdList[cupPlayerIndex])
name = av.getName()
cupIndex = 0
for cupIndex in xrange(len(self.cupList[cupPlayerIndex])):
if self.cupList[cupPlayerIndex][cupIndex]:
cupName = TTLocalizer.GolfCupDescriptions[cupIndex]
text = TTLocalizer.GolfAvReceivesCup % {'name': name,
'cup': cupName}
retval.append(text)
return retval
def calcRankings(self, localAvId):
retval = []
self.notify.debug('aimTimesList=%s' % self.aimTimesList)
for rank in xrange(len(self.rankingsList) + 1):
for avIndex in xrange(len(self.avIdList)):
if self.rankingsList[avIndex] == rank:
name = ' '
av = base.cr.doId2do.get(self.avIdList[avIndex])
if av:
name = av.getName()
text = '%d. ' % rank + ' ' + name
if GolfGlobals.TIME_TIE_BREAKER:
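                        # Format the aiming time as minutes'seconds''hundredths so ties are broken visibly in the ranking text.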
time = self.aimTimesList[avIndex]
minutes = int(time / 60)
time -= minutes * 60
seconds = int(time)
padding = (seconds < 10 and ['0'] or [''])[0]
time -= seconds
fraction = str(time)[2:4]
fraction = fraction + '0' * (2 - len(fraction))
timeStr = "%d'%s%d''%s" % (minutes,
padding,
seconds,
fraction)
text += ' - ' + timeStr
retval.append(text)
if self.avIdList[avIndex] == localAvId:
self.myPlace = rank
return retval
def calcHoleBestTextListForAllPlayers(self, localAvId):
retval = []
if GolfGlobals.CalcOtherHoleBest:
for hbPlayerIndex in xrange(len(self.avIdList)):
if self.avIdList[hbPlayerIndex] != localAvId:
av = base.cr.doId2do.get(self.avIdList[hbPlayerIndex])
name = av.getName()
for hbIndex in xrange(len(self.holeBestList[hbPlayerIndex])):
if self.holeBestList[hbPlayerIndex][hbIndex]:
hbName = TTLocalizer.GolfHoleNames[hbIndex]
text = TTLocalizer.GolfAvReceivesHoleBest % {'name': name,
'hole': hbName}
retval.append(text)
for hbPlayerIndex in xrange(len(self.avIdList)):
if self.avIdList[hbPlayerIndex] == localAvId:
av = base.cr.doId2do.get(self.avIdList[hbPlayerIndex])
name = av.getName()
for hbIndex in xrange(len(self.holeBestList[hbPlayerIndex])):
if self.holeBestList[hbPlayerIndex][hbIndex]:
hbName = TTLocalizer.GolfHoleNames[hbIndex]
text = TTLocalizer.GolfAvReceivesHoleBest % {'name': name,
'hole': hbName}
retval.append(text)
return retval
def calcCourseBestTextListForAllPlayers(self, localAvId):
retval = []
if GolfGlobals.CalcOtherCourseBest:
for cbPlayerIndex in xrange(len(self.avIdList)):
if self.avIdList[cbPlayerIndex] != localAvId:
av = base.cr.doId2do.get(self.avIdList[cbPlayerIndex])
name = av.getName()
                    for cbIndex in xrange(len(self.courseBestList[cbPlayerIndex])):
                        if self.courseBestList[cbPlayerIndex][cbIndex]:
cbName = TTLocalizer.GolfCourseNames[cbIndex]
text = TTLocalizer.GolfAvReceivesCourseBest % {'name': name,
'course': cbName}
retval.append(text)
for cbPlayerIndex in xrange(len(self.avIdList)):
if self.avIdList[cbPlayerIndex] == localAvId:
av = base.cr.doId2do.get(self.avIdList[cbPlayerIndex])
name = av.getName()
for cbIndex in xrange(len(self.courseBestList[cbPlayerIndex])):
if self.courseBestList[cbPlayerIndex][cbIndex]:
cbName = TTLocalizer.GolfCourseNames[cbIndex]
text = TTLocalizer.GolfAvReceivesCourseBest % {'name': name,
'course': cbName}
retval.append(text)
return retval
def createRewardMovie(self, localAvId):
retval = Sequence(name='Reward sequence', autoPause=1)
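        # The reward movie is a Panda3D Sequence: each award or ranking screen is appended as a Parallel of a label update and a colour-scale fade interval.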
self.trophy = None
def setTrophyLabelText(text, playerIndex, trophyIndex):
self.rankLabel.hide()
self.rewardLabel.hide()
self.trophy = GolfTrophy(level=self.trophyList[playerIndex][trophyIndex], parent=self.trophyLabel, pos=(1.3, 0, -0.25))
self.trophy.setScale(0.65, 1, 0.65)
self.trophy.show()
self.trophyLabel['text'] = text
def setRewardLabelText(text):
self.rewardLabel.show()
self.rankLabel.hide()
self.trophyLabel.hide()
if self.trophy:
self.trophy.hide()
self.rewardLabel['text'] = text
def setRankLabelText(text):
self.rankLabel.show()
self.rewardLabel.hide()
self.trophyLabel.hide()
if self.trophy:
self.trophy.hide()
self.rankLabel['text'] = text
if len(self.avIdList) > 1:
self.victory = base.loadSfx('phase_6/audio/sfx/KART_Applause_%d.ogg' % self.myPlace)
self.victory.play()
for avId in self.avIdList:
if avId != localAvId:
rewardTextList = self.calcTrophyTextListForOnePlayer(avId)
trophyIndex = 0
for rewardText in rewardTextList:
playerIndex = self.avIdList.index(avId)
var = (rewardText, playerIndex, trophyIndex)
oneTrophyIval = Parallel(Func(setTrophyLabelText, rewardText, playerIndex, trophyIndex), LerpColorScaleInterval(self.trophyLabel, 4, Vec4(1, 1, 1, 0), startColorScale=Vec4(1, 1, 1, 1), blendType='easeIn'))
trophyIndex = trophyIndex + 1
retval.append(oneTrophyIval)
rewardTextList = self.calcTrophyTextListForOnePlayer(localAvId)
trophyIndex = 0
playerIndex = self.avIdList.index(localAvId)
for rewardText in rewardTextList:
if len(rewardTextList) > 0:
var = (rewardText, playerIndex, trophyIndex)
oneRewardIval = Parallel(Func(setTrophyLabelText, rewardText, playerIndex, trophyIndex), LerpColorScaleInterval(self.trophyLabel, 4, Vec4(1, 1, 1, 0), startColorScale=Vec4(1, 1, 1, 1), blendType='easeIn'))
retval.append(oneRewardIval)
rewardCupList = self.calcCupTextListForAllPlayers(localAvId)
if len(rewardCupList) > 0:
for rewardText in rewardCupList:
oneCupIval = Parallel(Func(setRewardLabelText, rewardText), LerpColorScaleInterval(self.rewardLabel, 4, Vec4(1, 1, 1, 0), startColorScale=Vec4(1, 1, 1, 1), blendType='noBlend'))
retval.append(oneCupIval)
if self.tieBreakWinner:
name = ''
av = base.cr.doId2do.get(self.tieBreakWinner)
if av:
name = av.getName()
if GolfGlobals.TIME_TIE_BREAKER:
rewardText = TTLocalizer.GolfTimeTieBreakWinner % {'name': name}
else:
rewardText = TTLocalizer.GolfTieBreakWinner % {'name': name}
randomWinnerIval = Parallel(Func(setRewardLabelText, rewardText), LerpColorScaleInterval(self.rewardLabel, 7, Vec4(1, 1, 1, 0), startColorScale=Vec4(1, 1, 1, 1), blendType='noBlend'))
retval.append(randomWinnerIval)
rankings = self.calcRankings(localAvId)
rankText = TTLocalizer.GolfRanking + '\n'
for rank in xrange(len(rankings)):
rankText = rankText + rankings[rank] + '\n'
oneRankIval = Parallel(Func(setRankLabelText, rankText), LerpColorScaleInterval(self.rankLabel, 8, Vec4(1, 1, 1, 1), startColorScale=Vec4(1, 1, 1, 1), blendType='easeIn'))
retval.append(oneRankIval)
rewardHoleList = self.calcHoleBestTextListForAllPlayers(localAvId)
if len(rewardHoleList) > 0:
for rewardText in rewardHoleList:
oneHoleIval = Parallel(Func(setRewardLabelText, rewardText), LerpColorScaleInterval(self.rewardLabel, 8, Vec4(1, 1, 1, 0), startColorScale=Vec4(1, 1, 1, 1), blendType='easeIn'))
retval.append(oneHoleIval)
rewardCourseList = self.calcCourseBestTextListForAllPlayers(localAvId)
if len(rewardCourseList) > 0:
for rewardText in rewardCourseList:
oneCourseIval = Parallel(Func(setRewardLabelText, rewardText), LerpColorScaleInterval(self.rewardLabel, 4, Vec4(1, 1, 1, 0), startColorScale=Vec4(1, 1, 1, 1), blendType='easeIn'))
retval.append(oneCourseIval)
if self.endMovieCallback:
retval.append(Func(self.endMovieCallback))
return retval
def setup(self, localAvId):
self.rewardBoard = DirectFrame(parent=aspect2d, relief=None, geom=DGG.getDefaultDialogGeom(), geom_color=ToontownGlobals.GlobalDialogColor, geom_scale=(1.75, 1, 0.6), pos=(0, 0, -0.6))
self.rewardLabel = DirectLabel(parent=self.rewardBoard, relief=None, pos=(-0, 0, 0), text_align=TextNode.ACenter, text='', text_scale=0.05, text_wordwrap=30)
self.rankLabel = DirectLabel(parent=self.rewardBoard, relief=None, pos=(-0, 0, 0.17), text_align=TextNode.ACenter, text='', text_scale=0.06)
self.trophyLabel = DirectLabel(parent=self.rewardBoard, relief=None, pos=(-0.7, 0, 0.05), text_align=TextNode.ALeft, text='', text_scale=0.06, text_wordwrap=20)
self.movie = self.createRewardMovie(localAvId)
return
def delete(self):
self.movie.pause()
self.notify.debug('Movie is paused')
self.rewardBoard.destroy()
self.notify.debug('Reward board is destroyed')
self.movie = None
self.notify.debug('Movie is deleted')
return
def getMovie(self):
return self.movie
| apache-2.0 | 1,307,104,951,644,556,000 | 48.059041 | 225 | 0.581271 | false |
abreufernandof/NeuralNets | Brain.py | 1 | 24382 | """
Class: Brain.py
Description: This class summarizes the steps needed to build a neural network with, currently, the
             backpropagation algorithm.
Since: 03/11/2017
Author: Fernando Ferreira Abreu ([email protected])
"""
import Neuron
import pandas
import math
import matplotlib.pyplot
class Brain:
def __init__(self, dataset, dataset_outputs, bias, learning_tax):
""" Method: __init__
Returns: Nothing
Description: This method initialize the brain (how we'll call a neural network) creating the layers
and the neurons inside each one. The brain could have any hidden layers you want but
just one output layer.
Arguments:
                dataset             Self explanatory. Represents the dataset used by the brain. The
dataset should be a list of lists representing a matrix with
lines and columns. The algorithm will run over the lines of dataset
                                    presenting the columns to each neuron. Once inside the method, the
                                    dataset will be split into three parts: a training set with 50% of
original size, a validation set with 25% and a test set with 25%. So,
                                    it's good practice to have a number of samples that is a multiple of 4.
                dataset_outputs     Represents the outputs you want for each line of the training set. It must
                                    be a list of lists too, but it should have as many lines as the training set
                                    and as many columns as neurons in the output layer
bias Represents the Bias parameter of Neural Networks. The bias (also called
                                    threshold when it's positive) is a way to change the shape of a function.
                                    In this case, the bias will depend on your network, but it's a good principle
                                    to keep it between zero and one. The value could be any real number.
                learning_tax        It's a value to slow down or speed up the learning curve; this value
should be a real number between 0 and 1. You should be advised that
                                    greater values of learning tax make the network converge faster but
                                    smaller ones could make the network find more precise answers. Think about
                                    big dogs: they run faster than small dogs, but the latter can enter
                                    small holes in the ground that are impossible for the former.
Example: 1: Creating a Brain with a bias of -0,1 and learning tax of 0.1:
cortex = Brain.Brain(dataset, dataset_outputs, -0.1, 0.1)
"""
# Defining the length of Training, Validation and Test Sets.
self.Training_Set_Size = int(len(dataset) / 2)
self.Validation_Set_Size = int((len(dataset) / 4) * 3)
# Assigning the elements to the Sets.
self.Training_Set = dataset[:self.Training_Set_Size]
        self.Validation_Set = dataset[self.Training_Set_Size:self.Validation_Set_Size]
        self.Test_Set = dataset[self.Validation_Set_Size:]
# Assigning the elements to the Output Sets
self.Training_Set_Outputs = dataset_outputs[:self.Training_Set_Size]
        self.Validation_Set_Outputs = dataset_outputs[self.Training_Set_Size:self.Validation_Set_Size]
        self.Test_Set_Outputs = dataset_outputs[self.Validation_Set_Size:]
# Network variables
self.Bias = bias
self.Learning_Tax = learning_tax
# Creating the layers
self.Hidden_Layer = []
self.Output_Layer = []
# Creating the Output Storages
self.Hidden_Layer_Outputs = []
self.Output_Layer_Outputs = []
self.Training_Outputs = [[0 for x in self.Training_Set_Outputs[0]] for y in self.Training_Set_Outputs]
self.Validation_Outputs = [[0 for x in self.Validation_Set_Outputs[0]] for y in self.Validation_Set_Outputs]
self.Test_Outputs = [[0 for x in self.Test_Set_Outputs[0]] for y in self.Test_Set_Outputs]
self.Softmax_Outputs = []
# Creating the Error Storages
self.Hidden_Layer_Errors = []
self.Output_Layer_Errors = []
self.Training_Errors = [0 for x in self.Training_Set]
self.Training_Error_History = []
self.Validation_Errors = [0 for x in self.Validation_Set]
self.Validation_Error_History = []
self.Test_Errors = [0 for x in self.Test_Set]
self.Biggest_Validation_Error = 0
def set_hidden_layers(self, number_of_layers, number_of_neurons, activation_function, bias_proportion):
""" Method: set_hidden_layers
Returns: Nothing
        Description:    This method is used to set a list with the hidden layers. Internally, the
hidden layers are a list of lists.
Arguments: number_of_layers The number of hidden layers you want to make. This
                                                argument should be an integer number greater than 0
number_of_neurons The number of neurons set in each hidden layer. For
now, this number will be the same for all the hidden
                                                layers. In the future you may be able to set a different number
                                                for each one. This number should be an integer greater
than 0
activation_function Represents the neuron activation function executed after the sum
of inputs. It values should follow the values:
0 - Sigmoid Function
1 - Heavyside Function
2 - Hyperbolic Tangent Function
3 - Piece-Wise Linear Function
4 - Linear Function
5 - Sign Function
bias_proportion Normally, I set the bias progressively from the first
                                                layer. It means that the first layer has a smaller
                                                value of it than the last ones, so this parameter is
                                                multiplied by the bias set in the __init__ method. Its
                                                value should be a float number between 0 and 1, but
                                                nothing prevents a greater or smaller one.
Example: 1: Setting 2 hidden layers with 3 neurons each, heavyside function and 50%
of the brain bias:
cortex.set_hidden_layers(2,3,1,0.5)
"""
# Initializing the Hidden Layers
self.Hidden_Layer = [[Neuron.Neuron(len(self.Training_Set[0]), activation_function, 1, bias_proportion*self.Bias)
for x in range(number_of_neurons)]
for y in range(number_of_layers)]
# Initializing Output for this layers
self.Hidden_Layer_Outputs = [[0 for x in range(number_of_neurons)]
for y in range(number_of_layers)]
# Initializing Errors for this layers
self.Hidden_Layer_Errors = [[0 for x in range(number_of_neurons)]
for y in range(number_of_layers)]
def set_output_layer(self, number_of_neurons, activation_function, bias_proportion):
""" Method: set_output_layers
Returns: Nothing
Description: As the method before, this one is used to make layers, but in this case,
the output ones.
Arguments: number_of_neurons The number of neurons set in each hidden layer. For
now, this number will be the same for all the hidden
layers. In future, maybe you could set different number
for each one. This number should an integer greater
than 0
activation_function Represents the neuron activation function executed after the sum
of inputs. It values should follow the values:
0 - Sigmoid Function
1 - Heavyside Function
2 - Hyperbolic Tangent Function
3 - Piece-Wise Linear Function
4 - Linear Function
5 - Sign Function
bias_proportion Normally, I set the bias progressively from the first
                                        layer. It means that the first layer has a smaller
                                        value of it than the last ones, so this parameter is
                                        multiplied by the bias set in the __init__ method. Its
                                        value should be a float number between 0 and 1, but
                                        nothing prevents a greater or smaller one.
Example: 1: Setting the output layer with 2 neurons, sigmoid activation and 100% of
brain bias:
cortex.set_output_layers(2,0,1)
"""
# Initializing the Output Layer
self.Output_Layer = [Neuron.Neuron(len(self.Hidden_Layer[0]), activation_function, 0, bias_proportion*self.Bias)
for x in range(number_of_neurons)]
# Initializing Output for this layers
self.Output_Layer_Outputs = [0 for x in range(number_of_neurons)]
# Initializing Errors for this layers
self.Output_Layer_Errors = [0 for x in range(number_of_neurons)]
def start_converging(self, epoch_limit, trigger_limit):
""" Method: start_converging
Returns: Nothing
            Description:    This method synthesizes the training, validating and testing routines, with
some trigger mechanisms to avoid overfitting.
Arguments: epoch_limit How many times you want to run over the training set? This
parameter represents the maximum iterations the algorithm will
perform. It should be an integer greater than 0.
trigger_limit Generally, to avoid overfitting, we should keep training until
                                            the validation set error starts to rise, but sometimes the error
                                            curve has some fluctuations and a rise may not represent a big
                                            problem, because it'll fall again in a moment. Keeping this in mind, I
                                            wrote a simple rule to decide whether to stop or not:
                                            If the current error is greater than its predecessor: limit+1
                                            If the current error is smaller than its predecessor: limit-0.5
                                            So, the trigger limit is the value that, once reached, stops the algorithm;
                                            this way, greater trigger_limits make executions more permissive and
                                            smaller ones make them more rigid. This value should be an integer
greater than 0.
Example: 1: Start a convergence during 1000 epochs and with a trigger limit of 10:
cortex.start_converging(1000, 10)
"""
# Reseting epoch
epoch = 0
# make one line per epoch in the error histories
self.Training_Error_History = [0 for x in range(epoch_limit)]
self.Validation_Error_History = [0 for x in range(epoch_limit)]
limit = 0
while epoch < epoch_limit:
# Start the training, get the error, calculate the average and storage in history.
self.start_training()
self.Training_Error_History[epoch] = -1 * sum(self.Training_Errors) / len(self.Training_Errors)
# Start the validating, get the error, calculate the average and storage in history.
self.start_validating()
self.Validation_Error_History[epoch] = -1 * sum(self.Validation_Errors) / len(self.Validation_Errors)
# If it's not the first execution, let's calculate the raising limit
if epoch > 0:
# The raising limit is equals to the absolute values of Error[n] - Error[n-1], if
# it's positive, limit is added by 1
if abs(self.Validation_Error_History[epoch]) - abs(self.Validation_Error_History[epoch - 1]) > 0:
limit += 1
# and if it's not, limit falls by 0.5. The limit should never go below 0.
else:
limit -= 0.5
if limit < 0: limit = 0
# Let's print the information on screen
print ("Epoch:\t{}\tTraining Error:\t{:0.8f}\tValidating Error:\t{:0.8f}\tDanger Level:\t{:0.8f}"
.format(epoch, self.Training_Error_History[epoch], self.Validation_Error_History[epoch], limit))
# If the limit is greater than trigger_limit, stop training.
if limit >= trigger_limit:
break
epoch += 1
# Printing the answers
print ("Training Desires:\t{}".format(self.Training_Set_Outputs))
print ("Training Outputs:\t{}".format(self.Training_Outputs))
print ("Validating Desires:\t{}".format(self.Validation_Set_Outputs))
print ("Validating Outputs:\t{}".format(self.Validation_Outputs))
# Run over training set and show it's results on screen
self.start_testing()
print ("Testing Error:\t{:0.8f}\t".format(-1 * sum(self.Test_Errors) / len(self.Test_Errors)))
print ("Testing Desires:\t{}".format(self.Test_Set_Outputs))
print ("Testing Outputs:\t{}".format(self.Test_Outputs))
# print ("Softmax Outputs:\t{}".format(self.Softmax_Outputs))
# The results includes pandas graphs
te = pandas.Series(self.Training_Error_History)
ve = pandas.Series(self.Validation_Error_History)
data = pandas.DataFrame({'Training Error': te, 'Validating Error': ve})
data.plot(kind="area", title=["Training Error", "Validating Error"],
grid=True, subplots=True, legend=False, figsize=[10,7])
matplotlib.pyplot.show()
def start_training(self):
""" Method: start_training
Returns: Nothing
            Description:    Well, this method does all the black magic. Officially, it runs over the
                            training set, presenting the lines as inputs to the neurons in the first
                            hidden layer. From there, each hidden layer's outputs become the inputs of
                            the next one. The output layer then receives the last hidden layer's outputs,
                            calculates its own, and from there the error is calculated and the weights
                            are updated. With the error calculated, we run backwards through the layers
                            updating the weights until the first one. The algorithm stops when the
                            caller (see start_converging) reaches its epoch limit or trigger.
Example: 1: Setting the brain to train:
cortex.start_training()
"""
# Here we're coming inside the training set. If was the n-th time
# you pass here, it's the n-th iteration over the Training Set.
# 'a' represents the Training Set index
self.Training_Errors = [0 for x in self.Training_Set]
for a in range(len(self.Training_Set)):
# Here we're running over the hidden layers
# 'b' represent the layer index
for b in range(len(self.Hidden_Layer)):
# Here we're running over the neurons in the layers
# 'c' represents the neuron index
for c in range(len(self.Hidden_Layer[b])):
if b == 0:
self.Hidden_Layer[b][c].initialize_inputs(self.Training_Set[a])
self.Hidden_Layer[b][c].get_sum()
self.Hidden_Layer_Outputs[b][c] = self.Hidden_Layer[b][c].get_output()
else:
self.Hidden_Layer[b][c].initialize_inputs(self.Hidden_Layer_Outputs[b-1])
self.Hidden_Layer[b][c].get_sum()
self.Hidden_Layer_Outputs[b][c] = self.Hidden_Layer[b][c].get_output()
# Here we're running over the output layer
# 'd' represents the neuron index
for d in range(len(self.Output_Layer)):
self.Output_Layer[d].initialize_inputs(self.Hidden_Layer_Outputs[-1])
self.Output_Layer[d].get_sum()
self.Output_Layer_Outputs[d] = self.Output_Layer[d].get_output()
self.Training_Outputs[a][d] = self.Output_Layer_Outputs[d]
self.Output_Layer_Errors[d] = self.Output_Layer[d].get_error(0, self.Training_Set_Outputs[a][d])
#self.Training_Errors[a] += (self.Training_Set_Outputs[a][d] - self.Output_Layer_Outputs[d]) ** 2
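                # Accumulate the (base-10) log-likelihood per sample; start_converging negates the average, giving a cross-entropy style error.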
self.Training_Errors[a] += (self.Training_Set_Outputs[a][d]*math.log10(self.Output_Layer_Outputs[d])+(1-self.Training_Set_Outputs[a][d])*math.log10(1-self.Output_Layer_Outputs[d]))
self.Output_Layer[d].update_weights(0, self.Learning_Tax)
# We're updating the hidden layers now. Notice that we should pass backwards, from
# last to first, so, we're using [-(e+1)] indexes.
# '[-(e+1)]' represents the layers index.
for e in range(len(self.Hidden_Layer)):
for f in range(len(self.Hidden_Layer[-(e+1)])):
if e == 0:
self.Hidden_Layer_Errors[-(e + 1)][f] = self.Hidden_Layer[-(e + 1)][f].get_error(0, self.Output_Layer_Errors)
self.Hidden_Layer[-(e + 1)][f].update_weights(0, self.Learning_Tax)
else:
self.Hidden_Layer[-(e + 1)][f].get_error(0, self.Hidden_Layer_Errors[-e])
self.Hidden_Layer[-(e + 1)][f].update_weights(0, self.Learning_Tax)
def start_validating(self):
""" Method: start_validating
Returns: Nothing
            Description:    Well, like its predecessor, this method runs over the validation set,
                            called from the start_converging method. Unlike training, however, the
                            validation set is read only once per call and the neuron weights are not updated here.
Example: 1: Setting the brain to validate the training:
cortex.start_validating()
"""
self.Validation_Errors = [0 for x in self.Validation_Set]
for a in range(len(self.Validation_Set)):
# Here we're running over the hidden layers
# 'b' represent the layer index
for b in range(len(self.Hidden_Layer)):
# Here we're running over the neurons in the layers
# 'c' represents the neuron index
for c in range(len(self.Hidden_Layer[b])):
if b == 0:
self.Hidden_Layer[b][c].initialize_inputs(self.Validation_Set[a])
self.Hidden_Layer[b][c].get_sum()
self.Hidden_Layer_Outputs[b][c] = self.Hidden_Layer[b][c].get_output()
else:
self.Hidden_Layer[b][c].initialize_inputs(self.Hidden_Layer_Outputs[b - 1])
self.Hidden_Layer[b][c].get_sum()
self.Hidden_Layer_Outputs[b][c] = self.Hidden_Layer[b][c].get_output()
# Here we're running over the output layer
# 'd' represents the neuron index
for d in range(len(self.Output_Layer)):
self.Output_Layer[d].initialize_inputs(self.Hidden_Layer_Outputs[-1])
self.Output_Layer[d].get_sum()
self.Output_Layer_Outputs[d] = self.Output_Layer[d].get_output()
self.Validation_Outputs[a][d] = self.Output_Layer_Outputs[d]
#self.Validation_Errors[a] += (self.Validation_Set_Outputs[a][d] - self.Output_Layer_Outputs[d]) ** 2
self.Validation_Errors[a] += (self.Validation_Set_Outputs[a][d] * math.log10(self.Output_Layer_Outputs[d]) + (1 - self.Validation_Set_Outputs[a][d]) * math.log10(1 - self.Output_Layer_Outputs[d]))
return self.Validation_Errors, self.Validation_Outputs
def start_testing(self):
""" Method: start_testing
Returns: Nothing
            Description:    Identical to the training and validating methods, but using the testing set instead.
Example: 1: Setting the brain to validate the testing:
cortex.start_testing()
"""
self.Test_Errors = [0 for x in self.Test_Set]
for a in range(len(self.Test_Set)):
# Here we're running over the hidden layers
# 'b' represent the layer index
for b in range(len(self.Hidden_Layer)):
# Here we're running over the neurons in the layers
# 'c' represents the neuron index
for c in range(len(self.Hidden_Layer[b])):
if b == 0:
self.Hidden_Layer[b][c].initialize_inputs(self.Test_Set[a])
self.Hidden_Layer[b][c].get_sum()
self.Hidden_Layer_Outputs[b][c] = self.Hidden_Layer[b][c].get_output()
else:
self.Hidden_Layer[b][c].initialize_inputs(self.Hidden_Layer_Outputs[b - 1])
self.Hidden_Layer[b][c].get_sum()
self.Hidden_Layer_Outputs[b][c] = self.Hidden_Layer[b][c].get_output()
# Here we're running over the output layer
# 'd' represents the neuron index
for d in range(len(self.Output_Layer)):
self.Output_Layer[d].initialize_inputs(self.Hidden_Layer_Outputs[-1])
self.Output_Layer[d].get_sum()
self.Output_Layer_Outputs[d] = self.Output_Layer[d].get_output()
self.Test_Outputs[a][d] = self.Output_Layer_Outputs[d]
#self.Test_Errors[a] += (self.Test_Set_Outputs[a][d] - self.Output_Layer_Outputs[d]) ** 2
self.Test_Errors[a] += (
self.Test_Set_Outputs[a][d] * math.log10(self.Output_Layer_Outputs[d]) + (
1 - self.Test_Set_Outputs[a][d]) * math.log10(1 - self.Output_Layer_Outputs[d]))
return self.Test_Errors, self.Test_Outputs
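# Minimal usage sketch (illustrative only; `dataset` and `dataset_outputs` are assumed to be
# lists of rows shaped as described in Brain.__init__):
#   cortex = Brain.Brain(dataset, dataset_outputs, -0.1, 0.1)
#   cortex.set_hidden_layers(2, 3, 0, 0.5)
#   cortex.set_output_layer(1, 0, 1)
#   cortex.start_converging(1000, 10)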
| gpl-3.0 | 2,170,896,560,524,211,200 | 57.190931 | 212 | 0.534206 | false |
openstack/oslo.vmware | oslo_vmware/service.py | 1 | 18788 | # Copyright (c) 2014-2020 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common classes that provide access to vSphere services.
"""
import http.client as httplib
import io
import logging
import netaddr
from oslo_utils import timeutils
from oslo_utils import uuidutils
import requests
import suds
from suds import cache
from suds import client
from suds import plugin
import suds.sax.element as element
from suds import transport
from oslo_vmware._i18n import _
from oslo_vmware import exceptions
from oslo_vmware import vim_util
CACHE_TIMEOUT = 60 * 60 # One hour cache timeout
ADDRESS_IN_USE_ERROR = 'Address already in use'
CONN_ABORT_ERROR = 'Software caused connection abort'
RESP_NOT_XML_ERROR = 'Response is "text/html", not "text/xml"'
SERVICE_INSTANCE = 'ServiceInstance'
LOG = logging.getLogger(__name__)
class ServiceMessagePlugin(plugin.MessagePlugin):
"""Suds plug-in handling some special cases while calling VI SDK."""
# list of XML elements which are allowed to be empty
EMPTY_ELEMENTS = ["VirtualMachineEmptyProfileSpec"]
def add_attribute_for_value(self, node):
"""Helper to handle AnyType.
Suds does not handle AnyType properly. But VI SDK requires type
attribute to be set when AnyType is used.
:param node: XML value node
"""
if node.name == 'value' or node.name == 'val':
node.set('xsi:type', 'xsd:string')
# removeKey may be a 'int' or a 'string'
if node.name == 'removeKey':
try:
int(node.text)
node.set('xsi:type', 'xsd:int')
except (ValueError, TypeError):
node.set('xsi:type', 'xsd:string')
def prune(self, el):
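        # Recursively strip empty child elements, keeping those the VI SDK requires to be present even when empty.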
pruned = []
for c in el.children:
self.prune(c)
if c.isempty(False) and c.name not in self.EMPTY_ELEMENTS:
pruned.append(c)
for p in pruned:
el.children.remove(p)
def marshalled(self, context):
"""Modifies the envelope document before it is sent.
This method provides the plug-in with the opportunity to prune empty
nodes and fix nodes before sending it to the server.
:param context: send context
"""
# Suds builds the entire request object based on the WSDL schema.
# VI SDK throws server errors if optional SOAP nodes are sent
# without values; e.g., <test/> as opposed to <test>test</test>.
self.prune(context.envelope)
context.envelope.walk(self.add_attribute_for_value)
class Response(io.BytesIO):
"""Response with an input stream as source."""
def __init__(self, stream, status=200, headers=None):
self.status = status
self.headers = headers or {}
self.reason = requests.status_codes._codes.get(
status, [''])[0].upper().replace('_', ' ')
io.BytesIO.__init__(self, stream)
@property
def _original_response(self):
return self
@property
def msg(self):
return self
def read(self, chunk_size, **kwargs):
return io.BytesIO.read(self, chunk_size)
def info(self):
return self
def get_all(self, name, default):
result = self.headers.get(name)
if not result:
return default
return [result]
def getheaders(self, name):
return self.get_all(name, [])
def release_conn(self):
self.close()
class LocalFileAdapter(requests.adapters.HTTPAdapter):
"""Transport adapter for local files.
See http://stackoverflow.com/a/22989322
"""
def __init__(self, pool_maxsize=10):
super(LocalFileAdapter, self).__init__(pool_connections=pool_maxsize,
pool_maxsize=pool_maxsize)
def _build_response_from_file(self, request):
file_path = request.url[7:]
with open(file_path, 'rb') as f:
file_content = f.read()
buff = bytearray(file_content.decode(), "utf-8")
resp = Response(buff)
return self.build_response(request, resp)
def send(self, request, stream=False, timeout=None,
verify=True, cert=None, proxies=None):
"""Sends request for a local file."""
return self._build_response_from_file(request)
class RequestsTransport(transport.Transport):
def __init__(self, cacert=None, insecure=True, pool_maxsize=10,
connection_timeout=None):
transport.Transport.__init__(self)
# insecure flag is used only if cacert is not
# specified.
self.verify = cacert if cacert else not insecure
self.session = requests.Session()
self.session.mount('file:///',
LocalFileAdapter(pool_maxsize=pool_maxsize))
self.session.mount('https://', requests.adapters.HTTPAdapter(
pool_connections=pool_maxsize, pool_maxsize=pool_maxsize))
self.cookiejar = self.session.cookies
self._connection_timeout = connection_timeout
def open(self, request):
resp = self.session.get(request.url, verify=self.verify)
return io.BytesIO(resp.content)
def send(self, request):
resp = self.session.post(request.url,
data=request.message,
headers=request.headers,
verify=self.verify,
timeout=self._connection_timeout)
return transport.Reply(resp.status_code, resp.headers, resp.content)
class MemoryCache(cache.ObjectCache):
def __init__(self):
self._cache = {}
def get(self, key):
"""Retrieves the value for a key or None."""
now = timeutils.utcnow_ts()
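        # Purge any expired entries before the lookup so stale values are never returned.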
for k in list(self._cache):
(timeout, _value) = self._cache[k]
if timeout and now >= timeout:
del self._cache[k]
return self._cache.get(key, (0, None))[1]
def put(self, key, value, time=CACHE_TIMEOUT):
"""Sets the value for a key."""
timeout = 0
if time != 0:
timeout = timeutils.utcnow_ts() + time
self._cache[key] = (timeout, value)
return True
_CACHE = MemoryCache()
class CompatibilitySudsClient(client.Client):
"""suds client with added cookiejar attribute
The cookiejar properties allow reading/setting the cookiejar used by the
underlying transport.
"""
def __init__(self, *args, **kwargs):
super(CompatibilitySudsClient, self).__init__(*args, **kwargs)
@property
def cookiejar(self):
return self.options.transport.cookiejar
@cookiejar.setter
def cookiejar(self, cookies):
self.options.transport.session.cookies = cookies
self.options.transport.cookiejar = cookies
class Service(object):
"""Base class containing common functionality for invoking vSphere
services
"""
def __init__(self, wsdl_url=None, soap_url=None,
cacert=None, insecure=True, pool_maxsize=10,
connection_timeout=None, op_id_prefix='oslo.vmware'):
self.wsdl_url = wsdl_url
self.soap_url = soap_url
self.op_id_prefix = op_id_prefix
LOG.debug("Creating suds client with soap_url='%s' and wsdl_url='%s'",
self.soap_url, self.wsdl_url)
transport = RequestsTransport(cacert=cacert,
insecure=insecure,
pool_maxsize=pool_maxsize,
connection_timeout=connection_timeout)
self.client = CompatibilitySudsClient(self.wsdl_url,
transport=transport,
location=self.soap_url,
plugins=[ServiceMessagePlugin()],
cache=_CACHE)
self._service_content = None
self._vc_session_cookie = None
@staticmethod
def build_base_url(protocol, host, port):
proto_str = '%s://' % protocol
host_str = '[%s]' % host if netaddr.valid_ipv6(host) else host
port_str = '' if port is None else ':%d' % port
return proto_str + host_str + port_str
@staticmethod
def _retrieve_properties_ex_fault_checker(response):
"""Checks the RetrievePropertiesEx API response for errors.
Certain faults are sent in the SOAP body as a property of missingSet.
This method raises VimFaultException when a fault is found in the
response.
:param response: response from RetrievePropertiesEx API call
:raises: VimFaultException
"""
fault_list = []
details = {}
if not response:
# This is the case when the session has timed out. ESX SOAP
# server sends an empty RetrievePropertiesExResponse. Normally
# missingSet in the response objects has the specifics about
# the error, but that's not the case with a timed out idle
# session. It is as bad as a terminated session for we cannot
# use the session. Therefore setting fault to NotAuthenticated
# fault.
LOG.debug("RetrievePropertiesEx API response is empty; setting "
"fault to %s.",
exceptions.NOT_AUTHENTICATED)
fault_list = [exceptions.NOT_AUTHENTICATED]
else:
for obj_cont in response.objects:
if hasattr(obj_cont, 'missingSet'):
for missing_elem in obj_cont.missingSet:
f_type = missing_elem.fault.fault
f_name = f_type.__class__.__name__
fault_list.append(f_name)
if f_name == exceptions.NO_PERMISSION:
details['object'] = \
vim_util.get_moref_value(f_type.object)
details['privilegeId'] = f_type.privilegeId
if fault_list:
fault_string = _("Error occurred while calling "
"RetrievePropertiesEx.")
raise exceptions.VimFaultException(fault_list,
fault_string,
details=details)
def _set_soap_headers(self, op_id):
"""Set SOAP headers for the next remote call to vCenter.
SOAP headers may include operation ID and vcSessionCookie.
The operation ID is a random string which allows to correlate log
messages across different systems (OpenStack, vCenter, ESX).
vcSessionCookie is needed when making PBM calls.
"""
headers = []
if self._vc_session_cookie:
elem = element.Element('vcSessionCookie').setText(
self._vc_session_cookie)
headers.append(elem)
if op_id:
elem = element.Element('operationID').setText(op_id)
headers.append(elem)
if headers:
self.client.set_options(soapheaders=headers)
@property
def service_content(self):
if self._service_content is None:
self._service_content = self.retrieve_service_content()
return self._service_content
def get_http_cookie(self):
"""Return the vCenter session cookie."""
cookies = self.client.cookiejar
for cookie in cookies:
if cookie.name.lower() == 'vmware_soap_session':
return cookie.value
def __getattr__(self, attr_name):
"""Returns the method to invoke API identified by param attr_name."""
def request_handler(managed_object, **kwargs):
"""Handler for vSphere API calls.
Invokes the API and parses the response for fault checking and
other errors.
:param managed_object: managed object reference argument of the
API call
:param kwargs: keyword arguments of the API call
:returns: response of the API call
:raises: VimException, VimFaultException, VimAttributeException,
VimSessionOverLoadException, VimConnectionException
"""
try:
if isinstance(managed_object, str):
# For strings, use string value for value and type
# of the managed object.
managed_object = vim_util.get_moref(managed_object,
managed_object)
if managed_object is None:
return
skip_op_id = kwargs.pop('skip_op_id', False)
op_id = None
if not skip_op_id:
# Generate opID. It will appear in vCenter and ESX logs for
# this particular remote call.
op_id = '%s-%s' % (self.op_id_prefix,
uuidutils.generate_uuid())
LOG.debug('Invoking %s.%s with opID=%s',
vim_util.get_moref_type(managed_object),
attr_name,
op_id)
self._set_soap_headers(op_id)
request = getattr(self.client.service, attr_name)
response = request(managed_object, **kwargs)
if (attr_name.lower() == 'retrievepropertiesex'):
Service._retrieve_properties_ex_fault_checker(response)
return response
except exceptions.VimFaultException:
# Catch the VimFaultException that is raised by the fault
# check of the SOAP response.
raise
except suds.WebFault as excep:
fault_string = None
if excep.fault:
fault_string = excep.fault.faultstring
doc = excep.document
detail = None
if doc is not None:
detail = doc.childAtPath('/detail')
if not detail:
# NOTE(arnaud): this is needed with VC 5.1
detail = doc.childAtPath('/Envelope/Body/Fault/detail')
fault_list = []
details = {}
if detail:
for fault in detail.getChildren():
fault_type = fault.get('type')
# NOTE(vbala): PBM faults use vim25 namespace. Also,
# PBM APIs throw NotAuthenticated in vSphere 6.5 for
# session expiry.
if (fault_type.endswith(exceptions.SECURITY_ERROR) or
fault_type.endswith(
exceptions.NOT_AUTHENTICATED)):
fault_type = exceptions.NOT_AUTHENTICATED
fault_list.append(fault_type)
for child in fault.getChildren():
details[child.name] = child.getText()
raise exceptions.VimFaultException(fault_list, fault_string,
excep, details)
except AttributeError as excep:
raise exceptions.VimAttributeException(
_("No such SOAP method %s.") % attr_name, excep)
except (httplib.CannotSendRequest,
httplib.ResponseNotReady,
httplib.CannotSendHeader) as excep:
raise exceptions.VimSessionOverLoadException(
_("httplib error in %s.") % attr_name, excep)
except requests.RequestException as excep:
raise exceptions.VimConnectionException(
_("requests error in %s.") % attr_name, excep)
except Exception as excep:
# TODO(vbala) should catch specific exceptions and raise
# appropriate VimExceptions.
# Socket errors which need special handling; some of these
# might be caused by server API call overload.
if (str(excep).find(ADDRESS_IN_USE_ERROR) != -1 or
str(excep).find(CONN_ABORT_ERROR)) != -1:
raise exceptions.VimSessionOverLoadException(
_("Socket error in %s.") % attr_name, excep)
# Type error which needs special handling; it might be caused
# by server API call overload.
elif str(excep).find(RESP_NOT_XML_ERROR) != -1:
raise exceptions.VimSessionOverLoadException(
_("Type error in %s.") % attr_name, excep)
else:
raise exceptions.VimException(
_("Exception in %s.") % attr_name, excep)
return request_handler
def __repr__(self):
return "vSphere object"
def __str__(self):
return "vSphere object"
class SudsLogFilter(logging.Filter):
"""Filter to mask/truncate vCenter credentials in suds logs."""
def filter(self, record):
if not hasattr(record.msg, 'childAtPath'):
return True
# Suds will log vCenter credentials if SessionManager.Login or
# SessionManager.SessionIsActive fails.
login = (record.msg.childAtPath('/Envelope/Body/Login') or
record.msg.childAtPath('/Envelope/Body/SessionIsActive'))
if login is None:
return True
if login.childAtPath('userName') is not None:
login.childAtPath('userName').setText('***')
if login.childAtPath('password') is not None: # nosec
login.childAtPath('password').setText('***') # nosec
session_id = login.childAtPath('sessionID')
if session_id is not None:
session_id.setText(session_id.getText()[-5:])
return True
# Set log filter to mask/truncate vCenter credentials in suds logs.
suds.client.log.addFilter(SudsLogFilter())
| apache-2.0 | 6,293,512,933,116,968,000 | 37.818182 | 79 | 0.568288 | false |
dbiesecke/dbiesecke.github.io | repo/script.module.urlresolver/lib/urlresolver/lib/kodi.py | 1 | 9725 | """
URLResolver Addon for Kodi
Copyright (C) 2016 t0mm0, tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import xbmcaddon
import xbmcplugin
import xbmcgui
import xbmc
import xbmcvfs
import urllib
import urlparse
import sys
import os
import re
import time
import strings
import CustomProgressDialog
addon = xbmcaddon.Addon('script.module.urlresolver')
get_setting = addon.getSetting
show_settings = addon.openSettings
sleep = xbmc.sleep
_log = xbmc.log
def get_path():
return addon.getAddonInfo('path').decode('utf-8')
def get_profile():
return addon.getAddonInfo('profile').decode('utf-8')
def translate_path(path):
return xbmc.translatePath(path).decode('utf-8')
def set_setting(id, value):
if not isinstance(value, basestring):
value = str(value)
addon.setSetting(id, value)
def get_version():
return addon.getAddonInfo('version')
def get_id():
return addon.getAddonInfo('id')
def get_name():
return addon.getAddonInfo('name')
def open_settings():
return addon.openSettings()
def get_keyboard(heading, default=''):
keyboard = xbmc.Keyboard()
keyboard.setHeading(heading)
if default: keyboard.setDefault(default)
keyboard.doModal()
if keyboard.isConfirmed():
return keyboard.getText()
else:
return None
def i18n(string_id):
try:
return addon.getLocalizedString(strings.STRINGS[string_id]).encode('utf-8', 'ignore')
except Exception as e:
_log('Failed String Lookup: %s (%s)' % (string_id, e))
return string_id
def get_plugin_url(queries):
try:
query = urllib.urlencode(queries)
except UnicodeEncodeError:
for k in queries:
if isinstance(queries[k], unicode):
queries[k] = queries[k].encode('utf-8')
query = urllib.urlencode(queries)
return sys.argv[0] + '?' + query
def end_of_directory(cache_to_disc=True):
xbmcplugin.endOfDirectory(int(sys.argv[1]), cacheToDisc=cache_to_disc)
def set_content(content):
xbmcplugin.setContent(int(sys.argv[1]), content)
def create_item(queries, label, thumb='', fanart='', is_folder=None, is_playable=None, total_items=0, menu_items=None, replace_menu=False):
list_item = xbmcgui.ListItem(label, iconImage=thumb, thumbnailImage=thumb)
add_item(queries, list_item, fanart, is_folder, is_playable, total_items, menu_items, replace_menu)
def add_item(queries, list_item, fanart='', is_folder=None, is_playable=None, total_items=0, menu_items=None, replace_menu=False):
if menu_items is None: menu_items = []
if is_folder is None:
is_folder = False if is_playable else True
if is_playable is None:
playable = 'false' if is_folder else 'true'
else:
playable = 'true' if is_playable else 'false'
liz_url = get_plugin_url(queries)
if fanart: list_item.setProperty('fanart_image', fanart)
list_item.setInfo('video', {'title': list_item.getLabel()})
list_item.setProperty('isPlayable', playable)
list_item.addContextMenuItems(menu_items, replaceItems=replace_menu)
xbmcplugin.addDirectoryItem(int(sys.argv[1]), liz_url, list_item, isFolder=is_folder, totalItems=total_items)
def parse_query(query):
q = {'mode': 'main'}
if query.startswith('?'): query = query[1:]
queries = urlparse.parse_qs(query)
for key in queries:
if len(queries[key]) == 1:
q[key] = queries[key][0]
else:
q[key] = queries[key]
return q
def notify(header=None, msg='', duration=2000, sound=None):
if header is None: header = get_name()
if sound is None: sound = get_setting('mute_notifications') == 'false'
icon_path = os.path.join(get_path(), 'icon.png')
try:
xbmcgui.Dialog().notification(header, msg, icon_path, duration, sound)
except:
builtin = "XBMC.Notification(%s,%s, %s, %s)" % (header, msg, duration, icon_path)
xbmc.executebuiltin(builtin)
def close_all():
xbmc.executebuiltin('Dialog.Close(all)')
def get_current_view():
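    # Work out which view-mode ID the current skin uses for the video
    # navigation window: read the skin's addon.xml to find its resolution
    # folder, pull the list of view IDs out of that folder's MyVideoNav.xml,
    # and return the first view the skin actually exposes a control for.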
skinPath = translate_path('special://skin/')
xml = os.path.join(skinPath, 'addon.xml')
f = xbmcvfs.File(xml)
read = f.read()
f.close()
try:
src = re.search('defaultresolution="([^"]+)', read, re.DOTALL).group(1)
except:
src = re.search('<res.+?folder="([^"]+)', read, re.DOTALL).group(1)
src = os.path.join(skinPath, src, 'MyVideoNav.xml')
f = xbmcvfs.File(src)
read = f.read()
f.close()
match = re.search('<views>([^<]+)', read, re.DOTALL)
if match:
views = match.group(1)
for view in views.split(','):
if xbmc.getInfoLabel('Control.GetLabel(%s)' % view):
return view
class WorkingDialog(object):
def __init__(self):
xbmc.executebuiltin('ActivateWindow(busydialog)')
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
xbmc.executebuiltin('Dialog.Close(busydialog)')
def has_addon(addon_id):
return xbmc.getCondVisibility('System.HasAddon(%s)' % addon_id) == 1
class ProgressDialog(object):
def __init__(self, heading, line1='', line2='', line3='', background=False, active=True, timer=0, custom=False):
self.begin = time.time()
self.timer = timer
self.background = background
self.custom = custom
self.heading = heading
if active and not timer:
self.pd = self.__create_dialog(line1, line2, line3)
self.pd.update(0)
else:
self.pd = None
def __create_dialog(self, line1, line2, line3):
if self.background:
pd = xbmcgui.DialogProgressBG()
msg = line1 + line2 + line3
pd.create(self.heading, msg)
else:
if self.custom:
pd = CustomProgressDialog.ProgressDialog()
else:
pd = xbmcgui.DialogProgress()
pd.create(self.heading, line1, line2, line3)
return pd
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if self.pd is not None:
self.pd.close()
del self.pd
def is_canceled(self):
if self.pd is not None and not self.background:
return self.pd.iscanceled()
else:
return False
def update(self, percent, line1='', line2='', line3=''):
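        # The dialog is created lazily: when a timer was given, it is only
        # popped up once that many seconds have elapsed since construction,
        # so short operations never flash a progress dialog at all.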
if self.pd is None and self.timer and (time.time() - self.begin) >= self.timer:
self.pd = self.__create_dialog(line1, line2, line3)
if self.pd is not None:
if self.background:
msg = line1 + line2 + line3
self.pd.update(percent, self.heading, msg)
else:
self.pd.update(percent, line1, line2, line3)
class CountdownDialog(object):
__INTERVALS = 5
def __init__(self, heading, line1='', line2='', line3='', active=True, countdown=60, interval=5, custom=False):
self.heading = heading
self.countdown = countdown
self.custom = custom
self.interval = interval
self.line3 = line3
if active:
if self.custom:
pd = CustomProgressDialog.ProgressDialog()
else:
pd = xbmcgui.DialogProgress()
if not self.line3: line3 = 'Expires in: %s seconds' % countdown
pd.create(self.heading, line1, line2, line3)
pd.update(100)
self.pd = pd
else:
self.pd = None
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if self.pd is not None:
self.pd.close()
del self.pd
def start(self, func, args=None, kwargs=None):
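        # Call func immediately, then keep polling it until it returns a
        # truthy value or the countdown runs out, draining the progress bar
        # as time passes.  Returns the first truthy result, or None if the
        # user cancels or the countdown expires.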
if args is None: args = []
if kwargs is None: kwargs = {}
result = func(*args, **kwargs)
if result:
return result
if self.pd is not None:
start = time.time()
expires = time_left = self.countdown
interval = self.interval
while time_left > 0:
for _ in range(CountdownDialog.__INTERVALS):
sleep(interval * 1000 / CountdownDialog.__INTERVALS)
if self.is_canceled(): return
time_left = expires - int(time.time() - start)
if time_left < 0: time_left = 0
progress = time_left * 100 / expires
line3 = 'Expires in: %s seconds' % time_left if not self.line3 else ''
self.update(progress, line3=line3)
result = func(*args, **kwargs)
if result:
return result
def is_canceled(self):
if self.pd is None:
return False
else:
return self.pd.iscanceled()
def update(self, percent, line1='', line2='', line3=''):
if self.pd is not None:
self.pd.update(percent, line1, line2, line3)
| mit | -4,834,126,515,700,619,000 | 30.070288 | 139 | 0.601028 | false |
lcy-seso/models | fluid/face_detection/widerface_eval.py | 1 | 11459 | import os
import time
import numpy as np
import argparse
import functools
from PIL import Image
import paddle.fluid as fluid
import reader
from pyramidbox import PyramidBox
from visualize import draw_bboxes
from utility import add_arguments, print_arguments
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('use_gpu', bool, True, "Whether use GPU or not.")
add_arg('use_pyramidbox', bool, True, "Whether use PyramidBox model.")
add_arg('data_dir', str, 'data/WIDER_val/images/', "The validation dataset path.")
add_arg('model_dir', str, '', "The model path.")
add_arg('pred_dir', str, 'pred', "The path to save the evaluation results.")
add_arg('file_list', str, 'data/wider_face_split/wider_face_val_bbx_gt.txt', "The validation dataset path.")
add_arg('infer', bool, False, "Whether do infer or eval.")
add_arg('confs_threshold', float, 0.15, "Confidence threshold to draw bbox.")
add_arg('image_path', str, '', "The image used to inference and visualize.")
# yapf: enable
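# Example invocations (a sketch, not from the original source; the paths are
# placeholders only):
#   python widerface_eval.py --model_dir=path/to/trained_model --pred_dir=pred
#   python widerface_eval.py --model_dir=path/to/trained_model --infer=True \
#       --image_path=path/to/image.jpg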
def infer(args, config):
model_dir = args.model_dir
pred_dir = args.pred_dir
if not os.path.exists(model_dir):
raise ValueError("The model path [%s] does not exist." % (model_dir))
if args.infer:
image_path = args.image_path
image = Image.open(image_path)
if image.mode == 'L':
            image = image.convert('RGB')
shrink, max_shrink = get_shrink(image.size[1], image.size[0])
det0 = detect_face(image, shrink)
det1 = flip_test(image, shrink)
[det2, det3] = multi_scale_test(image, max_shrink)
det4 = multi_scale_test_pyramid(image, max_shrink)
det = np.row_stack((det0, det1, det2, det3, det4))
dets = bbox_vote(det)
keep_index = np.where(dets[:, 4] >= args.confs_threshold)[0]
dets = dets[keep_index, :]
draw_bboxes(image_path, dets[:, 0:4])
else:
test_reader = reader.test(config, args.file_list)
for image, image_path in test_reader():
shrink, max_shrink = get_shrink(image.size[1], image.size[0])
det0 = detect_face(image, shrink)
det1 = flip_test(image, shrink)
[det2, det3] = multi_scale_test(image, max_shrink)
det4 = multi_scale_test_pyramid(image, max_shrink)
det = np.row_stack((det0, det1, det2, det3, det4))
dets = bbox_vote(det)
save_widerface_bboxes(image_path, dets, pred_dir)
print("Finish evaluation.")
def save_widerface_bboxes(image_path, bboxes_scores, output_dir):
"""
Save predicted results, including bbox and score into text file.
Args:
image_path (string): file name.
        bboxes_scores (np.array|list): the predicted bboxes and scores, layout
is (xmin, ymin, xmax, ymax, score)
output_dir (string): output directory.
"""
image_name = image_path.split('/')[-1]
image_class = image_path.split('/')[-2]
image_name = image_name.encode('utf-8')
image_class = image_class.encode('utf-8')
odir = os.path.join(output_dir, image_class)
if not os.path.exists(odir):
os.makedirs(odir)
ofname = os.path.join(odir, '%s.txt' % (image_name[:-4]))
f = open(ofname, 'w')
f.write('{:s}\n'.format(image_class + '/' + image_name))
f.write('{:d}\n'.format(bboxes_scores.shape[0]))
for box_score in bboxes_scores:
xmin, ymin, xmax, ymax, score = box_score
f.write('{:.1f} {:.1f} {:.1f} {:.1f} {:.3f}\n'.format(xmin, ymin, (
xmax - xmin + 1), (ymax - ymin + 1), score))
f.close()
print("The predicted result is saved as {}".format(ofname))
def detect_face(image, shrink):
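    # Run a single forward pass: optionally rescale the image by `shrink`,
    # convert it to CHW BGR, subtract the per-channel means and apply the
    # scale factor, feed it through the network (via the module-level
    # exe/infer_program/fetches set up in __main__), and map the detected
    # boxes back to original-image coordinates.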
image_shape = [3, image.size[1], image.size[0]]
if shrink != 1:
h, w = int(image_shape[1] * shrink), int(image_shape[2] * shrink)
image = image.resize((w, h), Image.ANTIALIAS)
image_shape = [3, h, w]
img = np.array(image)
img = reader.to_chw_bgr(img)
mean = [104., 117., 123.]
scale = 0.007843
img = img.astype('float32')
img -= np.array(mean)[:, np.newaxis, np.newaxis].astype('float32')
img = img * scale
img = [img]
img = np.array(img)
detection, = exe.run(infer_program,
feed={'image': img},
fetch_list=fetches,
return_numpy=False)
detection = np.array(detection)
    # layout: xmin, ymin, xmax, ymax, score
if detection.shape == (1, ):
print("No face detected")
return np.array([[0, 0, 0, 0, 0]])
det_conf = detection[:, 1]
det_xmin = image_shape[2] * detection[:, 2] / shrink
det_ymin = image_shape[1] * detection[:, 3] / shrink
det_xmax = image_shape[2] * detection[:, 4] / shrink
det_ymax = image_shape[1] * detection[:, 5] / shrink
det = np.column_stack((det_xmin, det_ymin, det_xmax, det_ymax, det_conf))
return det
def bbox_vote(det):
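    # Box voting: work through the boxes in descending score order; for each
    # top box, gather every box with IoU >= 0.3 against it and replace the
    # cluster by a single box whose coordinates are the score-weighted average
    # of the cluster and whose score is the cluster maximum (singleton
    # clusters are kept as-is).  At most the 750 best boxes are returned.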
order = det[:, 4].ravel().argsort()[::-1]
det = det[order, :]
if det.shape[0] == 0:
dets = np.array([[10, 10, 20, 20, 0.002]])
det = np.empty(shape=[0, 5])
while det.shape[0] > 0:
# IOU
area = (det[:, 2] - det[:, 0] + 1) * (det[:, 3] - det[:, 1] + 1)
xx1 = np.maximum(det[0, 0], det[:, 0])
yy1 = np.maximum(det[0, 1], det[:, 1])
xx2 = np.minimum(det[0, 2], det[:, 2])
yy2 = np.minimum(det[0, 3], det[:, 3])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
o = inter / (area[0] + area[:] - inter)
# nms
merge_index = np.where(o >= 0.3)[0]
det_accu = det[merge_index, :]
det = np.delete(det, merge_index, 0)
if merge_index.shape[0] <= 1:
if det.shape[0] == 0:
try:
dets = np.row_stack((dets, det_accu))
except:
dets = det_accu
continue
det_accu[:, 0:4] = det_accu[:, 0:4] * np.tile(det_accu[:, -1:], (1, 4))
max_score = np.max(det_accu[:, 4])
det_accu_sum = np.zeros((1, 5))
det_accu_sum[:, 0:4] = np.sum(det_accu[:, 0:4],
axis=0) / np.sum(det_accu[:, -1:])
det_accu_sum[:, 4] = max_score
try:
dets = np.row_stack((dets, det_accu_sum))
except:
dets = det_accu_sum
dets = dets[0:750, :]
return dets
def flip_test(image, shrink):
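    # Detect on the horizontally mirrored image and flip the x-coordinates of
    # the resulting boxes back into the original frame.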
img = image.transpose(Image.FLIP_LEFT_RIGHT)
det_f = detect_face(img, shrink)
det_t = np.zeros(det_f.shape)
# image.size: [width, height]
det_t[:, 0] = image.size[0] - det_f[:, 2]
det_t[:, 1] = det_f[:, 1]
det_t[:, 2] = image.size[0] - det_f[:, 0]
det_t[:, 3] = det_f[:, 3]
det_t[:, 4] = det_f[:, 4]
return det_t
def multi_scale_test(image, max_shrink):
    # Detection on the shrunken image is only used to find big faces
st = 0.5 if max_shrink >= 0.75 else 0.5 * max_shrink
det_s = detect_face(image, st)
index = np.where(
np.maximum(det_s[:, 2] - det_s[:, 0] + 1, det_s[:, 3] - det_s[:, 1] + 1)
> 30)[0]
det_s = det_s[index, :]
    # Enlarge the image once
bt = min(2, max_shrink) if max_shrink > 1 else (st + max_shrink) / 2
det_b = detect_face(image, bt)
    # Keep enlarging the image to pick up small faces
if max_shrink > 2:
bt *= 2
while bt < max_shrink:
det_b = np.row_stack((det_b, detect_face(image, bt)))
bt *= 2
det_b = np.row_stack((det_b, detect_face(image, max_shrink)))
# Enlarged images are only used to detect small faces.
if bt > 1:
index = np.where(
np.minimum(det_b[:, 2] - det_b[:, 0] + 1,
det_b[:, 3] - det_b[:, 1] + 1) < 100)[0]
det_b = det_b[index, :]
    # Shrunken images are only used to detect big faces.
else:
index = np.where(
np.maximum(det_b[:, 2] - det_b[:, 0] + 1,
det_b[:, 3] - det_b[:, 1] + 1) > 30)[0]
det_b = det_b[index, :]
return det_s, det_b
def multi_scale_test_pyramid(image, max_shrink):
# Use image pyramids to detect faces
det_b = detect_face(image, 0.25)
index = np.where(
np.maximum(det_b[:, 2] - det_b[:, 0] + 1, det_b[:, 3] - det_b[:, 1] + 1)
> 30)[0]
det_b = det_b[index, :]
st = [0.75, 1.25, 1.5, 1.75]
for i in range(len(st)):
if (st[i] <= max_shrink):
det_temp = detect_face(image, st[i])
# Enlarged images are only used to detect small faces.
if st[i] > 1:
index = np.where(
np.minimum(det_temp[:, 2] - det_temp[:, 0] + 1,
det_temp[:, 3] - det_temp[:, 1] + 1) < 100)[0]
det_temp = det_temp[index, :]
            # Shrunken images are only used to detect big faces.
else:
index = np.where(
np.maximum(det_temp[:, 2] - det_temp[:, 0] + 1,
det_temp[:, 3] - det_temp[:, 1] + 1) > 30)[0]
det_temp = det_temp[index, :]
det_b = np.row_stack((det_b, det_temp))
return det_b
def get_shrink(height, width):
"""
Args:
height (int): image height.
width (int): image width.
"""
# avoid out of memory
max_shrink_v1 = (0x7fffffff / 577.0 / (height * width))**0.5
max_shrink_v2 = ((678 * 1024 * 2.0 * 2.0) / (height * width))**0.5
def get_round(x, loc):
str_x = str(x)
if '.' in str_x:
str_before, str_after = str_x.split('.')
len_after = len(str_after)
if len_after >= 3:
str_final = str_before + '.' + str_after[0:loc]
return float(str_final)
else:
return x
max_shrink = get_round(min(max_shrink_v1, max_shrink_v2), 2) - 0.3
if max_shrink >= 1.5 and max_shrink < 2:
max_shrink = max_shrink - 0.1
elif max_shrink >= 2 and max_shrink < 3:
max_shrink = max_shrink - 0.2
elif max_shrink >= 3 and max_shrink < 4:
max_shrink = max_shrink - 0.3
elif max_shrink >= 4 and max_shrink < 5:
max_shrink = max_shrink - 0.4
elif max_shrink >= 5:
max_shrink = max_shrink - 0.5
shrink = max_shrink if max_shrink < 1 else 1
return shrink, max_shrink
if __name__ == '__main__':
args = parser.parse_args()
print_arguments(args)
config = reader.Settings(data_dir=args.data_dir)
place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
main_program = fluid.Program()
startup_program = fluid.Program()
image_shape = [3, 1024, 1024]
with fluid.program_guard(main_program, startup_program):
network = PyramidBox(
image_shape, sub_network=args.use_pyramidbox, is_infer=True)
infer_program, nmsed_out = network.infer(main_program)
fetches = [nmsed_out]
fluid.io.load_persistables(
exe, args.model_dir, main_program=main_program)
infer(args, config)
| apache-2.0 | 9,215,710,945,514,166,000 | 35.964516 | 116 | 0.53242 | false |
lukipuki/obnam | obnamlib/bag_store.py | 1 | 3767 | # Copyright 2015 Lars Wirzenius
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# =*= License: GPL-3+ =*=
import errno
import os
import random
import obnamlib
class BagStore(object):
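    """Store Bag objects as individual files on a filesystem.
    Each bag is serialised and written to <dirname>/<id as %016x>.bag.  New
    bag identifiers are reserved through an IdInventor so that two writers
    cannot claim the same id.
    """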
def __init__(self):
self._fs = None
self._dirname = None
self._id_inventor = IdInventor()
self._id_inventor.set_filename_maker(self._make_bag_filename)
def _make_bag_filename(self, bag_id):
return os.path.join(self._dirname, '%016x.bag' % bag_id)
def set_location(self, fs, dirname):
self._fs = fs
self._dirname = dirname
self._id_inventor.set_fs(fs)
def reserve_bag_id(self):
return self._id_inventor.reserve_id()
def put_bag(self, bag):
filename = self._make_bag_filename(bag.get_id())
serialised = serialise_bag(bag)
self._fs.overwrite_file(filename, serialised)
def get_bag(self, bag_id):
filename = self._make_bag_filename(bag_id)
serialised = self._fs.cat(filename)
return deserialise_bag(serialised)
def has_bag(self, bag_id):
filename = self._make_bag_filename(bag_id)
try:
st = self._fs.lstat(filename)
except (IOError, OSError): # pragma: no cover
return False
return st.st_size > 0
def get_bag_ids(self):
for pathname, _ in self._fs.scan_tree(self._dirname):
if self._is_bag_filename(pathname):
yield self._get_bag_id_from_filename(pathname)
def _is_bag_filename(self, pathname):
return pathname.endswith('.bag')
def _get_bag_id_from_filename(self, pathname):
basename = os.path.basename(pathname)
return int(basename[:-len('.bag')], 16)
def remove_bag(self, bag_id):
filename = self._make_bag_filename(bag_id)
self._fs.remove(filename)
class IdInventor(object):
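    """Invent previously unused bag identifiers.
    The first identifier is chosen at random and later ones simply increment
    it.  An identifier is reserved by creating its (empty) bag file; a clash
    with an existing bag shows up as a failed write and triggers a retry.
    """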
def __init__(self):
self.set_fs(None)
self._filename_maker = None
def set_fs(self, fs):
self._fs = fs
self._prev_id = None
def set_filename_maker(self, maker):
self._filename_maker = maker
def reserve_id(self):
while True:
self._next_id()
if self._reserve_succeeds():
return self._prev_id
self._prev_id = None # pragma: no cover
def _next_id(self):
if self._prev_id is None:
self._prev_id = random.randint(0, obnamlib.MAX_ID)
else:
self._prev_id += 1 # pragma: no cover
def _reserve_succeeds(self):
filename = self._filename_maker(self._prev_id)
try:
self._fs.write_file(filename, '')
except OSError as e: # pragma: no cover
            if e.errno == errno.EEXIST:
return False
raise
return True
def serialise_bag(bag):
obj = {
'bag-id': bag.get_id(),
'blobs': [bag[i] for i in range(len(bag))],
}
return obnamlib.serialise_object(obj)
def deserialise_bag(serialised):
obj = obnamlib.deserialise_object(serialised)
bag = obnamlib.Bag()
bag.set_id(obj['bag-id'])
for blob in obj['blobs']:
bag.append(blob)
return bag
| gpl-3.0 | -8,144,014,369,732,960,000 | 27.976923 | 71 | 0.606849 | false |
noba3/KoTos | addons/plugin.video.movie25/resources/libs/plugins/tvrelease.py | 1 | 14043 | import urllib, urllib2,re,string,sys,os
import xbmc, xbmcgui, xbmcaddon, xbmcplugin
from resources.libs import main
from t0mm0.common.addon import Addon
addon_id = 'plugin.video.movie25'
selfAddon = xbmcaddon.Addon(id=addon_id)
addon = Addon(addon_id, sys.argv)
art = main.art
error_logo = art+'/bigx.png'
BASEURL = 'http://www.tv-release.net/'
prettyName = 'TVRelease'
def MAINMENU():
main.addDir('Search Tv-Release', BASEURL+'?seacher=', 1006,art+'/tvrsearch1.png')
main.addDir('TV 480', BASEURL+'?cat=TV-480p', 1001,art+'/TV480.png')
main.addDir('TV 720', BASEURL+'?cat=TV-720p', 1001,art+'/TV720.png')
main.addDir('TV MP4', BASEURL+'?cat=TV-Mp4', 1001,art+'/TVmp4.png')
main.addDir('TV Xvid', BASEURL+'?cat=TV-XviD', 1001,art+'/TVxvid.png')
#main.addDir('TV Packs', BASEURL+'category/tvshows/tvpack/', 1007,art+'/TVpacks.png')
main.addDir('TV Foreign', BASEURL+'?cat=TV-Foreign', 1001,art+'/TVforeign.png')
main.addDir('Movies 480', BASEURL+'?cat=Movies-480p', 1001,art+'/Movies480.png')
main.addDir('Movies 720', BASEURL+'?cat=Movies-720p', 1001,art+'/Movies720.png')
main.addDir('Movies Xvid', BASEURL+'?cat=Movies-XviD', 1001,art+'/Moviesxvid.png')
main.addDir('Movies Foreign', BASEURL+'?cat=Movies-Foreign', 1001,art+'/Moviesforeign.png')
main.addSpecial('Resolver Settings',BASEURL, 1004,art+'/tvrresolver.png')
main.VIEWSB()
def INDEX(url):
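    # Parse one listing page of tv-release.net: pull each table row out with a
    # regex, tidy the release name (quality tag, SxxEyy or air date, WEB-DL
    # flag), add it as a TV or movie directory entry while a progress dialog
    # tracks caching, then add "Go to Page" / "Next Page" items when the
    # pager block is present.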
types = []
SearchType = None
if '!' in url:
r = url.rpartition('!')
print r
url = r[0]
SearchType = r[2]
else:
url = url
if 'cat=TV' in url:
types = 'tv'
elif 'cat=Movies' in url:
types = 'movie'
html = GETHTML(url)
if html == None:
return
pattern = '<tr><td[^>]*?><a [^>]*?>([^<]*?)</a></td><td[^>]*?><a href=\'([^\']*?)\'[^>]*?>([^<]*?)<'
r = re.findall(pattern, html, re.I|re.M|re.DOTALL)
dialogWait = xbmcgui.DialogProgress()
ret = dialogWait.create('Please wait until list is cached.')
totalLinks = len(r)
loadedLinks = 0
remaining_display = 'Media loaded :: [B]'+str(loadedLinks)+' / '+str(totalLinks)+'[/B].'
dialogWait.update(0,'[B]Will load instantly from now on[/B]',remaining_display)
for tag, url, name in r:
if re.search('(?i)WEB-DL',name): tag = tag.strip() + " WEB-DL"
if re.findall('\d{4}p', name):
r = re.findall('(.+?)\s(\d+p)', name)
for name, quality in r:
tag = tag.replace('720p',quality)
pass
if re.findall('\ss\d+e\d+\s', name, re.I|re.DOTALL):
r = re.findall('(.+?)\ss(\d+)e(\d+)\s', name, re.I)
for name, season, episode in r:
name = name+' S'+season+'E'+episode
elif re.findall('\s\d{4}\s\d{2}\s\d{2}\s', name):
r = re.findall('(.+?)\s(\d{4})\s(\d{2})\s(\d{2})\s',name)
for name, year, month, day in r:
name = name+' '+year+' '+month+' '+day
elif re.findall('\shdtv\sx', name, re.I):
r = re.findall('(.+?)\shdtv\sx',name, re.I)
for name in r:
pass
name = re.sub('\s\s+',' ',name).strip()
name = name+' [COLOR red]'+re.sub('(?sim)^(TV-|Movies-)(.*)','\\2',tag)+'[/COLOR]'
if SearchType == None:
if 'TV' in tag:
main.addDirTE(main.CleanTitle(name),url,1003,'','','','','','')
elif 'Movies' in tag:
if re.findall('\s\d+\s',name):
r = name.rpartition('\s\d{4}\s')
main.addDirM(main.CleanTitle(name),url,1003,'','','','','','')
elif SearchType == 'tv' and 'TV' in tag:
main.addDirTE(main.CleanTitle(name),url,1003,'','','','','','')
elif SearchType == 'movie' and 'Movies' in tag:
r = name.rpartition('\s\d{4}\s')
main.addDirM(main.CleanTitle(name),url,1003,'','','','','','')
loadedLinks = loadedLinks + 1
percent = (loadedLinks * 100)/totalLinks
remaining_display = 'Media loaded :: [B]'+str(loadedLinks)+' / '+str(totalLinks)+'[/B].'
dialogWait.update(percent,'[B]Will load instantly from now on[/B]',remaining_display)
if dialogWait.iscanceled(): break
dialogWait.close()
del dialogWait
if "<div class='zmg_pn'" in html and loadedLinks >= totalLinks:
r = re.findall("""<span class='zmg_pn_current'>(\d+?)</span>[^<]*?<span class='zmg_pn_standar'><a href="([^"]+?)">""", html, re.I|re.DOTALL|re.M)
total = re.findall('">(\d+)</a></span>', html)
if total: total = total[-1]
else: total = "1"
for current, url in r:
name = 'Page '+current+' of '+total+' [COLOR green]Next Page >>>[/COLOR]'
main.addDir('[COLOR green]Go to Page[/COLOR]', url+':'+total, 1002, art+'/gotopagetr.png')
main.addDir(name, url.replace('%5C',''), 1001, art+'/nextpage.png')
main.VIEWS()
def LISTHOSTERS(name,url):
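    # Scrape the outbound file-host links from a release page and list only
    # the ones whose host the resolver supports; if no links are found a
    # "no playable streams" dialog is shown instead.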
html = GETHTML(url)
if html == None: return
if selfAddon.getSetting("hide-download-instructions") != "true":
main.addLink("[COLOR red]For Download Options, Bring up Context Menu Over Selected Link.[/COLOR]",'','')
r = re.findall(r'class="td_cols"><a target=\'_blank\'.+?href=\'(.+?)\'>',html, re.M|re.DOTALL)
try:
t = re.findall(r'rel="nofollow">((?!.*\.rar).*)</a>', html, re.I)
r = r+t
except: pass
if len(r) == 0:
addon.show_ok_dialog(['No Playable Streams Found,','It Might Be That They Are Still Being Uploaded,',
'Or They Are Unstreamable Archive Files'],'MashUP: TV-Release')
xbmcplugin.endOfDirectory(int(sys.argv[1]), False, False)
return
from urlparse import urlparse
for url in r:
url = url.replace("180upload.nl","180upload.com")
host = urlparse(url).hostname.replace('www.','').partition('.')[0]
if main.supportedHost(host):
main.addDown2(name.strip()+" [COLOR blue]"+host.upper()+"[/COLOR]",url,1005,art+'/hosts/'+host+'.png',art+'/hosts/'+host+'.png')
def superSearch(encode,type):
try:
if type == 'Movies': cat = 'Movies-XviD,Movies-720p,Movies-480p,Movies-Foreign,Movies-DVDR,'
else: cat = 'TV-XviD,TV-Mp4,TV-720p,TV-480p,TV-Foreign,'
surl ='http://tv-release.net/?s='+encode+'&cat='+cat
returnList=[]
link=main.OPENURL(surl,verbose=False)
link=link.replace('\r','').replace('\n','').replace('\t','').replace(' ','')
pattern = '<tr><td[^>]*?><a [^>]*?>([^<]*?)</a></td><td[^>]*?><a href=\'([^\']*?)\'[^>]*?>([^<]*?)<'
r = re.findall(pattern, link, re.I|re.M|re.DOTALL)
for tag, url, name in r:
if re.search('(?i)WEB-DL',name): tag = tag.strip() + " WEB-DL"
if re.findall('\d+p\s', name):
r = re.findall('(.+?)\s(\d+p)\s', name)
for name, quality in r:
tag = tag.replace('720p',quality)
pass
if re.findall('\ss\d+e\d+\s', name, re.I|re.DOTALL):
r = re.findall('(.+?)\ss(\d+)e(\d+)\s', name, re.I)
for name, season, episode in r:
name = name+' S'+season+'E'+episode
elif re.findall('\s\d{4}\s\d{2}\s\d{2}\s', name):
r = re.findall('(.+?)\s(\d{4})\s(\d{2})\s(\d{2})\s',name)
for name, year, month, day in r:
name = name+' '+year+' '+month+' '+day
elif re.findall('\shdtv\sx', name, re.I):
r = re.findall('(.+?)\shdtv\sx',name, re.I)
for name in r:
pass
name = name+' [COLOR red]'+re.sub('(?sim)^(TV-|Movies-)(.*)','\\2',tag)+'[/COLOR]'
returnList.append((main.CleanTitle(name),prettyName,url,'',1003,True))
return returnList
except: return []
def SEARCHhistory():
dialog = xbmcgui.Dialog()
ret = dialog.select('[B]Choose A Search Type[/B]',['[B]TV Shows[/B]','[B]Movies[/B]'])
if ret == -1:
xbmcplugin.endOfDirectory(int(sys.argv[1]), False, False)
if ret == 0:
searchType = 'tv'
seapath=os.path.join(main.datapath,'Search')
SeaFile=os.path.join(seapath,'SearchHistoryTv')
if not os.path.exists(SeaFile):
SEARCH(searchType)
else:
main.addDir('Search',searchType,1008,art+'/search.png')
main.addDir('Clear History',SeaFile,128,art+'/cleahis.png')
thumb=art+'/link.png'
searchis=re.compile('search="(.+?)",').findall(open(SeaFile,'r').read())
for seahis in reversed(searchis):
url=seahis
seahis=seahis.replace('%20',' ')
url = 'http://tv-release.net/?s='+url+'&cat=TV-XviD,TV-Mp4,TV-720p,TV-480p,TV-Foreign,'
main.addDir(seahis,url,1001,thumb)
if ret == 1:
searchType = 'movie'
seapath=os.path.join(main.datapath,'Search')
SeaFile=os.path.join(seapath,'SearchHistory25')
if not os.path.exists(SeaFile):
SEARCH(searchType)
else:
main.addDir('Search',searchType,1008,art+'/search.png')
main.addDir('Clear History',SeaFile,128,art+'/cleahis.png')
thumb=art+'/link.png'
searchis=re.compile('search="(.+?)",').findall(open(SeaFile,'r').read())
for seahis in reversed(searchis):
url=seahis
seahis=seahis.replace('%20',' ')
url = 'http://tv-release.net/?s='+url+'&cat=Movies-XviD,Movies-720p,Movies-480p,Movies-Foreign,Movies-DVDR,'
main.addDir(seahis,url,1001,thumb)
def SEARCH(murl):
if murl == 'tv':
encode = main.updateSearchFile(murl,'TV',defaultValue=murl,searchMsg='Search For Shows or Episodes')
if not encode: return False
url = 'http://tv-release.net/?s='+encode+'&cat=TV-XviD,TV-Mp4,TV-720p,TV-480p,TV-Foreign,'
INDEX(url)
elif murl=='movie':
encode = main.updateSearchFile(murl,'Movies',defaultValue=murl,searchMsg='Search For Movies')
if not encode: return False
url = 'http://tv-release.net/?s='+encode+'&cat=Movies-XviD,Movies-720p,Movies-480p,Movies-Foreign,Movies-DVDR,'
INDEX(url)
def TVPACKS(url):
html = GETHTML(url)
if html == None:
return
pattern = '(?sim)Tv/Pack</a></span>.+?<a href="([^"]+?)"><b><font size="2px">([^<]+?)<'
r = re.findall(pattern,html)
for url, name in r:
main.addDir(name, url, 1001,'')
def GOTOP(url):
default = url
r = url.rpartition(':')
url = re.findall('^(.+page=)\d+(.*)$',r[0])
total = r[2]
keyboard = xbmcgui.Dialog().numeric(0, '[B][I]Goto Page Number[/B][/I]')
if not keyboard:
xbmcplugin.endOfDirectory(int(sys.argv[1]), False, False)
return False
if int(keyboard) > int(total) or keyboard == '0':
addon.show_ok_dialog(['Please Do Not Enter a Page Number bigger than',''+total+', Enter A Number Between 1 and '+total+'',
''], 'MashUP: TV-Release')
GOTOP(default)
url = url[0][0]+keyboard+str(url[0][1])
INDEX(url)
def PLAYMEDIA(name,url):
ok = True
r = re.findall(r'(.+?)\[COLOR', name)
name = r[0]
r=re.findall('Season(.+?)Episode([^<]+)',name)
if r:
infoLabels =main.GETMETAEpiT(name,'','')
video_type='episode'
season=infoLabels['season']
episode=infoLabels['episode']
else:
infoLabels =main.GETMETAT(name,'','','')
video_type='movie'
season=''
episode=''
img=infoLabels['cover_url']
fanart =infoLabels['backdrop_url']
imdb_id=infoLabels['imdb_id']
infolabels = { 'supports_meta' : 'true', 'video_type':video_type, 'name':str(infoLabels['title']), 'imdb_id':str(infoLabels['imdb_id']), 'season':str(season), 'episode':str(episode), 'year':str(infoLabels['year']) }
try:
xbmc.executebuiltin("XBMC.Notification(Please Wait!,Resolving Link,3000)")
stream_url = main.resolve_url(url)
infoL={'Title': infoLabels['title'], 'Plot': infoLabels['plot'], 'Genre': infoLabels['genre']}
# play with bookmark
from resources.universal import playbackengine
player = playbackengine.PlayWithoutQueueSupport(resolved_url=stream_url, addon_id=addon_id, video_type=video_type, title=str(infoLabels['title']),season=str(season), episode=str(episode), year=str(infoLabels['year']),img=img,infolabels=infoL, watchedCallbackwithParams=main.WatchedCallbackwithParams,imdb_id=imdb_id)
#WatchHistory
if selfAddon.getSetting("whistory") == "true":
from resources.universal import watchhistory
wh = watchhistory.WatchHistory(addon_id)
wh.add_item(name+' '+'[COLOR=FF67cc33]TvRelease[/COLOR]', sys.argv[0]+sys.argv[2], infolabels=infolabels, img=str(img), fanart=str(fanart), is_folder=False)
player.KeepAlive()
return ok
except:
return ok
def GETHTML(url):
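    # Thin wrapper around main.OPENURL that turns the site's maintenance page
    # and network failures into user-facing dialogs instead of tracebacks.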
try:
h = main.OPENURL(url.replace(' ','%20'))
if '<h2>Under Maintenance</h2>' in h:
addon.show_ok_dialog(['[COLOR=FF67cc33][B]TV-Release is Down For Maintenance,[/COLOR][/B]',
'[COLOR=FF67cc33][B]Please Try Again Later[/COLOR][/B]',''],'MashUP: TV-Release')
xbmcplugin.endOfDirectory(int(sys.argv[1]), False, False)
return
return h
except urllib2.URLError, e:
addon.show_small_popup('MashUP: Tv-Release','TV-Release Web Site Failed To Respond, Check Log For Details', 9000, error_logo)
addon.log_notice(str(e))
xbmcplugin.endOfDirectory(int(sys.argv[1]), False, False)
return
| gpl-2.0 | -4,429,823,489,364,764,700 | 47.760417 | 324 | 0.552304 | false |
QuantCrimAtLeeds/PredictCode | open_cp/prohotspot.py | 1 | 11749 | """
prohotspot
~~~~~~~~~~
Implements the "prospective hotspotting" technique from:
1. Bowers, Johnson, Pease,
"Prospective hot-spotting: The future of crime mapping?",
Brit. J. Criminol. (2004) 44 641--658. doi:10.1093/bjc/azh036
2. Johnson et al.,
"Prospective crime mapping in operational context",
Home Office Online Report 19/07
`Police online library <http://library.college.police.uk/docs/hordsolr/rdsolr1907.pdf>`_
The underlying idea is to start with a kernel / weight defined in space and
positive time. This typically has finite extent, and might be related to
discretised space and/or time. Weights used in the literature tend to be
of the form :math:`1/(1+d)`.
The classical algorithm assigns all events to cells in a gridding of space,
and a "grid" of time (typically the number of whole weeks before the current
time). Only events which are close enough in space and time to the grid cell
of interest are used. For these, the weight is evaluated on each one, and then
the sum taken.
It is important to note the coupling between the grid size used and the weight,
because it is the distance between grid cells which is used. Exactly what
"distance" here means is unclear, and we have provided a number of options.
Alternatively, we can just use the weight / kernel in a continuous kernel
density estimate scheme.
"""
from . import predictors as _predictors
import abc as _abc
import numpy as _np
class Weight(metaclass=_abc.ABCMeta):
"""Base class for weights / kernels. Classes implementing this algorithm
are responsible purely for providing weights. We leave the details of
possibly discretising data to other classes.
"""
@_abc.abstractmethod
def __call__(self, dt, dd):
"""Evaluate the weight given the potentially discretised input.
:param dt: The time distance from 0. May be a scalar or a numpy array;
should be of a number type, not `timedelta` or similar.
:param dd: Spatial distance. May be a scalar or a one-dimensional
numpy array.
:return: A scalar or one-dimensional numpy array as appropriate.
"""
pass
class ClassicWeight(Weight):
"""The classical weight, :math:`(1/(1+d))(1/(1+t))` where :math:`d` is
distance and :math:`t` is time. Default units are "grid cells" and "weeks",
respectively.
:param space_bandwidth: Distances greater than or equal to this set the
weight to 0.
:param time_bandwidth: Times greater than or equal to this set the weight
to 0.
"""
def __init__(self, space_bandwidth=8, time_bandwidth=8):
self.space_bandwidth = space_bandwidth
self.time_bandwidth = time_bandwidth
def __call__(self, dt, dd):
mask = (dt < self.time_bandwidth) & (dd < self.space_bandwidth)
return 1 / ( (1 + dd) * ( 1 + dt) ) * mask
def __repr__(self):
return "Classic(sb={}, tb={})".format(self.space_bandwidth, self.time_bandwidth)
@property
def args(self):
return "C{},{}".format(self.space_bandwidth, self.time_bandwidth)
class GridDistance(metaclass=_abc.ABCMeta):
"""Abstract base class to calculate the distance between grid cells"""
@_abc.abstractmethod
def __call__(self, x1, y1, x2, y2):
pass
class DistanceDiagonalsSame(GridDistance):
"""Distance in the grid. Diagonal distances are one, so (1,1) and
(2,2) are adjacent points. This equates to using an :math:`\ell^\infty`
norm.
"""
def __call__(self, x1, y1, x2, y2):
xx = _np.abs(x1 - x2)
yy = _np.abs(y1 - y2)
return _np.max(_np.vstack((xx, yy)), axis=0)
def __repr__(self):
return "DiagsSame"
class DistanceDiagonalsDifferent(GridDistance):
"""Distance in the grid. Now diagonal distances are two, so (1,1) and
(2,2) are two grid cells apart. This equates to using an :math:`\ell^1`
norm.
"""
def __call__(self, x1, y1, x2, y2):
return _np.abs(x1 - x2) + _np.abs(y1 - y2)
def __repr__(self):
return "DiagsDiff"
class DistanceCircle(GridDistance):
"""Distance in the grid using the usual Euclidean distance, i.e. the
:math:`\ell^2` norm. This will work better with the continuous version
of the predictor.
"""
def __call__(self, x1, y1, x2, y2):
return _np.sqrt((x1-x2)**2 + (y1-y2)**2)
def __repr__(self):
return "DiagsCircle"
class ProspectiveHotSpot(_predictors.DataTrainer):
"""Implements the classical, grid based algorithm. To calculate distances,
we consider the grid cell we are computing the risk intensity for, the grid
cell the event falls into, and then delegate to an instance of :class
GridDistance: to compute the distance. To compute time, we look at the
time difference between the prediction time and the timestamp of the event
and then divide by the :attr:`time_unit`, then round down to the
nearest whole number. So 6 days divided by 1 week is 0 whole units.
Set :attr:`distance` to change the computation of distance between
grid cells. Set :attr:`weight` to change the weight used.
:param region: The :class:`RectangularRegion` the data is in.
:param grid_size: The size of the grid to place the data into.
:param grid: Alternative to specifying the region and grid_size is to pass
a :class:`BoundedGrid` instance.
:param time_unit: A :class:`numpy.timedelta64` instance giving the time
unit.
"""
def __init__(self, region=None, grid_size=50, time_unit=_np.timedelta64(1, "W"), grid=None):
if grid is None:
self.grid = grid_size
self.region = region
else:
self.region = grid.region()
self.grid = grid.xsize
if grid.xsize != grid.ysize:
raise ValueError("Only supports *square* grid cells.")
self.time_unit = time_unit
self.weight = ClassicWeight()
self.distance = DistanceDiagonalsSame()
def _cell(self, x, y):
gridx = _np.floor((x - self.region.xmin) / self.grid)
gridy = _np.floor((y - self.region.ymin) / self.grid)
return gridx, gridy
def _total_weight(self, time_deltas, coords, cellx, celly):
gridx, gridy = self._cell(coords[0], coords[1])
distances = self.distance(gridx, gridy, cellx, celly)
return _np.sum(self.weight(time_deltas, distances))
def predict(self, cutoff_time, predict_time):
"""Calculate a grid based prediction.
:param cutoff_time: Ignore data with a timestamp after this time.
:param predict_time: Timestamp of the prediction. Used to calculate
the time difference between events and "now". Typically the same as
`cutoff_time`.
:return: An instance of :class:`GridPredictionArray`
"""
if not cutoff_time <= predict_time:
raise ValueError("Data cutoff point should be before prediction time")
events = self.data.events_before(cutoff_time)
time_deltas = _np.datetime64(predict_time) - events.timestamps
time_deltas = _np.floor(time_deltas / self.time_unit)
width = int(_np.rint((self.region.xmax - self.region.xmin) / self.grid))
height = int(_np.rint((self.region.ymax - self.region.ymin) / self.grid))
matrix = _np.empty((height, width))
for x in range(width):
for y in range(height):
matrix[y][x] = self._total_weight(time_deltas, events.coords, x, y)
return _predictors.GridPredictionArray(self.grid, self.grid, matrix,
self.region.xmin, self.region.ymin)
class ProspectiveHotSpotContinuous(_predictors.DataTrainer):
"""Implements the prospective hotspot algorithm as a kernel density
estimation. A copy of the space/time kernel / weight is laid down over
each event and the result is summed. To allow compatibility with the grid
based method, we set a time unit and a grid size, but these are purely used
to scale the data appropriately.
"""
def __init__(self, grid_size=50, time_unit=_np.timedelta64(1, "W")):
self.grid = grid_size
self.time_unit = time_unit
self.weight = ClassicWeight()
def predict(self, cutoff_time, predict_time):
"""Calculate a continuous prediction.
:param cutoff_time: Ignore data with a timestamp after this time.
:param predict_time: Timestamp of the prediction. Used to calculate
the time difference between events and "now". Typically the same as
`cutoff_time`.
:return: An instance of :class:`ContinuousPrediction`
"""
if not cutoff_time <= predict_time:
raise ValueError("Data cutoff point should be before prediction time")
events = self.data.events_before(cutoff_time)
time_deltas = (_np.datetime64(predict_time) - events.timestamps) / self.time_unit
def kernel(points):
points = _np.asarray(points)
xdeltas = (points[0][:,None] - events.coords[0][None,:]) / self.grid
ydeltas = (points[1][:,None] - events.coords[1][None,:]) / self.grid
distances = _np.sqrt(xdeltas**2 + ydeltas**2)
times = time_deltas[None,:]
r = _np.sum(self.weight(times, distances), axis=-1)
# Return a scalar if input as scalar
return r[0] if len(r)==1 else r
return _predictors.KernelRiskPredictor(kernel, cell_width=self.grid,
cell_height=self.grid)
def grid_predict(self, cutoff_time, start, end, grid, samples=None):
"""Directly calculate a grid prediction, by taking the mean value over
both time and space. We also normalise the resulting grid prediction.
(But be aware that if you subsequently "mask" the grid, you will then
need to re-normalise).
:param cutoff_time: Ignore data with a timestamp after this time.
:param start: The start of the prediction time window. Typically the
same as `cutoff_time`.
:param end: The end of the prediction window. We will average the
kernel between `start` and `end`.
:param grid: An instance of :class:`data.BoundedGrid` to use as a basis
for the prediction.
:param samples: Number of samples to use, or `None` for auto-compute
:return: An instance of :class:`GridPredictionArray`.
"""
if not cutoff_time <= start:
raise ValueError("Data cutoff point should be before prediction time")
events = self.data.events_before(cutoff_time)
start, end = _np.datetime64(start), _np.datetime64(end)
# Rather than copy'n'paste a lot of code, we do this...
def kernel(points):
points = _np.asarray(points)
xdeltas = (points[0][:,None] - events.coords[0][None,:]) / self.grid
ydeltas = (points[1][:,None] - events.coords[1][None,:]) / self.grid
distances = _np.sqrt(xdeltas**2 + ydeltas**2)
num_points = points.shape[1] if len(points.shape) > 1 else 1
time_deltas = (end - start) * _np.random.random(num_points) + start
times = (time_deltas[:,None] - events.timestamps[None,:]) / self.time_unit
r = _np.sum(self.weight(times, distances), axis=-1)
# Return a scalar if input as scalar
return r[0] if len(r)==1 else r
krp = _predictors.KernelRiskPredictor(kernel, cell_width=self.grid,
cell_height=self.grid, samples=samples)
return _predictors.GridPredictionArray.from_continuous_prediction_grid(krp, grid)
| artistic-2.0 | -8,806,066,455,696,376,000 | 41.568841 | 97 | 0.643204 | false |
matrix-org/synapse | tests/handlers/test_password_providers.py | 1 | 24717 | # Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the password_auth_provider interface"""
from typing import Any, Type, Union
from unittest.mock import Mock
from twisted.internet import defer
import synapse
from synapse.rest.client.v1 import login
from synapse.rest.client.v2_alpha import devices
from synapse.types import JsonDict
from tests import unittest
from tests.server import FakeChannel
from tests.unittest import override_config
# (possibly experimental) login flows we expect to appear in the list after the normal
# ones
ADDITIONAL_LOGIN_FLOWS = [{"type": "uk.half-shot.msc2778.login.application_service"}]
# a mock instance which the dummy auth providers delegate to, so we can see what's going
# on
mock_password_provider = Mock()
class PasswordOnlyAuthProvider:
"""A password_provider which only implements `check_password`."""
@staticmethod
def parse_config(self):
pass
def __init__(self, config, account_handler):
pass
def check_password(self, *args):
return mock_password_provider.check_password(*args)
class CustomAuthProvider:
"""A password_provider which implements a custom login type."""
@staticmethod
def parse_config(self):
pass
def __init__(self, config, account_handler):
pass
def get_supported_login_types(self):
return {"test.login_type": ["test_field"]}
def check_auth(self, *args):
return mock_password_provider.check_auth(*args)
class PasswordCustomAuthProvider:
"""A password_provider which implements password login via `check_auth`, as well
as a custom type."""
@staticmethod
def parse_config(self):
pass
def __init__(self, config, account_handler):
pass
def get_supported_login_types(self):
return {"m.login.password": ["password"], "test.login_type": ["test_field"]}
def check_auth(self, *args):
return mock_password_provider.check_auth(*args)
def providers_config(*providers: Type[Any]) -> dict:
"""Returns a config dict that will enable the given password auth providers"""
return {
"password_providers": [
{"module": "%s.%s" % (__name__, provider.__qualname__), "config": {}}
for provider in providers
]
}
class PasswordAuthProviderTests(unittest.HomeserverTestCase):
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
devices.register_servlets,
]
def setUp(self):
# we use a global mock device, so make sure we are starting with a clean slate
mock_password_provider.reset_mock()
super().setUp()
@override_config(providers_config(PasswordOnlyAuthProvider))
def test_password_only_auth_provider_login(self):
# login flows should only have m.login.password
flows = self._get_login_flows()
self.assertEqual(flows, [{"type": "m.login.password"}] + ADDITIONAL_LOGIN_FLOWS)
# check_password must return an awaitable
mock_password_provider.check_password.return_value = defer.succeed(True)
channel = self._send_password_login("u", "p")
self.assertEqual(channel.code, 200, channel.result)
self.assertEqual("@u:test", channel.json_body["user_id"])
mock_password_provider.check_password.assert_called_once_with("@u:test", "p")
mock_password_provider.reset_mock()
# login with mxid should work too
channel = self._send_password_login("@u:bz", "p")
self.assertEqual(channel.code, 200, channel.result)
self.assertEqual("@u:bz", channel.json_body["user_id"])
mock_password_provider.check_password.assert_called_once_with("@u:bz", "p")
mock_password_provider.reset_mock()
# try a weird username / pass. Honestly it's unclear what we *expect* to happen
# in these cases, but at least we can guard against the API changing
# unexpectedly
channel = self._send_password_login(" USER🙂NAME ", " pASS\U0001F622word ")
self.assertEqual(channel.code, 200, channel.result)
self.assertEqual("@ USER🙂NAME :test", channel.json_body["user_id"])
mock_password_provider.check_password.assert_called_once_with(
"@ USER🙂NAME :test", " pASS😢word "
)
@override_config(providers_config(PasswordOnlyAuthProvider))
def test_password_only_auth_provider_ui_auth(self):
"""UI Auth should delegate correctly to the password provider"""
# create the user, otherwise access doesn't work
module_api = self.hs.get_module_api()
self.get_success(module_api.register_user("u"))
# log in twice, to get two devices
mock_password_provider.check_password.return_value = defer.succeed(True)
tok1 = self.login("u", "p")
self.login("u", "p", device_id="dev2")
mock_password_provider.reset_mock()
# have the auth provider deny the request to start with
mock_password_provider.check_password.return_value = defer.succeed(False)
# make the initial request which returns a 401
session = self._start_delete_device_session(tok1, "dev2")
mock_password_provider.check_password.assert_not_called()
# Make another request providing the UI auth flow.
channel = self._authed_delete_device(tok1, "dev2", session, "u", "p")
self.assertEqual(channel.code, 401) # XXX why not a 403?
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
mock_password_provider.check_password.assert_called_once_with("@u:test", "p")
mock_password_provider.reset_mock()
# Finally, check the request goes through when we allow it
mock_password_provider.check_password.return_value = defer.succeed(True)
channel = self._authed_delete_device(tok1, "dev2", session, "u", "p")
self.assertEqual(channel.code, 200)
mock_password_provider.check_password.assert_called_once_with("@u:test", "p")
@override_config(providers_config(PasswordOnlyAuthProvider))
def test_local_user_fallback_login(self):
"""rejected login should fall back to local db"""
self.register_user("localuser", "localpass")
# check_password must return an awaitable
mock_password_provider.check_password.return_value = defer.succeed(False)
channel = self._send_password_login("u", "p")
self.assertEqual(channel.code, 403, channel.result)
channel = self._send_password_login("localuser", "localpass")
self.assertEqual(channel.code, 200, channel.result)
self.assertEqual("@localuser:test", channel.json_body["user_id"])
@override_config(providers_config(PasswordOnlyAuthProvider))
def test_local_user_fallback_ui_auth(self):
"""rejected login should fall back to local db"""
self.register_user("localuser", "localpass")
# have the auth provider deny the request
mock_password_provider.check_password.return_value = defer.succeed(False)
# log in twice, to get two devices
tok1 = self.login("localuser", "localpass")
self.login("localuser", "localpass", device_id="dev2")
mock_password_provider.check_password.reset_mock()
# first delete should give a 401
session = self._start_delete_device_session(tok1, "dev2")
mock_password_provider.check_password.assert_not_called()
# Wrong password
channel = self._authed_delete_device(tok1, "dev2", session, "localuser", "xxx")
self.assertEqual(channel.code, 401) # XXX why not a 403?
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
mock_password_provider.check_password.assert_called_once_with(
"@localuser:test", "xxx"
)
mock_password_provider.reset_mock()
# Right password
channel = self._authed_delete_device(
tok1, "dev2", session, "localuser", "localpass"
)
self.assertEqual(channel.code, 200)
mock_password_provider.check_password.assert_called_once_with(
"@localuser:test", "localpass"
)
@override_config(
{
**providers_config(PasswordOnlyAuthProvider),
"password_config": {"localdb_enabled": False},
}
)
def test_no_local_user_fallback_login(self):
"""localdb_enabled can block login with the local password"""
self.register_user("localuser", "localpass")
# check_password must return an awaitable
mock_password_provider.check_password.return_value = defer.succeed(False)
channel = self._send_password_login("localuser", "localpass")
self.assertEqual(channel.code, 403)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
mock_password_provider.check_password.assert_called_once_with(
"@localuser:test", "localpass"
)
@override_config(
{
**providers_config(PasswordOnlyAuthProvider),
"password_config": {"localdb_enabled": False},
}
)
def test_no_local_user_fallback_ui_auth(self):
"""localdb_enabled can block ui auth with the local password"""
self.register_user("localuser", "localpass")
# allow login via the auth provider
mock_password_provider.check_password.return_value = defer.succeed(True)
# log in twice, to get two devices
tok1 = self.login("localuser", "p")
self.login("localuser", "p", device_id="dev2")
mock_password_provider.check_password.reset_mock()
# first delete should give a 401
channel = self._delete_device(tok1, "dev2")
self.assertEqual(channel.code, 401)
# m.login.password UIA is permitted because the auth provider allows it,
# even though the localdb does not.
self.assertEqual(channel.json_body["flows"], [{"stages": ["m.login.password"]}])
session = channel.json_body["session"]
mock_password_provider.check_password.assert_not_called()
# now try deleting with the local password
mock_password_provider.check_password.return_value = defer.succeed(False)
channel = self._authed_delete_device(
tok1, "dev2", session, "localuser", "localpass"
)
self.assertEqual(channel.code, 401) # XXX why not a 403?
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
mock_password_provider.check_password.assert_called_once_with(
"@localuser:test", "localpass"
)
@override_config(
{
**providers_config(PasswordOnlyAuthProvider),
"password_config": {"enabled": False},
}
)
def test_password_auth_disabled(self):
"""password auth doesn't work if it's disabled across the board"""
# login flows should be empty
flows = self._get_login_flows()
self.assertEqual(flows, ADDITIONAL_LOGIN_FLOWS)
# login shouldn't work and should be rejected with a 400 ("unknown login type")
channel = self._send_password_login("u", "p")
self.assertEqual(channel.code, 400, channel.result)
mock_password_provider.check_password.assert_not_called()
@override_config(providers_config(CustomAuthProvider))
def test_custom_auth_provider_login(self):
# login flows should have the custom flow and m.login.password, since we
# haven't disabled local password lookup.
# (password must come first, because reasons)
flows = self._get_login_flows()
self.assertEqual(
flows,
[{"type": "m.login.password"}, {"type": "test.login_type"}]
+ ADDITIONAL_LOGIN_FLOWS,
)
# login with missing param should be rejected
channel = self._send_login("test.login_type", "u")
self.assertEqual(channel.code, 400, channel.result)
mock_password_provider.check_auth.assert_not_called()
mock_password_provider.check_auth.return_value = defer.succeed("@user:bz")
channel = self._send_login("test.login_type", "u", test_field="y")
self.assertEqual(channel.code, 200, channel.result)
self.assertEqual("@user:bz", channel.json_body["user_id"])
mock_password_provider.check_auth.assert_called_once_with(
"u", "test.login_type", {"test_field": "y"}
)
mock_password_provider.reset_mock()
# try a weird username. Again, it's unclear what we *expect* to happen
# in these cases, but at least we can guard against the API changing
# unexpectedly
mock_password_provider.check_auth.return_value = defer.succeed(
"@ MALFORMED! :bz"
)
channel = self._send_login("test.login_type", " USER🙂NAME ", test_field=" abc ")
self.assertEqual(channel.code, 200, channel.result)
self.assertEqual("@ MALFORMED! :bz", channel.json_body["user_id"])
mock_password_provider.check_auth.assert_called_once_with(
" USER🙂NAME ", "test.login_type", {"test_field": " abc "}
)
@override_config(providers_config(CustomAuthProvider))
def test_custom_auth_provider_ui_auth(self):
# register the user and log in twice, to get two devices
self.register_user("localuser", "localpass")
tok1 = self.login("localuser", "localpass")
self.login("localuser", "localpass", device_id="dev2")
# make the initial request which returns a 401
channel = self._delete_device(tok1, "dev2")
self.assertEqual(channel.code, 401)
# Ensure that flows are what is expected.
self.assertIn({"stages": ["m.login.password"]}, channel.json_body["flows"])
self.assertIn({"stages": ["test.login_type"]}, channel.json_body["flows"])
session = channel.json_body["session"]
# missing param
body = {
"auth": {
"type": "test.login_type",
"identifier": {"type": "m.id.user", "user": "localuser"},
"session": session,
},
}
channel = self._delete_device(tok1, "dev2", body)
self.assertEqual(channel.code, 400)
# there's a perfectly good M_MISSING_PARAM errcode, but heaven forfend we should
# use it...
self.assertIn("Missing parameters", channel.json_body["error"])
mock_password_provider.check_auth.assert_not_called()
mock_password_provider.reset_mock()
# right params, but authing as the wrong user
mock_password_provider.check_auth.return_value = defer.succeed("@user:bz")
body["auth"]["test_field"] = "foo"
channel = self._delete_device(tok1, "dev2", body)
self.assertEqual(channel.code, 403)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
mock_password_provider.check_auth.assert_called_once_with(
"localuser", "test.login_type", {"test_field": "foo"}
)
mock_password_provider.reset_mock()
# and finally, succeed
mock_password_provider.check_auth.return_value = defer.succeed(
"@localuser:test"
)
channel = self._delete_device(tok1, "dev2", body)
self.assertEqual(channel.code, 200)
mock_password_provider.check_auth.assert_called_once_with(
"localuser", "test.login_type", {"test_field": "foo"}
)
@override_config(providers_config(CustomAuthProvider))
def test_custom_auth_provider_callback(self):
callback = Mock(return_value=defer.succeed(None))
mock_password_provider.check_auth.return_value = defer.succeed(
("@user:bz", callback)
)
channel = self._send_login("test.login_type", "u", test_field="y")
self.assertEqual(channel.code, 200, channel.result)
self.assertEqual("@user:bz", channel.json_body["user_id"])
mock_password_provider.check_auth.assert_called_once_with(
"u", "test.login_type", {"test_field": "y"}
)
# check the args to the callback
callback.assert_called_once()
call_args, call_kwargs = callback.call_args
# should be one positional arg
self.assertEqual(len(call_args), 1)
self.assertEqual(call_args[0]["user_id"], "@user:bz")
for p in ["user_id", "access_token", "device_id", "home_server"]:
self.assertIn(p, call_args[0])
@override_config(
{**providers_config(CustomAuthProvider), "password_config": {"enabled": False}}
)
def test_custom_auth_password_disabled(self):
"""Test login with a custom auth provider where password login is disabled"""
self.register_user("localuser", "localpass")
flows = self._get_login_flows()
self.assertEqual(flows, [{"type": "test.login_type"}] + ADDITIONAL_LOGIN_FLOWS)
# login shouldn't work and should be rejected with a 400 ("unknown login type")
channel = self._send_password_login("localuser", "localpass")
self.assertEqual(channel.code, 400, channel.result)
mock_password_provider.check_auth.assert_not_called()
@override_config(
{
**providers_config(CustomAuthProvider),
"password_config": {"enabled": False, "localdb_enabled": False},
}
)
def test_custom_auth_password_disabled_localdb_enabled(self):
"""Check the localdb_enabled == enabled == False
Regression test for https://github.com/matrix-org/synapse/issues/8914: check
that setting *both* `localdb_enabled` *and* `password: enabled` to False doesn't
cause an exception.
"""
self.register_user("localuser", "localpass")
flows = self._get_login_flows()
self.assertEqual(flows, [{"type": "test.login_type"}] + ADDITIONAL_LOGIN_FLOWS)
# login shouldn't work and should be rejected with a 400 ("unknown login type")
channel = self._send_password_login("localuser", "localpass")
self.assertEqual(channel.code, 400, channel.result)
mock_password_provider.check_auth.assert_not_called()
@override_config(
{
**providers_config(PasswordCustomAuthProvider),
"password_config": {"enabled": False},
}
)
def test_password_custom_auth_password_disabled_login(self):
"""log in with a custom auth provider which implements password, but password
login is disabled"""
self.register_user("localuser", "localpass")
flows = self._get_login_flows()
self.assertEqual(flows, [{"type": "test.login_type"}] + ADDITIONAL_LOGIN_FLOWS)
# login shouldn't work and should be rejected with a 400 ("unknown login type")
channel = self._send_password_login("localuser", "localpass")
self.assertEqual(channel.code, 400, channel.result)
mock_password_provider.check_auth.assert_not_called()
@override_config(
{
**providers_config(PasswordCustomAuthProvider),
"password_config": {"enabled": False},
}
)
def test_password_custom_auth_password_disabled_ui_auth(self):
"""UI Auth with a custom auth provider which implements password, but password
login is disabled"""
# register the user and log in twice via the test login type to get two devices,
self.register_user("localuser", "localpass")
mock_password_provider.check_auth.return_value = defer.succeed(
"@localuser:test"
)
channel = self._send_login("test.login_type", "localuser", test_field="")
self.assertEqual(channel.code, 200, channel.result)
tok1 = channel.json_body["access_token"]
channel = self._send_login(
"test.login_type", "localuser", test_field="", device_id="dev2"
)
self.assertEqual(channel.code, 200, channel.result)
# make the initial request which returns a 401
channel = self._delete_device(tok1, "dev2")
self.assertEqual(channel.code, 401)
# Ensure that flows are what is expected. In particular, "password" should *not*
# be present.
self.assertIn({"stages": ["test.login_type"]}, channel.json_body["flows"])
session = channel.json_body["session"]
mock_password_provider.reset_mock()
# check that auth with password is rejected
body = {
"auth": {
"type": "m.login.password",
"identifier": {"type": "m.id.user", "user": "localuser"},
"password": "localpass",
"session": session,
},
}
channel = self._delete_device(tok1, "dev2", body)
self.assertEqual(channel.code, 400)
self.assertEqual(
"Password login has been disabled.", channel.json_body["error"]
)
mock_password_provider.check_auth.assert_not_called()
mock_password_provider.reset_mock()
# successful auth
body["auth"]["type"] = "test.login_type"
body["auth"]["test_field"] = "x"
channel = self._delete_device(tok1, "dev2", body)
self.assertEqual(channel.code, 200)
mock_password_provider.check_auth.assert_called_once_with(
"localuser", "test.login_type", {"test_field": "x"}
)
@override_config(
{
**providers_config(CustomAuthProvider),
"password_config": {"localdb_enabled": False},
}
)
def test_custom_auth_no_local_user_fallback(self):
"""Test login with a custom auth provider where the local db is disabled"""
self.register_user("localuser", "localpass")
flows = self._get_login_flows()
self.assertEqual(flows, [{"type": "test.login_type"}] + ADDITIONAL_LOGIN_FLOWS)
# password login shouldn't work and should be rejected with a 400
# ("unknown login type")
channel = self._send_password_login("localuser", "localpass")
self.assertEqual(channel.code, 400, channel.result)
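    # Helpers shared by the test cases above.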
def _get_login_flows(self) -> JsonDict:
channel = self.make_request("GET", "/_matrix/client/r0/login")
self.assertEqual(channel.code, 200, channel.result)
return channel.json_body["flows"]
def _send_password_login(self, user: str, password: str) -> FakeChannel:
return self._send_login(type="m.login.password", user=user, password=password)
def _send_login(self, type, user, **params) -> FakeChannel:
params.update({"identifier": {"type": "m.id.user", "user": user}, "type": type})
channel = self.make_request("POST", "/_matrix/client/r0/login", params)
return channel
def _start_delete_device_session(self, access_token, device_id) -> str:
"""Make an initial delete device request, and return the UI Auth session ID"""
channel = self._delete_device(access_token, device_id)
self.assertEqual(channel.code, 401)
# Ensure that flows are what is expected.
self.assertIn({"stages": ["m.login.password"]}, channel.json_body["flows"])
return channel.json_body["session"]
def _authed_delete_device(
self,
access_token: str,
device_id: str,
session: str,
user_id: str,
password: str,
) -> FakeChannel:
"""Make a delete device request, authenticating with the given uid/password"""
return self._delete_device(
access_token,
device_id,
{
"auth": {
"type": "m.login.password",
"identifier": {"type": "m.id.user", "user": user_id},
"password": password,
"session": session,
},
},
)
def _delete_device(
self,
access_token: str,
device: str,
body: Union[JsonDict, bytes] = b"",
) -> FakeChannel:
"""Delete an individual device."""
channel = self.make_request(
"DELETE", "devices/" + device, body, access_token=access_token
)
return channel
| apache-2.0 | 640,461,721,356,082,600 | 40.028239 | 88 | 0.630633 | false |
zionist/landing | landing/apps/core/migrations/0001_initial.py | 1 | 2095 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Block',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),
('header', models.CharField(verbose_name='header', max_length=512)),
('content', models.CharField(verbose_name='content', max_length=1024)),
('block_image', models.ImageField(upload_to='', verbose_name='block_image')),
],
),
migrations.CreateModel(
name='EmailSettings',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),
('to', models.EmailField(verbose_name='to', max_length=1024)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, verbose_name='user')),
],
),
migrations.CreateModel(
name='LandingPage',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),
('header', models.CharField(verbose_name='header', max_length=2048)),
('content', models.TextField(verbose_name='content')),
('contacts', models.CharField(verbose_name='contacts', max_length=2048)),
('logo', models.ImageField(upload_to='', verbose_name='logo')),
('background', models.ImageField(upload_to='', verbose_name='background')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, verbose_name='user')),
],
),
migrations.AddField(
model_name='block',
name='landing_page',
field=models.ForeignKey(to='core.LandingPage'),
),
]
| gpl-3.0 | -2,710,787,936,435,316,700 | 41.755102 | 114 | 0.570883 | false |
tbpmig/mig-website | corporate/views.py | 1 | 17269 | from django.core.urlresolvers import reverse
from django.forms.models import modelformset_factory, modelform_factory
from django.http import HttpResponse
from django.shortcuts import redirect
from django.template import loader
from django_ajax.decorators import ajax
from corporate.auxiliary_scripts import update_resume_zips
from corporate.forms import AddContactForm, ContactFormSet
from corporate.models import CorporateTextField, CorporateResourceGuide
from corporate.models import CompanyContact, Company, JobField, CorporateEmail
from mig_main.utility import get_message_dict, Permissions
FORM_ERROR = 'Your submission contained errors, please correct and resubmit.'
def get_permissions(user):
permission_dict = {
'can_edit_corporate': Permissions.can_edit_corporate_page(user),
'can_add_contact': Permissions.can_add_corporate_contact(user),
'can_edit_contacts': Permissions.can_edit_corporate_page(user),
'can_add_company': Permissions.can_add_company(user),
}
return permission_dict
def get_common_context(request):
context_dict = get_message_dict(request)
contact_text = CorporateTextField.objects.filter(section='CT')
context_dict.update({
'request': request,
'contact_text': contact_text,
'main_nav': 'corporate',
})
return context_dict
def index(request):
request.session['current_page'] = request.path
template = loader.get_template('corporate/corporate.html')
involvement_text = CorporateTextField.objects.filter(section='OP')
context_dict = {
'involvement_text': involvement_text,
'subnav': 'index',
}
context_dict.update(get_common_context(request))
context_dict.update(get_permissions(request.user))
return HttpResponse(template.render(context_dict, request))
def resumes(request):
request.session['current_page'] = request.path
template = loader.get_template('corporate/resume_book.html')
context_dict = {
'by_major_zip': 'TBP_resumes_by_major.zip',
'by_year_zip': 'TBP_resumes_by_year.zip',
'subnav': 'resumes',
}
context_dict.update(get_common_context(request))
context_dict.update(get_permissions(request.user))
return HttpResponse(template.render(context_dict, request))
def update_corporate_page(request):
if not Permissions.can_edit_corporate_page(request.user):
request.session['error_message'] = 'You are not authorized to edit the corporate page'
return redirect('corporate:index')
prefix = 'corporate_page'
CorporateTextForm = modelformset_factory(CorporateTextField,
extra=1, exclude=[])
    formset = CorporateTextForm(request.POST or None, prefix=prefix)
if request.method == 'POST':
if formset.is_valid():
instances = formset.save()
request.session['success_message'] = 'Corporate page successfully updated.'
return redirect('corporate:index')
else:
request.session['error_message'] = FORM_ERROR
context_dict = {
'formset': formset,
'subnav': 'index',
'prefix': prefix,
'has_files': False,
'submit_name': 'Update Corporate Page',
'back_button': {'link': reverse('corporate:index'),
'text': 'To Corporate Page'},
'form_title': 'Edit Corporate Page Text',
'help_text': ('The text shown on the corporate main page. This text '
'uses markdown syntax.'),
'can_add_row': False,
'base': 'corporate/base_corporate.html',
}
context_dict.update(get_common_context(request))
context_dict.update(get_permissions(request.user))
template = loader.get_template('generic_formset.html')
return HttpResponse(template.render(context_dict, request))
def update_resource_guide(request):
if not Permissions.can_edit_corporate_page(request.user):
request.session['error_message'] = 'You are not authorized to edit the corporate page'
return redirect('corporate:index')
ResourceGuideForm = modelform_factory(CorporateResourceGuide, exclude=('active',))
if request.method == 'POST':
form = ResourceGuideForm(request.POST, request.FILES)
if form.is_valid():
instance = form.save(commit=False)
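            # Only one resource guide may be active at a time: deactivate any
            # previously active guides, mark the new upload active, and rebuild
            # the resume zip archives to include it.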
previously_active_guides = CorporateResourceGuide.objects.filter(active=True)
for guide in previously_active_guides:
guide.active = False
guide.save()
instance.active = True
instance.save()
update_resume_zips()
request.session['success_message'] = 'Corporate resource guide successfully updated.'
return redirect('corporate:index')
else:
request.session['error_message'] = FORM_ERROR
else:
form = ResourceGuideForm()
context_dict = {
'form': form,
'subnav': 'index',
'has_files': True,
'submit_name': 'Update Corporate Resource Guide',
'back_button': {'link': reverse('corporate:index'),
'text': 'To Corporate Page'},
'form_title': 'Edit Corporate Resource Guide',
        'help_text': ('This guide is included in the resume zip files. Update '
'it when the information (or the officer) changes.'),
'base': 'corporate/base_corporate.html',
}
context_dict.update(get_common_context(request))
context_dict.update(get_permissions(request.user))
template = loader.get_template('generic_form.html')
return HttpResponse(template.render(context_dict, request))
def add_company_contact(request):
if not Permissions.can_add_corporate_contact(request.user):
request.session['error_message'] = 'You are not authorized to add company contacts'
return redirect('corporate:index')
prefix = 'corporate_page'
can_edit = Permissions.can_edit_corporate_page(request.user)
    form = AddContactForm(request.POST or None, prefix=prefix, can_edit=can_edit)
if request.method == 'POST':
if form.is_valid():
if form.is_overdetermined():
                request.session['warning_message'] = 'Name, email, phone, bio, and chapter are ignored when a profile is provided.'
instance = form.save()
request.session['success_message'] = 'Corporate contact successfully added.'
return redirect('corporate:index')
else:
request.session['error_message'] = FORM_ERROR
help_text = 'Add a contact to the company contacts database.'
if not can_edit:
help_text = help_text + (' Note: you are adding a suggested contact; '
'they will not be emailed unless approved by '
'the Corporate Relations Officer.')
context_dict = {
'form': form,
'subnav': 'index',
'prefix': prefix,
'has_files': False,
'submit_name': 'Add company contact',
'back_button': {'link': reverse('corporate:index'),
'text': 'To Corporate Page'},
'form_title': 'Add company contact',
'help_text': help_text,
'base': 'corporate/base_corporate.html',
}
context_dict.update(get_common_context(request))
context_dict.update(get_permissions(request.user))
template = loader.get_template('generic_form.html')
return HttpResponse(template.render(context_dict, request))
def edit_company_contacts(request):
if not Permissions.can_edit_corporate_page(request.user):
request.session['error_message'] = 'You are not authorized to add company contacts'
return redirect('corporate:index')
prefix = 'corporate_page'
    formset = ContactFormSet(request.POST or None, prefix=prefix, initial=CompanyContact.get_contacts())
if request.method == 'POST':
if formset.is_valid():
overdetermined = formset.save()
if overdetermined:
                request.session['warning_message'] = 'Name, email, phone, bio, and chapter are ignored when a profile is provided.'
            request.session['success_message'] = 'Corporate contacts successfully updated.'
return redirect('corporate:index')
else:
request.session['error_message'] = FORM_ERROR
context_dict = {
'formset': formset,
'subnav': 'index',
'prefix': prefix,
'has_files': False,
'submit_name': 'Update company contacts',
'back_button': {'link': reverse('corporate:index'),
'text': 'To Corporate Page'},
'form_title': 'Edit company contacts',
'help_text': ('Edit the list of company contacts. '
'Contact info is ignored if a profile is provided.'),
        'can_add_row': True,
'base': 'corporate/base_corporate.html',
}
context_dict.update(get_common_context(request))
context_dict.update(get_permissions(request.user))
template = loader.get_template('generic_formset.html')
return HttpResponse(template.render(context_dict, request))
def add_company(request):
if not Permissions.can_add_company(request.user):
request.session['error_message'] = 'You are not authorized to add companies'
return redirect('corporate:index')
prefix = 'corporate_page'
AddCompanyForm = modelform_factory(Company, exclude=[])
    form = AddCompanyForm(request.POST or None, prefix=prefix)
if request.method == 'POST':
if form.is_valid():
instance = form.save()
request.session['success_message'] = 'Company successfully added.'
return redirect('corporate:index')
else:
request.session['error_message'] = FORM_ERROR
context_dict = {
'form': form,
'subnav': 'index',
'prefix': prefix,
'has_files': False,
'submit_name': 'Add company',
'back_button': {'link': reverse('corporate:index'),
'text': 'To Corporate Page'},
'form_title': 'Add company',
'help_text': ('Add company information. If the appropriate industry '
                      'is not present, you need to add that first.'),
'base': 'corporate/base_corporate.html',
}
context_dict.update(get_common_context(request))
context_dict.update(get_permissions(request.user))
template = loader.get_template('generic_form.html')
return HttpResponse(template.render(context_dict, request))
def add_jobfield(request):
if not Permissions.can_add_company(request.user):
request.session['error_message'] = 'You are not authorized to add industries'
return redirect('corporate:index')
prefix = 'corporate_page'
AddIndustryForm = modelform_factory(JobField, exclude=[])
    form = AddIndustryForm(request.POST or None, prefix=prefix)
if request.method == 'POST':
if form.is_valid():
instance = form.save()
request.session['success_message'] = 'Industry successfully added.'
return redirect('corporate:index')
else:
request.session['error_message'] = FORM_ERROR
context_dict = {
'form': form,
'subnav': 'index',
'prefix': prefix,
'has_files': False,
'submit_name': 'Add industry',
'back_button': {'link': reverse('corporate:index'),
'text': 'To Corporate Page'},
'form_title': 'Add industry',
'help_text': ('Add industry information. Select all relevant majors.'),
'base': 'corporate/base_corporate.html',
}
context_dict.update(get_common_context(request))
context_dict.update(get_permissions(request.user))
template = loader.get_template('generic_form.html')
return HttpResponse(template.render(context_dict, request))
def view_company_contacts(request):
if not Permissions.can_edit_corporate_page(request.user):
request.session['error_message'] = 'You are not authorized to view company contacts'
return redirect('corporate:index')
context_dict = {
'contacts': CompanyContact.get_contacts(),
'subnav': 'index',
'base': 'corporate/base_corporate.html',
}
context_dict.update(get_common_context(request))
context_dict.update(get_permissions(request.user))
template = loader.get_template('corporate/contacts_table.html')
return HttpResponse(template.render(context_dict, request))
def view_and_send_email(request):
if not Permissions.can_edit_corporate_page(request.user):
request.session['error_message'] = 'You are not authorized to email companies'
return redirect('corporate:index')
existing_email = CorporateEmail.objects.filter(active=True)
if existing_email.exists():
existing_email = existing_email[0]
else:
request.session['error_message'] = 'No email specified'
return redirect('corporate:index')
contacts = CompanyContact.get_contacts(gets_email=True)
context_dict = {
'contacts': contacts,
        'email': existing_email.preview_email(),
        'mig_alum_email': existing_email.preview_email(mig_alum=True),
        'other_alum_email': existing_email.preview_email(other_alum=True),
        'previous_contact_email': existing_email.preview_email(previous_contact=True),
        'personal_contact_email': existing_email.preview_email(personal_contact=True),
'subnav': 'index',
'base': 'corporate/base_corporate.html',
}
context_dict.update(get_common_context(request))
context_dict.update(get_permissions(request.user))
template = loader.get_template('corporate/view_and_send_email.html')
return HttpResponse(template.render(context_dict, request))
@ajax
def send_corporate_email(request):
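    # AJAX endpoint: instead of redirecting, return a fragment that replaces
    # the #ajax-message element with a success or error alert.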
if not Permissions.can_edit_corporate_page(request.user):
request.session['error_message'] = 'You are not authorized to email companies'
return {'fragments':{'#ajax-message':r'''<div id="ajax-message" class="alert alert-danger">
<button type="button" class="close" data-dismiss="alert">×</button>
<strong>Error:</strong>%s</div>'''%(request.session.pop('error_message'))}}
existing_email = CorporateEmail.objects.filter(active=True)
if existing_email.exists():
existing_email[0].send_corporate_email()
        request.session['success_message'] = 'Companies successfully emailed'
return {'fragments':{'#ajax-message':r'''<div id="ajax-message" class="alert alert-success">
<button type="button" class="close" data-dismiss="alert">×</button>
<strong>Success:</strong>%s</div>'''%(request.session.pop('success_message'))}}
else:
request.session['error_message'] = 'Company email text does not exist'
return {'fragments':{'#ajax-message':r'''<div id="ajax-message" class="alert alert-danger">
<button type="button" class="close" data-dismiss="alert">×</button>
<strong>Error:</strong>%s</div>'''%(request.session.pop('error_message'))}}
def update_corporate_email(request):
if not Permissions.can_edit_corporate_page(request.user):
request.session['error_message'] = 'You are not authorized to email companies'
return redirect('corporate:index')
prefix = 'corporate_email'
existing_email = CorporateEmail.objects.filter(active=True)
UpdateEmailForm = modelform_factory(CorporateEmail, exclude=[])
if existing_email.exists():
        form = UpdateEmailForm(request.POST or None, prefix=prefix, instance=existing_email[0])
else:
        form = UpdateEmailForm(request.POST or None, prefix=prefix)
if request.method == 'POST':
if form.is_valid():
instance = form.save(commit=False)
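            # Clear the primary key so the edited text is saved as a new row
            # (preserving the previous version); the old active email is then
            # deactivated below.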
            instance.id = None
            instance.pk = None
            instance.save()
            if existing_email.exists():
                ex = existing_email[0]
                ex.active = False
                ex.save()
request.session['success_message'] = 'Company email successfully updated.'
return redirect('corporate:index')
else:
request.session['error_message'] = FORM_ERROR
context_dict = {
'form': form,
'subnav': 'index',
'prefix': prefix,
'has_files': False,
'submit_name': 'Update corporate email',
'back_button': {'link': reverse('corporate:index'),
'text': 'To Corporate Page'},
'form_title': 'Update corporate email',
        'help_text': ('Update the email sent to companies to encourage their '
                      'participation in TBP corporate events.\n\nUse '
                      '{{company_name}} in the subject line as a placeholder '
                      'and {{extra_text}} in the body as a placeholder for the '
                      'extra text to members or personal contacts.'),
'base': 'corporate/base_corporate.html',
}
context_dict.update(get_common_context(request))
context_dict.update(get_permissions(request.user))
template = loader.get_template('generic_form.html')
return HttpResponse(template.render(context_dict, request))
| apache-2.0 | -8,681,007,862,481,104,000 | 43.622739 | 128 | 0.642423 | false |
Spiderlover/Toontown | toontown/battle/MovieDrop.py | 1 | 17995 | from direct.interval.IntervalGlobal import *
from BattleBase import *
from BattleProps import *
from BattleSounds import *
import MovieCamera
from direct.directnotify import DirectNotifyGlobal
import MovieUtil
import MovieNPCSOS
from MovieUtil import calcAvgSuitPos
from direct.showutil import Effects
notify = DirectNotifyGlobal.directNotify.newCategory('MovieDrop')
hitSoundFiles = ('AA_drop_flowerpot.ogg', 'AA_drop_sandbag.ogg', 'AA_drop_anvil.ogg', 'AA_drop_bigweight.ogg', 'AA_drop_safe.ogg', 'AA_drop_piano.ogg', 'AA_drop_boat.ogg')
missSoundFiles = ('AA_drop_flowerpot_miss.ogg', 'AA_drop_sandbag_miss.ogg', 'AA_drop_anvil_miss.ogg', 'AA_drop_bigweight_miss.ogg', 'AA_drop_safe_miss.ogg', 'AA_drop_piano_miss.ogg', 'AA_drop_boat_miss.ogg')
tDropShadow = 1.3
tSuitDodges = 2.45 + tDropShadow
tObjectAppears = 3.0 + tDropShadow
tButtonPressed = 2.44
dShrink = 0.3
dShrinkOnMiss = 0.1
dPropFall = 0.6
objects = ('flowerpot', 'sandbag', 'anvil', 'weight', 'safe', 'piano', 'ship')
objZOffsets = (0.75, 0.75, 0.0, 0.0, 0.0, 0.0, 0.0)
objStartingScales = (1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
landFrames = (12, 4, 1, 11, 11, 11, 2)
shoulderHeights = {'a': 13.28 / 4.0,
'b': 13.74 / 4.0,
'c': 10.02 / 4.0}
def doDrops(drops):
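    # Build per-suit lists of single-target drops plus a separate list of
    # group drops (attacks that affect every suit), then assemble the drop
    # animation track and a matching camera track.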
if len(drops) == 0:
return (None, None)
npcArrivals, npcDepartures, npcs = MovieNPCSOS.doNPCTeleports(drops)
suitDropsDict = {}
groupDrops = []
for drop in drops:
track = drop['track']
level = drop['level']
targets = drop['target']
if len(targets) == 1:
suitId = targets[0]['suit'].doId
if suitId in suitDropsDict:
suitDropsDict[suitId].append((drop, targets[0]))
else:
suitDropsDict[suitId] = [(drop, targets[0])]
elif level <= MAX_LEVEL_INDEX and attackAffectsGroup(track, level):
groupDrops.append(drop)
else:
for target in targets:
suitId = target['suit'].doId
if suitId in suitDropsDict:
otherDrops = suitDropsDict[suitId]
alreadyInList = 0
for oDrop in otherDrops:
if oDrop[0]['toon'] == drop['toon']:
alreadyInList = 1
if alreadyInList == 0:
suitDropsDict[suitId].append((drop, target))
else:
suitDropsDict[suitId] = [(drop, target)]
suitDrops = suitDropsDict.values()
def compFunc(a, b):
if len(a) > len(b):
return 1
elif len(a) < len(b):
return -1
return 0
suitDrops.sort(compFunc)
delay = 0.0
mtrack = Parallel(name='toplevel-drop')
npcDrops = {}
for st in suitDrops:
if len(st) > 0:
ival = __doSuitDrops(st, npcs, npcDrops)
if ival:
mtrack.append(Sequence(Wait(delay), ival))
delay = delay + TOON_DROP_SUIT_DELAY
dropTrack = Sequence(npcArrivals, mtrack, npcDepartures)
camDuration = mtrack.getDuration()
if groupDrops:
ival = __doGroupDrops(groupDrops)
dropTrack.append(ival)
camDuration += ival.getDuration()
enterDuration = npcArrivals.getDuration()
exitDuration = npcDepartures.getDuration()
camTrack = MovieCamera.chooseDropShot(drops, suitDropsDict, camDuration, enterDuration, exitDuration)
return (dropTrack, camTrack)
def __getSoundTrack(level, hitSuit, node = None):
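    # Assemble the audio: trigger-button click, a falling whistle (skipped
    # for the top-level gag), then the hit or miss impact sound. The
    # top-level gag instead plays its impact sound on a parallel track.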
if hitSuit:
soundEffect = globalBattleSoundCache.getSound(hitSoundFiles[level])
else:
soundEffect = globalBattleSoundCache.getSound(missSoundFiles[level])
soundTrack = Sequence()
if soundEffect:
buttonSound = globalBattleSoundCache.getSound('AA_drop_trigger_box.ogg')
fallingSound = None
buttonDelay = tButtonPressed - 0.3
fallingDuration = 1.5
if not level == UBER_GAG_LEVEL_INDEX:
fallingSound = globalBattleSoundCache.getSound('incoming_whistleALT.ogg')
soundTrack.append(Wait(buttonDelay))
soundTrack.append(SoundInterval(buttonSound, duration=0.67, node=node))
if fallingSound:
soundTrack.append(SoundInterval(fallingSound, duration=fallingDuration, node=node))
if not level == UBER_GAG_LEVEL_INDEX:
soundTrack.append(SoundInterval(soundEffect, node=node))
if level == UBER_GAG_LEVEL_INDEX:
if hitSuit:
uberDelay = tButtonPressed
else:
uberDelay = tButtonPressed - 0.1
oldSoundTrack = soundTrack
soundTrack = Parallel()
soundTrack.append(oldSoundTrack)
uberTrack = Sequence()
uberTrack.append(Wait(uberDelay))
uberTrack.append(SoundInterval(soundEffect, node=node))
soundTrack.append(uberTrack)
else:
soundTrack.append(Wait(0.1))
return soundTrack
def __doSuitDrops(dropTargetPairs, npcs, npcDrops):
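    # One track per drop aimed at this suit, staggered by TOON_DROP_DELAY.
    # alreadyDodged/alreadyTeased ensure the dodge or tease reaction is only
    # generated for the first miss.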
toonTracks = Parallel()
delay = 0.0
alreadyDodged = 0
alreadyTeased = 0
for dropTargetPair in dropTargetPairs:
drop = dropTargetPair[0]
level = drop['level']
objName = objects[level]
target = dropTargetPair[1]
track = __dropObjectForSingle(drop, delay, objName, level, alreadyDodged, alreadyTeased, npcs, target, npcDrops)
if track:
toonTracks.append(track)
delay += TOON_DROP_DELAY
hp = target['hp']
if hp <= 0:
if level >= 3:
alreadyTeased = 1
else:
alreadyDodged = 1
return toonTracks
def __doGroupDrops(groupDrops):
toonTracks = Parallel()
delay = 0.0
alreadyDodged = 0
alreadyTeased = 0
for drop in groupDrops:
battle = drop['battle']
level = drop['level']
centerPos = calcAvgSuitPos(drop)
targets = drop['target']
numTargets = len(targets)
closestTarget = -1
nearestDistance = 100000.0
for i in xrange(numTargets):
suit = drop['target'][i]['suit']
suitPos = suit.getPos(battle)
displacement = Vec3(centerPos)
displacement -= suitPos
distance = displacement.lengthSquared()
if distance < nearestDistance:
closestTarget = i
nearestDistance = distance
track = __dropGroupObject(drop, delay, closestTarget, alreadyDodged, alreadyTeased)
if track:
toonTracks.append(track)
delay = delay + TOON_DROP_SUIT_DELAY
hp = drop['target'][closestTarget]['hp']
if hp <= 0:
if level >= 3:
alreadyTeased = 1
else:
alreadyDodged = 1
return toonTracks
def __dropGroupObject(drop, delay, closestTarget, alreadyDodged, alreadyTeased):
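    # Animate the object falling on the chosen (closest-to-center) suit and
    # append a reaction track for every suit targeted by the group attack.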
level = drop['level']
objName = objects[level]
target = drop['target'][closestTarget]
suit = drop['target'][closestTarget]['suit']
npcDrops = {}
npcs = []
returnedParallel = __dropObject(drop, delay, objName, level, alreadyDodged, alreadyTeased, npcs, target, npcDrops)
for i in xrange(len(drop['target'])):
target = drop['target'][i]
suitTrack = __createSuitTrack(drop, delay, level, alreadyDodged, alreadyTeased, target, npcs)
if suitTrack:
returnedParallel.append(suitTrack)
return returnedParallel
def __dropObjectForSingle(drop, delay, objName, level, alreadyDodged, alreadyTeased, npcs, target, npcDrops):
singleDropParallel = __dropObject(drop, delay, objName, level, alreadyDodged, alreadyTeased, npcs, target, npcDrops)
suitTrack = __createSuitTrack(drop, delay, level, alreadyDodged, alreadyTeased, target, npcs)
if suitTrack:
singleDropParallel.append(suitTrack)
return singleDropParallel
def __dropObject(drop, delay, objName, level, alreadyDodged, alreadyTeased, npcs, target, npcDrops):
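    # Build the parallel tracks for a single drop: the toon pressing the
    # button, the sound effects, the button prop scaling in and out, the
    # falling object, and the drop shadow that grows beneath the suit.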
toon = drop['toon']
repeatNPC = 0
battle = drop['battle']
if 'npc' in drop:
toon = drop['npc']
if toon in npcDrops:
repeatNPC = 1
else:
npcDrops[toon] = 1
origHpr = Vec3(0, 0, 0)
else:
origHpr = toon.getHpr(battle)
hpbonus = drop['hpbonus']
suit = target['suit']
hp = target['hp']
hitSuit = hp > 0
died = target['died']
leftSuits = target['leftSuits']
rightSuits = target['rightSuits']
kbbonus = target['kbbonus']
suitPos = suit.getPos(battle)
majorObject = level >= 3
if repeatNPC == 0:
button = globalPropPool.getProp('button')
buttonType = globalPropPool.getPropType('button')
button2 = MovieUtil.copyProp(button)
buttons = [button, button2]
hands = toon.getLeftHands()
object = globalPropPool.getProp(objName)
objectType = globalPropPool.getPropType(objName)
if objName == 'weight':
object.setScale(object.getScale() * 0.75)
elif objName == 'safe':
object.setScale(object.getScale() * 0.85)
node = object.node()
node.setBounds(OmniBoundingVolume())
node.setFinal(1)
soundTrack = __getSoundTrack(level, hitSuit, toon)
toonTrack = Sequence()
if repeatNPC == 0:
toonFace = Func(toon.headsUp, battle, suitPos)
toonTrack.append(Wait(delay))
toonTrack.append(toonFace)
toonTrack.append(ActorInterval(toon, 'pushbutton'))
toonTrack.append(Func(toon.loop, 'neutral'))
toonTrack.append(Func(toon.setHpr, battle, origHpr))
buttonTrack = Sequence()
if repeatNPC == 0:
buttonShow = Func(MovieUtil.showProps, buttons, hands)
buttonScaleUp = LerpScaleInterval(button, 1.0, button.getScale(), startScale=Point3(0.01, 0.01, 0.01))
buttonScaleDown = LerpScaleInterval(button, 1.0, Point3(0.01, 0.01, 0.01), startScale=button.getScale())
buttonHide = Func(MovieUtil.removeProps, buttons)
buttonTrack.append(Wait(delay))
buttonTrack.append(buttonShow)
buttonTrack.append(buttonScaleUp)
buttonTrack.append(Wait(2.5))
buttonTrack.append(buttonScaleDown)
buttonTrack.append(buttonHide)
objectTrack = Sequence()
def posObject(object, suit, level, majorObject, miss, battle = battle):
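        # Place the prop at the suit's position (or at its lure spot if the
        # suit is lured), pushing heavy objects forward on a miss and lifting
        # small objects to shoulder height on a hit.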
object.reparentTo(battle)
if battle.isSuitLured(suit):
suitPos, suitHpr = battle.getActorPosHpr(suit)
object.setPos(suitPos)
object.setHpr(suitHpr)
if level >= 3:
object.setY(object.getY() + 2)
else:
object.setPos(suit.getPos(battle))
object.setHpr(suit.getHpr(battle))
if miss and level >= 3:
object.setY(object.getY(battle) + 5)
if not majorObject:
if not miss:
shoulderHeight = shoulderHeights[suit.style.body] * suit.scale
object.setZ(object.getPos(battle)[2] + shoulderHeight)
object.setZ(object.getPos(battle)[2] + objZOffsets[level])
objectTrack.append(Func(battle.movie.needRestoreRenderProp, object))
objInit = Func(posObject, object, suit, level, majorObject, hp <= 0)
objectTrack.append(Wait(delay + tObjectAppears))
objectTrack.append(objInit)
if hp > 0 or level == 1 or level == 2:
if hasattr(object, 'getAnimControls'):
animProp = ActorInterval(object, objName)
shrinkProp = LerpScaleInterval(object, dShrink, Point3(0.01, 0.01, 0.01), startScale=object.getScale())
objAnimShrink = ParallelEndTogether(animProp, shrinkProp)
objectTrack.append(objAnimShrink)
else:
startingScale = objStartingScales[level]
object2 = MovieUtil.copyProp(object)
posObject(object2, suit, level, majorObject, hp <= 0)
endingPos = object2.getPos()
startPos = Point3(endingPos[0], endingPos[1], endingPos[2] + 5)
startHpr = object2.getHpr()
endHpr = Point3(startHpr[0] + 90, startHpr[1], startHpr[2])
animProp = LerpPosInterval(object, landFrames[level] / 24.0, endingPos, startPos=startPos)
shrinkProp = LerpScaleInterval(object, dShrink, Point3(0.01, 0.01, 0.01), startScale=startingScale)
bounceProp = Effects.createZBounce(object, 2, endingPos, 0.5, 1.5)
objAnimShrink = Sequence(Func(object.setScale, startingScale), Func(object.setH, endHpr[0]), animProp, bounceProp, Wait(1.5), shrinkProp)
objectTrack.append(objAnimShrink)
MovieUtil.removeProp(object2)
elif hasattr(object, 'getAnimControls'):
animProp = ActorInterval(object, objName, duration=landFrames[level] / 24.0)
def poseProp(prop, animName, level):
prop.pose(animName, landFrames[level])
poseProp = Func(poseProp, object, objName, level)
wait = Wait(1.0)
shrinkProp = LerpScaleInterval(object, dShrinkOnMiss, Point3(0.01, 0.01, 0.01), startScale=object.getScale())
objectTrack.append(animProp)
objectTrack.append(poseProp)
objectTrack.append(wait)
objectTrack.append(shrinkProp)
else:
startingScale = objStartingScales[level]
object2 = MovieUtil.copyProp(object)
posObject(object2, suit, level, majorObject, hp <= 0)
endingPos = object2.getPos()
startPos = Point3(endingPos[0], endingPos[1], endingPos[2] + 5)
startHpr = object2.getHpr()
endHpr = Point3(startHpr[0] + 90, startHpr[1], startHpr[2])
animProp = LerpPosInterval(object, landFrames[level] / 24.0, endingPos, startPos=startPos)
shrinkProp = LerpScaleInterval(object, dShrinkOnMiss, Point3(0.01, 0.01, 0.01), startScale=startingScale)
bounceProp = Effects.createZBounce(object, 2, endingPos, 0.5, 1.5)
objAnimShrink = Sequence(Func(object.setScale, startingScale), Func(object.setH, endHpr[0]), animProp, bounceProp, Wait(1.5), shrinkProp)
objectTrack.append(objAnimShrink)
MovieUtil.removeProp(object2)
objectTrack.append(Func(MovieUtil.removeProp, object))
objectTrack.append(Func(battle.movie.clearRenderProp, object))
dropShadow = MovieUtil.copyProp(suit.getShadowJoint())
if level == 0:
dropShadow.setScale(0.5)
elif level <= 2:
dropShadow.setScale(0.8)
elif level == 3:
dropShadow.setScale(2.0)
elif level == 4:
dropShadow.setScale(2.3)
else:
dropShadow.setScale(3.6)
def posShadow(dropShadow = dropShadow, suit = suit, battle = battle, hp = hp, level = level):
dropShadow.reparentTo(battle)
if battle.isSuitLured(suit):
suitPos, suitHpr = battle.getActorPosHpr(suit)
dropShadow.setPos(suitPos)
dropShadow.setHpr(suitHpr)
if level >= 3:
dropShadow.setY(dropShadow.getY() + 2)
else:
dropShadow.setPos(suit.getPos(battle))
dropShadow.setHpr(suit.getHpr(battle))
if hp <= 0 and level >= 3:
dropShadow.setY(dropShadow.getY(battle) + 5)
dropShadow.setZ(dropShadow.getZ() + 0.5)
    shadowTrack = Sequence(
        Wait(delay + tButtonPressed),
        Func(battle.movie.needRestoreRenderProp, dropShadow),
        Func(posShadow),
        LerpScaleInterval(dropShadow, tObjectAppears - tButtonPressed, dropShadow.getScale(),
                          startScale=Point3(0.01, 0.01, 0.01)),
        Wait(0.3),
        Func(MovieUtil.removeProp, dropShadow),
        Func(battle.movie.clearRenderProp, dropShadow))
return Parallel(toonTrack, soundTrack, buttonTrack, objectTrack, shadowTrack)
def __createSuitTrack(drop, delay, level, alreadyDodged, alreadyTeased, target, npcs):
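    # Build the suit's reaction track: damage text and a hit animation (with
    # death/revive handling) on a hit, a "missed" indicator, or a dodge/tease
    # reaction depending on the knockback-bonus flag.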
toon = drop['toon']
if 'npc' in drop:
toon = drop['npc']
battle = drop['battle']
majorObject = level >= 3
suit = target['suit']
hp = target['hp']
hitSuit = hp > 0
died = target['died']
revived = target['revived']
leftSuits = target['leftSuits']
rightSuits = target['rightSuits']
kbbonus = target['kbbonus']
hpbonus = drop['hpbonus']
if hp > 0:
suitTrack = Sequence()
showDamage = Func(suit.showHpText, -hp, openEnded=0)
updateHealthBar = Func(suit.updateHealthBar, hp)
if majorObject:
anim = 'flatten'
else:
anim = 'drop-react'
suitReact = ActorInterval(suit, anim)
suitTrack.append(Wait(delay + tObjectAppears))
suitTrack.append(showDamage)
suitTrack.append(updateHealthBar)
suitGettingHit = Parallel(suitReact)
if level == UBER_GAG_LEVEL_INDEX:
gotHitSound = globalBattleSoundCache.getSound('AA_drop_boat_cog.ogg')
suitGettingHit.append(SoundInterval(gotHitSound, node=toon))
suitTrack.append(suitGettingHit)
bonusTrack = None
if hpbonus > 0:
bonusTrack = Sequence(Wait(delay + tObjectAppears + 0.75), Func(suit.showHpText, -hpbonus, 1, openEnded=0), Func(suit.updateHealthBar, hpbonus))
if revived != 0:
suitTrack.append(MovieUtil.createSuitReviveTrack(suit, toon, battle, npcs))
elif died != 0:
suitTrack.append(MovieUtil.createSuitDeathTrack(suit, toon, battle, npcs))
else:
suitTrack.append(Func(suit.loop, 'neutral'))
if bonusTrack != None:
suitTrack = Parallel(suitTrack, bonusTrack)
elif kbbonus == 0:
suitTrack = Sequence(Wait(delay + tObjectAppears), Func(MovieUtil.indicateMissed, suit, 0.6), Func(suit.loop, 'neutral'))
else:
if alreadyDodged > 0:
return
if level >= 3:
if alreadyTeased > 0:
return
else:
suitTrack = MovieUtil.createSuitTeaseMultiTrack(suit, delay=delay + tObjectAppears)
else:
suitTrack = MovieUtil.createSuitDodgeMultitrack(delay + tSuitDodges, suit, leftSuits, rightSuits)
return suitTrack
| mit | 155,016,294,914,021,400 | 40.367816 | 350 | 0.62973 | false |
ESOedX/edx-platform | cms/djangoapps/contentstore/views/tests/test_transcripts.py | 1 | 43261 | """Tests for transcripts views."""
from __future__ import absolute_import
import copy
import json
import tempfile
import textwrap
from codecs import BOM_UTF8
from uuid import uuid4
import ddt
import six
from django.conf import settings
from django.test.utils import override_settings
from django.urls import reverse
from edxval.api import create_video
from mock import Mock, patch
from opaque_keys.edx.keys import UsageKey
from contentstore.tests.utils import CourseTestCase, mock_requests_get
from openedx.core.djangoapps.contentserver.caching import del_cached_content
from xmodule.contentstore.content import StaticContent
from xmodule.contentstore.django import contentstore
from xmodule.exceptions import NotFoundError
from xmodule.modulestore.django import modulestore
from xmodule.video_module import VideoBlock
from xmodule.video_module.transcripts_utils import (
GetTranscriptsFromYouTubeException,
Transcript,
get_video_transcript_content,
remove_subs_from_store
)
TEST_DATA_CONTENTSTORE = copy.deepcopy(settings.CONTENTSTORE)
TEST_DATA_CONTENTSTORE['DOC_STORE_CONFIG']['db'] = 'test_xcontent_%s' % uuid4().hex
SRT_TRANSCRIPT_CONTENT = b"""0
00:00:10,500 --> 00:00:13,000
Elephant's Dream
1
00:00:15,000 --> 00:00:18,000
At the left we can see...
"""
SJSON_TRANSCRIPT_CONTENT = Transcript.convert(
SRT_TRANSCRIPT_CONTENT,
Transcript.SRT,
Transcript.SJSON,
)
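# The SJSON content above is the SRT sample converted into edX's JSON-based
# transcript format via Transcript.convert().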
@override_settings(CONTENTSTORE=TEST_DATA_CONTENTSTORE)
class BaseTranscripts(CourseTestCase):
"""Base test class for transcripts tests."""
def clear_subs_content(self):
"""Remove, if transcripts content exists."""
for youtube_id in self.get_youtube_ids().values():
filename = 'subs_{0}.srt.sjson'.format(youtube_id)
content_location = StaticContent.compute_location(self.course.id, filename)
try:
content = contentstore().find(content_location)
contentstore().delete(content.get_id())
except NotFoundError:
pass
def save_subs_to_store(self, subs, subs_id):
"""
Save transcripts into `StaticContent`.
"""
filedata = json.dumps(subs, indent=2)
mime_type = 'application/json'
filename = 'subs_{0}.srt.sjson'.format(subs_id)
content_location = StaticContent.compute_location(self.course.id, filename)
content = StaticContent(content_location, filename, mime_type, filedata)
contentstore().save(content)
del_cached_content(content_location)
return content_location
def setUp(self):
"""Create initial data."""
super(BaseTranscripts, self).setUp()
# Add video module
data = {
'parent_locator': six.text_type(self.course.location),
'category': 'video',
'type': 'video'
}
resp = self.client.ajax_post('/xblock/', data)
self.assertEqual(resp.status_code, 200)
self.video_usage_key = self._get_usage_key(resp)
self.item = modulestore().get_item(self.video_usage_key)
# hI10vDNYz4M - valid Youtube ID with transcripts.
# JMD_ifUUfsU, AKqURZnYqpk, DYpADpL7jAY - valid Youtube IDs without transcripts.
self.set_fields_from_xml(
self.item, '<video youtube="0.75:JMD_ifUUfsU,1.0:hI10vDNYz4M,1.25:AKqURZnYqpk,1.50:DYpADpL7jAY" />'
)
modulestore().update_item(self.item, self.user.id)
self.item = modulestore().get_item(self.video_usage_key)
# Remove all transcripts for current module.
self.clear_subs_content()
def _get_usage_key(self, resp):
""" Returns the usage key from the response returned by a create operation. """
usage_key_string = json.loads(resp.content.decode('utf-8')).get('locator')
return UsageKey.from_string(usage_key_string)
def get_youtube_ids(self):
"""Return youtube speeds and ids."""
item = modulestore().get_item(self.video_usage_key)
return {
0.75: item.youtube_id_0_75,
1: item.youtube_id_1_0,
1.25: item.youtube_id_1_25,
1.5: item.youtube_id_1_5
}
def create_non_video_module(self):
"""
        Set up a non-video module for tests.
"""
data = {
'parent_locator': six.text_type(self.course.location),
'category': 'non_video',
'type': 'non_video'
}
response = self.client.ajax_post('/xblock/', data)
usage_key = self._get_usage_key(response)
item = modulestore().get_item(usage_key)
self.set_fields_from_xml(self.item, '<non_video youtube="0.75:JMD_ifUUfsU,1.0:hI10vDNYz4M" />')
modulestore().update_item(item, self.user.id)
return usage_key
def assert_response(self, response, expected_status_code, expected_message):
response_content = json.loads(response.content.decode('utf-8'))
self.assertEqual(response.status_code, expected_status_code)
self.assertEqual(response_content['status'], expected_message)
def set_fields_from_xml(self, item, xml):
fields_data = VideoBlock.parse_video_xml(xml)
for key, value in fields_data.items():
setattr(item, key, value)
@ddt.ddt
class TestUploadTranscripts(BaseTranscripts):
"""
Tests for '/transcripts/upload' endpoint.
"""
def setUp(self):
super(TestUploadTranscripts, self).setUp()
self.contents = {
'good': SRT_TRANSCRIPT_CONTENT,
'bad': b'Some BAD data',
}
# Create temporary transcript files
self.good_srt_file = self.create_transcript_file(content=self.contents['good'], suffix='.srt')
self.bad_data_srt_file = self.create_transcript_file(content=self.contents['bad'], suffix='.srt')
self.bad_name_srt_file = self.create_transcript_file(content=self.contents['good'], suffix='.bad')
self.bom_srt_file = self.create_transcript_file(content=self.contents['good'], suffix='.srt', include_bom=True)
        # Set up a VEDA-produced video and persist `edx_video_id` in VAL.
create_video({
'edx_video_id': u'123-456-789',
'status': 'upload',
'client_video_id': u'Test Video',
'duration': 0,
'encoded_videos': [],
'courses': [six.text_type(self.course.id)]
})
# Add clean up handler
self.addCleanup(self.clean_temporary_transcripts)
def create_transcript_file(self, content, suffix, include_bom=False):
"""
        Set up a transcript file with the given suffix and content.
"""
transcript_file = tempfile.NamedTemporaryFile(suffix=suffix)
wrapped_content = textwrap.dedent(content.decode('utf-8'))
if include_bom:
wrapped_content = wrapped_content.encode('utf-8-sig')
# Verify that ufeff(BOM) character is in content.
self.assertIn(BOM_UTF8, wrapped_content)
transcript_file.write(wrapped_content)
else:
transcript_file.write(wrapped_content.encode('utf-8'))
transcript_file.seek(0)
return transcript_file
def clean_temporary_transcripts(self):
"""
Close transcript files gracefully.
"""
self.good_srt_file.close()
self.bad_data_srt_file.close()
self.bad_name_srt_file.close()
self.bom_srt_file.close()
def upload_transcript(self, locator, transcript_file, edx_video_id=None):
"""
Uploads a transcript for a video
"""
payload = {}
if locator:
payload.update({'locator': locator})
if edx_video_id is not None:
payload.update({'edx_video_id': edx_video_id})
if transcript_file:
payload.update({'transcript-file': transcript_file})
upload_url = reverse('upload_transcripts')
response = self.client.post(upload_url, payload)
return response
@ddt.data(
(u'123-456-789', False),
(u'', False),
(u'123-456-789', True)
)
@ddt.unpack
def test_transcript_upload_success(self, edx_video_id, include_bom):
"""
Tests transcript file upload to video component works as
expected in case of following:
1. External video component
2. VEDA produced video component
3. Transcript content containing BOM character
"""
# In case of an external video component, the `edx_video_id` must be empty
# and VEDA produced video component will have `edx_video_id` set to VAL video ID.
self.item.edx_video_id = edx_video_id
modulestore().update_item(self.item, self.user.id)
# Upload a transcript
transcript_file = self.bom_srt_file if include_bom else self.good_srt_file
response = self.upload_transcript(self.video_usage_key, transcript_file, '')
# Verify the response
self.assert_response(response, expected_status_code=200, expected_message='Success')
# Verify the `edx_video_id` on the video component
json_response = json.loads(response.content.decode('utf-8'))
expected_edx_video_id = edx_video_id if edx_video_id else json_response['edx_video_id']
video = modulestore().get_item(self.video_usage_key)
self.assertEqual(video.edx_video_id, expected_edx_video_id)
# Verify transcript content
actual_transcript = get_video_transcript_content(video.edx_video_id, language_code=u'en')
actual_sjson_content = json.loads(actual_transcript['content'])
expected_sjson_content = json.loads(Transcript.convert(
self.contents['good'],
input_format=Transcript.SRT,
output_format=Transcript.SJSON
))
self.assertDictEqual(actual_sjson_content, expected_sjson_content)
def test_transcript_upload_without_locator(self):
"""
Test that transcript upload validation fails if the video locator is missing
"""
response = self.upload_transcript(locator=None, transcript_file=self.good_srt_file, edx_video_id='')
self.assert_response(
response,
expected_status_code=400,
expected_message=u'Video locator is required.'
)
def test_transcript_upload_without_file(self):
"""
Test that transcript upload validation fails if transcript file is missing
"""
response = self.upload_transcript(locator=self.video_usage_key, transcript_file=None, edx_video_id='')
self.assert_response(
response,
expected_status_code=400,
expected_message=u'A transcript file is required.'
)
def test_transcript_upload_bad_format(self):
"""
Test that transcript upload validation fails if transcript format is not SRT
"""
response = self.upload_transcript(
locator=self.video_usage_key,
transcript_file=self.bad_name_srt_file,
edx_video_id=''
)
self.assert_response(
response,
expected_status_code=400,
expected_message=u'This transcript file type is not supported.'
)
def test_transcript_upload_bad_content(self):
"""
Test that transcript upload validation fails in case of bad transcript content.
"""
# Request to upload transcript for the video
response = self.upload_transcript(
locator=self.video_usage_key,
transcript_file=self.bad_data_srt_file,
edx_video_id=''
)
self.assert_response(
response,
expected_status_code=400,
expected_message=u'There is a problem with this transcript file. Try to upload a different file.'
)
def test_transcript_upload_unknown_category(self):
"""
Test that transcript upload validation fails if item's category is other than video.
"""
# non_video module setup - i.e. an item whose category is not 'video'.
usage_key = self.create_non_video_module()
# Request to upload transcript for the item
response = self.upload_transcript(locator=usage_key, transcript_file=self.good_srt_file, edx_video_id='')
self.assert_response(
response,
expected_status_code=400,
expected_message=u'Transcripts are supported only for "video" modules.'
)
def test_transcript_upload_non_existent_item(self):
"""
Test that transcript upload validation fails in case of invalid item's locator.
"""
# Request to upload transcript for the item
response = self.upload_transcript(
locator='non_existent_locator',
transcript_file=self.good_srt_file,
edx_video_id=''
)
self.assert_response(
response,
expected_status_code=400,
expected_message=u'Cannot find item by locator.'
)
def test_transcript_upload_without_edx_video_id(self):
"""
Test that transcript upload validation fails if the `edx_video_id` is missing
"""
response = self.upload_transcript(locator=self.video_usage_key, transcript_file=self.good_srt_file)
self.assert_response(
response,
expected_status_code=400,
expected_message=u'Video ID is required.'
)
    def test_transcript_upload_with_non_existent_edx_video_id(self):
        """
        Test that transcript upload works as expected if `edx_video_id` set on
        video descriptor is different from `edx_video_id` received in POST request.
        """
        non_existent_edx_video_id = '1111-2222-3333-4444'
        # Upload with non-existent `edx_video_id`
        response = self.upload_transcript(
            locator=self.video_usage_key,
            transcript_file=self.good_srt_file,
            edx_video_id=non_existent_edx_video_id
        )
        # Verify the response
        self.assert_response(response, expected_status_code=400, expected_message='Invalid Video ID')
        # Verify transcript does not exist for non-existent `edx_video_id`
        self.assertIsNone(get_video_transcript_content(non_existent_edx_video_id, language_code=u'en'))
@ddt.ddt
class TestChooseTranscripts(BaseTranscripts):
"""
Tests for '/transcripts/choose' endpoint.
"""
def setUp(self):
super(TestChooseTranscripts, self).setUp()
# Create test transcript in contentstore
self.chosen_html5_id = 'test_html5_subs'
self.sjson_subs = Transcript.convert(SRT_TRANSCRIPT_CONTENT, Transcript.SRT, Transcript.SJSON)
self.save_subs_to_store(json.loads(self.sjson_subs), self.chosen_html5_id)
        # Set up a VEDA-produced video and persist `edx_video_id` in VAL.
create_video({
'edx_video_id': u'123-456-789',
'status': 'upload',
'client_video_id': u'Test Video',
'duration': 0,
'encoded_videos': [],
'courses': [six.text_type(self.course.id)]
})
def choose_transcript(self, locator, chosen_html5_id):
"""
Make an endpoint call to choose transcript
"""
payload = {}
if locator:
payload.update({'locator': six.text_type(locator)})
if chosen_html5_id:
payload.update({'html5_id': chosen_html5_id})
choose_transcript_url = reverse('choose_transcripts')
response = self.client.get(choose_transcript_url, {'data': json.dumps(payload)})
return response
@ddt.data(u'123-456-789', u'')
def test_choose_transcript_success(self, edx_video_id):
"""
Verify that choosing transcript file in video component basic tab works as
expected in case of following:
1. External video component
2. VEDA produced video component
"""
# In case of an external video component, the `edx_video_id` must be empty
# and VEDA produced video component will have `edx_video_id` set to VAL video ID.
self.item.edx_video_id = edx_video_id
modulestore().update_item(self.item, self.user.id)
# Make call to choose a transcript
response = self.choose_transcript(self.video_usage_key, self.chosen_html5_id)
# Verify the response
self.assert_response(response, expected_status_code=200, expected_message='Success')
# Verify the `edx_video_id` on the video component
json_response = json.loads(response.content.decode('utf-8'))
expected_edx_video_id = edx_video_id if edx_video_id else json_response['edx_video_id']
video = modulestore().get_item(self.video_usage_key)
self.assertEqual(video.edx_video_id, expected_edx_video_id)
# Verify transcript content
actual_transcript = get_video_transcript_content(video.edx_video_id, language_code=u'en')
actual_sjson_content = json.loads(actual_transcript['content'])
expected_sjson_content = json.loads(self.sjson_subs)
self.assertDictEqual(actual_sjson_content, expected_sjson_content)
def test_choose_transcript_fails_without_data(self):
"""
Verify that choose transcript fails if we do not provide video data in request.
"""
response = self.choose_transcript(locator=None, chosen_html5_id=None)
self.assert_response(
response,
expected_status_code=400,
expected_message=u'Incoming video data is empty.'
)
def test_choose_transcript_fails_without_locator(self):
"""
Verify that choose transcript fails if video locator is missing in request.
"""
response = self.choose_transcript(locator=None, chosen_html5_id=self.chosen_html5_id)
self.assert_response(
response,
expected_status_code=400,
expected_message=u'Cannot find item by locator.'
)
def test_choose_transcript_with_no_html5_transcript(self):
"""
        Verify that choose transcript fails if the chosen html5 ID doesn't
have any transcript associated in contentstore.
"""
response = self.choose_transcript(locator=self.video_usage_key, chosen_html5_id='non-existent-html5-id')
self.assert_response(
response,
expected_status_code=400,
expected_message=u"No such transcript."
)
def test_choose_transcript_fails_on_unknown_category(self):
"""
Test that transcript choose validation fails if item's category is other than video.
"""
# non_video module setup - i.e. an item whose category is not 'video'.
usage_key = self.create_non_video_module()
# Request to choose transcript for the item
response = self.choose_transcript(locator=usage_key, chosen_html5_id=self.chosen_html5_id)
self.assert_response(
response,
expected_status_code=400,
expected_message=u'Transcripts are supported only for "video" modules.'
)
@ddt.ddt
class TestRenameTranscripts(BaseTranscripts):
"""
Tests for '/transcripts/rename' endpoint.
"""
def setUp(self):
super(TestRenameTranscripts, self).setUp()
# Create test transcript in contentstore and update item's sub.
self.item.sub = 'test_video_subs'
self.sjson_subs = Transcript.convert(SRT_TRANSCRIPT_CONTENT, Transcript.SRT, Transcript.SJSON)
self.save_subs_to_store(json.loads(self.sjson_subs), self.item.sub)
modulestore().update_item(self.item, self.user.id)
        # Set up a VEDA-produced video and persist `edx_video_id` in VAL.
create_video({
'edx_video_id': u'123-456-789',
'status': 'upload',
'client_video_id': u'Test Video',
'duration': 0,
'encoded_videos': [],
'courses': [six.text_type(self.course.id)]
})
def rename_transcript(self, locator):
"""
Make an endpoint call to rename transcripts.
"""
payload = {}
if locator:
payload.update({'locator': six.text_type(locator)})
rename_transcript_url = reverse('rename_transcripts')
response = self.client.get(rename_transcript_url, {'data': json.dumps(payload)})
return response
@ddt.data(u'123-456-789', u'')
def test_rename_transcript_success(self, edx_video_id):
"""
Verify that "use current transcript" in video component basic tab works as
expected in case of following:
1. External video component
2. VEDA produced video component
"""
# In case of an external video component, the `edx_video_id` must be empty
# and VEDA produced video component will have `edx_video_id` set to VAL video ID.
self.item.edx_video_id = edx_video_id
modulestore().update_item(self.item, self.user.id)
# Make call to use current transcript from contentstore
response = self.rename_transcript(self.video_usage_key)
# Verify the response
self.assert_response(response, expected_status_code=200, expected_message='Success')
# Verify the `edx_video_id` on the video component
json_response = json.loads(response.content.decode('utf-8'))
expected_edx_video_id = edx_video_id if edx_video_id else json_response['edx_video_id']
video = modulestore().get_item(self.video_usage_key)
self.assertEqual(video.edx_video_id, expected_edx_video_id)
# Verify transcript content
actual_transcript = get_video_transcript_content(video.edx_video_id, language_code=u'en')
actual_sjson_content = json.loads(actual_transcript['content'])
expected_sjson_content = json.loads(self.sjson_subs)
self.assertDictEqual(actual_sjson_content, expected_sjson_content)
def test_rename_transcript_fails_without_data(self):
"""
Verify that use current transcript fails if we do not provide video data in request.
"""
response = self.rename_transcript(locator=None)
self.assert_response(
response,
expected_status_code=400,
expected_message=u'Incoming video data is empty.'
)
def test_rename_transcript_fails_with_invalid_locator(self):
"""
        Verify that use current transcript fails if the video locator is invalid.
"""
response = self.rename_transcript(locator='non-existent-locator')
self.assert_response(
response,
expected_status_code=400,
expected_message=u'Cannot find item by locator.'
)
def test_rename_transcript_with_non_existent_sub(self):
"""
        Verify that rename transcript fails if the `item.sub` doesn't
have any transcript associated in contentstore.
"""
# Update item's sub to an id who does not have any
# transcript associated in contentstore.
self.item.sub = 'non-existent-sub'
modulestore().update_item(self.item, self.user.id)
response = self.rename_transcript(locator=self.video_usage_key)
self.assert_response(
response,
expected_status_code=400,
expected_message=u"No such transcript."
)
def test_rename_transcript_fails_on_unknown_category(self):
"""
Test that validation fails if item's category is other than video.
"""
# non_video module setup - i.e. an item whose category is not 'video'.
usage_key = self.create_non_video_module()
# Make call to use current transcript from contentstore.
response = self.rename_transcript(usage_key)
self.assert_response(
response,
expected_status_code=400,
expected_message=u'Transcripts are supported only for "video" modules.'
)
@ddt.ddt
@patch('contentstore.views.transcripts_ajax.download_youtube_subs', Mock(return_value=SJSON_TRANSCRIPT_CONTENT))
class TestReplaceTranscripts(BaseTranscripts):
"""
Tests for '/transcripts/replace' endpoint.
"""
def setUp(self):
super(TestReplaceTranscripts, self).setUp()
self.youtube_id = 'test_yt_id'
        # Set up a VEDA-produced video and persist `edx_video_id` in VAL.
create_video({
'edx_video_id': u'123-456-789',
'status': 'upload',
'client_video_id': u'Test Video',
'duration': 0,
'encoded_videos': [],
'courses': [six.text_type(self.course.id)]
})
def replace_transcript(self, locator, youtube_id):
"""
Make an endpoint call to replace transcripts with youtube ones.
"""
payload = {}
if locator:
payload.update({'locator': six.text_type(locator)})
if youtube_id:
payload.update({
'videos': [
{
'type': 'youtube',
'video': youtube_id
}
]
})
replace_transcript_url = reverse('replace_transcripts')
response = self.client.get(replace_transcript_url, {'data': json.dumps(payload)})
return response
@ddt.data(u'123-456-789', u'')
def test_replace_transcript_success(self, edx_video_id):
"""
Verify that "import from youtube" in video component basic tab works as
expected in case of following:
1. External video component
2. VEDA produced video component
"""
# In case of an external video component, the `edx_video_id` must be empty
# and VEDA produced video component will have `edx_video_id` set to VAL video ID.
self.item.edx_video_id = edx_video_id
modulestore().update_item(self.item, self.user.id)
# Make call to replace transcripts from youtube
response = self.replace_transcript(self.video_usage_key, self.youtube_id)
# Verify the response
self.assert_response(response, expected_status_code=200, expected_message='Success')
# Verify the `edx_video_id` on the video component
json_response = json.loads(response.content.decode('utf-8'))
expected_edx_video_id = edx_video_id if edx_video_id else json_response['edx_video_id']
video = modulestore().get_item(self.video_usage_key)
self.assertEqual(video.edx_video_id, expected_edx_video_id)
# Verify transcript content
actual_transcript = get_video_transcript_content(video.edx_video_id, language_code=u'en')
actual_sjson_content = json.loads(actual_transcript['content'])
expected_sjson_content = json.loads(SJSON_TRANSCRIPT_CONTENT)
self.assertDictEqual(actual_sjson_content, expected_sjson_content)
def test_replace_transcript_fails_without_data(self):
"""
Verify that replace transcript fails if we do not provide video data in request.
"""
response = self.replace_transcript(locator=None, youtube_id=None)
self.assert_response(
response,
expected_status_code=400,
expected_message=u'Incoming video data is empty.'
)
def test_replace_transcript_fails_with_invalid_locator(self):
"""
Verify that replace transcript fails if a video locator does not exist.
"""
response = self.replace_transcript(locator='non-existent-locator', youtube_id=self.youtube_id)
self.assert_response(
response,
expected_status_code=400,
expected_message=u'Cannot find item by locator.'
)
def test_replace_transcript_fails_without_yt_id(self):
"""
Verify that replace transcript fails if youtube id is not provided.
"""
response = self.replace_transcript(locator=self.video_usage_key, youtube_id=None)
self.assert_response(
response,
expected_status_code=400,
expected_message=u'YouTube ID is required.'
)
def test_replace_transcript_no_transcript_on_yt(self):
"""
        Verify that replace transcript fails if YouTube does not have a transcript for the given youtube id.
"""
error_message = u'YT ID not found.'
with patch('contentstore.views.transcripts_ajax.download_youtube_subs') as mock_download_youtube_subs:
mock_download_youtube_subs.side_effect = GetTranscriptsFromYouTubeException(error_message)
response = self.replace_transcript(locator=self.video_usage_key, youtube_id='non-existent-yt-id')
self.assertContains(response, text=error_message, status_code=400)
def test_replace_transcript_fails_on_unknown_category(self):
"""
Test that validation fails if item's category is other than video.
"""
# non_video module setup - i.e. an item whose category is not 'video'.
usage_key = self.create_non_video_module()
response = self.replace_transcript(usage_key, youtube_id=self.youtube_id)
self.assert_response(
response,
expected_status_code=400,
expected_message=u'Transcripts are supported only for "video" modules.'
)
class TestDownloadTranscripts(BaseTranscripts):
"""
Tests for '/transcripts/download' url.
"""
def update_video_component(self, sub=None, youtube_id=None):
"""
Updates video component with `sub` and `youtube_id`.
"""
sjson_transcript = json.loads(SJSON_TRANSCRIPT_CONTENT)
self.item.sub = sub
if sub:
self.save_subs_to_store(sjson_transcript, sub)
self.item.youtube_id_1_0 = youtube_id
if youtube_id:
self.save_subs_to_store(sjson_transcript, youtube_id)
modulestore().update_item(self.item, self.user.id)
def download_transcript(self, locator):
"""
Makes a call to download transcripts.
"""
payload = {}
if locator:
payload.update({'locator': six.text_type(locator)})
download_transcript_url = reverse('download_transcripts')
response = self.client.get(download_transcript_url, payload)
return response
def assert_download_response(self, response, expected_status_code, expected_content=None):
"""
Verify transcript download response.
"""
self.assertEqual(response.status_code, expected_status_code)
if expected_content:
self.assertEqual(response.content, expected_content)
def test_download_youtube_transcript_success(self):
"""
        Verify that the transcript associated with the YT id is downloaded successfully.
"""
self.update_video_component(youtube_id='JMD_ifUUfsU')
response = self.download_transcript(locator=self.video_usage_key)
self.assert_download_response(response, expected_content=SRT_TRANSCRIPT_CONTENT, expected_status_code=200)
def test_download_non_youtube_transcript_success(self):
"""
        Verify that the transcript associated with the item's `sub` is downloaded successfully.
"""
self.update_video_component(sub='test_subs')
response = self.download_transcript(locator=self.video_usage_key)
self.assert_download_response(response, expected_content=SRT_TRANSCRIPT_CONTENT, expected_status_code=200)
def test_download_transcript_404_without_locator(self):
"""
Verify that download transcript returns 404 without locator.
"""
response = self.download_transcript(locator=None)
self.assert_download_response(response, expected_status_code=404)
def test_download_transcript_404_with_bad_locator(self):
"""
Verify that download transcript returns 404 with invalid locator.
"""
response = self.download_transcript(locator='invalid-locator')
self.assert_download_response(response, expected_status_code=404)
def test_download_transcript_404_for_non_video_module(self):
"""
Verify that download transcript returns 404 for a non video module.
"""
usage_key = self.create_non_video_module()
response = self.download_transcript(locator=usage_key)
self.assert_download_response(response, expected_status_code=404)
def test_download_transcript_404_for_no_yt_and_no_sub(self):
"""
        Verify that download transcript returns 404 when the video component
        has neither a sub nor a youtube id.
"""
self.update_video_component(sub=None, youtube_id=None)
response = self.download_transcript(locator=self.video_usage_key)
self.assert_download_response(response, expected_status_code=404)
@ddt.ddt
class TestCheckTranscripts(BaseTranscripts):
"""
Tests for '/transcripts/check' url.
"""
def test_success_download_nonyoutube(self):
subs_id = str(uuid4())
self.set_fields_from_xml(self.item, u"""
<video youtube="" sub="{}">
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.mp4"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.webm"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.ogv"/>
</video>
""".format(subs_id))
modulestore().update_item(self.item, self.user.id)
subs = {
'start': [100, 200, 240],
'end': [200, 240, 380],
'text': [
'subs #1',
'subs #2',
'subs #3'
]
}
self.save_subs_to_store(subs, subs_id)
data = {
'locator': six.text_type(self.video_usage_key),
'videos': [{
'type': 'html5',
'video': subs_id,
'mode': 'mp4',
}]
}
link = reverse('check_transcripts')
resp = self.client.get(link, {'data': json.dumps(data)})
self.assertEqual(resp.status_code, 200)
self.assertDictEqual(
json.loads(resp.content.decode('utf-8')),
{
u'status': u'Success',
u'youtube_local': False,
u'is_youtube_mode': False,
u'youtube_server': False,
u'command': u'found',
u'current_item_subs': six.text_type(subs_id),
u'youtube_diff': True,
u'html5_local': [six.text_type(subs_id)],
u'html5_equal': False,
}
)
remove_subs_from_store(subs_id, self.item)
def test_check_youtube(self):
self.set_fields_from_xml(self.item, '<video youtube="1:JMD_ifUUfsU" />')
modulestore().update_item(self.item, self.user.id)
subs = {
'start': [100, 200, 240],
'end': [200, 240, 380],
'text': [
'subs #1',
'subs #2',
'subs #3'
]
}
self.save_subs_to_store(subs, 'JMD_ifUUfsU')
link = reverse('check_transcripts')
data = {
'locator': six.text_type(self.video_usage_key),
'videos': [{
'type': 'youtube',
'video': 'JMD_ifUUfsU',
'mode': 'youtube',
}]
}
resp = self.client.get(link, {'data': json.dumps(data)})
self.assertEqual(resp.status_code, 200)
self.assertDictEqual(
json.loads(resp.content.decode('utf-8')),
{
u'status': u'Success',
u'youtube_local': True,
u'is_youtube_mode': True,
u'youtube_server': False,
u'command': u'found',
u'current_item_subs': None,
u'youtube_diff': True,
u'html5_local': [],
u'html5_equal': False,
}
)
@patch('xmodule.video_module.transcripts_utils.requests.get', side_effect=mock_requests_get)
def test_check_youtube_with_transcript_name(self, mock_get):
"""
        Test that the transcripts are fetched correctly when the transcript name is set.
"""
self.set_fields_from_xml(self.item, '<video youtube="good_id_2" />')
modulestore().update_item(self.item, self.user.id)
subs = {
'start': [100, 200, 240],
'end': [200, 240, 380],
'text': [
'subs #1',
'subs #2',
'subs #3'
]
}
self.save_subs_to_store(subs, 'good_id_2')
link = reverse('check_transcripts')
data = {
'locator': six.text_type(self.video_usage_key),
'videos': [{
'type': 'youtube',
'video': 'good_id_2',
'mode': 'youtube',
}]
}
resp = self.client.get(link, {'data': json.dumps(data)})
mock_get.assert_any_call(
'http://video.google.com/timedtext',
params={'lang': 'en', 'v': 'good_id_2', 'name': 'Custom'}
)
self.assertEqual(resp.status_code, 200)
self.assertDictEqual(
json.loads(resp.content.decode('utf-8')),
{
u'status': u'Success',
u'youtube_local': True,
u'is_youtube_mode': True,
u'youtube_server': True,
u'command': u'replace',
u'current_item_subs': None,
u'youtube_diff': True,
u'html5_local': [],
u'html5_equal': False,
}
)
def test_fail_data_without_id(self):
link = reverse('check_transcripts')
data = {
'locator': '',
'videos': [{
'type': '',
'video': '',
'mode': '',
}]
}
resp = self.client.get(link, {'data': json.dumps(data)})
self.assertEqual(resp.status_code, 400)
self.assertEqual(json.loads(resp.content.decode('utf-8')).get('status'), "Can't find item by locator.")
def test_fail_data_with_bad_locator(self):
# Test for raising `InvalidLocationError` exception.
link = reverse('check_transcripts')
data = {
'locator': '',
'videos': [{
'type': '',
'video': '',
'mode': '',
}]
}
resp = self.client.get(link, {'data': json.dumps(data)})
self.assertEqual(resp.status_code, 400)
self.assertEqual(json.loads(resp.content.decode('utf-8')).get('status'), "Can't find item by locator.")
# Test for raising `ItemNotFoundError` exception.
data = {
'locator': '{0}_{1}'.format(self.video_usage_key, 'BAD_LOCATOR'),
'videos': [{
'type': '',
'video': '',
'mode': '',
}]
}
resp = self.client.get(link, {'data': json.dumps(data)})
self.assertEqual(resp.status_code, 400)
self.assertEqual(json.loads(resp.content.decode('utf-8')).get('status'), "Can't find item by locator.")
def test_fail_for_non_video_module(self):
# Not video module: setup
data = {
'parent_locator': six.text_type(self.course.location),
'category': 'not_video',
'type': 'not_video'
}
resp = self.client.ajax_post('/xblock/', data)
usage_key = self._get_usage_key(resp)
subs_id = str(uuid4())
item = modulestore().get_item(usage_key)
self.set_fields_from_xml(self.item, (u"""
<not_video youtube="" sub="{}">
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.mp4"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.webm"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.ogv"/>
</not_video>
""".format(subs_id)))
modulestore().update_item(item, self.user.id)
subs = {
'start': [100, 200, 240],
'end': [200, 240, 380],
'text': [
'subs #1',
'subs #2',
'subs #3'
]
}
self.save_subs_to_store(subs, subs_id)
data = {
'locator': six.text_type(usage_key),
'videos': [{
'type': '',
'video': '',
'mode': '',
}]
}
link = reverse('check_transcripts')
resp = self.client.get(link, {'data': json.dumps(data)})
self.assertEqual(resp.status_code, 400)
self.assertEqual(
json.loads(resp.content.decode('utf-8')).get('status'),
'Transcripts are supported only for "video" modules.',
)
@patch('xmodule.video_module.transcripts_utils.get_video_transcript_content')
def test_command_for_fallback_transcript(self, mock_get_video_transcript_content):
"""
        Verify the command if a transcript is present in edx-val.
"""
mock_get_video_transcript_content.return_value = {
'content': json.dumps({
"start": [10],
"end": [100],
"text": ["Hi, welcome to Edx."],
}),
'file_name': 'edx.sjson'
}
# video_transcript_feature.return_value = feature_enabled
self.set_fields_from_xml(self.item, (u"""
<video youtube="" sub="" edx_video_id="123">
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.mp4"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.webm"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.ogv"/>
</video>
"""))
modulestore().update_item(self.item, self.user.id)
# Make request to check transcript view
data = {
'locator': six.text_type(self.video_usage_key),
'videos': [{
'type': 'html5',
'video': "",
'mode': 'mp4',
}]
}
check_transcripts_url = reverse('check_transcripts')
response = self.client.get(check_transcripts_url, {'data': json.dumps(data)})
# Assert the response
self.assertEqual(response.status_code, 200)
self.assertDictEqual(
json.loads(response.content.decode('utf-8')),
{
u'status': u'Success',
u'youtube_local': False,
u'is_youtube_mode': False,
u'youtube_server': False,
u'command': 'found',
u'current_item_subs': None,
u'youtube_diff': True,
u'html5_local': [],
u'html5_equal': False,
}
)
| agpl-3.0 | -8,912,867,940,428,763,000 | 37.216431 | 119 | 0.59712 | false |
bhaveshAn/crisscross | crisscross/__init__.py | 1 | 3025 | '''
Crisscross
==========
'''
__all__ = ('accelerometer', 'audio', 'barometer', 'battery', 'call', 'camera',
'compass', 'email', 'filechooser', 'flash', 'gps', 'gravity',
'gyroscope', 'irblaster', 'light', 'orientation', 'notification',
'proximity', 'sms', 'tts', 'uniqueid', 'vibrator', 'wifi',
'temperature', 'bluetooth')
__version__ = '1.0.0dev'
from crisscross import facades
from crisscross.utils import Proxy
#: Accelerometer proxy to :class:`crisscross.facades.Accelerometer`
accelerometer = Proxy('accelerometer', facades.Accelerometer)
#: Audio proxy to :class:`crisscross.facades.Audio`
audio = Proxy('audio', facades.Audio)
#: Barometer proxy to :class:`crisscross.facades.Barometer`
barometer = Proxy('barometer', facades.Barometer)
#: Battery proxy to :class:`crisscross.facades.Battery`
battery = Proxy('battery', facades.Battery)
#: Call proxy to :class:`crisscross.facades.Call`
call = Proxy('call', facades.Call)
#: Compass proxy to :class:`crisscross.facades.Compass`
compass = Proxy('compass', facades.Compass)
#: Camera proxy to :class:`crisscross.facades.Camera`
camera = Proxy('camera', facades.Camera)
#: Email proxy to :class:`crisscross.facades.Email`
email = Proxy('email', facades.Email)
#: FileChooser proxy to :class:`crisscross.facades.FileChooser`
filechooser = Proxy('filechooser', facades.FileChooser)
#: GPS proxy to :class:`crisscross.facades.GPS`
gps = Proxy('gps', facades.GPS)
#: Gravity proxy to :class:`crisscross.facades.Gravity`
gravity = Proxy('gravity', facades.Gravity)
#: Gyroscope proxy to :class:`crisscross.facades.Gyroscope`
gyroscope = Proxy('gyroscope', facades.Gyroscope)
#: IrBlaster proxy to :class:`crisscross.facades.IrBlaster`
irblaster = Proxy('irblaster', facades.IrBlaster)
#: Light proxy to :class:`crisscross.facades.Light`
light = Proxy('light', facades.Light)
#: Orientation proxy to :class:`crisscross.facades.Orientation`
orientation = Proxy('orientation', facades.Orientation)
#: Notification proxy to :class:`crisscross.facades.Notification`
notification = Proxy('notification', facades.Notification)
#: Proximity proxy to :class:`crisscross.facades.Proximity`
proximity = Proxy('proximity', facades.Proximity)
#: Sms proxy to :class:`crisscross.facades.Sms`
sms = Proxy('sms', facades.Sms)
#: TTS proxy to :class:`crisscross.facades.TTS`
tts = Proxy('tts', facades.TTS)
#: UniqueID proxy to :class:`crisscross.facades.UniqueID`
uniqueid = Proxy('uniqueid', facades.UniqueID)
#: Vibrator proxy to :class:`crisscross.facades.Vibrator`
vibrator = Proxy('vibrator', facades.Vibrator)
#: Flash proxy to :class:`crisscross.facades.Flash`
flash = Proxy('flash', facades.Flash)
#: Wifi proxy to :class:`crisscross.facades.Wifi`
wifi = Proxy('wifi', facades.Wifi)
#: Temperature proxy to :class:`crisscross.facades.Temperature`
temperature = Proxy('temperature', facades.Temperature)
#: Bluetooth proxy to :class:`crisscross.facades.Bluetooth`
bluetooth = Proxy('bluetooth', facades.Bluetooth)
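# Example usage (an illustrative sketch; the facades mirror the plyer-style
# API, so the exact `notify()` signature below is an assumption):
#   from crisscross import notification
#   notification.notify(title='Hello', message='crisscross facade example')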
| mit | 587,959,898,850,672,500 | 31.880435 | 78 | 0.727603 | false |
Jozhogg/iris | lib/iris/fileformats/netcdf.py | 1 | 69253 | # (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Module to support the loading of a NetCDF file into an Iris cube.
See also: `netCDF4 python <http://code.google.com/p/netcdf4-python/>`_.
Also refer to document 'NetCDF Climate and Forecast (CF) Metadata Conventions',
Version 1.4, 27 February 2009.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import filter, range
import collections
import os
import os.path
import string
import warnings
import biggus
import iris.proxy
iris.proxy.apply_proxy('netCDF4', globals())
import numpy as np
import numpy.ma as ma
from pyke import knowledge_engine
import iris.analysis
from iris.aux_factory import HybridHeightFactory, HybridPressureFactory, \
OceanSigmaZFactory, OceanSigmaFactory, OceanSFactory, OceanSg1Factory, \
OceanSg2Factory
import iris.coord_systems
import iris.coords
import iris.cube
import iris.exceptions
import iris.fileformats.cf
import iris.fileformats._pyke_rules
import iris.io
import iris.unit
import iris.util
# Show Pyke inference engine statistics.
DEBUG = False
# Pyke CF related file names.
_PYKE_RULE_BASE = 'fc_rules_cf'
_PYKE_FACT_BASE = 'facts_cf'
# Standard CML spatio-temporal axis names.
SPATIO_TEMPORAL_AXES = ['t', 'z', 'y', 'x']
# Pass through CF attributes:
# - comment
# - Conventions
# - flag_masks
# - flag_meanings
# - flag_values
# - history
# - institution
# - reference
# - source
# - title
# - positive
#
_CF_ATTRS = ['add_offset', 'ancillary_variables', 'axis', 'bounds', 'calendar',
'cell_measures', 'cell_methods', 'climatology', 'compress',
'coordinates', '_FillValue', 'formula_terms', 'grid_mapping',
'leap_month', 'leap_year', 'long_name', 'missing_value',
'month_lengths', 'scale_factor', 'standard_error_multiplier',
'standard_name', 'units', 'valid_max', 'valid_min', 'valid_range']
# CF attributes that should not be global.
_CF_DATA_ATTRS = ['flag_masks', 'flag_meanings', 'flag_values',
'instance_dimension', 'sample_dimension',
'standard_error_multiplier']
# CF attributes that should only be global.
_CF_GLOBAL_ATTRS = ['conventions', 'featureType', 'history', 'title']
# UKMO specific attributes that should not be global.
_UKMO_DATA_ATTRS = ['STASH', 'um_stash_source', 'ukmo__process_flags']
CF_CONVENTIONS_VERSION = 'CF-1.5'
_FactoryDefn = collections.namedtuple('_FactoryDefn', ('primary', 'std_name',
'formula_terms_format'))
_FACTORY_DEFNS = {
HybridHeightFactory: _FactoryDefn(
primary='delta',
std_name='atmosphere_hybrid_height_coordinate',
formula_terms_format='a: {delta} b: {sigma} orog: {orography}'),
HybridPressureFactory: _FactoryDefn(
primary='delta',
std_name='atmosphere_hybrid_sigma_pressure_coordinate',
formula_terms_format='ap: {delta} b: {sigma} '
'ps: {surface_air_pressure}'),
OceanSigmaZFactory: _FactoryDefn(
primary='zlev',
std_name='ocean_sigma_z_coordinate',
formula_terms_format='sigma: {sigma} eta: {eta} depth: {depth} '
'depth_c: {depth_c} nsigma: {nsigma} zlev: {zlev}'),
OceanSigmaFactory: _FactoryDefn(
primary='sigma',
std_name='ocean_sigma_coordinate',
formula_terms_format='sigma: {sigma} eta: {eta} depth: {depth}'),
OceanSFactory: _FactoryDefn(
primary='s',
std_name='ocean_s_coordinate',
formula_terms_format='s: {s} eta: {eta} depth: {depth} a: {a} b: {b} '
'depth_c: {depth_c}'),
OceanSg1Factory: _FactoryDefn(
primary='s',
std_name='ocean_s_coordinate_g1',
formula_terms_format='s: {s} c: {c} eta: {eta} depth: {depth} '
'depth_c: {depth_c}'),
OceanSg2Factory: _FactoryDefn(
primary='s',
std_name='ocean_s_coordinate_g2',
formula_terms_format='s: {s} c: {c} eta: {eta} depth: {depth} '
'depth_c: {depth_c}')
}
class CFNameCoordMap(object):
"""Provide a simple CF name to CF coordinate mapping."""
_Map = collections.namedtuple('_Map', ['name', 'coord'])
def __init__(self):
self._map = []
def append(self, name, coord):
"""
Append the given name and coordinate pair to the mapping.
Args:
* name:
CF name of the associated coordinate.
* coord:
The coordinate of the associated CF name.
Returns:
None.
"""
self._map.append(CFNameCoordMap._Map(name, coord))
@property
def names(self):
"""Return all the CF names."""
return [pair.name for pair in self._map]
@property
def coords(self):
"""Return all the coordinates."""
return [pair.coord for pair in self._map]
def name(self, coord):
"""
        Return the CF name, given a coordinate.
Args:
* coord:
The coordinate of the associated CF name.
Returns:
            The CF name of the given coordinate.
"""
result = None
for pair in self._map:
if coord == pair.coord:
result = pair.name
break
if result is None:
msg = 'Coordinate is not mapped, {!r}'.format(coord)
raise KeyError(msg)
return result
def coord(self, name):
"""
Return the coordinate, given a CF name.
Args:
* name:
CF name of the associated coordinate.
Returns:
            The coordinate associated with the given CF name.
"""
result = None
for pair in self._map:
if name == pair.name:
result = pair.coord
break
if result is None:
msg = 'Name is not mapped, {!r}'.format(name)
raise KeyError(msg)
return result
def _pyke_kb_engine():
"""Return the PyKE knowledge engine for CF->cube conversion."""
pyke_dir = os.path.join(os.path.dirname(__file__), '_pyke_rules')
compile_dir = os.path.join(pyke_dir, 'compiled_krb')
engine = None
if os.path.exists(compile_dir):
tmpvar = [os.path.getmtime(os.path.join(compile_dir, fname)) for
fname in os.listdir(compile_dir) if not
fname.startswith('_')]
if tmpvar:
oldest_pyke_compile_file = min(tmpvar)
rule_age = os.path.getmtime(
os.path.join(pyke_dir, _PYKE_RULE_BASE + '.krb'))
if oldest_pyke_compile_file >= rule_age:
# Initialise the pyke inference engine.
engine = knowledge_engine.engine(
(None, 'iris.fileformats._pyke_rules.compiled_krb'))
if engine is None:
engine = knowledge_engine.engine(iris.fileformats._pyke_rules)
return engine
class NetCDFDataProxy(object):
"""A reference to the data payload of a single NetCDF file variable."""
__slots__ = ('shape', 'dtype', 'path', 'variable_name', 'fill_value')
def __init__(self, shape, dtype, path, variable_name, fill_value):
self.shape = shape
self.dtype = dtype
self.path = path
self.variable_name = variable_name
self.fill_value = fill_value
@property
def ndim(self):
return len(self.shape)
def __getitem__(self, keys):
dataset = netCDF4.Dataset(self.path)
try:
variable = dataset.variables[self.variable_name]
# Get the NetCDF variable data and slice.
data = variable[keys]
finally:
dataset.close()
return data
def __repr__(self):
fmt = '<{self.__class__.__name__} shape={self.shape}' \
' dtype={self.dtype!r} path={self.path!r}' \
' variable_name={self.variable_name!r}>'
return fmt.format(self=self)
def __getstate__(self):
return {attr: getattr(self, attr) for attr in self.__slots__}
def __setstate__(self, state):
for key, value in state.iteritems():
setattr(self, key, value)
def _assert_case_specific_facts(engine, cf, cf_group):
# Initialise pyke engine "provides" hooks.
engine.provides['coordinates'] = []
# Assert facts for CF coordinates.
for cf_name in cf_group.coordinates.iterkeys():
engine.add_case_specific_fact(_PYKE_FACT_BASE, 'coordinate',
(cf_name,))
# Assert facts for CF auxiliary coordinates.
for cf_name in cf_group.auxiliary_coordinates.iterkeys():
engine.add_case_specific_fact(_PYKE_FACT_BASE, 'auxiliary_coordinate',
(cf_name,))
# Assert facts for CF grid_mappings.
for cf_name in cf_group.grid_mappings.iterkeys():
engine.add_case_specific_fact(_PYKE_FACT_BASE, 'grid_mapping',
(cf_name,))
# Assert facts for CF labels.
for cf_name in cf_group.labels.iterkeys():
engine.add_case_specific_fact(_PYKE_FACT_BASE, 'label',
(cf_name,))
# Assert facts for CF formula terms associated with the cf_group
# of the CF data variable.
formula_root = set()
for cf_var in cf.cf_group.formula_terms.itervalues():
for cf_root, cf_term in cf_var.cf_terms_by_root.iteritems():
# Only assert this fact if the formula root variable is
# defined in the CF group of the CF data variable.
if cf_root in cf_group:
formula_root.add(cf_root)
engine.add_case_specific_fact(_PYKE_FACT_BASE, 'formula_term',
(cf_var.cf_name, cf_root,
cf_term))
for cf_root in formula_root:
engine.add_case_specific_fact(_PYKE_FACT_BASE, 'formula_root',
(cf_root,))
def _pyke_stats(engine, cf_name):
if DEBUG:
print('-' * 80)
print('CF Data Variable: %r' % cf_name)
engine.print_stats()
print('Rules Triggered:')
for rule in sorted(list(engine.rule_triggered)):
print('\t%s' % rule)
print('Case Specific Facts:')
kb_facts = engine.get_kb(_PYKE_FACT_BASE)
for key in kb_facts.entity_lists.iterkeys():
for arg in kb_facts.entity_lists[key].case_specific_facts:
print('\t%s%s' % (key, arg))
def _set_attributes(attributes, key, value):
"""Set attributes dictionary, converting unicode strings appropriately."""
if isinstance(value, unicode):
try:
attributes[str(key)] = str(value)
except UnicodeEncodeError:
attributes[str(key)] = value
else:
attributes[str(key)] = value
def _load_cube(engine, cf, cf_var, filename):
"""Create the cube associated with the CF-netCDF data variable."""
# Figure out what the eventual data type will be after any scale/offset
# transforms.
dummy_data = np.zeros(1, dtype=cf_var.dtype)
if hasattr(cf_var, 'scale_factor'):
dummy_data = cf_var.scale_factor * dummy_data
if hasattr(cf_var, 'add_offset'):
dummy_data = cf_var.add_offset + dummy_data
# Create cube with deferred data, but no metadata
fill_value = getattr(cf_var.cf_data, '_FillValue',
netCDF4.default_fillvals[cf_var.dtype.str[1:]])
proxy = NetCDFDataProxy(cf_var.shape, dummy_data.dtype,
filename, cf_var.cf_name, fill_value)
data = biggus.OrthoArrayAdapter(proxy)
cube = iris.cube.Cube(data)
# Reset the pyke inference engine.
engine.reset()
# Initialise pyke engine rule processing hooks.
engine.cf_var = cf_var
engine.cube = cube
engine.provides = {}
engine.requires = {}
engine.rule_triggered = set()
engine.filename = filename
# Assert any case-specific facts.
_assert_case_specific_facts(engine, cf, cf_var.cf_group)
# Run pyke inference engine with forward chaining rules.
engine.activate(_PYKE_RULE_BASE)
# Populate coordinate attributes with the untouched attributes from the
# associated CF-netCDF variable.
coordinates = engine.provides.get('coordinates', [])
attribute_predicate = lambda item: item[0] not in _CF_ATTRS
for coord, cf_var_name in coordinates:
tmpvar = filter(attribute_predicate,
cf.cf_group[cf_var_name].cf_attrs_unused())
for attr_name, attr_value in tmpvar:
_set_attributes(coord.attributes, attr_name, attr_value)
tmpvar = filter(attribute_predicate, cf_var.cf_attrs_unused())
# Attach untouched attributes of the associated CF-netCDF data variable to
# the cube.
for attr_name, attr_value in tmpvar:
_set_attributes(cube.attributes, attr_name, attr_value)
# Show pyke session statistics.
_pyke_stats(engine, cf_var.cf_name)
return cube
def _load_aux_factory(engine, cube):
"""
Convert any CF-netCDF dimensionless coordinate to an AuxCoordFactory.
"""
formula_type = engine.requires.get('formula_type')
if formula_type in ['atmosphere_hybrid_height_coordinate',
'atmosphere_hybrid_sigma_pressure_coordinate',
'ocean_sigma_z_coordinate', 'ocean_sigma_coordinate',
'ocean_s_coordinate', 'ocean_s_coordinate_g1',
'ocean_s_coordinate_g2']:
def coord_from_term(term):
# Convert term names to coordinates (via netCDF variable names).
name = engine.requires['formula_terms'][term]
for coord, cf_var_name in engine.provides['coordinates']:
if cf_var_name == name:
return coord
warnings.warn('Unable to find coordinate for variable '
'{!r}'.format(name))
if formula_type == 'atmosphere_hybrid_height_coordinate':
delta = coord_from_term('a')
sigma = coord_from_term('b')
orography = coord_from_term('orog')
factory = HybridHeightFactory(delta, sigma, orography)
elif formula_type == 'atmosphere_hybrid_sigma_pressure_coordinate':
# Hybrid pressure has two valid versions of its formula terms:
# "p0: var1 a: var2 b: var3 ps: var4" or
# "ap: var1 b: var2 ps: var3" where "ap = p0 * a"
try:
# Attempt to get the "ap" term.
delta = coord_from_term('ap')
except (KeyError, ValueError):
# The "ap" term is unavailable, so try getting terms "p0"
# and "a" terms in order to derive an "ap" equivalent term.
coord_p0 = coord_from_term('p0')
if coord_p0.shape != (1,):
msg = 'Expecting {!r} to be a scalar reference pressure ' \
'coordinate, got shape {!r}'.format(coord_p0.var_name,
coord_p0.shape)
raise ValueError(msg)
if coord_p0.has_bounds():
msg = 'Ignoring atmosphere hybrid sigma pressure scalar ' \
'coordinate {!r} bounds.'.format(coord_p0.name())
warnings.warn(msg)
coord_a = coord_from_term('a')
delta = coord_a * coord_p0.points[0]
delta.units = coord_a.units * coord_p0.units
delta.rename('vertical pressure')
delta.var_name = 'ap'
cube.add_aux_coord(delta, cube.coord_dims(coord_a))
sigma = coord_from_term('b')
surface_air_pressure = coord_from_term('ps')
factory = HybridPressureFactory(delta, sigma, surface_air_pressure)
elif formula_type == 'ocean_sigma_z_coordinate':
sigma = coord_from_term('sigma')
eta = coord_from_term('eta')
depth = coord_from_term('depth')
depth_c = coord_from_term('depth_c')
nsigma = coord_from_term('nsigma')
zlev = coord_from_term('zlev')
factory = OceanSigmaZFactory(sigma, eta, depth,
depth_c, nsigma, zlev)
elif formula_type == 'ocean_sigma_coordinate':
sigma = coord_from_term('sigma')
eta = coord_from_term('eta')
depth = coord_from_term('depth')
factory = OceanSigmaFactory(sigma, eta, depth)
elif formula_type == 'ocean_s_coordinate':
s = coord_from_term('s')
eta = coord_from_term('eta')
depth = coord_from_term('depth')
a = coord_from_term('a')
depth_c = coord_from_term('depth_c')
b = coord_from_term('b')
factory = OceanSFactory(s, eta, depth, a, b, depth_c)
elif formula_type == 'ocean_s_coordinate_g1':
s = coord_from_term('s')
c = coord_from_term('c')
eta = coord_from_term('eta')
depth = coord_from_term('depth')
depth_c = coord_from_term('depth_c')
factory = OceanSg1Factory(s, c, eta, depth,
depth_c)
elif formula_type == 'ocean_s_coordinate_g2':
s = coord_from_term('s')
c = coord_from_term('c')
eta = coord_from_term('eta')
depth = coord_from_term('depth')
depth_c = coord_from_term('depth_c')
factory = OceanSg2Factory(s, c, eta, depth,
depth_c)
cube.add_aux_factory(factory)
def load_cubes(filenames, callback=None):
"""
Loads cubes from a list of NetCDF filenames/URLs.
Args:
* filenames (string/list):
One or more NetCDF filenames/DAP URLs to load from.
Kwargs:
* callback (callable function):
Function which can be passed on to :func:`iris.io.run_callback`.
Returns:
Generator of loaded NetCDF :class:`iris.cubes.Cube`.
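    For example, a minimal sketch (the filename below is hypothetical)::
        from iris.fileformats.netcdf import load_cubes
        for cube in load_cubes('air_temperature.nc'):
            print(cube)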
"""
# Initialise the pyke inference engine.
engine = _pyke_kb_engine()
if isinstance(filenames, basestring):
filenames = [filenames]
for filename in filenames:
# Ingest the netCDF file.
cf = iris.fileformats.cf.CFReader(filename)
# Process each CF data variable.
data_variables = cf.cf_group.data_variables.values() + \
cf.cf_group.promoted.values()
for cf_var in data_variables:
cube = _load_cube(engine, cf, cf_var, filename)
# Process any associated formula terms and attach
# the corresponding AuxCoordFactory.
try:
_load_aux_factory(engine, cube)
except ValueError as e:
warnings.warn('{}'.format(e))
# Perform any user registered callback function.
cube = iris.io.run_callback(callback, cube, cf_var, filename)
# Callback mechanism may return None, which must not be yielded
if cube is None:
continue
yield cube
class Saver(object):
"""A manager for saving netcdf files."""
def __init__(self, filename, netcdf_format):
"""
A manager for saving netcdf files.
Args:
* filename (string):
Name of the netCDF file to save the cube.
* netcdf_format (string):
Underlying netCDF file format, one of 'NETCDF4', 'NETCDF4_CLASSIC',
'NETCDF3_CLASSIC' or 'NETCDF3_64BIT'. Default is 'NETCDF4' format.
Returns:
None.
For example::
# Initialise Manager for saving
with Saver(filename, netcdf_format) as sman:
# Iterate through the cubelist.
for cube in cubes:
sman.write(cube)
"""
if netcdf_format not in ['NETCDF4', 'NETCDF4_CLASSIC',
'NETCDF3_CLASSIC', 'NETCDF3_64BIT']:
raise ValueError('Unknown netCDF file format, got %r' %
netcdf_format)
# All persistent variables
#: CF name mapping with iris coordinates
self._name_coord_map = CFNameCoordMap()
#: List of dimension coordinates added to the file
self._dim_coords = []
#: List of grid mappings added to the file
self._coord_systems = []
#: A dictionary, listing dimension names and corresponding length
self._existing_dim = {}
#: A dictionary, mapping formula terms to owner cf variable name
self._formula_terms_cache = {}
#: NetCDF dataset
try:
self._dataset = netCDF4.Dataset(filename, mode='w',
format=netcdf_format)
except RuntimeError:
dir_name = os.path.dirname(filename)
if not os.path.isdir(dir_name):
msg = 'No such file or directory: {}'.format(dir_name)
raise IOError(msg)
if not os.access(dir_name, os.R_OK | os.W_OK):
msg = 'Permission denied: {}'.format(filename)
raise IOError(msg)
else:
raise
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Flush any buffered data to the CF-netCDF file before closing."""
self._dataset.sync()
self._dataset.close()
def write(self, cube, local_keys=None, unlimited_dimensions=None,
zlib=False, complevel=4, shuffle=True, fletcher32=False,
contiguous=False, chunksizes=None, endian='native',
least_significant_digit=None):
"""
Wrapper for saving cubes to a NetCDF file.
Args:
* cube (:class:`iris.cube.Cube`):
A :class:`iris.cube.Cube` to be saved to a netCDF file.
Kwargs:
* local_keys (iterable of strings):
            An iterable of cube attribute keys. Any cube attributes with
matching keys will become attributes on the data variable rather
than global attributes.
* unlimited_dimensions (iterable of strings and/or
:class:`iris.coords.Coord` objects):
Explicit list of coordinate names (or coordinate objects)
corresponding to coordinate dimensions of `cube` to save with the
NetCDF dimension variable length 'UNLIMITED'. By default, the
outermost (first) dimension for each cube is used. Only the
'NETCDF4' format supports multiple 'UNLIMITED' dimensions. To save
no unlimited dimensions, use `unlimited_dimensions=[]` (an empty
list).
* zlib (bool):
If `True`, the data will be compressed in the netCDF file using
gzip compression (default `False`).
* complevel (int):
An integer between 1 and 9 describing the level of compression
desired (default 4). Ignored if `zlib=False`.
* shuffle (bool):
If `True`, the HDF5 shuffle filter will be applied before
compressing the data (default `True`). This significantly improves
compression. Ignored if `zlib=False`.
* fletcher32 (bool):
If `True`, the Fletcher32 HDF5 checksum algorithm is activated to
detect errors. Default `False`.
* contiguous (bool):
If `True`, the variable data is stored contiguously on disk.
Default `False`. Setting to `True` for a variable with an unlimited
dimension will trigger an error.
* chunksizes (tuple of int):
Used to manually specify the HDF5 chunksizes for each dimension of
the variable. A detailed discussion of HDF chunking and I/O
performance is available here:
http://www.hdfgroup.org/HDF5/doc/H5.user/Chunking.html. Basically,
you want the chunk size for each dimension to match as closely as
possible the size of the data block that users will read from the
file. `chunksizes` cannot be set if `contiguous=True`.
* endian (string):
Used to control whether the data is stored in little or big endian
format on disk. Possible values are 'little', 'big' or 'native'
(default). The library will automatically handle endian conversions
when the data is read, but if the data is always going to be read
on a computer with the opposite format as the one used to create
the file, there may be some performance advantage to be gained by
setting the endian-ness.
* least_significant_digit (int):
If `least_significant_digit` is specified, variable data will be
truncated (quantized). In conjunction with `zlib=True` this
produces 'lossy', but significantly more efficient compression. For
example, if `least_significant_digit=1`, data will be quantized
using `numpy.around(scale*data)/scale`, where `scale = 2**bits`,
and `bits` is determined so that a precision of 0.1 is retained (in
this case `bits=4`). From
http://www.cdc.noaa.gov/cdc/conventions/cdc_netcdf_standard.shtml:
"least_significant_digit -- power of ten of the smallest decimal
place in unpacked data that is a reliable value". Default is
`None`, or no quantization, or 'lossless' compression.
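            For instance, with `bits=4` the scale is 16, so a value of 3.14159
            is stored as `numpy.around(16 * 3.14159) / 16`, i.e. 3.125
            (illustrative arithmetic).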
Returns:
None.
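        For example, a minimal sketch (the target filename is hypothetical and
        `cube` is any cube already in memory)::
            with Saver('output.nc', 'NETCDF4') as sman:
                sman.write(cube, zlib=True, complevel=6)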
.. note::
The `zlib`, `complevel`, `shuffle`, `fletcher32`, `contiguous`,
`chunksizes` and `endian` keywords are silently ignored for netCDF
3 files that do not use HDF5.
.. deprecated:: 1.8.0
NetCDF default saving behaviour currently assigns the outermost
dimension as unlimited. This behaviour is to be deprecated, in
favour of no automatic assignment. To switch to the new behaviour,
set `iris.FUTURE.netcdf_no_unlimited` to True.
"""
if unlimited_dimensions is None:
if iris.FUTURE.netcdf_no_unlimited:
unlimited_dimensions = []
else:
_no_unlim_dep_warning()
cf_profile_available = (iris.site_configuration.get('cf_profile') not
in [None, False])
if cf_profile_available:
# Perform a CF profile of the cube. This may result in an exception
# being raised if mandatory requirements are not satisfied.
profile = iris.site_configuration['cf_profile'](cube)
# Get suitable dimension names.
dimension_names = self._get_dim_names(cube)
# Create the CF-netCDF data dimensions.
self._create_cf_dimensions(cube, dimension_names, unlimited_dimensions)
# Create the associated cube CF-netCDF data variable.
cf_var_cube = self._create_cf_data_variable(
cube, dimension_names, local_keys, zlib=zlib, complevel=complevel,
shuffle=shuffle, fletcher32=fletcher32, contiguous=contiguous,
chunksizes=chunksizes, endian=endian,
least_significant_digit=least_significant_digit)
# Add coordinate variables.
self._add_dim_coords(cube, dimension_names)
# Add the auxiliary coordinate variable names and associate the data
# variable to them
self._add_aux_coords(cube, cf_var_cube, dimension_names)
# Add the formula terms to the appropriate cf variables for each
# aux factory in the cube.
self._add_aux_factories(cube, cf_var_cube, dimension_names)
# Add data variable-only attribute names to local_keys.
if local_keys is None:
local_keys = set()
else:
local_keys = set(local_keys)
local_keys.update(_CF_DATA_ATTRS, _UKMO_DATA_ATTRS)
# Add global attributes taking into account local_keys.
global_attributes = {k: v for k, v in cube.attributes.iteritems() if k
not in local_keys and k.lower() != 'conventions'}
self.update_global_attributes(global_attributes)
if cf_profile_available:
cf_patch = iris.site_configuration.get('cf_patch')
if cf_patch is not None:
# Perform a CF patch of the dataset.
cf_patch(profile, self._dataset, cf_var_cube)
else:
msg = 'cf_profile is available but no {} defined.'.format(
'cf_patch')
warnings.warn(msg)
def update_global_attributes(self, attributes=None, **kwargs):
"""
Update the CF global attributes based on the provided
iterable/dictionary and/or keyword arguments.
Args:
* attributes (dict or iterable of key, value pairs):
CF global attributes to be updated.
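        For example, a minimal sketch with illustrative attribute values::
            sman.update_global_attributes({'institution': 'Example Institute'},
                                          source='Example model output')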
"""
if attributes is not None:
# Handle sequence e.g. [('fruit', 'apple'), ...].
if not hasattr(attributes, 'keys'):
attributes = dict(attributes)
for attr_name in sorted(attributes):
self._dataset.setncattr(attr_name, attributes[attr_name])
for attr_name in sorted(kwargs):
self._dataset.setncattr(attr_name, kwargs[attr_name])
def _create_cf_dimensions(self, cube, dimension_names,
unlimited_dimensions=None):
"""
Create the CF-netCDF data dimensions.
Args:
* cube (:class:`iris.cube.Cube`):
A :class:`iris.cube.Cube` in which to lookup coordinates.
Kwargs:
* unlimited_dimensions (iterable of strings and/or
:class:`iris.coords.Coord` objects):
List of coordinates to make unlimited. By default, the
outermost dimension is made unlimited.
Returns:
None.
"""
unlimited_dim_names = []
if (unlimited_dimensions is None and
not iris.FUTURE.netcdf_no_unlimited):
if dimension_names:
unlimited_dim_names.append(dimension_names[0])
else:
for coord in unlimited_dimensions:
try:
coord = cube.coord(name_or_coord=coord, dim_coords=True)
except iris.exceptions.CoordinateNotFoundError:
# coordinate isn't used for this cube, but it might be
# used for a different one
pass
else:
dim_name = self._get_coord_variable_name(cube, coord)
unlimited_dim_names.append(dim_name)
for dim_name in dimension_names:
if dim_name not in self._dataset.dimensions:
if dim_name in unlimited_dim_names:
size = None
else:
size = self._existing_dim[dim_name]
self._dataset.createDimension(dim_name, size)
def _add_aux_coords(self, cube, cf_var_cube, dimension_names):
"""
        Add auxiliary coordinates to the dataset and associate them with the data variable.
Args:
* cube (:class:`iris.cube.Cube`):
A :class:`iris.cube.Cube` to be saved to a netCDF file.
* cf_var_cube (:class:`netcdf.netcdf_variable`):
cf variable cube representation.
* dimension_names (list):
Names associated with the dimensions of the cube.
"""
auxiliary_coordinate_names = []
# Add CF-netCDF variables for the associated auxiliary coordinates.
for coord in sorted(cube.aux_coords, key=lambda coord: coord.name()):
# Create the associated coordinate CF-netCDF variable.
if coord not in self._name_coord_map.coords:
cf_name = self._create_cf_variable(cube, dimension_names,
coord)
self._name_coord_map.append(cf_name, coord)
else:
cf_name = self._name_coord_map.name(coord)
if cf_name is not None:
auxiliary_coordinate_names.append(cf_name)
# Add CF-netCDF auxiliary coordinate variable references to the
# CF-netCDF data variable.
if auxiliary_coordinate_names:
cf_var_cube.coordinates = ' '.join(
sorted(auxiliary_coordinate_names))
def _add_dim_coords(self, cube, dimension_names):
"""
Add coordinate variables to NetCDF dataset.
Args:
* cube (:class:`iris.cube.Cube`):
A :class:`iris.cube.Cube` to be saved to a netCDF file.
* dimension_names (list):
Names associated with the dimensions of the cube.
"""
# Ensure we create the netCDF coordinate variables first.
for coord in cube.dim_coords:
# Create the associated coordinate CF-netCDF variable.
if coord not in self._name_coord_map.coords:
cf_name = self._create_cf_variable(cube, dimension_names,
coord)
self._name_coord_map.append(cf_name, coord)
def _add_aux_factories(self, cube, cf_var_cube, dimension_names):
"""
Modifies the variables of the NetCDF dataset to represent
the presence of dimensionless vertical coordinates based on
the aux factories of the cube (if any).
Args:
* cube (:class:`iris.cube.Cube`):
A :class:`iris.cube.Cube` to be saved to a netCDF file.
* cf_var_cube (:class:`netcdf.netcdf_variable`)
CF variable cube representation.
* dimension_names (list):
Names associated with the dimensions of the cube.
"""
primaries = []
for factory in cube.aux_factories:
factory_defn = _FACTORY_DEFNS.get(type(factory), None)
if factory_defn is None:
msg = 'Unable to determine formula terms ' \
'for AuxFactory: {!r}'.format(factory)
warnings.warn(msg)
else:
# Override `standard_name`, `long_name`, and `axis` of the
                # primary coord that signals the presence of a dimensionless
# vertical coord, then set the `formula_terms` attribute.
primary_coord = factory.dependencies[factory_defn.primary]
if primary_coord in primaries:
msg = 'Cube {!r} has multiple aux factories that share ' \
'a common primary coordinate {!r}. Unable to save ' \
'to netCDF as having multiple formula terms on a ' \
'single coordinate is not supported.'
raise ValueError(msg.format(cube, primary_coord.name()))
primaries.append(primary_coord)
cf_name = self._name_coord_map.name(primary_coord)
cf_var = self._dataset.variables[cf_name]
names = {key: self._name_coord_map.name(coord) for
key, coord in factory.dependencies.iteritems()}
formula_terms = factory_defn.formula_terms_format.format(
**names)
std_name = factory_defn.std_name
if hasattr(cf_var, 'formula_terms'):
if cf_var.formula_terms != formula_terms or \
cf_var.standard_name != std_name:
# TODO: We need to resolve this corner-case where
# the dimensionless vertical coordinate containing the
# formula_terms is a dimension coordinate of the
# associated cube and a new alternatively named
# dimensionless vertical coordinate is required with
# new formula_terms and a renamed dimension.
if cf_name in dimension_names:
                            msg = 'Unable to create dimensionless vertical ' \
'coordinate.'
raise ValueError(msg)
key = (cf_name, std_name, formula_terms)
name = self._formula_terms_cache.get(key)
if name is None:
# Create a new variable
name = self._create_cf_variable(cube,
dimension_names,
primary_coord)
cf_var = self._dataset.variables[name]
cf_var.standard_name = std_name
cf_var.axis = 'Z'
# Update the formula terms.
ft = formula_terms.split()
ft = [name if t == cf_name else t for t in ft]
cf_var.formula_terms = ' '.join(ft)
# Update the cache.
self._formula_terms_cache[key] = name
# Update the associated cube variable.
coords = cf_var_cube.coordinates.split()
coords = [name if c == cf_name else c for c in coords]
cf_var_cube.coordinates = ' '.join(coords)
else:
cf_var.standard_name = std_name
cf_var.axis = 'Z'
cf_var.formula_terms = formula_terms
def _get_dim_names(self, cube):
"""
Determine suitable CF-netCDF data dimension names.
Args:
* cube (:class:`iris.cube.Cube`):
A :class:`iris.cube.Cube` to be saved to a netCDF file.
Returns:
List of dimension names with length equal the number of dimensions
in the cube.
"""
dimension_names = []
for dim in range(cube.ndim):
coords = cube.coords(dimensions=dim, dim_coords=True)
if coords:
coord = coords[0]
dim_name = self._get_coord_variable_name(cube, coord)
# Add only dimensions that have not already been added.
if coord not in self._dim_coords:
# Determine unique dimension name
while (dim_name in self._existing_dim or
dim_name in self._name_coord_map.names):
dim_name = self._increment_name(dim_name)
# Update names added, current cube dim names used and
# unique coordinates added.
self._existing_dim[dim_name] = coord.shape[0]
dimension_names.append(dim_name)
self._dim_coords.append(coord)
else:
# Return the dim_name associated with the existing
# coordinate.
dim_name = self._name_coord_map.name(coord)
dimension_names.append(dim_name)
else:
# No CF-netCDF coordinates describe this data dimension.
dim_name = 'dim%d' % dim
if dim_name in self._existing_dim:
# Increment name if conflicted with one already existing.
if self._existing_dim[dim_name] != cube.shape[dim]:
while (dim_name in self._existing_dim and
self._existing_dim[dim_name] !=
cube.shape[dim] or
dim_name in self._name_coord_map.names):
dim_name = self._increment_name(dim_name)
# Update dictionary with new entry
self._existing_dim[dim_name] = cube.shape[dim]
else:
# Update dictionary with new entry
self._existing_dim[dim_name] = cube.shape[dim]
dimension_names.append(dim_name)
return dimension_names
def _cf_coord_identity(self, coord):
"""
        Determine suitable units from a given coordinate.
Args:
* coord (:class:`iris.coords.Coord`):
A coordinate of a cube.
Returns:
The (standard_name, long_name, unit) of the given
:class:`iris.coords.Coord` instance.
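        For example, a latitude coordinate on a :class:`iris.coord_systems.GeogCS`
        typically gives ``('latitude', None, 'degrees_north')``.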
"""
units = str(coord.units)
# TODO: Use #61 to get the units.
if isinstance(coord.coord_system, iris.coord_systems.GeogCS):
if "latitude" in coord.standard_name:
units = 'degrees_north'
elif "longitude" in coord.standard_name:
units = 'degrees_east'
elif isinstance(coord.coord_system, iris.coord_systems.RotatedGeogCS):
units = 'degrees'
elif isinstance(coord.coord_system,
iris.coord_systems.TransverseMercator):
units = 'm'
return coord.standard_name, coord.long_name, units
def _ensure_valid_dtype(self, values, src_name, src_object):
# NetCDF3 does not support int64 or unsigned ints, so we check
# if we can store them as int32 instead.
if ((np.issubdtype(values.dtype, np.int64) or
np.issubdtype(values.dtype, np.unsignedinteger)) and
self._dataset.file_format in ('NETCDF3_CLASSIC',
'NETCDF3_64BIT')):
# Cast to an integer type supported by netCDF3.
if not np.can_cast(values.max(), np.int32) or \
not np.can_cast(values.min(), np.int32):
msg = 'The data type of {} {!r} is not supported by {} and' \
' its values cannot be safely cast to a supported' \
' integer type.'
msg = msg.format(src_name, src_object,
self._dataset.file_format)
raise ValueError(msg)
values = values.astype(np.int32)
return values
def _create_cf_bounds(self, coord, cf_var, cf_name):
"""
Create the associated CF-netCDF bounds variable.
Args:
* coord (:class:`iris.coords.Coord`):
A coordinate of a cube.
* cf_var:
CF-netCDF variable
* cf_name (string):
name of the CF-NetCDF variable.
Returns:
None
"""
if coord.has_bounds():
# Get the values in a form which is valid for the file format.
bounds = self._ensure_valid_dtype(coord.bounds,
'the bounds of coordinate',
coord)
n_bounds = bounds.shape[-1]
if n_bounds == 2:
bounds_dimension_name = 'bnds'
else:
bounds_dimension_name = 'bnds_%s' % n_bounds
if bounds_dimension_name not in self._dataset.dimensions:
# Create the bounds dimension with the appropriate extent.
self._dataset.createDimension(bounds_dimension_name, n_bounds)
cf_var.bounds = cf_name + '_bnds'
cf_var_bounds = self._dataset.createVariable(
cf_var.bounds, bounds.dtype.newbyteorder('='),
cf_var.dimensions + (bounds_dimension_name,))
cf_var_bounds[:] = bounds
def _get_cube_variable_name(self, cube):
"""
Returns a CF-netCDF variable name for the given cube.
Args:
* cube (class:`iris.cube.Cube`):
An instance of a cube for which a CF-netCDF variable
name is required.
Returns:
A CF-netCDF variable name as a string.
"""
if cube.var_name is not None:
cf_name = cube.var_name
else:
# Convert to lower case and replace whitespace by underscores.
cf_name = '_'.join(cube.name().lower().split())
return cf_name
def _get_coord_variable_name(self, cube, coord):
"""
Returns a CF-netCDF variable name for the given coordinate.
Args:
* cube (:class:`iris.cube.Cube`):
The cube that contains the given coordinate.
* coord (:class:`iris.coords.Coord`):
An instance of a coordinate for which a CF-netCDF variable
name is required.
Returns:
A CF-netCDF variable name as a string.
"""
if coord.var_name is not None:
cf_name = coord.var_name
else:
name = coord.standard_name or coord.long_name
if not name or set(name).intersection(string.whitespace):
# Auto-generate name based on associated dimensions.
name = ''
for dim in cube.coord_dims(coord):
name += 'dim{}'.format(dim)
# Handle scalar coordinate (dims == ()).
if not name:
name = 'unknown_scalar'
# Convert to lower case and replace whitespace by underscores.
cf_name = '_'.join(name.lower().split())
return cf_name
def _create_cf_variable(self, cube, dimension_names, coord):
"""
Create the associated CF-netCDF variable in the netCDF dataset for the
given coordinate. If required, also create the CF-netCDF bounds
variable and associated dimension.
Args:
* cube (:class:`iris.cube.Cube`):
The associated cube being saved to CF-netCDF file.
* dimension_names (list):
Names for each dimension of the cube.
* coord (:class:`iris.coords.Coord`):
The coordinate to be saved to CF-netCDF file.
Returns:
The string name of the associated CF-netCDF variable saved.
"""
cf_name = self._get_coord_variable_name(cube, coord)
while cf_name in self._dataset.variables:
cf_name = self._increment_name(cf_name)
# Derive the data dimension names for the coordinate.
cf_dimensions = [dimension_names[dim] for dim in
cube.coord_dims(coord)]
if np.issubdtype(coord.points.dtype, np.str):
string_dimension_depth = coord.points.dtype.itemsize
string_dimension_name = 'string%d' % string_dimension_depth
# Determine whether to create the string length dimension.
if string_dimension_name not in self._dataset.dimensions:
self._dataset.createDimension(string_dimension_name,
string_dimension_depth)
# Add the string length dimension to dimension names.
cf_dimensions.append(string_dimension_name)
# Create the label coordinate variable.
cf_var = self._dataset.createVariable(cf_name, '|S1',
cf_dimensions)
# Add the payload to the label coordinate variable.
if len(cf_dimensions) == 1:
cf_var[:] = list('%- *s' % (string_dimension_depth,
coord.points[0]))
else:
for index in np.ndindex(coord.points.shape):
index_slice = tuple(list(index) + [slice(None, None)])
cf_var[index_slice] = list('%- *s' %
(string_dimension_depth,
coord.points[index]))
else:
# Identify the collection of coordinates that represent CF-netCDF
# coordinate variables.
cf_coordinates = cube.dim_coords
if coord in cf_coordinates:
# By definition of a CF-netCDF coordinate variable this
# coordinate must be 1-D and the name of the CF-netCDF variable
# must be the same as its dimension name.
cf_name = cf_dimensions[0]
# Get the values in a form which is valid for the file format.
points = self._ensure_valid_dtype(coord.points, 'coordinate',
coord)
# Create the CF-netCDF variable.
cf_var = self._dataset.createVariable(
cf_name, points.dtype.newbyteorder('='), cf_dimensions)
# Add the axis attribute for spatio-temporal CF-netCDF coordinates.
if coord in cf_coordinates:
axis = iris.util.guess_coord_axis(coord)
if axis is not None and axis.lower() in SPATIO_TEMPORAL_AXES:
cf_var.axis = axis.upper()
# Add the data to the CF-netCDF variable.
cf_var[:] = points
# Create the associated CF-netCDF bounds variable.
self._create_cf_bounds(coord, cf_var, cf_name)
# Deal with CF-netCDF units and standard name.
standard_name, long_name, units = self._cf_coord_identity(coord)
if units != 'unknown':
cf_var.units = units
if standard_name is not None:
cf_var.standard_name = standard_name
if long_name is not None:
cf_var.long_name = long_name
# Add the CF-netCDF calendar attribute.
if coord.units.calendar:
cf_var.calendar = coord.units.calendar
# Add any other custom coordinate attributes.
for name in sorted(coord.attributes):
value = coord.attributes[name]
if name == 'STASH':
# Adopting provisional Metadata Conventions for representing MO
# Scientific Data encoded in NetCDF Format.
name = 'um_stash_source'
value = str(value)
# Don't clobber existing attributes.
if not hasattr(cf_var, name):
setattr(cf_var, name, value)
return cf_name
def _create_cf_cell_methods(self, cube, dimension_names):
"""
Create CF-netCDF string representation of a cube cell methods.
Args:
* cube (:class:`iris.cube.Cube`) or cubelist
(:class:`iris.cube.CubeList`):
A :class:`iris.cube.Cube`, :class:`iris.cube.CubeList` or list of
cubes to be saved to a netCDF file.
* dimension_names (list):
Names associated with the dimensions of the cube.
Returns:
CF-netCDF string representation of a cube cell methods.
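        For example, a cube with a cell method ``mean`` over ``time`` with an
        interval of ``1 hour`` typically yields ``'time: mean (interval: 1 hour)'``
        (illustrative).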
"""
cell_methods = []
# Identify the collection of coordinates that represent CF-netCDF
# coordinate variables.
cf_coordinates = cube.dim_coords
for cm in cube.cell_methods:
names = ''
for name in cm.coord_names:
coord = cube.coords(name)
if coord:
coord = coord[0]
if coord in cf_coordinates:
name = dimension_names[cube.coord_dims(coord)[0]]
names += '%s: ' % name
interval = ' '.join(['interval: %s' % interval for interval in
cm.intervals or []])
comment = ' '.join(['comment: %s' % comment for comment in
cm.comments or []])
extra = ' '.join([interval, comment]).strip()
if extra:
extra = ' (%s)' % extra
cell_methods.append(names + cm.method + extra)
return ' '.join(cell_methods)
def _create_cf_grid_mapping(self, cube, cf_var_cube):
"""
Create CF-netCDF grid mapping variable and associated CF-netCDF
data variable grid mapping attribute.
Args:
* cube (:class:`iris.cube.Cube`) or cubelist
(:class:`iris.cube.CubeList`):
A :class:`iris.cube.Cube`, :class:`iris.cube.CubeList` or list of
cubes to be saved to a netCDF file.
* cf_var_cube (:class:`netcdf.netcdf_variable`):
cf variable cube representation.
Returns:
None
"""
cs = cube.coord_system('CoordSystem')
if cs is not None:
# Grid var not yet created?
if cs not in self._coord_systems:
while cs.grid_mapping_name in self._dataset.variables:
cs.grid_mapping_name = (
self._increment_name(cs.grid_mapping_name))
cf_var_grid = self._dataset.createVariable(
cs.grid_mapping_name, np.int32)
cf_var_grid.grid_mapping_name = cs.grid_mapping_name
def add_ellipsoid(ellipsoid):
cf_var_grid.longitude_of_prime_meridian = (
ellipsoid.longitude_of_prime_meridian)
semi_major = ellipsoid.semi_major_axis
semi_minor = ellipsoid.semi_minor_axis
if semi_minor == semi_major:
cf_var_grid.earth_radius = semi_major
else:
cf_var_grid.semi_major_axis = semi_major
cf_var_grid.semi_minor_axis = semi_minor
# latlon
if isinstance(cs, iris.coord_systems.GeogCS):
add_ellipsoid(cs)
# rotated latlon
elif isinstance(cs, iris.coord_systems.RotatedGeogCS):
if cs.ellipsoid:
add_ellipsoid(cs.ellipsoid)
cf_var_grid.grid_north_pole_latitude = (
cs.grid_north_pole_latitude)
cf_var_grid.grid_north_pole_longitude = (
cs.grid_north_pole_longitude)
cf_var_grid.north_pole_grid_longitude = (
cs.north_pole_grid_longitude)
# tmerc
elif isinstance(cs, iris.coord_systems.TransverseMercator):
if cs.ellipsoid:
add_ellipsoid(cs.ellipsoid)
cf_var_grid.longitude_of_central_meridian = (
cs.longitude_of_central_meridian)
cf_var_grid.latitude_of_projection_origin = (
cs.latitude_of_projection_origin)
cf_var_grid.false_easting = cs.false_easting
cf_var_grid.false_northing = cs.false_northing
cf_var_grid.scale_factor_at_central_meridian = (
cs.scale_factor_at_central_meridian)
# osgb (a specific tmerc)
elif isinstance(cs, iris.coord_systems.OSGB):
warnings.warn('OSGB coordinate system not yet handled')
# other
else:
warnings.warn('Unable to represent the horizontal '
'coordinate system. The coordinate system '
'type %r is not yet implemented.' % type(cs))
self._coord_systems.append(cs)
# Refer to grid var
cf_var_cube.grid_mapping = cs.grid_mapping_name
def _create_cf_data_variable(self, cube, dimension_names, local_keys=None,
**kwargs):
"""
Create CF-netCDF data variable for the cube and any associated grid
mapping.
Args:
* cube (:class:`iris.cube.Cube`):
The associated cube being saved to CF-netCDF file.
* dimension_names (list):
String names for each dimension of the cube.
Kwargs:
* local_keys (iterable of strings):
            An iterable of cube attribute keys. Any cube attributes
with matching keys will become attributes on the data variable.
All other keywords are passed through to the dataset's `createVariable`
method.
Returns:
The newly created CF-netCDF data variable.
"""
cf_name = self._get_cube_variable_name(cube)
while cf_name in self._dataset.variables:
cf_name = self._increment_name(cf_name)
# if netcdf3 avoid streaming due to dtype handling
if (not cube.has_lazy_data()
or self._dataset.file_format in ('NETCDF3_CLASSIC',
'NETCDF3_64BIT')):
# Determine whether there is a cube MDI value.
fill_value = None
if isinstance(cube.data, ma.core.MaskedArray):
fill_value = cube.data.fill_value
# Get the values in a form which is valid for the file format.
data = self._ensure_valid_dtype(cube.data, 'cube', cube)
# Create the cube CF-netCDF data variable with data payload.
cf_var = self._dataset.createVariable(
cf_name, data.dtype.newbyteorder('='), dimension_names,
fill_value=fill_value, **kwargs)
cf_var[:] = data
else:
# Create the cube CF-netCDF data variable.
# Explicitly assign the fill_value, which will be the type default
# in the case of an unmasked array.
cf_var = self._dataset.createVariable(
cf_name, cube.lazy_data().dtype.newbyteorder('='),
dimension_names, fill_value=cube.lazy_data().fill_value,
**kwargs)
# stream the data
biggus.save([cube.lazy_data()], [cf_var], masked=True)
if cube.standard_name:
cf_var.standard_name = cube.standard_name
if cube.long_name:
cf_var.long_name = cube.long_name
if cube.units != 'unknown':
cf_var.units = str(cube.units)
# Add data variable-only attribute names to local_keys.
if local_keys is None:
local_keys = set()
else:
local_keys = set(local_keys)
local_keys.update(_CF_DATA_ATTRS, _UKMO_DATA_ATTRS)
# Add any cube attributes whose keys are in local_keys as
# CF-netCDF data variable attributes.
attr_names = set(cube.attributes).intersection(local_keys)
for attr_name in sorted(attr_names):
# Do not output 'conventions' attribute.
if attr_name.lower() == 'conventions':
continue
value = cube.attributes[attr_name]
if attr_name == 'STASH':
# Adopting provisional Metadata Conventions for representing MO
# Scientific Data encoded in NetCDF Format.
attr_name = 'um_stash_source'
value = str(value)
if attr_name == "ukmo__process_flags":
value = " ".join([x.replace(" ", "_") for x in value])
if attr_name in _CF_GLOBAL_ATTRS:
msg = '{attr_name!r} is being added as CF data variable ' \
'attribute, but {attr_name!r} should only be a CF ' \
'global attribute.'.format(attr_name=attr_name)
warnings.warn(msg)
setattr(cf_var, attr_name, value)
# Create the CF-netCDF data variable cell method attribute.
cell_methods = self._create_cf_cell_methods(cube, dimension_names)
if cell_methods:
cf_var.cell_methods = cell_methods
# Create the CF-netCDF grid mapping.
self._create_cf_grid_mapping(cube, cf_var)
return cf_var
def _increment_name(self, varname):
"""
        Increment the numeric suffix of a variable name, or start one.
        Used to avoid conflicts between variable names: the name is given an
        incrementing numeric suffix to distinguish it from existing names.
Args:
* varname (string):
Variable name to increment.
Returns:
Incremented varname.
"""
num = 0
try:
name, endnum = varname.rsplit('_', 1)
if endnum.isdigit():
num = int(endnum) + 1
varname = name
except ValueError:
pass
return '{}_{}'.format(varname, num)
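    # For example (hypothetical names): 'temperature' becomes 'temperature_0',
    # and a subsequent call with 'temperature_0' yields 'temperature_1'.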
def save(cube, filename, netcdf_format='NETCDF4', local_keys=None,
unlimited_dimensions=None, zlib=False, complevel=4, shuffle=True,
fletcher32=False, contiguous=False, chunksizes=None, endian='native',
least_significant_digit=None):
"""
Save cube(s) to a netCDF file, given the cube and the filename.
* Iris will write CF 1.5 compliant NetCDF files.
* The attributes dictionaries on each cube in the saved cube list
will be compared and common attributes saved as NetCDF global
attributes where appropriate.
* Keyword arguments specifying how to save the data are applied
to each cube. To use different settings for different cubes, use
the NetCDF Context manager (:class:`~Saver`) directly.
* The save process will stream the data payload to the file using biggus,
enabling large data payloads to be saved and maintaining the 'lazy'
status of the cube's data payload, unless the netcdf_format is explicitly
      specified to be 'NETCDF3_CLASSIC' or 'NETCDF3_64BIT'.
Args:
* cube (:class:`iris.cube.Cube` or :class:`iris.cube.CubeList`):
A :class:`iris.cube.Cube`, :class:`iris.cube.CubeList` or other
iterable of cubes to be saved to a netCDF file.
* filename (string):
Name of the netCDF file to save the cube(s).
Kwargs:
* netcdf_format (string):
Underlying netCDF file format, one of 'NETCDF4', 'NETCDF4_CLASSIC',
'NETCDF3_CLASSIC' or 'NETCDF3_64BIT'. Default is 'NETCDF4' format.
* local_keys (iterable of strings):
        An iterable of cube attribute keys. Any cube attributes with
matching keys will become attributes on the data variable rather
than global attributes.
* unlimited_dimensions (iterable of strings and/or
:class:`iris.coords.Coord` objects):
Explicit list of coordinate names (or coordinate objects) corresponding
to coordinate dimensions of `cube` to save with the NetCDF dimension
variable length 'UNLIMITED'. By default, the outermost (first)
dimension for each cube is used. Only the 'NETCDF4' format supports
multiple 'UNLIMITED' dimensions. To save no unlimited dimensions, use
`unlimited_dimensions=[]` (an empty list).
* zlib (bool):
If `True`, the data will be compressed in the netCDF file using gzip
compression (default `False`).
* complevel (int):
An integer between 1 and 9 describing the level of compression desired
(default 4). Ignored if `zlib=False`.
* shuffle (bool):
If `True`, the HDF5 shuffle filter will be applied before compressing
the data (default `True`). This significantly improves compression.
Ignored if `zlib=False`.
* fletcher32 (bool):
If `True`, the Fletcher32 HDF5 checksum algorithm is activated to
detect errors. Default `False`.
* contiguous (bool):
If `True`, the variable data is stored contiguously on disk. Default
`False`. Setting to `True` for a variable with an unlimited dimension
will trigger an error.
* chunksizes (tuple of int):
Used to manually specify the HDF5 chunksizes for each dimension of the
variable. A detailed discussion of HDF chunking and I/O performance is
available here: http://www.hdfgroup.org/HDF5/doc/H5.user/Chunking.html.
Basically, you want the chunk size for each dimension to match as
closely as possible the size of the data block that users will read
from the file. `chunksizes` cannot be set if `contiguous=True`.
* endian (string):
Used to control whether the data is stored in little or big endian
format on disk. Possible values are 'little', 'big' or 'native'
(default). The library will automatically handle endian conversions
when the data is read, but if the data is always going to be read on a
computer with the opposite format as the one used to create the file,
there may be some performance advantage to be gained by setting the
endian-ness.
* least_significant_digit (int):
If `least_significant_digit` is specified, variable data will be
truncated (quantized). In conjunction with `zlib=True` this produces
'lossy', but significantly more efficient compression. For example, if
`least_significant_digit=1`, data will be quantized using
`numpy.around(scale*data)/scale`, where `scale = 2**bits`, and `bits`
is determined so that a precision of 0.1 is retained (in this case
`bits=4`). From
http://www.cdc.noaa.gov/cdc/conventions/cdc_netcdf_standard.shtml:
"least_significant_digit -- power of ten of the smallest decimal place
in unpacked data that is a reliable value". Default is `None`, or no
quantization, or 'lossless' compression.
Returns:
None.
.. note::
The `zlib`, `complevel`, `shuffle`, `fletcher32`, `contiguous`,
`chunksizes` and `endian` keywords are silently ignored for netCDF 3
files that do not use HDF5.
.. seealso::
NetCDF Context manager (:class:`~Saver`).
.. deprecated:: 1.8.0
NetCDF default saving behaviour currently assigns the outermost
dimensions to unlimited. This behaviour is to be deprecated, in
favour of no automatic assignment. To switch to the new behaviour,
set `iris.FUTURE.netcdf_no_unlimited` to True.
"""
if unlimited_dimensions is None:
if iris.FUTURE.netcdf_no_unlimited:
unlimited_dimensions = []
else:
_no_unlim_dep_warning()
if isinstance(cube, iris.cube.Cube):
cubes = iris.cube.CubeList()
cubes.append(cube)
else:
cubes = cube
if local_keys is None:
local_keys = set()
else:
local_keys = set(local_keys)
# Determine the attribute keys that are common across all cubes and
# thereby extend the collection of local_keys for attributes
# that should be attributes on data variables.
attributes = cubes[0].attributes
common_keys = set(attributes)
for cube in cubes[1:]:
keys = set(cube.attributes)
local_keys.update(keys.symmetric_difference(common_keys))
common_keys.intersection_update(keys)
different_value_keys = []
for key in common_keys:
if np.any(attributes[key] != cube.attributes[key]):
different_value_keys.append(key)
common_keys.difference_update(different_value_keys)
local_keys.update(different_value_keys)
# Initialise Manager for saving
with Saver(filename, netcdf_format) as sman:
# Iterate through the cubelist.
for cube in cubes:
sman.write(cube, local_keys, unlimited_dimensions, zlib, complevel,
shuffle, fletcher32, contiguous, chunksizes, endian,
least_significant_digit)
conventions = CF_CONVENTIONS_VERSION
# Perform a CF patch of the conventions attribute.
cf_profile_available = (iris.site_configuration.get('cf_profile') not
in [None, False])
if cf_profile_available:
conventions_patch = iris.site_configuration.get(
'cf_patch_conventions')
if conventions_patch is not None:
conventions = conventions_patch(conventions)
else:
msg = 'cf_profile is available but no {} defined.'.format(
'cf_patch_conventions')
warnings.warn(msg)
# Add conventions attribute.
sman.update_global_attributes(Conventions=conventions)
def _no_unlim_dep_warning():
msg = ('NetCDF default saving behaviour currently assigns the '
'outermost dimensions to unlimited. This behaviour is to be '
'deprecated, in favour of no automatic assignment. To switch '
'to the new behaviour, set iris.FUTURE.netcdf_no_unlimited to '
'True.')
warnings.warn(msg)
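# A minimal usage sketch of :func:`save` (illustrative only -- the cube
# argument and output path are assumptions, not part of this module): a
# compressed, quantised save with no unlimited dimensions, per the keyword
# descriptions above.
def _example_save_usage(cube, path='example.nc'):
    # zlib/complevel control compression; least_significant_digit quantises;
    # an empty unlimited_dimensions list suppresses the UNLIMITED dimension.
    save(cube, path, netcdf_format='NETCDF4', zlib=True, complevel=4,
         least_significant_digit=2, unlimited_dimensions=[])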
| lgpl-3.0 | -8,139,096,805,184,263,000 | 37.797199 | 79 | 0.569073 | false |
winking324/ngxtop_rtmp_hls | ngxtop/rtmptop.py | 1 | 9398 | """
Nginx-rtmp-module stat parser.
Need to install nginx-rtmp-module first.
"""
import xml.dom.minidom
import urllib2
if __package__ is None:
from utils import error_exit
else:
from .utils import error_exit
STAT_URL = "http://127.0.0.1:8080/stat"
def pass_for_node_value(root, node_name):
child = root.getElementsByTagName(node_name)
if len(child) >= 1 and child[0].firstChild:
return child[0].firstChild.data
return 0
class MetaInfo(object):
def __init__(self):
self.video_width = None
self.video_height = None
self.video_frame_rate = None
self.video_codec = None
self.video_profile = None
self.video_compat = None
self.video_level = None
self.audio_codec = None
self.audio_profile = None
self.audio_channels = None
self.audio_sample_rate = None
def parse_info(self, meta_root):
video_child = meta_root.getElementsByTagName('video')[0]
self.video_width = int(pass_for_node_value(video_child, 'width'))
self.video_height = int(pass_for_node_value(video_child, 'height'))
self.video_frame_rate = int(pass_for_node_value(video_child, 'frame_rate'))
self.video_codec = pass_for_node_value(video_child, 'codec')
self.video_profile = pass_for_node_value(video_child, 'profile')
self.video_compat = int(pass_for_node_value(video_child, 'compat'))
self.video_level = float(pass_for_node_value(video_child, 'level'))
audio_child = meta_root.getElementsByTagName('audio')[0]
self.audio_codec = pass_for_node_value(audio_child, 'codec')
self.audio_profile = pass_for_node_value(audio_child, 'profile')
self.audio_channels = int(pass_for_node_value(audio_child, 'channels'))
self.audio_sample_rate = int(pass_for_node_value(audio_child, 'sample_rate'))
def print_info(self, output):
output.append('\t\tVideo Meta: width %d, height %d, frame_rate %d, codec %s, profile %s, compat %d, level %f' %
(self.video_width, self.video_height, self.video_frame_rate, self.video_codec, self.video_profile,
self.video_compat, self.video_level))
output.append('\t\tAudio Meta: codec %s, profile %s, channels %d, sample rate %d' %
(self.audio_codec, self.audio_profile, self.audio_channels, self.audio_sample_rate))
class ClientInfo(object):
def __init__(self, client_root):
self.id = int(pass_for_node_value(client_root, 'id'))
self.address = pass_for_node_value(client_root, 'address')
self.time = int(pass_for_node_value(client_root, 'time'))
self.flashver = pass_for_node_value(client_root, 'flashver')
self.pageurl = None
self.swfurl = None
self.dropped = int(pass_for_node_value(client_root, 'dropped'))
self.avsync = int(pass_for_node_value(client_root, 'avsync'))
self.timestamp = int(pass_for_node_value(client_root, 'timestamp'))
self.is_publisher = False
def parse_info(self, client_root):
publish_child = client_root.getElementsByTagName('publishing')
if publish_child.length > 0:
self.is_publisher = True
if not self.is_publisher:
self.pageurl = pass_for_node_value(client_root, 'pageurl')
self.swfurl = pass_for_node_value(client_root, 'swfurl')
def print_info(self, output):
if self.is_publisher:
output.append('\t\tServer: addr %s, flashver %s' % (self.address, self.flashver))
else:
output.append('\t\tClient: addr %s, flashver %s, page %s, swf %s' %
(self.address, self.flashver, self.pageurl, self.swfurl))
class StreamInfo(object):
def __init__(self, stream_root):
self.name = pass_for_node_value(stream_root, 'name')
self.time = int(pass_for_node_value(stream_root, 'time'))
self.bw_in = int(pass_for_node_value(stream_root, 'bw_in'))
self.bytes_in = int(pass_for_node_value(stream_root, 'bytes_in'))
self.bw_out = int(pass_for_node_value(stream_root, 'bw_out'))
self.bytes_out = int(pass_for_node_value(stream_root, 'bytes_out'))
self.bw_audio = int(pass_for_node_value(stream_root, 'bw_audio'))
self.bw_video = int(pass_for_node_value(stream_root, 'bw_video'))
self.nclients = int(pass_for_node_value(stream_root, 'nclients'))
self.meta_info = None
self.clients = {}
def parse_info(self, stream_root):
meta_child = stream_root.getElementsByTagName('meta')
if meta_child.length > 0:
self.meta_info = MetaInfo()
self.meta_info.parse_info(meta_child[0])
client_child = stream_root.getElementsByTagName('client')
for client in client_child:
client_info = ClientInfo(client)
client_info.parse_info(client)
self.clients[client_info.id] = client_info
def print_info(self, output):
output.append('\tStream %s: time %d, bw_in %d, bytes_in %f, bw_out %d, '
                      'bytes_out %f, bw_audio %d, bw_video %d, clients %d' %
(self.name, self.time, self.bw_in, self.bytes_in, self.bw_out,
self.bytes_out, self.bw_audio, self.bw_video, self.nclients))
output.append('\tMeta info:')
if self.meta_info:
self.meta_info.print_info(output)
else:
            output.append('\t\tStream Idle')
output.append('\t\tClient Info:')
for client in self.clients.itervalues():
client.print_info(output)
class NginxRtmpInfo(object):
def __init__(self, arguments):
self.arguments = arguments
self.processor = None
self.rtmp_url = STAT_URL
self.nginx_version = None
self.rtmp_version = None
self.compiler = None
self.built = None
self.pid = None
self.uptime = None
self.accepted = None
self.bw_in = None
self.bw_out = None
self.bytes_in = None
self.bytes_out = None
self.stream_infos = {}
def set_processor(self, processor):
self.processor = processor
def get_rtmp_url(self):
rtmp_url = self.arguments['--rtmp-stat-url']
if rtmp_url:
self.rtmp_url = rtmp_url
return self.rtmp_url
def processor_process(self):
if self.processor is None:
return
records = {}
for stream_info in self.stream_infos.itervalues():
records['request'] = stream_info.name
records['in_bytes'] = stream_info.bytes_in
records['in_bw'] = stream_info.bw_in
records['out_bytes'] = stream_info.bytes_out
records['out_bw'] = stream_info.bw_out
for client in stream_info.clients.itervalues():
records['remote_addr'] = client.address
records['time'] = client.time
records['http_user_agent'] = client.flashver
self.processor.process(records)
def parse_info(self):
self.get_rtmp_url()
try:
response = urllib2.urlopen(self.rtmp_url)
except urllib2.URLError:
error_exit('Cannot access RTMP URL: %s' % self.rtmp_url)
dom = xml.dom.minidom.parseString(response.read())
root = dom.documentElement
self.nginx_version = pass_for_node_value(root, 'nginx_version')
self.rtmp_version = pass_for_node_value(root, 'nginx_rtmp_version')
self.compiler = pass_for_node_value(root, 'compiler')
self.built = pass_for_node_value(root, 'built')
self.pid = int(pass_for_node_value(root, 'pid'))
self.uptime = int(pass_for_node_value(root, 'uptime'))
self.accepted = int(pass_for_node_value(root, 'naccepted'))
self.bw_in = int(pass_for_node_value(root, 'bw_in'))
self.bw_out = int(pass_for_node_value(root, 'bw_out'))
self.bytes_in = int(pass_for_node_value(root, 'bytes_in'))
self.bytes_out = int(pass_for_node_value(root, 'bytes_out'))
live_child = root.getElementsByTagName('server')[0].getElementsByTagName(
'application')[0].getElementsByTagName('live')[0]
for stream_child in live_child.getElementsByTagName('stream'):
stream_info = StreamInfo(stream_child)
stream_info.parse_info(stream_child)
self.stream_infos[stream_info.name] = stream_info
self.processor_process()
def print_info(self):
output = list()
output.append('Summary:')
output.append('\tNginx version: %s, RTMP version: %s, Compiler: %s, Built: %s, PID: %d, Uptime: %ds.' %
(self.nginx_version, self.rtmp_version, self.compiler, self.built, self.pid, self.uptime))
output.append('\tAccepted: %d, bw_in: %f Kbit/s, bytes_in: %02f MByte, '
'bw_out: %02f Kbit/s, bytes_out: %02f MByte' %
(self.accepted, self.bw_in / 1024.0, self.bytes_in / 1024.0 / 1024,
self.bw_out / 1024.0, self.bytes_out / 1024.0 / 1024))
output.append('Detail:')
output.append('\tStreams: %d' % len(self.stream_infos))
for stream in self.stream_infos.itervalues():
stream.print_info(output)
return output
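# Minimal usage sketch (assumes a reachable nginx-rtmp stat endpoint; the
# '--rtmp-stat-url' key mirrors the docopt-style arguments this module reads).
def example_dump_stats(url=STAT_URL):
    info = NginxRtmpInfo({'--rtmp-stat-url': url})
    info.parse_info()
    print '\n'.join(info.print_info())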
| mit | -818,754,363,569,974,100 | 38.991489 | 120 | 0.60183 | false |
srgblnch/Rijndael | Testing/_FIPS197_AES192.py | 1 | 9943 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
__author__ = "Sergi Blanch-Torne"
__email__ = "[email protected]"
__copyright__ = "Copyright 2016 Sergi Blanch-Torne"
__license__ = "GPLv3+"
__status__ = "development"
"""
This file stores the test vectors provided by FIPS-197 for AES-192.
"""
aes192 = {}
aes192['key'] = 0x000102030405060708090a0b0c0d0e0f1011121314151617
aes192['input'] = 0x00112233445566778899aabbccddeeff
aes192['output'] = 0xdda97ca4864cdfe06eaf70a0ec0d7191
aes192_round = {}
aes192_round[0] = {}
aes192_round[0]['start'] = aes192['input']
aes192_round[0]['k_sch'] = 0x000102030405060708090a0b0c0d0e0f
aes192_round[0]['end'] = 0x00102030405060708090a0b0c0d0e0f0
aes192_round[1] = {}
aes192_round[1]['start'] = aes192_round[0]['end']
aes192_round[1]['s_box'] = 0x63cab7040953d051cd60e0e7ba70e18c
aes192_round[1]['s_row'] = 0x6353e08c0960e104cd70b751bacad0e7
aes192_round[1]['m_col'] = 0x5f72641557f5bc92f7be3b291db9f91a
aes192_round[1]['k_sch'] = 0x10111213141516175846f2f95c43f4fe
aes192_round[1]['end'] = 0x4f63760643e0aa85aff8c9d041fa0de4
aes192_round[2] = {}
aes192_round[2]['start'] = aes192_round[1]['end']
aes192_round[2]['s_box'] = 0x84fb386f1ae1ac977941dd70832dd769
aes192_round[2]['s_row'] = 0x84e1dd691a41d76f792d389783fbac70
aes192_round[2]['m_col'] = 0x9f487f794f955f662afc86abd7f1ab29
aes192_round[2]['k_sch'] = 0x544afef55847f0fa4856e2e95c43f4fe
aes192_round[2]['end'] = 0xcb02818c17d2af9c62aa64428bb25fd7
aes192_round[3] = {}
aes192_round[3]['start'] = aes192_round[2]['end']
aes192_round[3]['s_box'] = 0x1f770c64f0b579deaaac432c3d37cf0e
aes192_round[3]['s_row'] = 0x1fb5430ef0accf64aa370cde3d77792c
aes192_round[3]['m_col'] = 0xb7a53ecbbf9d75a0c40efc79b674cc11
aes192_round[3]['k_sch'] = 0x40f949b31cbabd4d48f043b810b7b342
aes192_round[3]['end'] = 0xf75c7778a327c8ed8cfebfc1a6c37f53
aes192_round[4] = {}
aes192_round[4]['start'] = aes192_round[3]['end']
aes192_round[4]['s_box'] = 0x684af5bc0acce85564bb0878242ed2ed
aes192_round[4]['s_row'] = 0x68cc08ed0abbd2bc642ef555244ae878
aes192_round[4]['m_col'] = 0x7a1e98bdacb6d1141a6944dd06eb2d3e
aes192_round[4]['k_sch'] = 0x58e151ab04a2a5557effb5416245080c
aes192_round[4]['end'] = 0x22ffc916a81474416496f19c64ae2532
aes192_round[5] = {}
aes192_round[5]['start'] = aes192_round[4]['end']
aes192_round[5]['s_box'] = 0x9316dd47c2fa92834390a1de43e43f23
aes192_round[5]['s_row'] = 0x93faa123c2903f4743e4dd83431692de
aes192_round[5]['m_col'] = 0xaaa755b34cffe57cef6f98e1f01c13e6
aes192_round[5]['k_sch'] = 0x2ab54bb43a02f8f662e3a95d66410c08
aes192_round[5]['end'] = 0x80121e0776fd1d8a8d8c31bc965d1fee
aes192_round[6] = {}
aes192_round[6]['start'] = aes192_round[5]['end']
aes192_round[6]['s_box'] = 0xcdc972c53854a47e5d64c765904cc028
aes192_round[6]['s_row'] = 0xcd54c7283864c0c55d4c727e90c9a465
aes192_round[6]['m_col'] = 0x921f748fd96e937d622d7725ba8ba50c
aes192_round[6]['k_sch'] = 0xf501857297448d7ebdf1c6ca87f33e3c
aes192_round[6]['end'] = 0x671ef1fd4e2a1e03dfdcb1ef3d789b30
aes192_round[7] = {}
aes192_round[7]['start'] = aes192_round[6]['end']
aes192_round[7]['s_box'] = 0x8572a1542fe5727b9e86c8df27bc1404
aes192_round[7]['s_row'] = 0x85e5c8042f8614549ebca17b277272df
aes192_round[7]['m_col'] = 0xe913e7b18f507d4b227ef652758acbcc
aes192_round[7]['k_sch'] = 0xe510976183519b6934157c9ea351f1e0
aes192_round[7]['end'] = 0x0c0370d00c01e622166b8accd6db3a2c
aes192_round[8] = {}
aes192_round[8]['start'] = aes192_round[7]['end']
aes192_round[8]['s_box'] = 0xfe7b5170fe7c8e93477f7e4bf6b98071
aes192_round[8]['s_row'] = 0xfe7c7e71fe7f807047b95193f67b8e4b
aes192_round[8]['m_col'] = 0x6cf5edf996eb0a069c4ef21cbfc25762
aes192_round[8]['k_sch'] = 0x1ea0372a995309167c439e77ff12051e
aes192_round[8]['end'] = 0x7255dad30fb80310e00d6c6b40d0527c
aes192_round[9] = {}
aes192_round[9]['start'] = aes192_round[8]['end']
aes192_round[9]['s_box'] = 0x40fc5766766c7bcae1d7507f09700010
aes192_round[9]['s_row'] = 0x406c501076d70066e17057ca09fc7b7f
aes192_round[9]['m_col'] = 0x7478bcdce8a50b81d4327a9009188262
aes192_round[9]['k_sch'] = 0xdd7e0e887e2fff68608fc842f9dcc154
aes192_round[9]['end'] = 0xa906b254968af4e9b4bdb2d2f0c44336
aes192_round[10] = {}
aes192_round[10]['start'] = aes192_round[9]['end']
aes192_round[10]['s_box'] = 0xd36f3720907ebf1e8d7a37b58c1c1a05
aes192_round[10]['s_row'] = 0xd37e3705907a1a208d1c371e8c6fbfb5
aes192_round[10]['m_col'] = 0x0d73cc2d8f6abe8b0cf2dd9bb83d422e
aes192_round[10]['k_sch'] = 0x859f5f237a8d5a3dc0c02952beefd63a
aes192_round[10]['end'] = 0x88ec930ef5e7e4b6cc32f4c906d29414
aes192_round[11] = {}
aes192_round[11]['start'] = aes192_round[10]['end']
aes192_round[11]['s_box'] = 0xc4cedcabe694694e4b23bfdd6fb522fa
aes192_round[11]['s_row'] = 0xc494bffae62322ab4bb5dc4e6fce69dd
aes192_round[11]['m_col'] = 0x71d720933b6d677dc00b8f28238e0fb7
aes192_round[11]['k_sch'] = 0xde601e7827bcdf2ca223800fd8aeda32
aes192_round[11]['end'] = 0xafb73eeb1cd1b85162280f27fb20d585
aes192_round[12] = {}
aes192_round[12]['start'] = aes192_round[11]['end']
aes192_round[12]['s_box'] = 0x79a9b2e99c3e6cd1aa3476cc0fb70397
aes192_round[12]['s_row'] = 0x793e76979c3403e9aab7b2d10fa96ccc
aes192_round[12]['k_sch'] = 0xa4970a331a78dc09c418c271e3a41d5d
aes192_round[12]['end'] = aes192['output']
aes192_round[0]['iinput'] = aes192['output']
aes192_round[0]['ik_sch'] = aes192_round[12]['k_sch']
aes192_round[0]['ik_add'] = aes192_round[12]['s_row']
aes192_round[0]['iend'] = aes192_round[12]['s_row']
aes192_round[1]['istart'] = aes192_round[0]['iend']
aes192_round[1]['is_row'] = aes192_round[12]['s_box']
aes192_round[1]['is_box'] = aes192_round[11]['end']
aes192_round[1]['ik_sch'] = aes192_round[11]['k_sch']
aes192_round[1]['ik_add'] = aes192_round[11]['m_col']
aes192_round[1]['iend'] = aes192_round[11]['s_row']
aes192_round[2]['istart'] = aes192_round[1]['iend']
aes192_round[2]['is_row'] = aes192_round[11]['s_box']
aes192_round[2]['is_box'] = aes192_round[10]['end']
aes192_round[2]['ik_sch'] = aes192_round[10]['k_sch']
aes192_round[2]['ik_add'] = aes192_round[10]['m_col']
aes192_round[2]['iend'] = aes192_round[10]['s_row']
aes192_round[3]['istart'] = aes192_round[2]['iend']
aes192_round[3]['is_row'] = aes192_round[10]['s_box']
aes192_round[3]['is_box'] = aes192_round[9]['end']
aes192_round[3]['ik_sch'] = aes192_round[9]['k_sch']
aes192_round[3]['ik_add'] = aes192_round[9]['m_col']
aes192_round[3]['iend'] = aes192_round[9]['s_row']
aes192_round[4]['istart'] = aes192_round[3]['iend']
aes192_round[4]['is_row'] = aes192_round[9]['s_box']
aes192_round[4]['is_box'] = aes192_round[8]['end']
aes192_round[4]['ik_sch'] = aes192_round[8]['k_sch']
aes192_round[4]['ik_add'] = aes192_round[8]['m_col']
aes192_round[4]['iend'] = aes192_round[8]['s_row']
aes192_round[5]['istart'] = aes192_round[4]['iend']
aes192_round[5]['is_row'] = aes192_round[8]['s_box']
aes192_round[5]['is_box'] = aes192_round[7]['end']
aes192_round[5]['ik_sch'] = aes192_round[7]['k_sch']
aes192_round[5]['ik_add'] = aes192_round[7]['m_col']
aes192_round[5]['iend'] = aes192_round[7]['s_row']
aes192_round[6]['istart'] = aes192_round[5]['iend']
aes192_round[6]['is_row'] = aes192_round[7]['s_box']
aes192_round[6]['is_box'] = aes192_round[6]['end']
aes192_round[6]['ik_sch'] = aes192_round[6]['k_sch']
aes192_round[6]['ik_add'] = aes192_round[6]['m_col']
aes192_round[6]['iend'] = aes192_round[6]['s_row']
aes192_round[7]['istart'] = aes192_round[6]['iend']
aes192_round[7]['is_row'] = aes192_round[6]['s_box']
aes192_round[7]['is_box'] = aes192_round[5]['end']
aes192_round[7]['ik_sch'] = aes192_round[5]['k_sch']
aes192_round[7]['ik_add'] = aes192_round[5]['m_col']
aes192_round[7]['iend'] = aes192_round[5]['s_row']
aes192_round[8]['istart'] = aes192_round[7]['iend']
aes192_round[8]['is_row'] = aes192_round[5]['s_box']
aes192_round[8]['is_box'] = aes192_round[4]['end']
aes192_round[8]['ik_sch'] = aes192_round[4]['k_sch']
aes192_round[8]['ik_add'] = aes192_round[4]['m_col']
aes192_round[8]['iend'] = aes192_round[4]['s_row']
aes192_round[9]['istart'] = aes192_round[8]['iend']
aes192_round[9]['is_row'] = aes192_round[4]['s_box']
aes192_round[9]['is_box'] = aes192_round[3]['end']
aes192_round[9]['ik_sch'] = aes192_round[3]['k_sch']
aes192_round[9]['ik_add'] = aes192_round[3]['m_col']
aes192_round[9]['iend'] = aes192_round[3]['s_row']
aes192_round[10]['istart'] = aes192_round[9]['iend']
aes192_round[10]['is_row'] = aes192_round[3]['s_box']
aes192_round[10]['is_box'] = aes192_round[2]['end']
aes192_round[10]['ik_sch'] = aes192_round[2]['k_sch']
aes192_round[10]['ik_add'] = aes192_round[2]['m_col']
aes192_round[10]['iend'] = aes192_round[2]['s_row']
aes192_round[11]['istart'] = aes192_round[10]['iend']
aes192_round[11]['is_row'] = aes192_round[2]['s_box']
aes192_round[11]['is_box'] = aes192_round[1]['end']
aes192_round[11]['ik_sch'] = aes192_round[1]['k_sch']
aes192_round[11]['ik_add'] = aes192_round[1]['m_col']
aes192_round[11]['iend'] = aes192_round[1]['s_row']
aes192_round[12]['istart'] = aes192_round[11]['iend']
aes192_round[12]['is_row'] = aes192_round[1]['s_box']
aes192_round[12]['is_box'] = aes192_round[0]['end']
aes192_round[12]['ik_sch'] = aes192_round[0]['k_sch']
aes192_round[12]['ik_add'] = aes192['input']
aes192_round[12]['ioutput'] = aes192['input']
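# Sanity-check sketch (not part of the FIPS-197 data itself): AddRoundKey in
# round 0 means the round-0 output must equal the plaintext XORed with the
# first round key, and the final round output must equal the ciphertext.
def _self_check():
    assert aes192_round[0]['end'] == aes192['input'] ^ aes192_round[0]['k_sch']
    assert aes192_round[12]['end'] == aes192['output']
    assert aes192_round[0]['iinput'] == aes192['output']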
| gpl-3.0 | 6,142,322,919,003,080,000 | 43.788288 | 78 | 0.708941 | false |
assencess/myshop | shop/views.py | 1 | 2754 | from django.shortcuts import render, get_object_or_404
from .models import Category, Product
from .forms import EmailContactForm
from cart.forms import CartAddProductForm
from django.core.mail import send_mail
from django.views import View
from django.views.generic import DetailView, ListView
class ProductListView(ListView):
model = Category
template_name = 'shop/product/list.html'
def get_context_data(self, **kwargs):
# get context
context = super(ProductListView, self).get_context_data(**kwargs)
        if 'category_slug' not in self.kwargs:
self.kwargs['category_slug'] = None
category = None
categories = Category.objects.all()
products = Product.objects.all().filter(available=True)
if self.kwargs['category_slug']:
category = get_object_or_404(Category,
slug=self.kwargs['category_slug'])
products = products.filter(category=category)
context['category'] = category
context['categories'] = categories
context['products'] = products
return context
class ProductDetailView(DetailView):
model = Product
template_name = 'shop/product/detail.html'
def get_context_data(self, **kwargs):
# get context
context = super(ProductDetailView, self).get_context_data(**kwargs)
product = get_object_or_404(Product, id=self.kwargs['id'],
slug=self.kwargs['slug'],
available=True)
cart_product_form = CartAddProductForm()
context['product'] = product
context['cart_product_form'] = cart_product_form
return context
class ContactView(View):
form_class = EmailContactForm
initial = {'form': form_class()}
    template_name = 'shop/contact.html'
sent = False
def get(self, request, *args, **kwargs):
return render(request, 'shop/contact.html',
{'form': self.form_class()})
def post(self, request, *args, **kwargs):
form = self.form_class(request.POST)
if form.is_valid():
cd = form.cleaned_data
# send message to client
client_email = cd['email']
subject = 'Support from www.localhost'
message = '{} your messages was sent to support of {}' \
.format(cd['name'], 'www.localhost')
send_mail(subject, message, 'www.localhost', [client_email])
# send message to support of localhost
subject = 'From client {}'.format(client_email)
send_mail(subject, cd['comments'], client_email,
['[email protected]'])
sent = True
return render(request, 'shop/contact.html', {'form': form})
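# Possible URL wiring for the views above (a sketch, not the project's actual
# urls.py; route names and Django 2-style path() syntax are assumptions --
# older projects would use url() regexes instead):
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('', views.ProductListView.as_view(), name='product_list'),
#       path('contact/', views.ContactView.as_view(), name='contact'),
#       path('<slug:category_slug>/', views.ProductListView.as_view(),
#            name='product_list_by_category'),
#       path('<int:id>/<slug:slug>/', views.ProductDetailView.as_view(),
#            name='product_detail'),
#   ]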
| gpl-3.0 | -1,827,950,606,063,933,700 | 35.72 | 75 | 0.617284 | false |
mclaughlin6464/pearce | bin/nn_emu/test_tf_nn_novel.py | 1 | 9691 | from pearce.emulator import OriginalRecipe, ExtraCrispy
from pearce.mocks import cat_dict
import numpy as np
from os import path
import tensorflow as tf
from tensorflow.python.client import device_lib
def get_available_gpus():
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == 'GPU']
print get_available_gpus()
#xi gg
training_file = '/scratch/users/swmclau2/xi_zheng07_cosmo_lowmsat/PearceRedMagicXiCosmoFixedNd.hdf5'
#test_file= '/scratch/users/swmclau2/xi_zheng07_cosmo_test_lowmsat2/'
test_file = '/scratch/users/swmclau2/xi_zheng07_cosmo_test_lowmsat2/PearceRedMagicXiCosmoFixedNd_Test.hdf5'
em_method = 'nn'
split_method = 'random'
a = 1.0
z = 1.0/a - 1.0
fixed_params = {'z':z, 'r': 24.06822623}
# This is just me loading the data with my own code, nothing to do with NN
n_leaves, n_overlap = 50, 1
# TODO ostrich module that just gets the data in the right format.
emu = OriginalRecipe(training_file, method = em_method, fixed_params=fixed_params,
hyperparams = {'hidden_layer_sizes': (10),
'activation': 'relu', 'verbose': True,
'tol': 1e-8, 'learning_rate_init':1e-4,\
'max_iter':10, 'alpha':0, 'early_stopping':False, 'validation_fraction':0.3})
x_train, y_train, yerr_train = emu.x, emu.y, emu.yerr
_x_test, y_test, _, info = emu.get_data(test_file, fixed_params, None, False)
# whiten the data
x_test = (_x_test - _x_test.mean(axis = 0))/_x_test.std(axis = 0)
y_train_mean, y_train_std = y_train.mean(axis = 0), y_train.std(axis =0)
#y_test_mean, y_test_std = y_test.mean(axis = 0), y_test.std(axis = 0)
y_train = (y_train- y_train_mean)/(y_train_std)
#y_test = (y_test - y_test_mean)/(y_test_std)
yerr_train[yerr_train==0] = 1
# function that defines a Fully connected network with N layers
def n_layer_fc(x, hidden_sizes, training=False, l = 1e-8, p=0.1):
initializer = tf.variance_scaling_initializer(scale=2.0)
regularizer = tf.contrib.layers.l2_regularizer(l)
fc_output = tf.layers.dense(x, hidden_sizes[0], activation=tf.nn.relu,
kernel_initializer = initializer, kernel_regularizer = regularizer)
#kernel_regularizer = tf.nn.l2_loss)
#fc2_output = tf.layers.dense(fc1_output, hidden_sizes[1], activation=tf.nn.relu,
# kernel_initializer = initializer, kernel_regularizer = regularizer)
for size in hidden_sizes[1:]:
fc_output = tf.layers.dense(fc_output, size, kernel_initializer=initializer,
kernel_regularizer = regularizer)
bd_out = tf.layers.dropout(fc_output, p, training = training)
bn_out = tf.layers.batch_normalization(bd_out, axis = -1, training=training)
fc_output = tf.nn.relu(bn_out)#tf.nn.leaky_relu(bn_out, alpha=0.01)
pred = tf.layers.dense(fc_output, 1, kernel_initializer=initializer,
kernel_regularizer = regularizer)[:,0]#,
return pred
# network similar to the above but with an architecture that splits cosmo and hod inputs
def novel_fc(x, hidden_sizes, training=False, l = (1e-6, 1e-6, 1e-6), p = (0.5, 0.5, 0.5),\
n_cosmo_params = 7, n_hod_params = 4):
cosmo_sizes, hod_sizes, cap_sizes = hidden_sizes
if type(l) is float:
cosmo_l, hod_l, cap_l = l, l, l
else:
cosmo_l, hod_l, cap_l = l
if type(p) is float:
cosmo_p, hod_p, cap_p = p,p,p
else:
cosmo_p, hod_p, cap_p = p
initializer = tf.variance_scaling_initializer(scale=2.0)
    # only for duplicating r
n_params = n_cosmo_params+n_hod_params
cosmo_x = tf.slice(x, [0,0], [-1, n_cosmo_params])
cosmo_x = tf.concat(values=[cosmo_x, tf.slice(x, [0, n_params-1], [-1, -1]) ], axis = 1)
#print tf.shape(cosmo_x)
#print tf.shape(tf.slice(x, [0, n_params-1], [-1, -1]))
hod_x = tf.slice(x, [0, n_cosmo_params], [-1, -1])
cosmo_regularizer = tf.contrib.layers.l1_regularizer(cosmo_l)
cosmo_out = cosmo_x
for size in cosmo_sizes:
fc_output = tf.layers.dense(cosmo_out, size,
kernel_initializer = initializer,\
kernel_regularizer = cosmo_regularizer)
bd_out = tf.layers.dropout(fc_output, cosmo_p, training = training)
bn_out = tf.layers.batch_normalization(bd_out, axis = -1, training=training)
cosmo_out = tf.nn.relu(bn_out)#tf.nn.leaky_relu(bn_out, alpha=0.01)
hod_regularizer = tf.contrib.layers.l1_regularizer(hod_l)
hod_out = hod_x
for size in hod_sizes:
fc_output = tf.layers.dense(hod_out, size,
kernel_initializer = initializer,\
kernel_regularizer = hod_regularizer)
bd_out = tf.layers.dropout(fc_output, hod_p, training = training)
bn_out = tf.layers.batch_normalization(bd_out, axis = -1, training=training)
hod_out = tf.nn.relu(bn_out)#tf.nn.leaky_relu(bn_out, alpha=0.01)
cap_out=tf.concat(values=[cosmo_out, hod_out], axis = 1)
cap_regularizer = tf.contrib.layers.l1_regularizer(cap_l)
for size in cap_sizes:
fc_output = tf.layers.dense(cap_out, size,
kernel_initializer = initializer,\
kernel_regularizer = cap_regularizer)
bd_out = tf.layers.dropout(fc_output, cap_p, training = training)
bn_out = tf.layers.batch_normalization(bd_out, axis = -1, training=training)
cap_out = tf.nn.relu(bn_out)#tf.nn.leaky_relu(bn_out, alpha=0.01)
pred = tf.layers.dense(cap_out, 1, kernel_initializer=initializer,
kernel_regularizer = cap_regularizer)[:,0]#,
return pred
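# Quick shape check for the split architecture above (a sketch; the layer
# sizes are arbitrary). With 7 cosmology + 4 HOD columns the last column (r)
# is duplicated into the cosmology branch, so the two towers see 8 and 4
# inputs respectively and the cap returns one prediction per row.
def _example_novel_fc_shapes():
    tf.reset_default_graph()
    x = tf.placeholder(tf.float32, [None, 11])
    is_training = tf.placeholder(tf.bool)
    preds = novel_fc(x, ([16, 16], [16, 16], [8]), training=is_training)
    return preds  # shape (None,)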
# Optimizer function
def optimizer_init_fn(learning_rate = 1e-4):
return tf.train.AdamOptimizer(learning_rate)#, beta1=0.9, beta2=0.999, epsilon=1e-6)
from sklearn.metrics import r2_score, mean_squared_error
# check the loss and % accuracy of the predictions
def check_accuracy(sess, val_data, batch_size, x, weights, preds, is_training=None):
val_x, val_y = val_data
perc_acc, scores = [],[]
for idx in xrange(0, val_x.shape[0], batch_size):
feed_dict = {x: val_x[idx:idx+batch_size],
is_training: 0}
y_pred = sess.run(preds, feed_dict=feed_dict)*y_train_std + y_train_mean
score = r2_score(val_y[idx:idx+batch_size], y_pred)
scores.append(score)
        # Accumulate per-batch percentage error so the summary below averages
        # over all batches rather than reporting only the last one.
        perc_acc.append(np.mean(np.abs(10**(val_y[idx:idx+batch_size]) - 10**(y_pred))
                                / 10**(val_y[idx:idx+batch_size])))
print 'Val score: %.6f, %.2f %% Loss'%(np.mean(np.array(scores)), 100*np.mean(np.array(perc_acc)))
#device = '/cpu:0'
# main training function
def train(model_init_fn, optimizer_init_fn, num_params, train_data, val_data, hidden_sizes, num_epochs=1, batch_size = 200, l = 1e-6, p = 0.5, print_every=10):
tf.reset_default_graph()
# Construct the computational graph we will use to train the model. We
# use the model_init_fn to construct the model, declare placeholders for
# the data and labels
x = tf.placeholder(tf.float32, [None,num_params])
y = tf.placeholder(tf.float32, [None])
weights = tf.placeholder(tf.float32, [None])
is_training = tf.placeholder(tf.bool, name='is_training')
preds = model_init_fn(x, hidden_sizes, is_training, l = l)#, p=p)
# Compute the loss like we did in Part II
#loss = tf.reduce_mean(loss)
loss = tf.losses.mean_squared_error(labels=y, predictions=preds, weights = weights)#weights?
#loss = tf.losses.absolute_difference(labels=y, predictions=preds, weights = weights)#weights?
optimizer = optimizer_init_fn()
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(loss)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
#t = 0
train_x, train_y, train_yerr = train_data
rand_idxs = range(train_x.shape[0])
for epoch in range(num_epochs):
#print('Starting epoch %d' % epoch)
np.random.shuffle(rand_idxs)
losses = []
for idx in xrange(0, train_x.shape[0], batch_size):
feed_dict = {x: train_x[rand_idxs[idx:idx+batch_size]], y: train_y[rand_idxs[idx:idx+batch_size]], weights: 1/train_yerr[rand_idxs[idx:idx+batch_size]], is_training:1}
loss_np, _, preds_np = sess.run([loss, train_op, preds], feed_dict=feed_dict)
losses.append(loss_np)
if epoch % print_every == 0:
loss_avg = np.mean(np.array(losses))
print('Epoch %d, loss = %e' % (epoch, loss_avg))
check_accuracy(sess, val_data, batch_size, x, weights, preds, is_training=is_training)
#t += 1
print fixed_params
#sizes = [100, 250, 500, 250, 100]#, 2000, 1000]#, 100]
#sizes = [100, 500, 500, 100]#,10]
sizes = ([100, 100], [500, 500], [500])
bs = 10
l, p = (0,0,0), (0.5, 0.2, 0.3)
print 'Sizes', sizes
print 'Batch size', bs
print 'Regularization, Dropout:', l, p
train(novel_fc, optimizer_init_fn, x_train.shape[1], (x_train, y_train, yerr_train), (x_test, y_test), sizes, num_epochs= 500, batch_size = bs, l = l, p = p, print_every = 10)
| mit | -8,051,416,717,175,750,000 | 43.05 | 267 | 0.604272 | false |
rmariano/dotfiles | git-hooks/branch_ticket_name.py | 1 | 2784 | #!/usr/bin/python3
"""
From: https://git-scm.com/book/en/v2/Customizing-Git-Git-Hooks
The commit-msg hook takes one parameter, which again is the path to a
temporary file that contains the commit message written by the developer.
If this script exits non-zero, Git aborts the commit process, so you can use
it to validate your project state or commit message before
allowing a commit to go through.
argv[1]: path to the temp file where to write the commit message
argv[2]: Type of commit
argv[3]: SHA-1 of commit, if it is an amend.
"""
import subprocess
import sys
def current_branch_name():
return subprocess.check_output(
('git', 'rev-parse', '--abbrev-ref', 'HEAD')).decode().strip('\n')
def ticket_name(branch_name):
"""
Assume the naming convention <ticket_no><underscore><description>
and return <ticket_no>
Where: <underscore> -> "_"
The delimiter is an <underscore>
In case this is not respected it will return the token
up to the first <underscore>, or everything if none is found.
:param str branch_name: name of the branch we are currently in
:return: ticket number from the branch
"""
ticket_no, _, _ = branch_name.partition("_")
return ticket_no
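# Example of the convention described above (illustrative branch names):
#   ticket_name('PRJ-123_fix_login')   -> 'PRJ-123'
#   ticket_name('nodelimiterbranch')   -> 'nodelimiterbranch'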
def ticket_from_branch():
return ticket_name(current_branch_name())
def header():
"""
Return the string that will compose the header of the commit msg
"""
ticket = ticket_from_branch()
return """{0}:""".format(ticket)
def is_merge():
"""
    Check that the second parameter indicates a merge, and that there are no
    more parameters (last index is 2, hence length 3).
"""
try:
commit_type = sys.argv[2]
except IndexError:
return False
else:
return commit_type.lower() == "merge" and len(sys.argv) == 3
def is_ammend():
"""
    If the commit is an amend, its SHA-1 is passed in sys.argv[3], hence
the length is 4.
"""
return len(sys.argv) == 4
def should_write_header():
return not (is_merge() or is_ammend())
def write_commit_msg_template(commit_msg_file, header, content):
"""
:param file commit_msg_file: the file where to dump the new content
:param str header: the first line (title) in the commit msg
:param str content: Original content from the base template of the
commit msg.
"""
if should_write_header():
commit_msg_file.write(header)
commit_msg_file.write(content)
if __name__ == '__main__':
commit_msg_filename = sys.argv[1]
with open(commit_msg_filename, "r") as original:
content = original.read()
with open(commit_msg_filename, "w") as commit_msg_file:
write_commit_msg_template(commit_msg_file, header(), content)
| mit | -6,879,392,724,612,010,000 | 26.564356 | 79 | 0.651221 | false |
google-research/google-research | cfq/evaluate_main.py | 1 | 2144 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Given a list of questions, compare golden answers with inferred answers.
Writes accuracy (fraction of answers correct), and writes all correct and
incorrect output.
"""
import os
from absl import app
from absl import flags
from cfq import evaluate as evaluator
FLAGS = flags.FLAGS
flags.DEFINE_string('questions_path', None, 'Path to the input questions.')
flags.DEFINE_string('golden_answers_path', None,
'Path to the expected (golden) answers.')
flags.DEFINE_string('inferred_answers_path', None,
'Path to the inferred answers.')
flags.DEFINE_string('output_path', None, 'Path to write evaluation results to')
flags.mark_flag_as_required('output_path')
flags.register_validator('questions_path', os.path.exists,
'Questions path not found.')
flags.register_validator('golden_answers_path', os.path.exists,
'Golden answers path not found.')
flags.register_validator('inferred_answers_path', os.path.exists,
'Inferred answers path not found.')
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
accuracy_result = evaluator.get_accuracy_result(FLAGS.questions_path,
FLAGS.golden_answers_path,
FLAGS.inferred_answers_path)
evaluator.write_accuracy_result(
accuracy_result, FLAGS.output_path, print_output=True)
if __name__ == '__main__':
app.run(main)
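# Example invocation (a sketch; the module path and file paths are
# placeholders):
#   python -m cfq.evaluate_main \
#     --questions_path=/tmp/questions.txt \
#     --golden_answers_path=/tmp/golden.txt \
#     --inferred_answers_path=/tmp/inferred.txt \
#     --output_path=/tmp/eval.txt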
| apache-2.0 | 6,893,265,948,132,583,000 | 35.965517 | 79 | 0.680037 | false |
jiwanlimbu/aura | keystone/token/persistence/backends/sql.py | 1 | 12737 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
import functools
from oslo_log import log
from oslo_utils import timeutils
from keystone.common import sql
import keystone.conf
from keystone import exception
from keystone.i18n import _LI
from keystone import token
from keystone.token.providers import common
CONF = keystone.conf.CONF
LOG = log.getLogger(__name__)
class TokenModel(sql.ModelBase, sql.DictBase):
__tablename__ = 'token'
attributes = ['id', 'expires', 'user_id', 'trust_id']
id = sql.Column(sql.String(64), primary_key=True)
expires = sql.Column(sql.DateTime(), default=None)
extra = sql.Column(sql.JsonBlob())
valid = sql.Column(sql.Boolean(), default=True, nullable=False)
user_id = sql.Column(sql.String(64))
trust_id = sql.Column(sql.String(64))
__table_args__ = (
sql.Index('ix_token_expires', 'expires'),
sql.Index('ix_token_expires_valid', 'expires', 'valid'),
sql.Index('ix_token_user_id', 'user_id'),
sql.Index('ix_token_trust_id', 'trust_id')
)
def _expiry_upper_bound_func():
# don't flush anything within the grace window
sec = datetime.timedelta(seconds=CONF.token.allow_expired_window)
return timeutils.utcnow() - sec
def _expiry_range_batched(session, upper_bound_func, batch_size):
"""Return the stop point of the next batch for expiration.
Return the timestamp of the next token that is `batch_size` rows from
being the oldest expired token.
"""
# This expiry strategy splits the tokens into roughly equal sized batches
# to be deleted. It does this by finding the timestamp of a token
# `batch_size` rows from the oldest token and yielding that to the caller.
# It's expected that the caller will then delete all rows with a timestamp
# equal to or older than the one yielded. This may delete slightly more
# tokens than the batch_size, but that should be ok in almost all cases.
LOG.debug('Token expiration batch size: %d', batch_size)
query = session.query(TokenModel.expires)
query = query.filter(TokenModel.expires < upper_bound_func())
query = query.order_by(TokenModel.expires)
query = query.offset(batch_size - 1)
query = query.limit(1)
while True:
try:
next_expiration = query.one()[0]
except sql.NotFound:
# There are less than `batch_size` rows remaining, so fall
# through to the normal delete
break
yield next_expiration
yield upper_bound_func()
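# For example, with 250 expired rows and batch_size=100 this yields the expiry
# time of the 100th and 200th oldest rows and then the grace-window upper
# bound, so the caller deletes roughly 100 rows per transaction.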
def _expiry_range_all(session, upper_bound_func):
"""Expire all tokens in one pass."""
yield upper_bound_func()
class Token(token.persistence.TokenDriverBase):
# Public interface
def get_token(self, token_id):
if token_id is None:
raise exception.TokenNotFound(token_id=token_id)
with sql.session_for_read() as session:
token_ref = session.query(TokenModel).get(token_id)
if not token_ref or not token_ref.valid:
raise exception.TokenNotFound(token_id=token_id)
return token_ref.to_dict()
def create_token(self, token_id, data):
data_copy = copy.deepcopy(data)
if not data_copy.get('expires'):
data_copy['expires'] = common.default_expire_time()
if not data_copy.get('user_id'):
data_copy['user_id'] = data_copy['user']['id']
token_ref = TokenModel.from_dict(data_copy)
token_ref.valid = True
with sql.session_for_write() as session:
session.add(token_ref)
return token_ref.to_dict()
def delete_token(self, token_id):
with sql.session_for_write() as session:
token_ref = session.query(TokenModel).get(token_id)
if not token_ref or not token_ref.valid:
raise exception.TokenNotFound(token_id=token_id)
token_ref.valid = False
def delete_tokens(self, user_id, tenant_id=None, trust_id=None,
consumer_id=None):
"""Delete all tokens in one session.
The user_id will be ignored if the trust_id is specified. user_id
will always be specified.
If using a trust, the token's user_id is set to the trustee's user ID
or the trustor's user ID, so will use trust_id to query the tokens.
"""
token_list = []
with sql.session_for_write() as session:
now = timeutils.utcnow()
query = session.query(TokenModel)
query = query.filter_by(valid=True)
query = query.filter(TokenModel.expires > now)
if trust_id:
query = query.filter(TokenModel.trust_id == trust_id)
else:
query = query.filter(TokenModel.user_id == user_id)
for token_ref in query.all():
if tenant_id:
token_ref_dict = token_ref.to_dict()
if not self._tenant_matches(tenant_id, token_ref_dict):
continue
if consumer_id:
token_ref_dict = token_ref.to_dict()
if not self._consumer_matches(consumer_id, token_ref_dict):
continue
token_ref.valid = False
token_list.append(token_ref.id)
return token_list
def _tenant_matches(self, tenant_id, token_ref_dict):
return ((tenant_id is None) or
(token_ref_dict.get('tenant') and
token_ref_dict['tenant'].get('id') == tenant_id))
def _consumer_matches(self, consumer_id, ref):
if consumer_id is None:
return True
else:
try:
oauth = ref['token_data']['token'].get('OS-OAUTH1', {})
return oauth and oauth['consumer_id'] == consumer_id
except KeyError:
return False
def _list_tokens_for_trust(self, trust_id):
with sql.session_for_read() as session:
tokens = []
now = timeutils.utcnow()
query = session.query(TokenModel)
query = query.filter(TokenModel.expires > now)
query = query.filter(TokenModel.trust_id == trust_id)
token_references = query.filter_by(valid=True)
for token_ref in token_references:
token_ref_dict = token_ref.to_dict()
tokens.append(token_ref_dict['id'])
return tokens
def _list_tokens_for_user(self, user_id, tenant_id=None):
with sql.session_for_read() as session:
tokens = []
now = timeutils.utcnow()
query = session.query(TokenModel)
query = query.filter(TokenModel.expires > now)
query = query.filter(TokenModel.user_id == user_id)
token_references = query.filter_by(valid=True)
for token_ref in token_references:
token_ref_dict = token_ref.to_dict()
if self._tenant_matches(tenant_id, token_ref_dict):
tokens.append(token_ref['id'])
return tokens
def _list_tokens_for_consumer(self, user_id, consumer_id):
tokens = []
with sql.session_for_write() as session:
now = timeutils.utcnow()
query = session.query(TokenModel)
query = query.filter(TokenModel.expires > now)
query = query.filter(TokenModel.user_id == user_id)
token_references = query.filter_by(valid=True)
for token_ref in token_references:
token_ref_dict = token_ref.to_dict()
if self._consumer_matches(consumer_id, token_ref_dict):
tokens.append(token_ref_dict['id'])
return tokens
def _list_tokens(self, user_id, tenant_id=None, trust_id=None,
consumer_id=None):
if not CONF.token.revoke_by_id:
return []
if trust_id:
return self._list_tokens_for_trust(trust_id)
if consumer_id:
return self._list_tokens_for_consumer(user_id, consumer_id)
else:
return self._list_tokens_for_user(user_id, tenant_id)
def list_revoked_tokens(self):
with sql.session_for_read() as session:
tokens = []
now = timeutils.utcnow()
query = session.query(TokenModel.id, TokenModel.expires,
TokenModel.extra)
query = query.filter(TokenModel.expires > now)
token_references = query.filter_by(valid=False)
for token_ref in token_references:
token_data = token_ref[2]['token_data']
if 'access' in token_data:
# It's a v2 token.
audit_ids = token_data['access']['token']['audit_ids']
else:
# It's a v3 token.
audit_ids = token_data['token']['audit_ids']
record = {
'id': token_ref[0],
'expires': token_ref[1],
'audit_id': audit_ids[0],
}
tokens.append(record)
return tokens
def _expiry_range_strategy(self, dialect):
"""Choose a token range expiration strategy.
Based on the DB dialect, select an expiry range callable that is
appropriate.
"""
# DB2 and MySQL can both benefit from a batched strategy. On DB2 the
# transaction log can fill up and on MySQL w/Galera, large
# transactions can exceed the maximum write set size.
if dialect == 'ibm_db_sa':
# Limit of 100 is known to not fill a transaction log
# of default maximum size while not significantly
# impacting the performance of large token purges on
# systems where the maximum transaction log size has
# been increased beyond the default.
return functools.partial(_expiry_range_batched,
batch_size=100)
elif dialect == 'mysql':
# We want somewhat more than 100, since Galera replication delay is
# at least RTT*2. This can be a significant amount of time if
# doing replication across a WAN.
return functools.partial(_expiry_range_batched,
batch_size=1000)
return _expiry_range_all
def flush_expired_tokens(self):
# The DBAPI itself is in a "never autocommit" mode,
# BEGIN is emitted automatically as soon as any work is done,
# COMMIT is emitted when SQLAlchemy invokes commit() on the
# underlying DBAPI connection. So SQLAlchemy is only simulating
# "begin" here in any case, it is in fact automatic by the DBAPI.
with sql.session_for_write() as session: # Calls session.begin()
dialect = session.bind.dialect.name
expiry_range_func = self._expiry_range_strategy(dialect)
query = session.query(TokenModel.expires)
total_removed = 0
upper_bound_func = _expiry_upper_bound_func
for expiry_time in expiry_range_func(session, upper_bound_func):
delete_query = query.filter(TokenModel.expires <=
expiry_time)
row_count = delete_query.delete(synchronize_session=False)
# Explicitly commit each batch so as to free up
# resources early. We do not actually need
# transactional semantics here.
session.commit() # Emits connection.commit() on DBAPI
# Tells SQLAlchemy to "begin", e.g. hold a new connection
# open in a transaction
session.begin()
total_removed += row_count
LOG.debug('Removed %d total expired tokens', total_removed)
# When the "with: " block ends, the final "session.commit()"
# is emitted by enginefacade
session.flush()
LOG.info(_LI('Total expired tokens removed: %d'), total_removed)
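# Minimal usage sketch (assumes an initialised Keystone SQL backend; the token
# payload below is a stub rather than a full token body).
def _example_token_roundtrip():
    driver = Token()
    payload = {'id': 'abc123',
               'user': {'id': 'user-1'},
               'token_data': {'token': {'audit_ids': ['aud-1']}}}
    driver.create_token('abc123', payload)
    assert driver.get_token('abc123')['id'] == 'abc123'
    driver.delete_token('abc123')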
| apache-2.0 | 6,549,671,619,302,245,000 | 40.488599 | 79 | 0.593546 | false |
mattclark/osf.io | tests/test_webtests.py | 1 | 50803 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Functional tests using WebTest."""
import datetime as dt
import httplib as http
import logging
import unittest
import markupsafe
import mock
import pytest
from nose.tools import * # noqa: F403
import re
from django.utils import timezone
from addons.wiki.utils import to_mongo_key
from framework.auth import exceptions as auth_exc
from framework.auth.core import Auth
from tests.base import OsfTestCase
from tests.base import fake
from osf_tests.factories import (
fake_email,
AuthUserFactory,
NodeFactory,
PreprintFactory,
PreprintProviderFactory,
PrivateLinkFactory,
ProjectFactory,
RegistrationFactory,
SubjectFactory,
UserFactory,
UnconfirmedUserFactory,
UnregUserFactory,
)
from osf.utils import permissions
from addons.wiki.models import WikiPage, WikiVersion
from addons.wiki.tests.factories import WikiFactory, WikiVersionFactory
from website import settings, language
from addons.osfstorage.models import OsfStorageFile
from website.util import web_url_for, api_url_for
from api_tests import utils as test_utils
logging.getLogger('website.project.model').setLevel(logging.ERROR)
def assert_in_html(member, container, **kwargs):
"""Looks for the specified member in markupsafe-escaped HTML output"""
member = markupsafe.escape(member)
return assert_in(member, container, **kwargs)
def assert_not_in_html(member, container, **kwargs):
"""Looks for the specified member in markupsafe-escaped HTML output"""
member = markupsafe.escape(member)
return assert_not_in(member, container, **kwargs)
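# For example, assert_in_html('Tom & Jerry', res) passes only when the page
# body contains the escaped form 'Tom &amp; Jerry'.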
class TestDisabledUser(OsfTestCase):
def setUp(self):
super(TestDisabledUser, self).setUp()
self.user = UserFactory()
self.user.set_password('Korben Dallas')
self.user.is_disabled = True
self.user.save()
def test_profile_disabled_returns_401(self):
res = self.app.get(self.user.url, expect_errors=True)
assert_equal(res.status_code, 410)
class TestAnUnregisteredUser(OsfTestCase):
def test_cant_see_profile_if_not_logged_in(self):
url = web_url_for('profile_view')
res = self.app.get(url)
res = res.follow()
assert_equal(res.status_code, 301)
assert_in('/login/', res.headers['Location'])
@pytest.mark.enable_bookmark_creation
@pytest.mark.enable_quickfiles_creation
class TestAUser(OsfTestCase):
def setUp(self):
super(TestAUser, self).setUp()
self.user = AuthUserFactory()
self.auth = self.user.auth
def test_can_see_profile_url(self):
res = self.app.get(self.user.url).maybe_follow()
assert_in(self.user.url, res)
# `GET /login/` without parameters is redirected to `/dashboard/` page which has `@must_be_logged_in` decorator
# if user is not logged in, she/he is further redirected to CAS login page
def test_is_redirected_to_cas_if_not_logged_in_at_login_page(self):
res = self.app.get('/login/').follow()
assert_equal(res.status_code, 302)
location = res.headers.get('Location')
assert_in('login?service=', location)
def test_is_redirected_to_dashboard_if_already_logged_in_at_login_page(self):
res = self.app.get('/login/', auth=self.user.auth)
assert_equal(res.status_code, 302)
assert 'dashboard' in res.headers.get('Location')
def test_register_page(self):
res = self.app.get('/register/')
assert_equal(res.status_code, 200)
def test_is_redirected_to_dashboard_if_already_logged_in_at_register_page(self):
res = self.app.get('/register/', auth=self.user.auth)
assert_equal(res.status_code, 302)
assert 'dashboard' in res.headers.get('Location')
def test_sees_projects_in_her_dashboard(self):
# the user already has a project
project = ProjectFactory(creator=self.user)
project.add_contributor(self.user)
project.save()
res = self.app.get('/myprojects/', auth=self.user.auth)
assert_in('Projects', res) # Projects heading
def test_does_not_see_osffiles_in_user_addon_settings(self):
res = self.app.get('/settings/addons/', auth=self.auth, auto_follow=True)
assert_not_in('OSF Storage', res)
def test_sees_osffiles_in_project_addon_settings(self):
project = ProjectFactory(creator=self.user)
project.add_contributor(
self.user,
permissions=permissions.ADMIN,
save=True)
res = self.app.get('/{0}/addons/'.format(project._primary_key), auth=self.auth, auto_follow=True)
assert_in('OSF Storage', res)
def test_sees_correct_title_on_dashboard(self):
# User goes to dashboard
res = self.app.get('/myprojects/', auth=self.auth, auto_follow=True)
title = res.html.title.string
assert_equal('OSF | My Projects', title)
def test_can_see_make_public_button_if_admin(self):
# User is a contributor on a project
project = ProjectFactory()
project.add_contributor(
self.user,
permissions=permissions.ADMIN,
save=True)
# User goes to the project page
res = self.app.get(project.url, auth=self.auth).maybe_follow()
assert_in('Make Public', res)
def test_cant_see_make_public_button_if_not_admin(self):
# User is a contributor on a project
project = ProjectFactory()
project.add_contributor(
self.user,
permissions=permissions.WRITE,
save=True)
# User goes to the project page
res = self.app.get(project.url, auth=self.auth).maybe_follow()
assert_not_in('Make Public', res)
def test_can_see_make_private_button_if_admin(self):
# User is a contributor on a project
project = ProjectFactory(is_public=True)
project.add_contributor(
self.user,
permissions=permissions.ADMIN,
save=True)
# User goes to the project page
res = self.app.get(project.url, auth=self.auth).maybe_follow()
assert_in('Make Private', res)
def test_cant_see_make_private_button_if_not_admin(self):
# User is a contributor on a project
project = ProjectFactory(is_public=True)
project.add_contributor(
self.user,
permissions=permissions.WRITE,
save=True)
# User goes to the project page
res = self.app.get(project.url, auth=self.auth).maybe_follow()
assert_not_in('Make Private', res)
def test_sees_logs_on_a_project(self):
project = ProjectFactory(is_public=True)
# User goes to the project's page
res = self.app.get(project.url, auth=self.auth).maybe_follow()
# Can see log event
assert_in('created', res)
def test_no_wiki_content_message(self):
project = ProjectFactory(creator=self.user)
# Goes to project's wiki, where there is no content
res = self.app.get('/{0}/wiki/home/'.format(project._primary_key), auth=self.auth)
# Sees a message indicating no content
assert_in('Add important information, links, or images here to describe your project.', res)
# Sees that edit panel is open by default when home wiki has no content
assert_in('panelsUsed: ["view", "menu", "edit"]', res)
def test_wiki_content(self):
project = ProjectFactory(creator=self.user)
wiki_page_name = 'home'
wiki_content = 'Kittens'
wiki_page = WikiFactory(
user=self.user,
node=project,
)
wiki = WikiVersionFactory(
wiki_page=wiki_page,
content=wiki_content
)
res = self.app.get('/{0}/wiki/{1}/'.format(
project._primary_key,
wiki_page_name,
), auth=self.auth)
assert_not_in('Add important information, links, or images here to describe your project.', res)
assert_in(wiki_content, res)
assert_in('panelsUsed: ["view", "menu"]', res)
def test_wiki_page_name_non_ascii(self):
project = ProjectFactory(creator=self.user)
non_ascii = to_mongo_key('WöRlÐé')
WikiPage.objects.create_for_node(project, 'WöRlÐé', 'new content', Auth(self.user))
wv = WikiVersion.objects.get_for_node(project, non_ascii)
assert wv.wiki_page.page_name.upper() == non_ascii.decode('utf-8').upper()
def test_noncontributor_cannot_see_wiki_if_no_content(self):
user2 = UserFactory()
# user2 creates a public project and adds no wiki content
project = ProjectFactory(creator=user2, is_public=True)
# self navigates to project
res = self.app.get(project.url).maybe_follow()
# Should not see wiki widget (since non-contributor and no content)
assert_not_in('Add important information, links, or images here to describe your project.', res)
def test_wiki_does_not_exist(self):
project = ProjectFactory(creator=self.user)
res = self.app.get('/{0}/wiki/{1}/'.format(
project._primary_key,
'not a real page yet',
), auth=self.auth, expect_errors=True)
assert_in('Add important information, links, or images here to describe your project.', res)
def test_sees_own_profile(self):
res = self.app.get('/profile/', auth=self.auth)
td1 = res.html.find('td', text=re.compile(r'Public(.*?)Profile'))
td2 = td1.find_next_sibling('td')
assert_equal(td2.text, self.user.display_absolute_url)
def test_sees_another_profile(self):
user2 = UserFactory()
res = self.app.get(user2.url, auth=self.auth)
td1 = res.html.find('td', text=re.compile(r'Public(.*?)Profile'))
td2 = td1.find_next_sibling('td')
assert_equal(td2.text, user2.display_absolute_url)
@pytest.mark.enable_bookmark_creation
class TestComponents(OsfTestCase):
def setUp(self):
super(TestComponents, self).setUp()
self.user = AuthUserFactory()
self.consolidate_auth = Auth(user=self.user)
self.project = ProjectFactory(creator=self.user)
self.project.add_contributor(contributor=self.user, auth=self.consolidate_auth)
        # A non-project component
self.component = NodeFactory(
category='hypothesis',
creator=self.user,
parent=self.project,
)
self.component.save()
self.component.set_privacy('public', self.consolidate_auth)
self.component.set_privacy('private', self.consolidate_auth)
self.project.save()
self.project_url = self.project.web_url_for('view_project')
def test_sees_parent(self):
res = self.app.get(self.component.url, auth=self.user.auth).maybe_follow()
parent_title = res.html.find_all('h2', class_='node-parent-title')
assert_equal(len(parent_title), 1)
assert_in(self.project.title, parent_title[0].text) # Bs4 will handle unescaping HTML here
def test_delete_project(self):
res = self.app.get(
self.component.url + 'settings/',
auth=self.user.auth
).maybe_follow()
assert_in(
'Delete {0}'.format(self.component.project_or_component),
res
)
def test_cant_delete_project_if_not_admin(self):
non_admin = AuthUserFactory()
self.component.add_contributor(
non_admin,
permissions=permissions.WRITE,
auth=self.consolidate_auth,
save=True,
)
res = self.app.get(
self.component.url + 'settings/',
auth=non_admin.auth
).maybe_follow()
assert_not_in(
'Delete {0}'.format(self.component.project_or_component),
res
)
def test_can_configure_comments_if_admin(self):
res = self.app.get(
self.component.url + 'settings/',
auth=self.user.auth,
).maybe_follow()
assert_in('Commenting', res)
def test_cant_configure_comments_if_not_admin(self):
non_admin = AuthUserFactory()
self.component.add_contributor(
non_admin,
permissions=permissions.WRITE,
auth=self.consolidate_auth,
save=True,
)
res = self.app.get(
self.component.url + 'settings/',
auth=non_admin.auth
).maybe_follow()
assert_not_in('Commenting', res)
def test_components_should_have_component_list(self):
res = self.app.get(self.component.url, auth=self.user.auth)
assert_in('Components', res)
@pytest.mark.enable_bookmark_creation
class TestPrivateLinkView(OsfTestCase):
def setUp(self):
super(TestPrivateLinkView, self).setUp()
self.user = AuthUserFactory() # Is NOT a contributor
self.project = ProjectFactory(is_public=False)
self.link = PrivateLinkFactory(anonymous=True)
self.link.nodes.add(self.project)
self.link.save()
self.project_url = self.project.web_url_for('view_project')
def test_anonymous_link_hide_contributor(self):
res = self.app.get(self.project_url, {'view_only': self.link.key})
assert_in('Anonymous Contributors', res.body)
assert_not_in(self.user.fullname, res)
def test_anonymous_link_hides_citations(self):
res = self.app.get(self.project_url, {'view_only': self.link.key})
assert_not_in('Citation:', res)
def test_no_warning_for_read_only_user_with_valid_link(self):
link2 = PrivateLinkFactory(anonymous=False)
link2.nodes.add(self.project)
link2.save()
self.project.add_contributor(
self.user,
permissions=permissions.READ,
save=True,
)
res = self.app.get(self.project_url, {'view_only': link2.key},
auth=self.user.auth)
assert_not_in(
'is being viewed through a private, view-only link. '
'Anyone with the link can view this project. Keep '
'the link safe.',
res.body
)
def test_no_warning_for_read_only_user_with_invalid_link(self):
self.project.add_contributor(
self.user,
permissions=permissions.READ,
save=True,
)
res = self.app.get(self.project_url, {'view_only': 'not_valid'},
auth=self.user.auth)
assert_not_in(
'is being viewed through a private, view-only link. '
'Anyone with the link can view this project. Keep '
'the link safe.',
res.body
)
@pytest.mark.enable_bookmark_creation
@pytest.mark.enable_quickfiles_creation
class TestMergingAccounts(OsfTestCase):
def setUp(self):
super(TestMergingAccounts, self).setUp()
self.user = UserFactory.build()
self.user.fullname = "tess' test string"
self.user.set_password('science')
self.user.save()
self.dupe = UserFactory.build()
self.dupe.set_password('example')
self.dupe.save()
def test_merged_user_is_not_shown_as_a_contributor(self):
project = ProjectFactory(is_public=True)
# Both the master and dupe are contributors
project.add_contributor(self.dupe, log=False)
project.add_contributor(self.user, log=False)
project.save()
# At the project page, both are listed as contributors
res = self.app.get(project.url).maybe_follow()
assert_in_html(self.user.fullname, res)
assert_in_html(self.dupe.fullname, res)
# The accounts are merged
self.user.merge_user(self.dupe)
self.user.save()
# Now only the master user is shown at the project page
res = self.app.get(project.url).maybe_follow()
assert_in_html(self.user.fullname, res)
assert_true(self.dupe.is_merged)
assert_not_in(self.dupe.fullname, res)
def test_merged_user_has_alert_message_on_profile(self):
# Master merges dupe
self.user.merge_user(self.dupe)
self.user.save()
# At the dupe user's profile there is an alert message at the top
# indicating that the user is merged
res = self.app.get('/profile/{0}/'.format(self.dupe._primary_key)).maybe_follow()
assert_in('This account has been merged', res)
@pytest.mark.enable_bookmark_creation
class TestShortUrls(OsfTestCase):
def setUp(self):
super(TestShortUrls, self).setUp()
self.user = AuthUserFactory()
self.auth = self.user.auth
self.consolidate_auth = Auth(user=self.user)
self.project = ProjectFactory(creator=self.user)
        # A non-project component
self.component = NodeFactory(parent=self.project, category='hypothesis', creator=self.user)
# Hack: Add some logs to component; should be unnecessary pending
# improvements to factories from @rliebz
self.component.set_privacy('public', auth=self.consolidate_auth)
self.component.set_privacy('private', auth=self.consolidate_auth)
self.wiki = WikiFactory(
user=self.user,
node=self.component,
)
def _url_to_body(self, url):
return self.app.get(
url,
auth=self.auth
).maybe_follow(
auth=self.auth,
).normal_body
def test_project_url(self):
assert_equal(
self._url_to_body(self.project.deep_url),
self._url_to_body(self.project.url),
)
def test_component_url(self):
assert_equal(
self._url_to_body(self.component.deep_url),
self._url_to_body(self.component.url),
)
def test_wiki_url(self):
assert_equal(
self._url_to_body(self.wiki.deep_url),
self._url_to_body(self.wiki.url),
)
@pytest.mark.enable_bookmark_creation
@pytest.mark.enable_implicit_clean
class TestClaiming(OsfTestCase):
def setUp(self):
super(TestClaiming, self).setUp()
self.referrer = AuthUserFactory()
self.project = ProjectFactory(creator=self.referrer, is_public=True)
def test_correct_name_shows_in_contributor_list(self):
name1, email = fake.name(), fake_email()
UnregUserFactory(fullname=name1, email=email)
name2, email = fake.name(), fake_email()
# Added with different name
self.project.add_unregistered_contributor(fullname=name2,
email=email, auth=Auth(self.referrer))
self.project.save()
res = self.app.get(self.project.url, auth=self.referrer.auth)
# Correct name is shown
assert_in_html(name2, res)
assert_not_in(name1, res)
def test_user_can_set_password_on_claim_page(self):
name, email = fake.name(), fake_email()
new_user = self.project.add_unregistered_contributor(
email=email,
fullname=name,
auth=Auth(self.referrer)
)
self.project.save()
claim_url = new_user.get_claim_url(self.project._primary_key)
res = self.app.get(claim_url)
self.project.reload()
assert_in('Set Password', res)
form = res.forms['setPasswordForm']
#form['username'] = new_user.username #Removed as long as E-mail can't be updated.
form['password'] = 'killerqueen'
form['password2'] = 'killerqueen'
res = form.submit().follow()
new_user.reload()
assert_true(new_user.check_password('killerqueen'))
def test_sees_is_redirected_if_user_already_logged_in(self):
name, email = fake.name(), fake_email()
new_user = self.project.add_unregistered_contributor(
email=email,
fullname=name,
auth=Auth(self.referrer)
)
self.project.save()
existing = AuthUserFactory()
claim_url = new_user.get_claim_url(self.project._primary_key)
# a user is already logged in
res = self.app.get(claim_url, auth=existing.auth, expect_errors=True)
assert_equal(res.status_code, 302)
def test_unregistered_users_names_are_project_specific(self):
name1, name2, email = fake.name(), fake.name(), fake_email()
project2 = ProjectFactory(creator=self.referrer)
# different projects use different names for the same unreg contributor
self.project.add_unregistered_contributor(
email=email,
fullname=name1,
auth=Auth(self.referrer)
)
self.project.save()
project2.add_unregistered_contributor(
email=email,
fullname=name2,
auth=Auth(self.referrer)
)
project2.save()
self.app.authenticate(*self.referrer.auth)
# Each project displays a different name in the contributor list
res = self.app.get(self.project.url)
assert_in_html(name1, res)
res2 = self.app.get(project2.url)
assert_in_html(name2, res2)
@unittest.skip('as long as E-mails cannot be changed')
def test_cannot_set_email_to_a_user_that_already_exists(self):
reg_user = UserFactory()
name, email = fake.name(), fake_email()
new_user = self.project.add_unregistered_contributor(
email=email,
fullname=name,
auth=Auth(self.referrer)
)
self.project.save()
# Goes to claim url and successfully claims account
claim_url = new_user.get_claim_url(self.project._primary_key)
res = self.app.get(claim_url)
self.project.reload()
assert_in('Set Password', res)
form = res.forms['setPasswordForm']
# Fills out an email that is the username of another user
form['username'] = reg_user.username
form['password'] = 'killerqueen'
form['password2'] = 'killerqueen'
res = form.submit().maybe_follow(expect_errors=True)
assert_in(
language.ALREADY_REGISTERED.format(email=reg_user.username),
res
)
def test_correct_display_name_is_shown_at_claim_page(self):
original_name = fake.name()
unreg = UnregUserFactory(fullname=original_name)
different_name = fake.name()
new_user = self.project.add_unregistered_contributor(
email=unreg.username,
fullname=different_name,
auth=Auth(self.referrer),
)
self.project.save()
claim_url = new_user.get_claim_url(self.project._primary_key)
res = self.app.get(claim_url)
# Correct name (different_name) should be on page
assert_in_html(different_name, res)
class TestConfirmingEmail(OsfTestCase):
def setUp(self):
super(TestConfirmingEmail, self).setUp()
self.user = UnconfirmedUserFactory()
self.confirmation_url = self.user.get_confirmation_url(
self.user.username,
external=False,
)
self.confirmation_token = self.user.get_confirmation_token(
self.user.username
)
def test_cannot_remove_another_user_email(self):
user1 = AuthUserFactory()
user2 = AuthUserFactory()
url = api_url_for('update_user')
header = {'id': user1.username, 'emails': [{'address': user1.username}]}
res = self.app.put_json(url, header, auth=user2.auth, expect_errors=True)
assert_equal(res.status_code, 403)
    def test_cannot_make_primary_email_for_another_user(self):
user1 = AuthUserFactory()
user2 = AuthUserFactory()
email = '[email protected]'
user1.emails.create(address=email)
user1.save()
url = api_url_for('update_user')
header = {'id': user1.username,
'emails': [{'address': user1.username, 'primary': False, 'confirmed': True},
{'address': email, 'primary': True, 'confirmed': True}
]}
res = self.app.put_json(url, header, auth=user2.auth, expect_errors=True)
assert_equal(res.status_code, 403)
    def test_cannot_add_email_for_another_user(self):
user1 = AuthUserFactory()
user2 = AuthUserFactory()
email = '[email protected]'
url = api_url_for('update_user')
header = {'id': user1.username,
'emails': [{'address': user1.username, 'primary': True, 'confirmed': True},
{'address': email, 'primary': False, 'confirmed': False}
]}
res = self.app.put_json(url, header, auth=user2.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_error_page_if_confirm_link_is_used(self):
self.user.confirm_email(self.confirmation_token)
self.user.save()
res = self.app.get(self.confirmation_url, expect_errors=True)
assert_in(auth_exc.InvalidTokenError.message_short, res)
assert_equal(res.status_code, http.BAD_REQUEST)
@pytest.mark.enable_implicit_clean
@pytest.mark.enable_bookmark_creation
class TestClaimingAsARegisteredUser(OsfTestCase):
def setUp(self):
super(TestClaimingAsARegisteredUser, self).setUp()
self.referrer = AuthUserFactory()
self.project = ProjectFactory(creator=self.referrer, is_public=True)
name, email = fake.name(), fake_email()
self.user = self.project.add_unregistered_contributor(
fullname=name,
email=email,
auth=Auth(user=self.referrer)
)
self.project.save()
def test_claim_user_registered_with_correct_password(self):
reg_user = AuthUserFactory() # NOTE: AuthUserFactory sets password as 'queenfan86'
url = self.user.get_claim_url(self.project._primary_key)
# Follow to password re-enter page
res = self.app.get(url, auth=reg_user.auth).follow(auth=reg_user.auth)
# verify that the "Claim Account" form is returned
assert_in('Claim Contributor', res.body)
form = res.forms['claimContributorForm']
form['password'] = 'queenfan86'
res = form.submit(auth=reg_user.auth)
res = res.follow(auth=reg_user.auth)
self.project.reload()
self.user.reload()
# user is now a contributor to the project
assert_in(reg_user, self.project.contributors)
# the unregistered user (self.user) is removed as a contributor, and their
assert_not_in(self.user, self.project.contributors)
# unclaimed record for the project has been deleted
assert_not_in(self.project, self.user.unclaimed_records)
def test_claim_user_registered_preprint_with_correct_password(self):
preprint = PreprintFactory(creator=self.referrer)
name, email = fake.name(), fake_email()
unreg_user = preprint.add_unregistered_contributor(
fullname=name,
email=email,
auth=Auth(user=self.referrer)
)
reg_user = AuthUserFactory() # NOTE: AuthUserFactory sets password as 'queenfan86'
url = unreg_user.get_claim_url(preprint._id)
# Follow to password re-enter page
res = self.app.get(url, auth=reg_user.auth).follow(auth=reg_user.auth)
# verify that the "Claim Account" form is returned
assert_in('Claim Contributor', res.body)
form = res.forms['claimContributorForm']
form['password'] = 'queenfan86'
res = form.submit(auth=reg_user.auth)
preprint.reload()
unreg_user.reload()
# user is now a contributor to the project
assert_in(reg_user, preprint.contributors)
# the unregistered user (unreg_user) is removed as a contributor, and their
assert_not_in(unreg_user, preprint.contributors)
# unclaimed record for the project has been deleted
assert_not_in(preprint, unreg_user.unclaimed_records)
class TestResendConfirmation(OsfTestCase):
def setUp(self):
super(TestResendConfirmation, self).setUp()
self.unconfirmed_user = UnconfirmedUserFactory()
self.confirmed_user = UserFactory()
self.get_url = web_url_for('resend_confirmation_get')
self.post_url = web_url_for('resend_confirmation_post')
    # test that the resend confirmation page loads correctly
def test_resend_confirmation_get(self):
res = self.app.get(self.get_url)
assert_equal(res.status_code, 200)
assert_in('Resend Confirmation', res.body)
assert_in('resendForm', res.forms)
# test that unconfirmed user can receive resend confirmation email
@mock.patch('framework.auth.views.mails.send_mail')
def test_can_receive_resend_confirmation_email(self, mock_send_mail):
# load resend confirmation page and submit email
res = self.app.get(self.get_url)
form = res.forms['resendForm']
form['email'] = self.unconfirmed_user.unconfirmed_emails[0]
res = form.submit()
# check email, request and response
assert_true(mock_send_mail.called)
assert_equal(res.status_code, 200)
assert_equal(res.request.path, self.post_url)
assert_in_html('If there is an OSF account', res)
# test that confirmed user cannot receive resend confirmation email
@mock.patch('framework.auth.views.mails.send_mail')
def test_cannot_receive_resend_confirmation_email_1(self, mock_send_mail):
# load resend confirmation page and submit email
res = self.app.get(self.get_url)
form = res.forms['resendForm']
form['email'] = self.confirmed_user.emails.first().address
res = form.submit()
# check email, request and response
assert_false(mock_send_mail.called)
assert_equal(res.status_code, 200)
assert_equal(res.request.path, self.post_url)
assert_in_html('has already been confirmed', res)
# test that non-existing user cannot receive resend confirmation email
@mock.patch('framework.auth.views.mails.send_mail')
def test_cannot_receive_resend_confirmation_email_2(self, mock_send_mail):
# load resend confirmation page and submit email
res = self.app.get(self.get_url)
form = res.forms['resendForm']
form['email'] = '[email protected]'
res = form.submit()
# check email, request and response
assert_false(mock_send_mail.called)
assert_equal(res.status_code, 200)
assert_equal(res.request.path, self.post_url)
assert_in_html('If there is an OSF account', res)
# test that user cannot submit resend confirmation request too quickly
@mock.patch('framework.auth.views.mails.send_mail')
def test_cannot_resend_confirmation_twice_quickly(self, mock_send_mail):
# load resend confirmation page and submit email
res = self.app.get(self.get_url)
form = res.forms['resendForm']
form['email'] = self.unconfirmed_user.email
res = form.submit()
res = form.submit()
# check request and response
assert_equal(res.status_code, 200)
assert_in_html('Please wait', res)
class TestForgotPassword(OsfTestCase):
def setUp(self):
super(TestForgotPassword, self).setUp()
self.user = UserFactory()
self.auth_user = AuthUserFactory()
self.get_url = web_url_for('forgot_password_get')
self.post_url = web_url_for('forgot_password_post')
self.user.verification_key_v2 = {}
self.user.save()
# log users out before they land on forgot password page
def test_forgot_password_logs_out_user(self):
# visit forgot password link while another user is logged in
res = self.app.get(self.get_url, auth=self.auth_user.auth)
# check redirection to CAS logout
assert_equal(res.status_code, 302)
location = res.headers.get('Location')
assert_not_in('reauth', location)
assert_in('logout?service=', location)
assert_in('forgotpassword', location)
# test that forgot password page is loaded correctly
def test_get_forgot_password(self):
res = self.app.get(self.get_url)
assert_equal(res.status_code, 200)
assert_in('Forgot Password', res.body)
assert_in('forgotPasswordForm', res.forms)
# test that existing user can receive reset password email
@mock.patch('framework.auth.views.mails.send_mail')
def test_can_receive_reset_password_email(self, mock_send_mail):
# load forgot password page and submit email
res = self.app.get(self.get_url)
form = res.forms['forgotPasswordForm']
form['forgot_password-email'] = self.user.username
res = form.submit()
# check mail was sent
assert_true(mock_send_mail.called)
# check http 200 response
assert_equal(res.status_code, 200)
# check request URL is /forgotpassword
assert_equal(res.request.path, self.post_url)
# check push notification
assert_in_html('If there is an OSF account', res)
assert_not_in_html('Please wait', res)
# check verification_key_v2 is set
self.user.reload()
assert_not_equal(self.user.verification_key_v2, {})
# test that non-existing user cannot receive reset password email
@mock.patch('framework.auth.views.mails.send_mail')
def test_cannot_receive_reset_password_email(self, mock_send_mail):
# load forgot password page and submit email
res = self.app.get(self.get_url)
form = res.forms['forgotPasswordForm']
form['forgot_password-email'] = 'fake' + self.user.username
res = form.submit()
# check mail was not sent
assert_false(mock_send_mail.called)
# check http 200 response
assert_equal(res.status_code, 200)
# check request URL is /forgotpassword
assert_equal(res.request.path, self.post_url)
# check push notification
assert_in_html('If there is an OSF account', res)
assert_not_in_html('Please wait', res)
# check verification_key_v2 is not set
self.user.reload()
assert_equal(self.user.verification_key_v2, {})
# test that non-existing user cannot receive reset password email
@mock.patch('framework.auth.views.mails.send_mail')
def test_not_active_user_no_reset_password_email(self, mock_send_mail):
self.user.disable_account()
self.user.save()
# load forgot password page and submit email
res = self.app.get(self.get_url)
form = res.forms['forgotPasswordForm']
form['forgot_password-email'] = self.user.username
res = form.submit()
# check mail was not sent
assert_false(mock_send_mail.called)
# check http 200 response
assert_equal(res.status_code, 200)
# check request URL is /forgotpassword
assert_equal(res.request.path, self.post_url)
# check push notification
assert_in_html('If there is an OSF account', res)
assert_not_in_html('Please wait', res)
# check verification_key_v2 is not set
self.user.reload()
assert_equal(self.user.verification_key_v2, {})
# test that user cannot submit forgot password request too quickly
@mock.patch('framework.auth.views.mails.send_mail')
def test_cannot_reset_password_twice_quickly(self, mock_send_mail):
# load forgot password page and submit email
res = self.app.get(self.get_url)
form = res.forms['forgotPasswordForm']
form['forgot_password-email'] = self.user.username
res = form.submit()
res = form.submit()
# check http 200 response
assert_equal(res.status_code, 200)
# check push notification
assert_in_html('Please wait', res)
assert_not_in_html('If there is an OSF account', res)
@unittest.skip('Public projects/components are dynamically loaded now.')
class TestAUserProfile(OsfTestCase):
def setUp(self):
OsfTestCase.setUp(self)
self.user = AuthUserFactory()
self.me = AuthUserFactory()
self.project = ProjectFactory(creator=self.me, is_public=True, title=fake.bs())
self.component = NodeFactory(creator=self.me, parent=self.project, is_public=True, title=fake.bs())
# regression test for https://github.com/CenterForOpenScience/osf.io/issues/2623
def test_has_public_projects_and_components(self):
# I go to my own profile
url = web_url_for('profile_view_id', uid=self.me._primary_key)
# I see the title of both my project and component
res = self.app.get(url, auth=self.me.auth)
assert_in_html(self.component.title, res)
assert_in_html(self.project.title, res)
# Another user can also see my public project and component
url = web_url_for('profile_view_id', uid=self.me._primary_key)
# I see the title of both my project and component
res = self.app.get(url, auth=self.user.auth)
assert_in_html(self.component.title, res)
assert_in_html(self.project.title, res)
def test_shows_projects_with_many_contributors(self):
# My project has many contributors
for _ in range(5):
user = UserFactory()
self.project.add_contributor(user, auth=Auth(self.project.creator), save=True)
# I go to my own profile
url = web_url_for('profile_view_id', uid=self.me._primary_key)
res = self.app.get(url, auth=self.me.auth)
# I see '3 more' as a link
assert_in('3 more', res)
res = res.click('3 more')
assert_equal(res.request.path, self.project.url)
def test_has_no_public_projects_or_components_on_own_profile(self):
# User goes to their profile
url = web_url_for('profile_view_id', uid=self.user._id)
res = self.app.get(url, auth=self.user.auth)
# user has no public components/projects
assert_in('You have no public projects', res)
assert_in('You have no public components', res)
def test_user_no_public_projects_or_components(self):
# I go to other user's profile
url = web_url_for('profile_view_id', uid=self.user._id)
# User has no public components/projects
res = self.app.get(url, auth=self.me.auth)
assert_in('This user has no public projects', res)
assert_in('This user has no public components', res)
# regression test
def test_does_not_show_registrations(self):
project = ProjectFactory(creator=self.user)
component = NodeFactory(parent=project, creator=self.user, is_public=False)
# User has a registration with public components
reg = RegistrationFactory(project=component.parent_node, creator=self.user, is_public=True)
for each in reg.nodes:
each.is_public = True
each.save()
# I go to other user's profile
url = web_url_for('profile_view_id', uid=self.user._id)
# Registration does not appear on profile
res = self.app.get(url, auth=self.me.auth)
assert_in('This user has no public components', res)
assert_not_in(reg.title, res)
assert_not_in(reg.nodes[0].title, res)
@pytest.mark.enable_bookmark_creation
class TestPreprintBannerView(OsfTestCase):
def setUp(self):
super(TestPreprintBannerView, self).setUp()
self.admin = AuthUserFactory()
self.write_contrib = AuthUserFactory()
self.read_contrib = AuthUserFactory()
self.non_contrib = AuthUserFactory()
self.provider_one = PreprintProviderFactory()
self.project_one = ProjectFactory(creator=self.admin, is_public=True)
self.project_one.add_contributor(self.write_contrib, permissions.WRITE)
self.project_one.add_contributor(self.read_contrib, permissions.READ)
self.subject_one = SubjectFactory()
self.preprint = PreprintFactory(creator=self.admin, filename='mgla.pdf', provider=self.provider_one, subjects=[[self.subject_one._id]], project=self.project_one, is_published=True)
self.preprint.add_contributor(self.write_contrib, permissions.WRITE)
self.preprint.add_contributor(self.read_contrib, permissions.READ)
def test_public_project_published_preprint(self):
url = self.project_one.web_url_for('view_project')
# Admin - preprint
res = self.app.get(url, auth=self.admin.auth)
assert_in('Has supplemental materials for', res.body)
# Write - preprint
res = self.app.get(url, auth=self.write_contrib.auth)
assert_in('Has supplemental materials for', res.body)
# Read - preprint
res = self.app.get(url, auth=self.read_contrib.auth)
assert_in('Has supplemental materials for', res.body)
# Noncontrib - preprint
res = self.app.get(url, auth=self.non_contrib.auth)
assert_in('Has supplemental materials for', res.body)
# Unauthenticated - preprint
res = self.app.get(url)
assert_in('Has supplemental materials for', res.body)
def test_public_project_abandoned_preprint(self):
self.preprint.machine_state = 'initial'
self.preprint.save()
url = self.project_one.web_url_for('view_project')
# Admin - preprint
res = self.app.get(url, auth=self.admin.auth)
assert_not_in('Has supplemental materials for', res.body)
# Write - preprint
res = self.app.get(url, auth=self.write_contrib.auth)
assert_not_in('Has supplemental materials for', res.body)
# Read - preprint
res = self.app.get(url, auth=self.read_contrib.auth)
assert_not_in('Has supplemental materials for', res.body)
# Noncontrib - preprint
res = self.app.get(url, auth=self.non_contrib.auth)
assert_not_in('Has supplemental materials for', res.body)
# Unauthenticated - preprint
res = self.app.get(url)
assert_not_in('Has supplemental materials for', res.body)
def test_public_project_deleted_preprint(self):
self.preprint.deleted = timezone.now()
self.preprint.save()
url = self.project_one.web_url_for('view_project')
# Admin - preprint
res = self.app.get(url, auth=self.admin.auth)
assert_not_in('Has supplemental materials for', res.body)
# Write - preprint
res = self.app.get(url, auth=self.write_contrib.auth)
assert_not_in('Has supplemental materials for', res.body)
# Read - preprint
res = self.app.get(url, auth=self.read_contrib.auth)
assert_not_in('Has supplemental materials for', res.body)
# Noncontrib - preprint
res = self.app.get(url, auth=self.non_contrib.auth)
assert_not_in('Has supplemental materials for', res.body)
# Unauthenticated - preprint
res = self.app.get(url)
assert_not_in('Has supplemental materials for', res.body)
def test_public_project_private_preprint(self):
self.preprint.is_public = False
self.preprint.save()
url = self.project_one.web_url_for('view_project')
# Admin - preprint
res = self.app.get(url, auth=self.admin.auth)
assert_in('Has supplemental materials for', res.body)
# Write - preprint
res = self.app.get(url, auth=self.write_contrib.auth)
assert_in('Has supplemental materials for', res.body)
# Read - preprint
res = self.app.get(url, auth=self.read_contrib.auth)
assert_in('Has supplemental materials for', res.body)
# Noncontrib - preprint
res = self.app.get(url, auth=self.non_contrib.auth)
assert_not_in('Has supplemental materials for', res.body)
# Unauthenticated - preprint
res = self.app.get(url)
assert_not_in('Has supplemental materials for', res.body)
def test_public_project_orphaned_preprint(self):
self.preprint.primary_file = None
self.preprint.save()
url = self.project_one.web_url_for('view_project')
# Admin - preprint
res = self.app.get(url, auth=self.admin.auth)
assert_in('Has supplemental materials for', res.body)
# Write - preprint
res = self.app.get(url, auth=self.write_contrib.auth)
assert_in('Has supplemental materials for', res.body)
# Read - preprint
res = self.app.get(url, auth=self.read_contrib.auth)
assert_in('Has supplemental materials for', res.body)
# Noncontrib - preprint
res = self.app.get(url, auth=self.non_contrib.auth)
assert_not_in('Has supplemental materials for', res.body)
# Unauthenticated - preprint
res = self.app.get(url)
assert_not_in('Has supplemental materials for', res.body)
def test_public_project_unpublished_preprint(self):
self.preprint.is_published = False
self.preprint.save()
url = self.project_one.web_url_for('view_project')
# Admin - preprint
res = self.app.get(url, auth=self.admin.auth)
assert_in('Has supplemental materials for', res.body)
# Write - preprint
res = self.app.get(url, auth=self.write_contrib.auth)
assert_in('Has supplemental materials for', res.body)
# Read - preprint
res = self.app.get(url, auth=self.read_contrib.auth)
assert_in('Has supplemental materials for', res.body)
# Noncontrib - preprint
res = self.app.get(url, auth=self.non_contrib.auth)
assert_not_in('Has supplemental materials for', res.body)
# Unauthenticated - preprint
res = self.app.get(url)
assert_not_in('Has supplemental materials for', res.body)
def test_public_project_pending_preprint_post_moderation(self):
self.preprint.machine_state = 'pending'
provider = PreprintProviderFactory(reviews_workflow='post-moderation')
self.preprint.provider = provider
self.preprint.save()
url = self.project_one.web_url_for('view_project')
# Admin - preprint
res = self.app.get(url, auth=self.admin.auth)
assert_in('{}'.format(self.preprint.provider.name), res.body)
assert_in('Pending\n', res.body)
assert_in('This preprint is publicly available and searchable but is subject to removal by a moderator.', res.body)
# Write - preprint
res = self.app.get(url, auth=self.write_contrib.auth)
assert_in('{}'.format(self.preprint.provider.name), res.body)
assert_in('Pending\n', res.body)
assert_in('This preprint is publicly available and searchable but is subject to removal by a moderator.', res.body)
# Read - preprint
res = self.app.get(url, auth=self.read_contrib.auth)
assert_in('{}'.format(self.preprint.provider.name), res.body)
assert_in('Pending\n', res.body)
assert_in('This preprint is publicly available and searchable but is subject to removal by a moderator.', res.body)
# Noncontrib - preprint
res = self.app.get(url, auth=self.non_contrib.auth)
assert_in('on {}'.format(self.preprint.provider.name), res.body)
assert_not_in('Pending\n', res.body)
assert_not_in('This preprint is publicly available and searchable but is subject to removal by a moderator.', res.body)
# Unauthenticated - preprint
res = self.app.get(url)
assert_in('on {}'.format(self.preprint.provider.name), res.body)
assert_not_in('Pending\n', res.body)
assert_not_in('This preprint is publicly available and searchable but is subject to removal by a moderator.', res.body)
def test_implicit_admins_can_see_project_status(self):
project = ProjectFactory(creator=self.admin)
component = NodeFactory(creator=self.admin, parent=project)
project.add_contributor(self.write_contrib, permissions.ADMIN)
project.save()
preprint = PreprintFactory(creator=self.admin, filename='mgla.pdf', provider=self.provider_one, subjects=[[self.subject_one._id]], project=component, is_published=True)
preprint.machine_state = 'pending'
provider = PreprintProviderFactory(reviews_workflow='post-moderation')
preprint.provider = provider
preprint.save()
url = component.web_url_for('view_project')
res = self.app.get(url, auth=self.write_contrib.auth)
assert_in('{}'.format(preprint.provider.name), res.body)
assert_in('Pending\n', res.body)
assert_in('This preprint is publicly available and searchable but is subject to removal by a moderator.', res.body)
def test_public_project_pending_preprint_pre_moderation(self):
self.preprint.machine_state = 'pending'
provider = PreprintProviderFactory(reviews_workflow='pre-moderation')
self.preprint.provider = provider
self.preprint.save()
url = self.project_one.web_url_for('view_project')
# Admin - preprint
res = self.app.get(url, auth=self.admin.auth)
assert_in('{}'.format(self.preprint.provider.name), res.body)
assert_in('Pending\n', res.body)
assert_in('This preprint is not publicly available or searchable until approved by a moderator.', res.body)
# Write - preprint
res = self.app.get(url, auth=self.write_contrib.auth)
assert_in('{}'.format(self.preprint.provider.name), res.body)
assert_in('Pending\n', res.body)
assert_in('This preprint is not publicly available or searchable until approved by a moderator.', res.body)
# Read - preprint
res = self.app.get(url, auth=self.read_contrib.auth)
assert_in('{}'.format(self.preprint.provider.name), res.body)
assert_in('Pending\n', res.body)
assert_in('This preprint is not publicly available or searchable until approved by a moderator.', res.body)
# Noncontrib - preprint
res = self.app.get(url, auth=self.non_contrib.auth)
assert_in('{}'.format(self.preprint.provider.name), res.body)
assert_not_in('Pending\n', res.body)
assert_not_in('This preprint is not publicly available or searchable until approved by a moderator.', res.body)
# Unauthenticated - preprint
res = self.app.get(url)
assert_in('{}'.format(self.preprint.provider.name), res.body)
assert_not_in('Pending\n', res.body)
assert_not_in('This preprint is not publicly available or searchable until approved by a moderator.', res.body)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 6,935,636,752,297,277,000 | 38.716185 | 188 | 0.637931 | false |
phalt/dj-twiml | tests/test_views.py | 1 | 1400 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_dj-twiml-views
------------
Tests for the `dj_twiml` views module.
"""
from hmac import new
from hashlib import sha1
from base64 import encodestring
from django.test import Client, TestCase, RequestFactory
from django.conf import settings
from dj_twiml import views
class TestDj_twiml(TestCase):
fixtures = ['dj_twiml.json']
def setUp(self):
self.client = Client()
self.factory = RequestFactory()
self.uri = 'http://testserver/twiml/'
self.t1_uri = '/twiml/1/'
settings.TWILIO_AUTH_TOKEN = 'xxx'
settings.TWILIO_ACCOUNT_SID = 'xxx'
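        # Build the X-Twilio-Signature value the way Twilio documents it:
        # HMAC-SHA1 over the full request URL (plus any sorted POST params,
        # none here), keyed with the auth token and base64-encoded.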
self.signature = encodestring(
new(settings.TWILIO_AUTH_TOKEN,
'%s1/' % self.uri, sha1).digest()).strip()
def test_detail_forgery_off(self):
request = self.factory.post(
self.t1_uri, HTTP_X_TWILIO_SIGNATURE=self.signature)
deets = views.detail(request, twiml_id=1)
self.assertIn('<Response><Dial>', deets)
def test_detail_forgery_on(self):
''' Same as above but with forgery protection on'''
settings.DJANGO_TWILIO_FORGERY_PROTECTION = True
request = self.factory.post(
self.t1_uri, HTTP_X_TWILIO_SIGNATURE=self.signature)
deets = views.detail(request, twiml_id=1)
self.assertIn('<Response><Dial>', deets)
| bsd-3-clause | 2,297,203,071,420,323,000 | 24.925926 | 64 | 0.628571 | false |
klmitch/keystone | keystone/trust/controllers.py | 1 | 11243 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo_utils import timeutils
import six
from keystone import assignment
from keystone.common import controller
from keystone.common import dependency
from keystone.common import utils
from keystone.common import validation
from keystone import exception
from keystone.i18n import _
from keystone import notifications
from keystone.trust import schema
def _trustor_trustee_only(trust, user_id):
if user_id not in [trust.get('trustee_user_id'),
trust.get('trustor_user_id')]:
raise exception.Forbidden()
def _admin_trustor_only(context, trust, user_id):
if user_id != trust.get('trustor_user_id') and not context['is_admin']:
raise exception.Forbidden()
@dependency.requires('assignment_api', 'identity_api', 'resource_api',
'role_api', 'token_provider_api', 'trust_api')
class TrustV3(controller.V3Controller):
collection_name = "trusts"
member_name = "trust"
@classmethod
def base_url(cls, context, path=None):
"""Construct a path and pass it to V3Controller.base_url method."""
# NOTE(stevemar): Overriding path to /OS-TRUST/trusts so that
# V3Controller.base_url handles setting the self link correctly.
path = '/OS-TRUST/' + cls.collection_name
return super(TrustV3, cls).base_url(context, path=path)
def _get_user_id(self, context):
try:
token_ref = utils.get_token_ref(context)
except exception.Unauthorized:
return None
return token_ref.user_id
def get_trust(self, context, trust_id):
user_id = self._get_user_id(context)
trust = self.trust_api.get_trust(trust_id)
_trustor_trustee_only(trust, user_id)
self._fill_in_roles(context, trust,
self.role_api.list_roles())
return TrustV3.wrap_member(context, trust)
def _fill_in_roles(self, context, trust, all_roles):
if trust.get('expires_at') is not None:
trust['expires_at'] = (utils.isotime
(trust['expires_at'],
subsecond=True))
if 'roles' not in trust:
trust['roles'] = []
trust_full_roles = []
for trust_role in trust['roles']:
if isinstance(trust_role, six.string_types):
trust_role = {'id': trust_role}
matching_roles = [x for x in all_roles
if x['id'] == trust_role['id']]
if matching_roles:
full_role = assignment.controllers.RoleV3.wrap_member(
context, matching_roles[0])['role']
trust_full_roles.append(full_role)
trust['roles'] = trust_full_roles
trust['roles_links'] = {
'self': (self.base_url(context) + "/%s/roles" % trust['id']),
'next': None,
'previous': None}
def _normalize_role_list(self, trust, all_roles):
trust_roles = []
all_role_names = {r['name']: r for r in all_roles}
for role in trust.get('roles', []):
if 'id' in role:
trust_roles.append({'id': role['id']})
elif 'name' in role:
rolename = role['name']
if rolename in all_role_names:
trust_roles.append({'id':
all_role_names[rolename]['id']})
else:
raise exception.RoleNotFound(_("role %s is not defined") %
rolename)
else:
raise exception.ValidationError(attribute='id or name',
target='roles')
return trust_roles
@controller.protected()
@validation.validated(schema.trust_create, 'trust')
def create_trust(self, context, trust):
"""Create a new trust.
The user creating the trust must be the trustor.
"""
auth_context = context.get('environment',
{}).get('KEYSTONE_AUTH_CONTEXT', {})
# Check if delegated via trust
if auth_context.get('is_delegated_auth'):
# Redelegation case
src_trust_id = auth_context['trust_id']
if not src_trust_id:
raise exception.Forbidden(
_('Redelegation allowed for delegated by trust only'))
redelegated_trust = self.trust_api.get_trust(src_trust_id)
else:
redelegated_trust = None
if trust.get('project_id'):
self._require_role(trust)
self._require_user_is_trustor(context, trust)
self._require_trustee_exists(trust['trustee_user_id'])
all_roles = self.role_api.list_roles()
# Normalize roles
normalized_roles = self._normalize_role_list(trust, all_roles)
trust['roles'] = normalized_roles
self._require_trustor_has_role_in_project(trust)
trust['expires_at'] = self._parse_expiration_date(
trust.get('expires_at'))
trust_id = uuid.uuid4().hex
initiator = notifications._get_request_audit_info(context)
new_trust = self.trust_api.create_trust(trust_id, trust,
normalized_roles,
redelegated_trust,
initiator)
self._fill_in_roles(context, new_trust, all_roles)
return TrustV3.wrap_member(context, new_trust)
def _require_trustee_exists(self, trustee_user_id):
self.identity_api.get_user(trustee_user_id)
def _require_user_is_trustor(self, context, trust):
user_id = self._get_user_id(context)
if user_id != trust.get('trustor_user_id'):
raise exception.Forbidden(
_("The authenticated user should match the trustor."))
def _require_role(self, trust):
if not trust.get('roles'):
raise exception.Forbidden(
_('At least one role should be specified.'))
def _get_trustor_roles(self, trust):
original_trust = trust.copy()
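        # Walk up the redelegation chain to the original (root) trust; roles
        # are then looked up for that trust's trustor on its project.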
while original_trust.get('redelegated_trust_id'):
original_trust = self.trust_api.get_trust(
original_trust['redelegated_trust_id'])
if not self._attribute_is_empty(trust, 'project_id'):
self.resource_api.get_project(original_trust['project_id'])
# Get a list of roles including any domain specific roles
assignment_list = self.assignment_api.list_role_assignments(
user_id=original_trust['trustor_user_id'],
project_id=original_trust['project_id'],
effective=True, strip_domain_roles=False)
return list(set([x['role_id'] for x in assignment_list]))
else:
return []
def _require_trustor_has_role_in_project(self, trust):
trustor_roles = self._get_trustor_roles(trust)
for trust_role in trust['roles']:
matching_roles = [x for x in trustor_roles
if x == trust_role['id']]
if not matching_roles:
raise exception.RoleNotFound(role_id=trust_role['id'])
def _parse_expiration_date(self, expiration_date):
if expiration_date is None:
return None
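        # Timestamps without an explicit timezone designator are treated as
        # UTC by appending 'Z' before parsing.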
if not expiration_date.endswith('Z'):
expiration_date += 'Z'
try:
expiration_time = timeutils.parse_isotime(expiration_date)
except ValueError:
raise exception.ValidationTimeStampError()
if timeutils.is_older_than(expiration_time, 0):
raise exception.ValidationExpirationError()
return expiration_time
def _check_role_for_trust(self, context, trust_id, role_id):
"""Checks if a role has been assigned to a trust."""
trust = self.trust_api.get_trust(trust_id)
user_id = self._get_user_id(context)
_trustor_trustee_only(trust, user_id)
if not any(role['id'] == role_id for role in trust['roles']):
raise exception.RoleNotFound(role_id=role_id)
@controller.protected()
def list_trusts(self, context):
query = context['query_string']
trusts = []
if not query:
self.assert_admin(context)
trusts += self.trust_api.list_trusts()
if 'trustor_user_id' in query:
user_id = query['trustor_user_id']
calling_user_id = self._get_user_id(context)
if user_id != calling_user_id:
raise exception.Forbidden()
trusts += (self.trust_api.
list_trusts_for_trustor(user_id))
if 'trustee_user_id' in query:
user_id = query['trustee_user_id']
calling_user_id = self._get_user_id(context)
if user_id != calling_user_id:
raise exception.Forbidden()
trusts += self.trust_api.list_trusts_for_trustee(user_id)
for trust in trusts:
# get_trust returns roles, list_trusts does not
            # It seems that in some circumstances the 'roles' key does not
            # exist in the query response, so check for it first
if 'roles' in trust:
del trust['roles']
if trust.get('expires_at') is not None:
trust['expires_at'] = (utils.isotime
(trust['expires_at'],
subsecond=True))
return TrustV3.wrap_collection(context, trusts)
@controller.protected()
def delete_trust(self, context, trust_id):
trust = self.trust_api.get_trust(trust_id)
user_id = self._get_user_id(context)
_admin_trustor_only(context, trust, user_id)
initiator = notifications._get_request_audit_info(context)
self.trust_api.delete_trust(trust_id, initiator)
@controller.protected()
def list_roles_for_trust(self, context, trust_id):
trust = self.get_trust(context, trust_id)['trust']
user_id = self._get_user_id(context)
_trustor_trustee_only(trust, user_id)
return {'roles': trust['roles'],
'links': trust['roles_links']}
@controller.protected()
def get_role_for_trust(self, context, trust_id, role_id):
"""Get a role that has been assigned to a trust."""
self._check_role_for_trust(context, trust_id, role_id)
role = self.role_api.get_role(role_id)
return assignment.controllers.RoleV3.wrap_member(context, role)
| apache-2.0 | 7,273,288,418,649,957,000 | 40.640741 | 78 | 0.579383 | false |
anjos/popster | popster/qnap.py | 1 | 8677 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Utilities to use the QNAP Container Station API
See documentation here: http://qnap-dev.github.io/container-station-api/index.html
"""
import os
import sys
import pickle
import requests
import getpass
import warnings
import contextlib
import logging
logger = logging.getLogger(__name__)
import pkg_resources
SESSION_FILE = os.path.expanduser("~/.qnap-auth.pickle")
@contextlib.contextmanager
def no_ssl_warnings(verify):
if not verify:
warnings.filterwarnings("ignore", "Unverified HTTPS request")
yield
if not verify:
warnings.resetwarnings()
def api(session, server, url, verb="get", data=None, json=None, verify=False):
"""Calls the container station API with a given url and data dictionary
Parameters:
session (requests.Session): A previously opened session with the
authentication cookies to use
server (str): The server to reach
      url (str): The URL to call on the container station API, relative to
        ``/containerstation/api/v1``, which is always prepended.
verb (str, Optional): One of the HTTP verbs to query the URL with. If not
specified, defaults to ``get``. Any verb available in
:py:class:`requests.Session` is acceptable.
data (dict, Optional): A dictionary containing parameters to pass to the
API
      verify (bool, Optional): Whether to use ``verify=True`` for requests calls
Returns:
      requests.Response: The response from the HTTP API call; its
        ``status_code`` and ``json()`` carry the returned status and payload
"""
url = server + "/containerstation/api/v1" + url
logger.debug(f"{verb.upper()} {url}")
with no_ssl_warnings(verify):
return getattr(session, verb)(url, data=data, json=json, verify=verify)
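# Illustrative example (the host name below is a placeholder, not a value
# shipped with this module):
#
#   api(session, "https://nas.example.com:5001", "/container")
#
# issues GET https://nas.example.com:5001/containerstation/api/v1/container
# and returns the requests.Response object.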
def login(server, username, password, verify=False):
"""Logs-in the server, if a session file is not available yet.
Parameters:
server (str): The server to reach
username (str): The user identifier to use for login
password (str): The user password for login
      verify (bool, Optional): Whether to use ``verify=True`` for requests calls
Returns:
requests.Session: A restored or new session after successfuly
authenticating to the server
"""
if os.path.exists(SESSION_FILE):
logger.debug(f"Session file ({SESSION_FILE}) exists - trying to use it")
with open(SESSION_FILE, "rb") as f:
session = pickle.load(f)
result = api(session, server, "/login_refresh", verify=verify)
if "error" in result.json():
logout(server, verify=verify)
if not os.path.exists(SESSION_FILE):
logger.debug(f"Session file ({SESSION_FILE}) does not exist - logging-in")
session = requests.Session()
data = dict(username=username, password=password)
result = api(
session, server, "/login", verb="post", data=data, verify=verify
)
if result.status_code != 200:
raise RuntimeError(
"Login request failed with status code %d" % result.status_code
)
response = result.json()
if response.get("username") != username:
raise RuntimeError(
"Login request for user %s failed (%s is "
"logged in)" % (username, response.get("username"))
)
with open(SESSION_FILE, "wb") as f:
pickle.dump(session, f)
return session
def logout(server, verify=False):
"""Logs the user out
Parameters:
server (str): The server to reach
      verify (bool, Optional): Whether to use ``verify=True`` for requests calls
"""
if not os.path.exists(SESSION_FILE):
logger.error(f"No session file exists at {SESSION_FILE} - not logging out")
return
logger.debug("Logging out...")
with open(SESSION_FILE, "rb") as f:
session = pickle.load(f)
result = api(session, server, "/logout", verb="put", verify=verify)
response = result.json()
if os.path.exists(SESSION_FILE):
logger.debug(f"Removing {SESSION_FILE}...")
os.unlink(SESSION_FILE)
session.close() # close all connections
@contextlib.contextmanager
def session(server, username, password, verify=False):
"""Context manager that opens and closes a connection to the NAS"""
yield login(server, username, password, verify=verify)
logout(server, verify=verify)
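# Minimal usage sketch (illustrative only; the URL and credentials are
# placeholders):
#
#   with session("https://nas.example.com:5001", "admin", "secret") as s:
#       containers = get_containers(s, "https://nas.example.com:5001")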
def system(server, session=None, verify=False):
"""Checks system information
Parameters:
server (str): The server to reach
      session (requests.Session): A previously opened, authenticated session to use for the call
      verify (bool, Optional): Whether to use ``verify=True`` for requests calls
Returns:
dict: A valid JSON object, decoded into Python
"""
return api(session, server, "/system", verify=verify).json()
def get_containers(session, server, verify=False):
"""Gets all information on available containers
Parameters:
      session (requests.Session): A previously opened, authenticated session to use for the call
server (str): The server to reach
      verify (bool, Optional): Whether to use ``verify=True`` for requests calls
Returns:
list of dict: Containing information about all running containers
"""
return api(session, server, "/container", verify=verify).json()
def inspect_container(session, server, id_, verify=False):
"""Gets all information on the container with the given identifier
Parameters:
      session (requests.Session): A previously opened, authenticated session to use for the call
server (str): The server to reach
      id_ (str): The identifier of the container to inspect
      verify (bool, Optional): Whether to use ``verify=True`` for requests calls
Returns:
list: A valid JSON object, decoded into Python
"""
return api(
session, server, "/container/docker/%s/inspect" % id_, verify=verify
).json()
def stop_container(session, server, id_, verify=False):
"""Stops the container with the given identifier
Parameters:
      session (requests.Session): A previously opened, authenticated session to use for the call
server (str): The server to reach
      id_ (str): The identifier of the container to stop
      verify (bool, Optional): Whether to use ``verify=True`` for requests calls
Returns:
list: A valid JSON object, decoded into Python
"""
return api(
session,
server,
"/container/docker/%s/stop" % id_,
verb="put",
verify=verify,
).json()
def remove_container(session, server, id_, verify=False):
"""Removes the container with the given identifier
Parameters:
      session (requests.Session): A previously opened, authenticated session to use for the call
server (str): The server to reach
      id_ (str): The identifier of the container to be removed
      verify (bool, Optional): Whether to use ``verify=True`` for requests calls
Returns:
list: A valid JSON object, decoded into Python
"""
return api(
session,
server,
"/container/docker/%s" % id_,
verb="delete",
verify=verify,
).json()
def create_container(
session,
server,
name,
options,
image="anjos/popster",
tag="v%s" % pkg_resources.require("popster")[0].version,
verify=False,
):
"""Creates a container with an existing image
Parameters:
      session (requests.Session): A previously opened, authenticated session to use for the call
server (str): The server to reach
name (str): The name of the container to update
image (str): The name of the image to use for the update (e.g.:
'anjos/popster')
tag (str): Tag to be used for the above image (e.g.: 'v1.2.3')
options (dict): A dictionary of options that will be passed to the API
      verify (bool, Optional): Whether to use ``verify=True`` for requests calls
"""
info = dict(type="docker", name=name, image=image, tag=tag,)
# prepares new container information
info.update(options)
response = api(
session, server, "/container", verb="post", json=info, verify=verify
)
return response.json()
def retrieve_logs(session, server, id_, tail=1000, verify=False):
"""Retrieves the logs from container
Parameters:
      session (requests.Session): A previously opened, authenticated session to use for the call
server (str): The server to reach
id_ (str): The identifier of the container to retrieve logs from
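      tail (int, Optional): The number of trailing log lines to request,
        passed as the ``tail`` query parameter (defaults to 1000)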
      verify (bool, Optional): Whether to use ``verify=True`` for requests calls
"""
return api(
session,
server,
"/container/docker/%s/logs?tail=%d" % (id_, tail),
verify=verify,
).json()
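# Sketch of an update flow built from the helpers above (illustrative only;
# it assumes the JSON returned by get_containers() carries "name" and "id"
# keys, and all names/URLs are placeholders):
#
#   with session(server, username, password) as s:
#       stale = [c for c in get_containers(s, server) if c.get("name") == "popster"]
#       for c in stale:
#           stop_container(s, server, c["id"])
#           remove_container(s, server, c["id"])
#       create_container(s, server, "popster", options={})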
| gpl-3.0 | -3,796,160,623,227,330,000 | 22.837912 | 83 | 0.659329 | false |
ionelmc/virtualenv | virtualenv/_compat.py | 1 | 1342 | from __future__ import absolute_import, division, print_function
try:
FileNotFoundError = FileNotFoundError
except NameError: # pragma: no cover
FileNotFoundError = OSError
# Python 2.6 does not have check_output, so we'll backport this just for
# Python 2.6
import subprocess
try:
from subprocess import check_output
except ImportError: # pragma: no cover
def check_output(*popenargs, **kwargs):
if "stdout" in kwargs:
raise ValueError(
"stdout argument not allowed, it will be overridden."
)
if "input" in kwargs:
if "stdin" in kwargs:
raise ValueError(
"stdin and input arguments may not both be used."
)
inputdata = kwargs["input"]
del kwargs["input"]
kwargs["stdin"] = subprocess.PIPE
else:
inputdata = None
process = subprocess.Popen(
*popenargs,
stdout=subprocess.PIPE,
**kwargs
)
try:
output, unused_err = process.communicate(inputdata)
except:
process.kill()
process.wait()
raise
retcode = process.poll()
if retcode:
            # CalledProcessError expects the command, not the output, as its
            # second argument; attach the captured output separately so this
            # also works on Python 2.6 (which has no ``output`` keyword).
            error = subprocess.CalledProcessError(retcode, popenargs[0])
            error.output = output
            raise error
return output
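# Illustrative behaviour (not part of the original module): on a POSIX system
# under Python 3, check_output(["echo", "hi"]) returns b"hi\n"; a non-zero
# exit status raises subprocess.CalledProcessError with the output attached.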
| mit | 4,296,613,407,612,718,600 | 29.5 | 72 | 0.5693 | false |