| repo_name (string, 6-61) | path (string, 4-230) | copies (string, 1-3) | size (string, 4-6) | text (string, 1.01k-850k) | license (15 classes) | hash (int64, -9,220,477,234,079,998,000 to 9,219,060,020B) | line_mean (float64, 11.6-96.6) | line_max (int64, 32-939) | alpha_frac (float64, 0.26-0.9) | autogenerated (bool, 1 class) | ratio (float64, 1.62-6.1) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
pedroluislopez/candidaturas | candidatos/migrations/0001_initial.py | 1 | 1696 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Candidato',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('nombre', models.CharField(max_length=50, verbose_name=b'Nombre')),
('apellidos', models.CharField(max_length=50, verbose_name=b'Apellidos')),
('email', models.CharField(unique=True, max_length=50, verbose_name=b'Email')),
('nif', models.CharField(unique=True, max_length=9, verbose_name=b'NIF')),
('telefono', models.CharField(max_length=9, verbose_name=b'Tel\xc3\xa9fono')),
('imagen', models.ImageField(upload_to=b'profile_images', verbose_name=b'Imagen', blank=True)),
('secretario', models.BooleanField(default=False, verbose_name=b'Secretario General')),
('consejo', models.BooleanField(default=False, verbose_name=b'Consejo Ciudadano')),
('biografia', models.TextField(max_length=2000, verbose_name=b'Biograf\xc3\xada', blank=True)),
('motivacion', models.TextField(max_length=2000, verbose_name=b'Motivaci\xc3\xb3n', blank=True)),
('youtube', models.CharField(max_length=50, verbose_name=b'V\xc3\xaddeo youtube', blank=True)),
('activo', models.BooleanField(default=True, verbose_name=b'Activo')),
],
options={
},
bases=(models.Model,),
),
]
| gpl-3.0 | -897,410,396,831,920,000 | 48.882353 | 114 | 0.599057 | false | 3.631692 | false | false | false |
muchu1983/findfine | findfine_crawler/crawlerForGYG.py | 2 | 15857 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2016, MuChu Hsu
Contributed by Muchu Hsu ([email protected])
This file is part of BSD license
<https://opensource.org/licenses/BSD-3-Clause>
"""
import os
import time
import logging
import re
import random
import datetime
import urllib.parse
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.action_chains import ActionChains
from bennu.filesystemutility import FileSystemUtility as FilesysUtility
from findfine_crawler.utility import Utility as FfUtility
from findfine_crawler.localdb import LocalDbForGYG
"""
Crawl GetYourGuide data and store it in the database
"""
class CrawlerForGYG:
#constructor
def __init__(self):
self.dicSubCommandHandler = {
"index":self.crawlIndexPage,
"city":self.crawlCityPage,
"product":self.crawlProductPage
}
self.ffUtil = FfUtility()
self.fileUtil = FilesysUtility()
self.db = LocalDbForGYG()
self.lstDicParsedProductJson = [] #product.json data
self.intProductJsonIndex = 1
self.driver = None
#get spider usage information
def getUseageMessage(self):
return (
"- GetYourGuide -\n"
"useage:\n"
"index - crawl index page of GetYourGuide \n"
"city - crawl not obtained city page \n"
"product [city_page_1_url] - crawl not obtained product page [of given city_page_1_url] \n"
)
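#usage sketch (assumed CLI wrapper, not part of this module; the city url below is a made-up example):
# crawler = CrawlerForGYG()
# crawler.runCrawler(["index"])
# crawler.runCrawler(["city"])
# crawler.runCrawler(["product", "https://www.getyourguide.com/s/?q=paris&lc=l16"])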
#get a selenium driver object
def getDriver(self):
chromeDriverExeFilePath = self.fileUtil.getPackageResourcePath(strPackageName="findfine_crawler.resource", strResourceName="chromedriver.exe")
options = webdriver.ChromeOptions()
options.add_argument("--start-maximized")
driver = webdriver.Chrome(executable_path=chromeDriverExeFilePath, chrome_options=options)
return driver
#initialize the selenium driver object
def initDriver(self):
if not self.driver:
self.driver = self.getDriver()
#quit the selenium driver object
def quitDriver(self):
self.driver.quit()
self.driver = None
#restart the selenium driver object
def restartDriver(self):
self.quitDriver()
time.sleep(5)
self.initDriver()
#run the crawler
def runCrawler(self, lstSubcommand=None):
strSubcommand = lstSubcommand[0]
strArg1 = None
if len(lstSubcommand) == 2:
strArg1 = lstSubcommand[1]
self.initDriver() #init selenium driver
self.dicSubCommandHandler[strSubcommand](strArg1)
self.quitDriver() #quit selenium driver
#crawl the index page
def crawlIndexPage(self, uselessArg1=None):
logging.info("crawl index page")
#GetYourGuide index page
self.driver.get("https://www.getyourguide.com/")
#click through "show more cities"
elesMoreBtn = self.driver.find_elements_by_css_selector("div.section-navigation button.cities-show-more")
while len(elesMoreBtn) > 0:
time.sleep(3)
elesMoreBtn[0].click()
time.sleep(5)
elesMoreBtn = self.driver.find_elements_by_css_selector("div.section-navigation button.cities-show-more")
time.sleep(3)
#parse city hyperlinks
lstEleCityA = self.driver.find_elements_by_css_selector("div.top-destinations div.top-destination a.cities-image-box")
for eleCityA in lstEleCityA:
strCityHref = eleCityA.get_attribute("href")
#save the city hyperlink to localdb
if strCityHref.startswith("https://www.getyourguide.com/"):
lstStrCityKeyWord = re.sub("https://www.getyourguide.com/", "", strCityHref).split("-")
strQ = u"q=" + u"%20".join(lstStrCityKeyWord[0:-1])
strLc = u"lc=l" + re.sub("[^0-9]", "", lstStrCityKeyWord[-1])
strCityPage1Url = u"https://www.getyourguide.com/s/?" + strQ + u"&" + strLc
self.db.insertCityIfNotExists(strCityPage1Url=strCityPage1Url)
logging.info("save city url: %s"%strCityPage1Url)
#parse a city page
def parseCityPage(self, strCityPage1Url=None):
#find product hyperlinks
elesProductA = self.driver.find_elements_by_css_selector("article a.activity-card-link")
for eleProductA in elesProductA:
strProductUrl = eleProductA.get_attribute("href")
#save the product hyperlink to localdb
if strProductUrl.startswith("https://www.getyourguide.com/"):
logging.info("insert product url: %s"%strProductUrl)
self.db.insertProductUrlIfNotExists(strProductUrl=strProductUrl, strCityPage1Url=strCityPage1Url)
#crawl city pages
def crawlCityPage(self, uselessArg1=None):
logging.info("crawl city page")
#get city urls not yet obtained from the db
lstStrNotObtainedCityPage1Url = self.db.fetchallNotObtainedCityUrl()
for strNotObtainedCityPage1Url in lstStrNotObtainedCityPage1Url:
#use re to extract the city name
strCityName = re.sub("%20", " ", re.match("^https://www\.getyourguide\.com/s/\?q=(.*)&lc=l[\d]+$", strNotObtainedCityPage1Url).group(1))
#city page number
intCityPageNum = 1
#city page 1
time.sleep(random.randint(2,5)) #sleep random time
self.driver.get(strNotObtainedCityPage1Url)
time.sleep(60)
#parse product hyperlinks
self.parseCityPage(strCityPage1Url=strNotObtainedCityPage1Url) #parse page 1
#click through "show more activities"
elesShowMoreBtn = self.driver.find_elements_by_css_selector(".activities-show-more .btn")
while len(elesShowMoreBtn) > 0 and elesShowMoreBtn[0].is_displayed():
eleShowMoreBtn = elesShowMoreBtn[0]
time.sleep(random.randint(5,8)) #sleep random time
intCityPageNum = intCityPageNum+1
eleShowMoreBtn.click()
time.sleep(60) #wait click action complete
#parse product hyperlinks
self.parseCityPage(strCityPage1Url=strNotObtainedCityPage1Url) #parse pages 2, 3, 4 ... n-1
#check whether the city page still has "show more activities"
elesShowMoreBtn = self.driver.find_elements_by_css_selector(".activities-show-more .btn")
#parse product hyperlinks
self.parseCityPage(strCityPage1Url=strNotObtainedCityPage1Url) #parse the last page
#update the city record in the db as obtained (isGot = 1)
self.db.updateCityStatusIsGot(strCityPage1Url=strNotObtainedCityPage1Url)
logging.info("got city %s find %d pages"%(strCityName, intCityPageNum))
#parse a product page
def parseProductPage(self, strProductUrl=None, strCityName=None):
dicProductJson = {}
#strSource
dicProductJson["strSource"] = "GetYourGuide"
#strOriginUrl
dicProductJson["strOriginUrl"] = strProductUrl + u"?partner_id=JOIL1TN"
#strUpdateStatus
dicProductJson["strUpdateStatus"] = "up-to-date"
#strUpdateTime
dicProductJson["strUpdateTime"] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
#strImageUrl
strImageUrl = None
elesImg = self.driver.find_elements_by_css_selector("#photos div.photo-viewer-slider img.photo-item")
for eleImg in elesImg:
strImgSrc = eleImg.get_attribute("src")
if strImgSrc.startswith("https://cdn.getyourguide.com/img/"):
strImageUrl = strImgSrc
break
dicProductJson["strImageUrl"] = strImageUrl
#strTitle
strTitle = self.driver.find_element_by_css_selector("h1#activity-title").text
dicProductJson["strTitle"] = strTitle.strip()
#strLocation
dicProductJson["strLocation"] = strCityName
#intUsdCost
intUsdCost = 0
if len(self.driver.find_elements_by_css_selector("header.header p.total-price")) > 0:
strUsdCost = self.driver.find_element_by_css_selector("header.header p.total-price").text.strip()
if strUsdCost == "Sold out": #已售完
intUsdCost = 0
else:
elesDealPriceSpan = self.driver.find_elements_by_css_selector("header.header p.total-price span.deal-price")
isDealPriceExists = True if len(elesDealPriceSpan) > 0 else False
if isDealPriceExists: #deal price
intUsdCost = int(float(re.sub("[^0-9\.]", "", elesDealPriceSpan[0].text)))
else: #list price
intUsdCost = int(float(re.sub("[^0-9\.]", "", strUsdCost)))
elif len(self.driver.find_elements_by_css_selector("div.activity-column-minor p.price strong.price-actual")) > 0:
strUsdCost = self.driver.find_element_by_css_selector("div.activity-column-minor p.price strong.price-actual").text.strip()
intUsdCost = int(float(re.sub("[^0-9\.]", "", strUsdCost)))
elif len(self.driver.find_elements_by_css_selector("div.price-detail p.price strong.price-actual")) > 0:
strUsdCost = self.driver.find_element_by_css_selector("div.price-detail p.price strong.price-actual").text.strip()
intUsdCost = int(float(re.sub("[^0-9\.]", "", strUsdCost)))
else:
pass
dicProductJson["intUsdCost"] = intUsdCost
#intReviewStar
intReviewStar = 0
if len(self.driver.find_elements_by_css_selector("div.activity-rating span.rating")) > 0:
strRatingTitle = self.driver.find_element_by_css_selector("div.activity-rating span.rating").get_attribute("title").strip()
strReviewStar = re.match("^Rating: ([0-9\.]+) out of 5$", strRatingTitle).group(1)
intReviewStar = int(float(strReviewStar))
dicProductJson["intReviewStar"] = intReviewStar
#intReviewVisitor
intReviewVisitor = 0
if len(self.driver.find_elements_by_css_selector("#rating-link")) > 0:
strReviewVisitor = re.sub("[^\d]", "", self.driver.find_element_by_css_selector("#rating-link").text).strip()
intReviewVisitor = int(float(strReviewVisitor))
dicProductJson["intReviewVisitor"] = intReviewVisitor
#strIntroduction
strIntroduction = u""
elesIntroduction = self.driver.find_elements_by_css_selector("#highlights *")
for eleIntroduction in elesIntroduction:
strIntroduction = strIntroduction + u" " + re.sub("\s", " ", eleIntroduction.text.strip())
dicProductJson["strIntroduction"] = strIntroduction.strip()
#intDurationHour
strDurationHour = self.driver.find_element_by_css_selector("div.key-info-box div div.time").text.strip()
strDurationHour = re.sub("\s", " ", strDurationHour.lower())
intDurationHour = self.convertDurationStringToHourInt(strDurtation=strDurationHour)
dicProductJson["intDurationHour"] = intDurationHour
#strGuideLanguage
strGuideLanguage = u"english"
if len(self.driver.find_elements_by_css_selector("div.key-info-box div.live-guide div.lang")) > 0:
strGuideLanguage = self.driver.find_element_by_css_selector("div.key-info-box div.live-guide div.lang").text.strip().lower()
dicProductJson["strGuideLanguage"] = strGuideLanguage
#intOption (to be confirmed)
dicProductJson["intOption"] = None
#strStyle (not available on GetYourGuide)
dicProductJson["strStyle"] = None
self.lstDicParsedProductJson.append(dicProductJson)
#crawl product pages (if strCityPage1Url == None, cities whose crawl is already complete are used automatically)
def crawlProductPage(self, strCityPage1Url=None):
#clear leftover data from memory
self.lstDicParsedProductJson = []
self.intProductJsonIndex = 1
if not strCityPage1Url:
#no city specified
lstStrObtainedCityUrl = self.db.fetchallCompletedObtainedCityUrl()
for strObtainedCountryUrl in lstStrObtainedCityUrl:
self.crawlProductPageWithGivenCityUrl(strCityPage1Url=strObtainedCountryUrl)
else:
#a city url was specified
self.crawlProductPageWithGivenCityUrl(strCityPage1Url=strCityPage1Url)
#write the remaining data to json
if len(self.lstDicParsedProductJson) > 0:
strJsonFileName = "%d_product.json"%(self.intProductJsonIndex*100)
strProductJsonFilePath = self.fileUtil.getPackageResourcePath(strPackageName="findfine_crawler.resource.parsed_json.gyg", strResourceName=strJsonFileName)
self.ffUtil.writeObjectToJsonFile(dicData=self.lstDicParsedProductJson, strJsonFilePath=strProductJsonFilePath)
self.lstDicParsedProductJson = []
#crawl product pages (for the given city url)
def crawlProductPageWithGivenCityUrl(self, strCityPage1Url=None):
logging.info("crawl product page with city %s"%strCityPage1Url)
#use re to extract the city name
strCityName = re.sub("%20", " ", re.match("^https://www\.getyourguide\.com/s/\?q=(.*)&lc=l[\d]+$", strCityPage1Url).group(1))
#get the product urls recorded in the db for the given strCityPage1Url city
lstStrProductUrl = self.db.fetchallProductUrlByCityUrl(strCityPage1Url=strCityPage1Url)
for strProductUrl in lstStrProductUrl:
#check whether the product has already been downloaded
if not self.db.checkProductIsGot(strProductUrl=strProductUrl):
time.sleep(random.randint(5,8)) #sleep random time
try:
self.driver.get(strProductUrl)
#parse the product page
self.parseProductPage(strProductUrl=strProductUrl, strCityName=strCityName)
#update the product record in the db as obtained (isGot = 1)
#self.db.updateProductStatusIsGot(strProductUrl=strProductUrl)
except Exception as e:
logging.warning(str(e))
logging.warning("selenium driver crashed. skip get product: %s"%strProductUrl)
self.restartDriver() #restart
#show progress
logging.info("進度: %d/100"%len(self.lstDicParsedProductJson))
#write to json
if len(self.lstDicParsedProductJson) == 100:
strJsonFileName = "%d_product.json"%(self.intProductJsonIndex*100)
strProductJsonFilePath = self.fileUtil.getPackageResourcePath(strPackageName="findfine_crawler.resource.parsed_json.gyg", strResourceName=strJsonFileName)
self.ffUtil.writeObjectToJsonFile(dicData=self.lstDicParsedProductJson, strJsonFilePath=strProductJsonFilePath)
self.intProductJsonIndex = self.intProductJsonIndex+1
self.lstDicParsedProductJson = []
#convert duration information
def convertDurationStringToHourInt(self, strDurtation=None):
intDefaultDuration = 1
if not strDurtation or ("hour" not in strDurtation and "day" not in strDurtation):
return intDefaultDuration
else:
intTotalDurationHour = 0
mDurationHour = re.search("([\d\.]+) hour", strDurtation)
mDurationDay = re.search("([\d\.]+) day", strDurtation)
if mDurationHour:
intDurationHour = int(float(mDurationHour.group(1)))
intTotalDurationHour = intTotalDurationHour + intDurationHour
if mDurationDay:
intDurationDay = int(float(mDurationDay.group(1)))
intTotalDurationHour = intTotalDurationHour + (intDurationDay*8)
return intTotalDurationHour | bsd-3-clause | -8,451,980,319,109,748,000 | 49.605263 | 170 | 0.640837 | false | 3.405579 | false | false | false |
LarryHillyer/PoolHost | PoolHost/env/Lib/site-packages/django/core/handlers/base.py | 96 | 10769 | from __future__ import unicode_literals
import logging
import sys
import types
import warnings
from django.conf import settings
from django.core import signals
from django.core.exceptions import ImproperlyConfigured, MiddlewareNotUsed
from django.db import connections, transaction
from django.urls import get_resolver, get_urlconf, set_urlconf
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.module_loading import import_string
from .exception import (
convert_exception_to_response, get_exception_response,
handle_uncaught_exception,
)
logger = logging.getLogger('django.request')
class BaseHandler(object):
def __init__(self):
self._request_middleware = None
self._view_middleware = None
self._template_response_middleware = None
self._response_middleware = None
self._exception_middleware = None
self._middleware_chain = None
def load_middleware(self):
"""
Populate middleware lists from settings.MIDDLEWARE (or the deprecated
MIDDLEWARE_CLASSES).
Must be called after the environment is fixed (see __call__ in subclasses).
"""
self._request_middleware = []
self._view_middleware = []
self._template_response_middleware = []
self._response_middleware = []
self._exception_middleware = []
if settings.MIDDLEWARE is None:
warnings.warn(
"Old-style middleware using settings.MIDDLEWARE_CLASSES is "
"deprecated. Update your middleware and use settings.MIDDLEWARE "
"instead.", RemovedInDjango20Warning
)
handler = convert_exception_to_response(self._legacy_get_response)
for middleware_path in settings.MIDDLEWARE_CLASSES:
mw_class = import_string(middleware_path)
try:
mw_instance = mw_class()
except MiddlewareNotUsed as exc:
if settings.DEBUG:
if six.text_type(exc):
logger.debug('MiddlewareNotUsed(%r): %s', middleware_path, exc)
else:
logger.debug('MiddlewareNotUsed: %r', middleware_path)
continue
if hasattr(mw_instance, 'process_request'):
self._request_middleware.append(mw_instance.process_request)
if hasattr(mw_instance, 'process_view'):
self._view_middleware.append(mw_instance.process_view)
if hasattr(mw_instance, 'process_template_response'):
self._template_response_middleware.insert(0, mw_instance.process_template_response)
if hasattr(mw_instance, 'process_response'):
self._response_middleware.insert(0, mw_instance.process_response)
if hasattr(mw_instance, 'process_exception'):
self._exception_middleware.insert(0, mw_instance.process_exception)
else:
handler = convert_exception_to_response(self._get_response)
for middleware_path in reversed(settings.MIDDLEWARE):
middleware = import_string(middleware_path)
try:
mw_instance = middleware(handler)
except MiddlewareNotUsed as exc:
if settings.DEBUG:
if six.text_type(exc):
logger.debug('MiddlewareNotUsed(%r): %s', middleware_path, exc)
else:
logger.debug('MiddlewareNotUsed: %r', middleware_path)
continue
if mw_instance is None:
raise ImproperlyConfigured(
'Middleware factory %s returned None.' % middleware_path
)
if hasattr(mw_instance, 'process_view'):
self._view_middleware.insert(0, mw_instance.process_view)
if hasattr(mw_instance, 'process_template_response'):
self._template_response_middleware.append(mw_instance.process_template_response)
if hasattr(mw_instance, 'process_exception'):
self._exception_middleware.append(mw_instance.process_exception)
handler = convert_exception_to_response(mw_instance)
# We only assign to this when initialization is complete as it is used
# as a flag for initialization being complete.
self._middleware_chain = handler
def make_view_atomic(self, view):
non_atomic_requests = getattr(view, '_non_atomic_requests', set())
for db in connections.all():
if db.settings_dict['ATOMIC_REQUESTS'] and db.alias not in non_atomic_requests:
view = transaction.atomic(using=db.alias)(view)
return view
def get_exception_response(self, request, resolver, status_code, exception):
return get_exception_response(request, resolver, status_code, exception, self.__class__)
def get_response(self, request):
"""Return an HttpResponse object for the given HttpRequest."""
# Setup default url resolver for this thread
set_urlconf(settings.ROOT_URLCONF)
response = self._middleware_chain(request)
# This block is only needed for legacy MIDDLEWARE_CLASSES; if
# MIDDLEWARE is used, self._response_middleware will be empty.
try:
# Apply response middleware, regardless of the response
for middleware_method in self._response_middleware:
response = middleware_method(request, response)
# Complain if the response middleware returned None (a common error).
if response is None:
raise ValueError(
"%s.process_response didn't return an "
"HttpResponse object. It returned None instead."
% (middleware_method.__self__.__class__.__name__))
except Exception: # Any exception should be gathered and handled
signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, get_resolver(get_urlconf()), sys.exc_info())
response._closable_objects.append(request)
# If the exception handler returns a TemplateResponse that has not
# been rendered, force it to be rendered.
if not getattr(response, 'is_rendered', True) and callable(getattr(response, 'render', None)):
response = response.render()
if response.status_code == 404:
logger.warning(
'Not Found: %s', request.path,
extra={'status_code': 404, 'request': request},
)
return response
def _get_response(self, request):
"""
Resolve and call the view, then apply view, exception, and
template_response middleware. This method is everything that happens
inside the request/response middleware.
"""
response = None
if hasattr(request, 'urlconf'):
urlconf = request.urlconf
set_urlconf(urlconf)
resolver = get_resolver(urlconf)
else:
resolver = get_resolver()
resolver_match = resolver.resolve(request.path_info)
callback, callback_args, callback_kwargs = resolver_match
request.resolver_match = resolver_match
# Apply view middleware
for middleware_method in self._view_middleware:
response = middleware_method(request, callback, callback_args, callback_kwargs)
if response:
break
if response is None:
wrapped_callback = self.make_view_atomic(callback)
try:
response = wrapped_callback(request, *callback_args, **callback_kwargs)
except Exception as e:
response = self.process_exception_by_middleware(e, request)
# Complain if the view returned None (a common error).
if response is None:
if isinstance(callback, types.FunctionType): # FBV
view_name = callback.__name__
else: # CBV
view_name = callback.__class__.__name__ + '.__call__'
raise ValueError(
"The view %s.%s didn't return an HttpResponse object. It "
"returned None instead." % (callback.__module__, view_name)
)
# If the response supports deferred rendering, apply template
# response middleware and then render the response
elif hasattr(response, 'render') and callable(response.render):
for middleware_method in self._template_response_middleware:
response = middleware_method(request, response)
# Complain if the template response middleware returned None (a common error).
if response is None:
raise ValueError(
"%s.process_template_response didn't return an "
"HttpResponse object. It returned None instead."
% (middleware_method.__self__.__class__.__name__)
)
try:
response = response.render()
except Exception as e:
response = self.process_exception_by_middleware(e, request)
return response
def process_exception_by_middleware(self, exception, request):
"""
Pass the exception to the exception middleware. If no middleware
return a response for this exception, raise it.
"""
for middleware_method in self._exception_middleware:
response = middleware_method(request, exception)
if response:
return response
raise
def handle_uncaught_exception(self, request, resolver, exc_info):
"""Allow subclasses to override uncaught exception handling."""
return handle_uncaught_exception(request, resolver, exc_info)
def _legacy_get_response(self, request):
"""
Apply process_request() middleware and call the main _get_response(),
if needed. Used only for legacy MIDDLEWARE_CLASSES.
"""
response = None
# Apply request middleware
for middleware_method in self._request_middleware:
response = middleware_method(request)
if response:
break
if response is None:
response = self._get_response(request)
return response
| gpl-3.0 | -3,024,721,698,968,874,500 | 42.076 | 107 | 0.597734 | false | 5.015836 | false | false | false |
wkcn/mobula | tests/test_ops/test_mul.py | 1 | 1768 | import mobula as M
import mobula.layers as L
import numpy as np
def test_multiply():
N, C, H, W = 2,3,4,5
a = np.random.random((N, C, H, W))
b = np.random.random((N, C, H, W))
[la, lb] = L.Data([a,b])
w = la * lb
w.reshape()
assert w.Y.shape == a.shape
w.forward()
w.dY = np.random.random(w.Y.shape)
w.backward()
assert np.allclose(np.multiply(a, b), w.Y)
assert np.allclose(w.dX[0], np.multiply(w.dY, w.X[1]))
assert np.allclose(w.dX[1], np.multiply(w.dY, w.X[0]))
def test_mul():
N, C, H, W = 2,3,4,5
a = np.random.random((N, C, H, W))
data = L.Data(a)
l = data * 3
assert type(l) == L.MultiplyConstant
assert np.allclose(a * 3, l.eval())
l.dY = np.random.random(l.Y.shape)
l.backward()
assert np.allclose(l.dX, 3 * l.dY)
l = 3 * data
assert type(l) == L.MultiplyConstant
assert np.allclose(a * 3, l.eval())
l.dY = np.random.random(l.Y.shape)
l.backward()
assert np.allclose(l.dX, 3 * l.dY)
def test_matmul():
R, N, K = 3,4,5
a = np.random.random((R, N))
b = np.random.random((N, K))
[la, lb] = L.Data([a,b])
l = L.MatMul([la, lb])
assert np.allclose(l.eval(), np.dot(a, b))
l.dY = np.random.random(l.Y.shape)
l.backward()
assert np.allclose(l.dX[0], np.dot(l.dY, b.T))
assert np.allclose(l.dX[1], np.dot(a.T, l.dY))
# test constant
lac = M.dot(la, b)
lbc = M.dot(a, lb)
assert np.allclose(lac.eval(), np.dot(a, b))
assert np.allclose(lbc.eval(), np.dot(a, b))
lac.dY = np.random.random(lac.Y.shape)
lbc.dY = np.random.random(lbc.Y.shape)
lac.backward()
lbc.backward()
assert np.allclose(lac.dX, np.dot(lac.dY, b.T))
assert np.allclose(lbc.dX, np.dot(a.T, lbc.dY))
| mit | 4,058,628,900,790,961,700 | 29.482759 | 58 | 0.567873 | false | 2.392422 | false | false | false |
alej0varas/django-o2o_tagging | o2o_tagging/tests/tests_views.py | 1 | 4834 | from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.test import TestCase
# from mock import call
from mock_django.http import MockHttpRequest
from mock_django.signals import mock_signal_receiver
from ..models import O2OTag
from ..signals import o2o_tags_created
from ..views import TagCreateView
from ..views import TagsCreateView
from .models import Tagged
from .models import TaggedIn
from .models import Tagger
class TagCreateViewTest(TestCase):
def setUp(self):
self.tagger = Tagger.objects.create()
self.tagger_content_type = ContentType.objects.get_for_model(Tagger)
self.tagged = Tagged.objects.create()
self.tagged_content_type = ContentType.objects.get_for_model(Tagged)
self.tagged_in = TaggedIn.objects.create()
self.tagged_in_content_type = ContentType.objects.get_for_model(
TaggedIn)
self.data = {'tagged_object_id': self.tagged.pk,
'tagged_content_type': self.tagged_content_type.pk,
'tagged_in_object_id': self.tagged_in.pk,
'tagged_in_content_type': self.tagged_in_content_type.pk,
}
self.url = reverse('o2o_taggin_tag_create')
def test_tag_create(self):
request = MockHttpRequest(POST=self.data)
request.user = self.tagger
response = TagCreateView.as_view()(request)
tag = O2OTag.objects.get()
self.assertEqual(201, response.status_code)
self.assertEqual(self.tagger, tag.tagger)
self.assertEqual(self.tagged, tag.tagged)
self.assertEqual(self.tagged_in, tag.tagged_in)
def test_tag_create__form_invalid(self):
request = MockHttpRequest(POST={})
request.user = self.tagger
response = TagCreateView.as_view()(request)
tags = O2OTag.objects.all()
self.assertEqual(400, response.status_code)
self.assertEqual(0, tags.count())
class TagsCreateViewTest(TestCase):
def setUp(self):
self.tagger_content_type = ContentType.objects.get_for_model(Tagger)
self.tagged_content_type = ContentType.objects.get_for_model(Tagged)
self.tagged_in_content_type = ContentType.objects.get_for_model(
TaggedIn)
self.tagger = Tagger.objects.create()
self.tagged = Tagged.objects.create()
self.tagged1 = Tagged.objects.create()
self.tagged_in = TaggedIn.objects.create()
self.data = {
'form-TOTAL_FORMS': u'2',
'form-INITIAL_FORMS': u'0',
'form-MAX_NUM_FORMS': u'',
'form-0-tagged_object_id': self.tagged.pk,
'form-0-tagged_content_type': self.tagged_content_type.pk,
'form-0-tagged_in_object_id': self.tagged_in.pk,
'form-0-tagged_in_content_type': self.tagged_in_content_type.pk,
'form-1-tagged_object_id': self.tagged1.pk,
'form-1-tagged_content_type': self.tagged_content_type.pk,
'form-1-tagged_in_object_id': self.tagged_in.pk,
'form-1-tagged_in_content_type': self.tagged_in_content_type.pk,
}
self.url = reverse('o2o_taggin_tag_create_multiple')
def test_create__tagger_is_request_user(self):
request = MockHttpRequest(POST=self.data)
request.user = self.tagger
response = TagsCreateView.as_view()(request)
tags = O2OTag.objects.all()
self.assertEqual(201, response.status_code)
self.assertEqual(2, tags.count())
for t in tags:
self.assertEqual(self.tagger, t.tagger)
def test_create__call_tags_created_signal(self):
from mock_django.http import MockHttpRequest
request = MockHttpRequest(POST=self.data)
request.user = self.tagger
with mock_signal_receiver(o2o_tags_created) as tags_created_receiver:
TagsCreateView.as_view()(request)
self.assertTrue(tags_created_receiver.called)
# this assertion is failing but should be correct
# self.assertEqual(tags_created_receiver.call_args_list, [
# call(signal=o2o_tags_created, sender=TagsCreateView, instances=tags),
# ])
def test_tag_create__form_invalid(self):
data = {
'form-TOTAL_FORMS': u'2',
'form-INITIAL_FORMS': u'0',
'form-MAX_NUM_FORMS': u'',
'form-0-tagged_object_id': self.tagged.pk,
'form-0-tagged_content_type': self.tagged_content_type.pk,
}
request = MockHttpRequest(POST=data)
request.user = self.tagger
response = TagsCreateView.as_view()(request)
tags = O2OTag.objects.all()
self.assertEqual(400, response.status_code)
self.assertEqual(0, tags.count())
| gpl-3.0 | -1,036,438,399,070,563,200 | 37.365079 | 87 | 0.636947 | false | 3.637321 | true | false | false |
almccon/mapgardening | blankspot_stats.py | 1 | 1366 | #! /usr/bin/env python
"""
Calculate statistics for each study area, and prints results to stdout.
All it prints is the number of blankspots, the number of v1 nodes,
and the number of total nodes. Since I am no longer storing the blankspot
information in the hist_point table itself, these stats are no longer very informative.
Currently, user_analysis.py does what this script used to do. It prints the "output_totals_*"
files which contain the stats for each study area by date.
"""
import MapGardening
import optparse
usage = "usage: %prog [options]"
p = optparse.OptionParser(usage)
p.add_option('--place', '-p',
default="all"
)
options, arguments = p.parse_args()
possible_tables = [
'blankspots_1000_b',
]
if options.place == "all":
places = MapGardening.get_all_places()
else:
placename = options.place
place = MapGardening.get_place(placename)
places = {placename: place}
MapGardening.init_logging()
for placename in places.keys():
print "printing blankspot info for", placename
MapGardening.init_db(places[placename]['dbname'])
for table in possible_tables:
nodetable = MapGardening.NodeTable(table) # Table may not exist, but object will still be created
nodetable.get_blankspot_stats()
MapGardening.disconnect_db() | mit | -1,744,390,772,079,783,700 | 26.34 | 105 | 0.686676 | false | 3.585302 | false | false | false |
theodric/nsclock | dev/oled/nsoled1.py | 1 | 8141 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#https://github.com/theodric/nsclock
#theodric 20170717
import os
import sys
import time
import requests
import xmltodict
import argparse
from collections import OrderedDict
from demo_opts import get_device
from luma.core.legacy import show_message
from luma.core.legacy.font import proportional, SINCLAIR_FONT
## This script makes use of the NS API, documented extensively here:
## http://www.ns.nl/en/travel-information/ns-api
## The below imports settings.py, which contains your NS login and API key.
## You can sign up for this key at http://www.ns.nl/ews-aanvraagformulier/
## settings.py must be created in the same directory as this script. Format:
############################################################################
## username = '[email protected]'
## apikey = 'abyC7M5QqRUXrt1ttyf4rtD-mttw4nkn0zzl35rkGJnMj1zznIppl3'
############################################################################
try:
import settings
except ImportError:
print('Copy settings_example.py to settings.py and set the configuration to your own credentials')
sys.exit(1)
## CONFIGURABLE ITEM
## Hardcode your default DESTINATION stations here.
## Look up what your destination stations' long and short names
## are by searching the official station list:
## http://webservices.ns.nl/ns-api-stations-v2
#startStation = not configured here!
destStation1 = "Den Helder"
destStation2 = "Schagen"
## There are two destinations that get me to my target station,
## so I'm checking for both, but you can just uncomment the
## next line if you only need to watch one destination.
#destStation2 = destStation1
## CONFIGURABLE ITEM
## the station=<VALUE> at the end of the URL is your start station
## Look up the short code for your station in the above-referenced
## station list.
## This block retrieves the current station list to /tmp/trains.xml
response = requests.get('http://webservices.ns.nl/ns-api-avt?station=asd',
auth=requests.auth.HTTPBasicAuth(
settings.username,
settings.apikey), stream=True)
with open('/tmp/trains.xml', 'wb') as handle:
for block in response.iter_content(1024):
handle.write(block)
## Define the OLED device to write to using the setup routine
device = get_device()
msg = "a"
show_message(device, msg, fill="white", font=proportional(SINCLAIR_FONT))
time.sleep(1)
## The below block reads the just-written XML file
with open('/tmp/trains.xml') as fd:
doc = xmltodict.parse(fd.read(), xml_attribs=True)
iterCount = 0
numDisplayed = 0
## Figure out how many trains are departing from your start station at
## the time the script is run.
departingTrainsCount = len(doc['ActueleVertrekTijden']['VertrekkendeTrein'])
## Then use that to feed the iterator so we don't have an
## underrun or miss any.
if args.content:
for iterCount in range(departingTrainsCount):
## I'm only grabbing the end station, departure time, and
## departure platform at start station to display.
## There are more things you can retrieve-- paw through trains.xml
## +read xmltodict docs to understand how to retrieve them.
## I found this page useful:
## http://omz-software.com/pythonista/docs/ios/xmltodict.html
dest = doc['ActueleVertrekTijden']['VertrekkendeTrein'][iterCount]['EindBestemming']
time = doc['ActueleVertrekTijden']['VertrekkendeTrein'][iterCount]['VertrekTijd']
plat = doc['ActueleVertrekTijden']['VertrekkendeTrein'][iterCount]['VertrekSpoor']['#text']
spc = " "
print(dest + spc + time[11:16] + spc + plat) ## print each row on CLI
## CONFIGURABLE ITEM
## Currently the script outputs the next four trains matching your
## destination. Reduce the max value on both below checks of
## numDisplayed to get fewer results.
if (dest == destStation1 and numDisplayed <= 3) or (dest == destStation2 and numDisplayed <= 3):
## Shortening names to 3-letter codes to fit screen.
## I *may* automate and elegantize this later.
if dest == "Schagen":
dest = "SGN"
print("!! HIT") ## flagging matches on CLI for debug
elif dest == "Den Helder":
dest = "HLD"
print("!! HIT") ## flagging matches on CLI for debug
## save each extracted row to its own variable because
## I can't quite grasp how to do this better.
if numDisplayed == 0:
## chars [11:16] is where the time lives.
## the raw var contains e.g.
## 2017-07-01T21:07:00+0200
disp = dest + spc + time[11:16] + spc + "Spoor " + plat
elif numDisplayed == 1:
disp2 = dest + spc + time[11:16] + spc + "Spoor " + plat
elif numDisplayed == 2:
disp3 = dest + spc + time[11:16] + spc + "Spoor " + plat
elif numDisplayed == 3:
disp4 = dest + spc + time[11:16] + spc + "Spoor " + plat
numDisplayed += 1
#initialize screen buffer var "text" without displaying anything
text = PapirusTextPos(False, rotation=args.rotation)
#Append the first bit of text to the screen buffer, top centered.
#X position 12, Y position 0, font size 13, Id="Header"
#text.AddText("Text", Xpos, Xpos, fontSize, Id="freeformElementID")
text.AddText("Vertrek van de treinen", 12, 0, 13, Id="Header")
text.AddText(disp, 0, 19, 18, Id="opt1")
## The next three stanzas are merely an attempt to gracefully
## handle fewer than the maximum allowed number of results.
## The results, if they exist, are presented roughly centered
## in a stack starting from the top, as you can see from the
## increasing Y values in text.AddText.
try:
disp2
except NameError:
disp2_exists = False
else:
disp2_exists = True
if disp2_exists == True:
text.AddText(disp2, 0, 39, 18, Id="opt2")
try:
disp3
except NameError:
disp3_exists = False
else:
disp3_exists = True
if disp3_exists == True:
text.AddText(disp3, 0, 59, 18, Id="opt3")
try:
disp4
except NameError:
disp4_exists = False
else:
disp4_exists = True
if disp4_exists == True:
text.AddText(disp4, 0, 79, 18, Id="opt4")
#if __name__ == "__main__":
# try:
# main()
# except KeyboardInterrupt:
# pass
| lgpl-3.0 | -7,173,653,490,440,075,000 | 47.171598 | 178 | 0.512959 | false | 4.37453 | false | false | false |
jiakai-lian/XcodeProjectAutoConfigurator | xcode_auto_configurator.py | 1 | 2710 | #The MIT License (MIT)
#
#Copyright (c) 2015 Jiakai Lian
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
#!/usr/bin/python
from mod_pbxproj import XcodeProject
import sys
import json
#print 'Number of arguments:', len(sys.argv), 'arguments.'
#print 'Argument List:', str(sys.argv)
class Configuration:
def __init__(self,jsonFileName):
self.jsonFileName = jsonFileName
#find config name
self.name = jsonFileName.split(".")[0].lower()
#load json data
with open(jsonFileName) as data_file:
self.jsonContent = json.load(data_file)
if len(sys.argv) < 2:
raise Exception("need project.pbxproj file path")
#read the file path
filePath = sys.argv[1]
if len(sys.argv) > 2:
jsonFiles = list(sys.argv)
del jsonFiles[0:2]
else:
jsonFiles = ["debug.json","release.json"]
print jsonFiles
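#a hypothetical invocation from a shell (file names are examples only):
#   python xcode_auto_configurator.py MyApp.xcodeproj/project.pbxproj debug.json release.json
#each json file maps build-setting keys to values and is applied to the build
#configuration whose name matches the file name (e.g. "debug", "release")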
#create configuration objects
dictOfConfig = dict();
for file in jsonFiles:
config = Configuration(file)
dictOfConfig[config.name] = config
#load project file and create a backup
project = XcodeProject.Load(filePath)
project.backup()
rootObject = project["rootObject"]
projectObject = project["objects"][rootObject]["buildConfigurationList"]
for id in project["objects"][projectObject]["buildConfigurations"]:
name = project["objects"][id]["name"].lower()
#if this configuration need to be changed
if dictOfConfig[name] is not None:
entry = project["objects"][id]["buildSettings"]
#for each setting in the json, apply to the target entry
for key in dictOfConfig[name].jsonContent:
entry[key] = dictOfConfig[name].jsonContent[key]
project.save()
print "Auto Configuration Complete" | mit | -900,034,453,004,515,700 | 31.662651 | 79 | 0.726568 | false | 3.967789 | true | false | false |
jannon/django-allauth-api | src/allauth_api/account/rest_framework/adapter.py | 1 | 7198 | from email.mime.image import MIMEImage
from email.utils import make_msgid
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.template.loader import render_to_string
from django.template import TemplateDoesNotExist
from django.core.exceptions import ImproperlyConfigured
from django.core.mail import EmailMultiAlternatives
from django.conf import settings
from PIL import Image
from allauth.account import app_settings
from allauth.utils import get_user_model
from allauth_api.settings import allauth_api_settings
class AccountAdapterMixin(object):
def new_user_response_data(self, user, request=None):
serializer_class = self.new_user_serializer_class(user)
return_data = None
if serializer_class:
return_data = serializer_class(instance=user, context={'request': request}).data
return return_data
def new_user_serializer_class(self, user):
return None
def clean_username(self, username, shallow=False):
"""
Validates the username. You can hook into this if you want to
(dynamically) restrict what usernames can be chosen. This copies most of the code from the django-allauth
DefaultAccountAdapter, but adds support for the CASE_INSENTIVE_IDS setting because the PRESERVE_USERNAME_CASING
setting in allauth does not allow you to preserve the username case while still checking it in a case-insensitive way
"""
for validator in app_settings.USERNAME_VALIDATORS:
validator(username)
# TODO: Add regexp support to USERNAME_BLACKLIST
username_blacklist_lower = [ub.lower()
for ub in app_settings.USERNAME_BLACKLIST]
if username.lower() in username_blacklist_lower:
raise forms.ValidationError(
self.error_messages['username_blacklisted'])
# Skipping database lookups when shallow is True, needed for unique
# username generation.
if not shallow:
from .utils import filter_users_by_username
if filter_users_by_username(username).exists():
user_model = get_user_model()
username_field = app_settings.USER_MODEL_USERNAME_FIELD
error_message = user_model._meta.get_field(
username_field).error_messages.get('unique')
if not error_message:
error_message = self.error_messages['username_taken']
raise forms.ValidationError(error_message)
return username
def login(self, request, user):
super(AccountAdapterMixin, self).login(request, user)
return {'detail': 'User logged in.'}
def add_message(self, request, level, message_template, message_context=None, extra_tags=''):
if allauth_api_settings.USE_DJANGO_MESSAGES:
super(AccountAdapterMixin, self).add_message(request, level, message_template, message_context, extra_tags)
def email_confirmation_key(self, request):
return request.data.get('key', None)
def email_confirmation_response_data(self, confirmation):
return {'detail': '%s %s' % (confirmation.email_address.email, _("confirmed"))}
def reset_password_confirmation_data(self, request):
return {
'uidb36': request.data.get('uidb36', None),
'key': request.data.get('key', None),
'password1': request.data.get('password1', None),
'password2': request.data.get('password2', None),
}
def reset_password_confirmation_form_kwargs(self, request):
return {}
def reset_password_confirmation_response_data(self, user):
return {'detail': _("User password changed")}
class ImageKeyMixin(object):
"""
A mixin class for an account adapter that enables sending and receiving images for email validation
and password reset keys.
"""
def render_mail(self, template_prefix, email, context):
"""
Overrides to catch the prefixes for email confirmation and password reset and render html
emails with image-based keys
"""
if template_prefix not in allauth_api_settings.IMAGE_KEY_PREFIXES:
return super(ImageKeyMixin, self).render_mail(template_prefix, email, context)
# Create an image key
gc = allauth_api_settings.IMAGE_KEY_GENERATOR_CLASS
generator = gc()
key = self.get_key_from_context(template_prefix, context)
image = generator.create_image_key(key)
key_cid = make_msgid()
context['key_cid'] = key_cid[1:-1] # trim angle brackets
subject = render_to_string('{0}_subject.txt'.format(template_prefix),
context)
# remove superfluous line breaks
subject = " ".join(subject.splitlines()).strip()
subject = self.format_email_subject(subject)
bodies = {}
for ext in ['html', 'txt']:
try:
template_name = '{0}_message.{1}'.format(template_prefix, ext)
bodies[ext] = render_to_string(template_name,
context).strip()
except TemplateDoesNotExist:
# We require both html and text templates
raise ImproperlyConfigured('Both text and html templates must exist to use ImageKeyMixin')
msg = EmailMultiAlternatives(subject, bodies['txt'], settings.DEFAULT_FROM_EMAIL, [email])
msg.attach_alternative(bodies['html'], 'text/html')
img = MIMEImage(image.read())
img.add_header('Content-ID', key_cid)
img.add_header('Content-Disposition', 'inline')
# msg.attach('key.png', image.read(), 'image/png')
msg.attach(img)
image.close()
return msg
def get_key_from_context(self, template_prefix, context):
result = ""
if 'email_confirmation' in template_prefix:
result = context['key']
elif 'password_reset'in template_prefix:
result = context['password_reset_url'].split('/')[-2]
return result
def reset_password_confirmation_data(self, request):
data = {
'password1': request.data.get('password1', None),
'password2': request.data.get('password2', None),
}
key_image = request.data.get('key', None)
if key_image:
try:
image = Image.open(key_image)
key_text = image.text.get('key', None)
image.close()
except:
key_text = key_image # Fall back on single text key
if key_text:
i = key_text.index('-')
data['uidb36'] = key_text[0:i]
data['key'] = key_text[i + 1:]
return data
def email_confirmation_key(self, request):
key = None
key_image = request.data.get('key', None)
if key_image:
try:
key = Image.open(key_image).text.get('key', None)
key_image.close()
except:
key = key_image # Fall back on text key
return key
| bsd-2-clause | 1,554,501,400,920,103,400 | 40.367816 | 123 | 0.620033 | false | 4.281975 | false | false | false |
liguangyulgy/cupQrSys | common/redis.py | 1 | 4521 | __author__ = 'LiGuangyu'
import aioredis
import asyncio,functools
from concurrent.futures import CancelledError
import logging;logging.basicConfig(level=logging.INFO)
from common.comm import singleton
import time
def handler(loop, context):
'''
Originally intended to register an exception handler on the event loop, but never got it working; to be revisited later.
'''
print('in exception Handler')
print(context)
class baseConnectPool(object):
_pool1 = None
_pool2 = None
pool = None
@classmethod
async def init(cls,loop=None,addr='127.0.0.1',port=6379,password=None):
# if not loop:
# loop = asyncio.get_event_loop()
# loop.set_exception_handler(handler=handler)
try:
baseConnectPool._pool1 = await aioredis.create_pool((addr,port),loop=loop,password=password,encoding='utf-8',minsize=1,maxsize=1)
baseConnectPool._pool2 = await aioredis.create_pool((addr,port),loop=loop,password=password,encoding='utf-8',minsize=1,maxsize=1)
baseConnectPool.pool = baseConnectPool._pool1
print('hello')
except ConnectionRefusedError as e:
print('Redis Cannot access')
raise e
except Exception as e:
print('Error')
print(e)
pass
@classmethod
def tryCatch(cls,func):
@functools.wraps(func)
async def wapper(*args, **kwargs):
try:
return await func(*args, **kwargs)
except aioredis.errors.ReplyError as err:
'''Entered when redis returns an error reply'''
print('Reply Error Catched')
raise err
except CancelledError as err:
'''Entered when a single connection is dropped.
With a single connection, high-availability handling could be done here;
with a connection pool, the check is done below instead.'''
print('hello world')
except ConnectionRefusedError as err:
'''Entered when reconnection fails.
With a connection pool, do the high-availability handling here:
check whether pool.size (the number of connections in the pool) is 0 to decide whether a single Redis instance is down.'''
print(cls._pool1)
print(cls._pool2)
print('connect Refused')
except Exception as err:
print(type(err))
print(err)
return wapper
class redis(baseConnectPool):
ORDERNOKEY = 'ORDERNOKEY'
def __init__(self):
pass
@classmethod
async def init(cls,loop=None,addr='127.0.0.1',port=6379,password=None):
a = super()
print(a)
await super().init(loop=loop,addr=addr,port=port,password=password)
@classmethod
@baseConnectPool.tryCatch
async def incr(cls,key):
with await cls.pool as rdsConn:
return await rdsConn.incr(key)
@classmethod
@baseConnectPool.tryCatch
async def set(cls,key,value):
with await cls.pool as rdsConn:
return await rdsConn.set(key,value)
@classmethod
@baseConnectPool.tryCatch
async def get(cls,key):
with await cls.pool as rdsConn:
return await rdsConn.get(key)
@classmethod
@baseConnectPool.tryCatch
async def getOrderId(cls):
with await cls.pool as rdsConn:
tr = rdsConn.multi_exec()
tr.setnx(cls.ORDERNOKEY, 1000000000000)
tr.incr(cls.ORDERNOKEY)
rev = await tr.execute()
if rev[1] > 9000000000000:
await rdsConn.incrby(cls.ORDERNOKEY, -8000000000000)
return rev[1]
@classmethod
@baseConnectPool.tryCatch
async def getTime(cls):
with await cls.pool as rdsConn:
tt = await rdsConn.time()
x = time.localtime(tt)
rev = time.strftime('%Y%m%d%H%M%S', x)
return rev
@classmethod
@baseConnectPool.tryCatch
async def getTime1(cls):
with await cls.pool as rdsConn:
return await rdsConn.time()
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.set_exception_handler(handler=handler)
async def test():
await redis.init()
a = 0
print(await redis.set('test',0))
print(await redis.getTime1())
start_time = time.time()
while True:
a+=1
await redis.incr('test')
print('Time Used %s' % (time.time() - start_time))
loop.run_until_complete(test())
| gpl-3.0 | 1,745,043,486,204,156,400 | 28.143836 | 141 | 0.583314 | false | 3.371632 | false | false | false |
ashutrix03/inteygrate_flaskapp-master | build/lib/yowsup/layers/protocol_groups/layer.py | 18 | 6769 | from yowsup.layers import YowLayer, YowLayerEvent, YowProtocolLayer
from yowsup.layers.protocol_iq.protocolentities import ErrorIqProtocolEntity
from yowsup.layers.protocol_iq.protocolentities.iq_result import ResultIqProtocolEntity
from .protocolentities import *
import logging
logger = logging.getLogger(__name__)
class YowGroupsProtocolLayer(YowProtocolLayer):
HANDLE = (
CreateGroupsIqProtocolEntity,
InfoGroupsIqProtocolEntity,
LeaveGroupsIqProtocolEntity,
ListGroupsIqProtocolEntity,
SubjectGroupsIqProtocolEntity,
ParticipantsGroupsIqProtocolEntity,
AddParticipantsIqProtocolEntity,
PromoteParticipantsIqProtocolEntity,
DemoteParticipantsIqProtocolEntity,
RemoveParticipantsIqProtocolEntity
)
def __init__(self):
handleMap = {
"iq": (None, self.sendIq),
"notification": (self.recvNotification, None)
}
super(YowGroupsProtocolLayer, self).__init__(handleMap)
def __str__(self):
return "Groups Iq Layer"
def sendIq(self, entity):
if entity.__class__ in self.__class__.HANDLE:
if entity.__class__ == SubjectGroupsIqProtocolEntity:
self._sendIq(entity, self.onSetSubjectSuccess, self.onSetSubjectFailed)
elif entity.__class__ == CreateGroupsIqProtocolEntity:
self._sendIq(entity, self.onCreateGroupSuccess, self.onCreateGroupFailed)
elif entity.__class__ == ParticipantsGroupsIqProtocolEntity:
self._sendIq(entity, self.onGetParticipantsResult)
elif entity.__class__ == AddParticipantsIqProtocolEntity:
self._sendIq(entity, self.onAddParticipantsSuccess, self.onAddParticipantsFailed)
elif entity.__class__ == PromoteParticipantsIqProtocolEntity:
self._sendIq(entity, self.onPromoteParticipantsSuccess, self.onPromoteParticipantsFailed)
elif entity.__class__ == DemoteParticipantsIqProtocolEntity:
self._sendIq(entity, self.onDemoteParticipantsSuccess, self.onDemoteParticipantsFailed)
elif entity.__class__ == RemoveParticipantsIqProtocolEntity:
self._sendIq(entity, self.onRemoveParticipantsSuccess, self.onRemoveParticipantsFailed)
elif entity.__class__ == ListGroupsIqProtocolEntity:
self._sendIq(entity, self.onListGroupsResult)
elif entity.__class__ == LeaveGroupsIqProtocolEntity:
self._sendIq(entity, self.onLeaveGroupSuccess, self.onLeaveGroupFailed)
elif entity.__class__ == InfoGroupsIqProtocolEntity:
self._sendIq(entity, self.onInfoGroupSuccess, self.onInfoGroupFailed)
else:
self.entityToLower(entity)
def onCreateGroupSuccess(self, node, originalIqEntity):
logger.info("Group create success")
self.toUpper(SuccessCreateGroupsIqProtocolEntity.fromProtocolTreeNode(node))
def onCreateGroupFailed(self, node, originalIqEntity):
logger.error("Group create failed")
self.toUpper(ErrorIqProtocolEntity.fromProtocolTreeNode(node))
def onSetSubjectSuccess(self, node, originalIqEntity):
logger.info("Group subject change success")
self.toUpper(ResultIqProtocolEntity.fromProtocolTreeNode(node))
def onSetSubjectFailed(self, node, originalIqEntity):
logger.error("Group subject change failed")
self.toUpper(ErrorIqProtocolEntity.fromProtocolTreeNode(node))
def onGetParticipantsResult(self, node, originalIqEntity):
self.toUpper(ListParticipantsResultIqProtocolEntity.fromProtocolTreeNode(node))
def onAddParticipantsSuccess(self, node, originalIqEntity):
logger.info("Group add participants success")
self.toUpper(SuccessAddParticipantsIqProtocolEntity.fromProtocolTreeNode(node))
def onRemoveParticipantsFailed(self, node, originalIqEntity):
logger.error("Group remove participants failed")
self.toUpper(ErrorIqProtocolEntity.fromProtocolTreeNode(node))
def onRemoveParticipantsSuccess(self, node, originalIqEntity):
logger.info("Group remove participants success")
self.toUpper(SuccessRemoveParticipantsIqProtocolEntity.fromProtocolTreeNode(node))
def onPromoteParticipantsFailed(self, node, originalIqEntity):
logger.error("Group promote participants failed")
self.toUpper(ErrorIqProtocolEntity.fromProtocolTreeNode(node))
def onPromoteParticipantsSuccess(self, node, originalIqEntity):
logger.info("Group promote participants success")
self.toUpper(ResultIqProtocolEntity.fromProtocolTreeNode(node))
def onDemoteParticipantsFailed(self, node, originalIqEntity):
logger.error("Group demote participants failed")
self.toUpper(ErrorIqProtocolEntity.fromProtocolTreeNode(node))
def onDemoteParticipantsSuccess(self, node, originalIqEntity):
logger.info("Group demote participants success")
self.toUpper(ResultIqProtocolEntity.fromProtocolTreeNode(node))
def onAddParticipantsFailed(self, node, originalIqEntity):
logger.error("Group add participants failed")
self.toUpper(FailureAddParticipantsIqProtocolEntity.fromProtocolTreeNode(node))
def onListGroupsResult(self, node, originalIqEntity):
self.toUpper(ListGroupsResultIqProtocolEntity.fromProtocolTreeNode(node))
def onLeaveGroupSuccess(self, node, originalIqEntity):
logger.info("Group leave success")
self.toUpper(SuccessLeaveGroupsIqProtocolEntity.fromProtocolTreeNode(node))
def onLeaveGroupFailed(self, node, originalIqEntity):
logger.error("Group leave failed")
self.toUpper(ErrorIqProtocolEntity.fromProtocolTreeNode(node))
def onInfoGroupSuccess(self, node, originalIqEntity):
logger.info("Group info success")
self.toUpper(InfoGroupsResultIqProtocolEntity.fromProtocolTreeNode(node))
def onInfoGroupFailed(self, node, originalIqEntity):
logger.error("Group info failed")
self.toUpper(ErrorIqProtocolEntity.fromProtocolTreeNode(node))
def recvNotification(self, node):
if node["type"] == "w:gp2":
if node.getChild("subject"):
self.toUpper(SubjectGroupsNotificationProtocolEntity.fromProtocolTreeNode(node))
elif node.getChild("create"):
self.toUpper(CreateGroupsNotificationProtocolEntity.fromProtocolTreeNode(node))
elif node.getChild("remove"):
self.toUpper(RemoveGroupsNotificationProtocolEntity.fromProtocolTreeNode(node))
elif node.getChild("add"):
self.toUpper(AddGroupsNotificationProtocolEntity.fromProtocolTreeNode(node))
| gpl-3.0 | 968,199,711,413,080,400 | 48.050725 | 105 | 0.724332 | false | 4.386909 | false | false | false |
substructural/deep-random-fields | modules/report.py | 1 | 13358 | #===================================================================================================
# report
#---------------------------------------------------------------------------------------------------
import inspect
import os
import os.path
import numpy
import labels
import output
from results import Images, Metrics, SegmentationResults
import ipdb
#---------------------------------------------------------------------------------------------------
def html( experiment_name,
epoch,
content,
page_foreground = '#000000',
page_background = '#ffffff',
image_table_foreground = '#ffffff',
image_table_background = '#000000' ):
return f'''
<html>
<style>
html
{{
width: 80%;
margin-left: auto;
margin-right: auto;
}}
body
{{
color: {page_foreground};
background-color: {page_background};
}}
table.metrictable
{{
width: 80%;
margin: 50px auto;
text-align: left;
}}
img
{{
margin: 25px 25px;
text-align: center;
vertical-align: center;
}}
table.imagetable
{{
width: 80%;
margin: 50px auto;
color: {image_table_foreground};
background-color: {image_table_background};
text-align: center;
}}
</style>
<body>
<h1>{experiment_name}</h1>
<h2>epoch: {epoch}</h2>
{content}
</body>
</html>
'''
#---------------------------------------------------------------------------------------------------
def source_section( source ):
return f'''
<hr>
<h2>definition</h2>
<pre>
{source}
</pre>
'''
#---------------------------------------------------------------------------------------------------
def metric_section(
metric_name,
statistics,
sample_images ):
value_table = statistics_table( statistics )
mean_image_table = image_table( 'mean ' + metric_name, sample_images.mean )
median_image_table = image_table( 'median ' + metric_name, sample_images.median )
minimum_image_table = image_table( 'minimum ' + metric_name, sample_images.minimum )
maximum_image_table = image_table( 'maximum ' + metric_name, sample_images.maximum )
return f'''
<hr>
<h2>{metric_name}</h2>
{value_table}
{mean_image_table}
{median_image_table}
{minimum_image_table}
{maximum_image_table}
'''
#---------------------------------------------------------------------------------------------------
def statistics_table( statistics ):
return f'''
<h3>overview</h3>
<table class="metrictable">
<tr> <th> metric </th> <th> value </th> </tr>
<tr> <td> mean </td> <td> {statistics.mean[0]:0.5} </td> </tr>
<tr> <td> median </td> <td> {statistics.median[0]:0.5} </td> </tr>
<tr> <td> minimum </td> <td> {statistics.minimum[0]:0.5} </td> </tr>
<tr> <td> maximum </td> <td> {statistics.maximum[0]:0.5} </td> </tr>
</table>
'''
#---------------------------------------------------------------------------------------------------
def image_table( table_name, sample_images ):
body = (
image_row( 'axial', sample_images[ 0 ] ) +
image_row( 'coronal', sample_images[ 1 ] ) +
image_row( 'sagittal', sample_images[ 2 ] ) )
return f'''
<h3>{table_name}</h3>
<table class="imagetable">
{body}
</table>
'''
def image_row( label_for_row, image_paths ):
image_cells = ''.join( [ image_cell( p ) for p in image_paths ] )
return f'''
<tr>
<td>{label_for_row}</td>
{image_cells}
</tr>
'''
def image_cell( image_path ):
return f'''
<td><image src="{image_path}"></td> '''
#---------------------------------------------------------------------------------------------------
def cost_table_row( epoch, phase, statistic ):
return f'''
<tr>
<td> {epoch} </td>
<td> {phase} </td>
<td> {statistic.mean:0.3} </td>
<td> {statistic.median:0.3} </td>
<td> {statistic.minimum:0.3} </td>
<td> {statistic.maximum:0.3} </td>
<td> {statistic.deviation:0.3} </td>
<td> {statistic.change_from_base:0.3} </td>
</tr>
'''
def cost_table( costs_per_epoch ):
rows = ''.join( [
cost_table_row( epoch, phase, cost )
for epoch, costs_per_phase in enumerate( costs_per_epoch )
for phase, cost in enumerate( costs_per_phase ) ] )
return f'''
<hr>
<h2>costs</h2>
<table class="metrictable">
<tr>
<th> epoch </th>
<th> phase </th>
<th> mean </th>
<th> median </th>
<th> minimum </th>
<th> maximum </th>
<th> deviation </th>
<th> change </th>
</tr>
{rows}
</table>
'''
#---------------------------------------------------------------------------------------------------
class SourceData( object ):
@staticmethod
def representative_volumes_for_metrics( metrics, dataset ):
indices = set( index for metric in metrics for statistic, index in metric )
volumes = { i: dataset.validation_set[ i ].read_volume() for i in indices }
return volumes
@staticmethod
def representative_distributions_and_offsets_for_metrics( metrics, results ):
indices = set( index for metric in metrics for statistic, index in metric )
distribution_and_offsets = {
i: results.predicted_distribution( i ) for i in indices }
for i in distribution_and_offsets:
_, offset = distribution_and_offsets[ i ]
assert offset[ 0 ] == i
distributions = { i : d for i, (d, o) in distribution_and_offsets.items() }
offsets = { i : o for i, (d, o) in distribution_and_offsets.items() }
return distributions, offsets
@staticmethod
def image_data_from_volumes( volumes, offsets, reconstructed_shape, margin ):
def extract( i ):
return Images.extract( volumes[i].images, offsets[i][1:], reconstructed_shape, margin )
return { i : extract( i ) for i in volumes }
@staticmethod
def reference_labels_from_volumes( volumes, offsets, reconstructed_shape, margin ):
def extract( i ):
return Images.extract( volumes[i].labels, offsets[i][1:], reconstructed_shape, margin )
return { i : extract( i ) for i in volumes }
@staticmethod
def predicted_labels_from_distributions( distributions ):
return { i : labels.dense_volume_distribution_to_dense_volume_indices( d )
for i, d in distributions.items() }
@staticmethod
def costs_for_epoch( epoch, archive ):
with archive.read_array_output( 'costs', epoch = epoch ) as data:
costs = data[ 'arr_0' ]
return costs
#---------------------------------------------------------------------------------------------------
class Report( object ):
@staticmethod
def results_only( epoch, experiment ):
definition = experiment.definition
results = SegmentationResults(
experiment.output_path, definition.experiment_id, epoch, definition.label_count )
results.restore( experiment.dataset, definition.sample_parameters, experiment.log )
results.persist()
@staticmethod
def generate( epoch, experiment ):
definition = experiment.definition
results = SegmentationResults(
experiment.output_path, definition.experiment_id, epoch, definition.label_count )
results.restore( experiment.dataset, definition.sample_parameters, experiment.log )
Report.write( results, experiment )
@staticmethod
def write( results, experiment ):
log = experiment.log
log.subsection( 'writing report' )
epoch = results.epoch
class_count = results.class_count
archive = results.archive
dataset = experiment.dataset
sample_parameters = experiment.definition.sample_parameters
reconstructed_shape = sample_parameters.reconstructed_shape
margin = sample_parameters.window_margin
log.entry( 'collating metrics' )
dice = results.statistics_for_mean_dice_score_per_volume
dice_per_class = [
results.statistics_for_dice_score_for_class( i )
for i in range( class_count ) ]
metrics = [ dice ] + dice_per_class
log.entry( 'loading data' )
volumes = SourceData.representative_volumes_for_metrics( metrics, dataset )
distributions, offsets = SourceData.representative_distributions_and_offsets_for_metrics(
metrics,
results )
log.entry( 'extracting data')
image_data = SourceData.image_data_from_volumes(
volumes, offsets, reconstructed_shape, margin )
reference = SourceData.reference_labels_from_volumes(
volumes, offsets, reconstructed_shape, margin )
predicted = SourceData.predicted_labels_from_distributions( distributions )
log.entry( 'generating source section' )
source_code = inspect.getsource( type( experiment.definition ) )
source = source_section( source_code )
log.entry( 'generating cost section' )
cost_data = SourceData.costs_for_epoch( epoch, results.archive )
costs = Metrics.costs_over_experiment( cost_data, phases = 10 )
section_for_costs = cost_table( costs )
log.entry( 'generating dice sections' )
section_for_all_classes = Report.section_for_all_classes(
dice, image_data, predicted, reference, results )
section_per_class = '\n'.join( [
Report.section_for_class(
c, dice_per_class[ c ], image_data, predicted, reference, results )
for c in range( class_count ) ] )
log.entry( 'combining sections' )
report_name = experiment.definition.experiment_name
sections = source + section_for_costs + section_for_all_classes + section_per_class
file_content = html( report_name, epoch, sections )
file_name = archive.saved_object_file_name( 'report', epoch = epoch ) + '.html'
log.entry( f'writing report to {file_name}' )
with open( file_name, 'w' ) as output_file:
output_file.write( file_content )
log.entry( 'done' )
return file_name
@staticmethod
def section_for_costs( costs ):
pass
@staticmethod
def section_for_all_classes( statistics, image_data, predicted, reference, results ):
name = f'dice over all classes'
method = Images.sample_difference_of_multiple_masks
return Report.section(
name, statistics, image_data, predicted, reference, method, results )
@staticmethod
def section_for_class( c, statistics_for_c, image_data, predicted, reference, results ):
name = f'dice for class {c}'
method = lambda i, p, r, n : Images.sample_difference_of_masks( i, p, r, n, c )
return Report.section(
name, statistics_for_c, image_data, predicted, reference, method, results )
@staticmethod
def section( name, statistics, image_data, predicted, reference, sample_method, results ):
names = statistics._fields
images_per_statistic = [
sample_method(
image_data[ volume_index ],
predicted[ volume_index ],
reference[ volume_index ],
results.class_count )
for value, volume_index in statistics ]
image_file_names_per_statistic = [
Report.save_sample_images_for_statistic(
images,
f'{name}-{names[ statistic ]}',
results.archive )
for statistic, images in enumerate( images_per_statistic ) ]
samples_indexed_by_statistic_name = Images.Samples( **{
names[ statistic ] : image_file_names
for statistic, image_file_names in enumerate( image_file_names_per_statistic ) } )
return metric_section( name, statistics, samples_indexed_by_statistic_name )
@staticmethod
def save_sample_images_for_statistic( sample_images_for_statistic, section_name, archive ):
axis_count = len( Images.Axes )
position_count = len( Images.SamplePositions )
assert sample_images_for_statistic.shape[ 0 : 2 ] == ( axis_count, position_count )
prefix = 'report-' + section_name.replace( ' ', '_' )
file_names = (
[ [ archive.saved_object_file_name( prefix, f'{axis.name}-{position.name}.png' )
for position in Images.SamplePositions ]
for axis in Images.Axes ] )
relative_file_names = (
[ [ os.path.basename( file_name ) for file_name in row ] for row in file_names ] )
for i in range( axis_count ):
for j in range( position_count ):
if os.path.exists( file_names[ i ][ j ] ):
os.remove( file_names[ i ][ j ] )
Images.save_image( sample_images_for_statistic[ i ][ j ], file_names[ i ][ j ] )
return relative_file_names
#---------------------------------------------------------------------------------------------------
| gpl-3.0 | -5,500,868,599,115,104,000 | 26.656315 | 100 | 0.542896 | false | 4.041755 | false | false | false |
farzadghanei/distutilazy | distutilazy/clean.py | 1 | 7976 | """
distutilazy.clean
-----------------
command classes to help clean temporary files
:license: MIT. For more details see LICENSE file or
https://opensource.org/licenses/MIT
"""
from __future__ import absolute_import
import os
from shutil import rmtree
from distutils import log
from distutils.core import Command
from distutils.command import clean
from . import util
__version__ = "0.4.0"
class BaseFileSystemCleanerCommand(Command):
@staticmethod
def default_extensions(cls):
return []
@staticmethod
def default_directories(cls):
return []
def initialize_options(self):
self.root = os.getcwd()
self.extensions = ','.join(self.default_extensions())
self.directories = ','.join(self.default_directories())
def finalize_options(self):
if not os.path.exists(self.root):
raise IOError("Failed to access root path '{}'".format(self.root))
self.extensions = [ext.strip() for ext in self.extensions.split(',')]
self.directories = [
dir_.strip() for dir_ in self.directories.split(',')]
def _find_files(self):
"""Find files recursively in the root path
using provided extensions.
:return: list of absolute file paths
"""
files = []
for ext in self.extensions:
ext_files = util.find_files(self.root, "*" + ext)
log.debug("found {} '*{}' files in '{}'".format(
len(ext_files), ext, self.root)
)
files.extend(ext_files)
return files
def _find_directories(self):
directories = []
for dir_name in self.directories:
dirs = util.find_directories(self.root, dir_name)
log.debug("found {} directories in '{}'".format(
len(dirs), self.root))
directories.extend(dirs)
return directories
def _clean_file(self, filename):
"""Clean a file if exists and not in dry run"""
if not os.path.exists(filename):
return
self.announce("removing '{}'".format(filename))
if not self.dry_run:
os.remove(filename)
def _clean_directory(self, name):
"""Clean a directory if exists and not in dry run"""
if not os.path.exists(name):
return
self.announce(
"removing directory '{}' and all its contents".format(name)
)
if not self.dry_run:
rmtree(name, True)
class CleanPyc(BaseFileSystemCleanerCommand):
description = """Clean root dir from complied python files"""
user_options = [("root=", "r", "path to root dir")]
@staticmethod
def default_extensions():
return [".pyc", ".pyo", ".pyd"]
@staticmethod
def default_directories():
return ["__pycache__"]
def find_compiled_files(self):
"""Find compiled Python files recursively in the root path
:return: list of absolute file paths
"""
files = self._find_files()
self.announce(
"found '{}' compiled python files in '{}'".format(
len(files), self.root
)
)
return files
def find_cache_directories(self):
directories = self._find_directories()
self.announce(
"found {} python cache directories in '{}'".format(
len(directories), self.root
)
)
return directories
def run(self):
directories = self.find_cache_directories()
if directories:
self.announce(
"cleaning python cache directories in '{}' ...".format(
self.root))
if not self.dry_run:
for dir_name in directories:
self._clean_directory(dir_name)
files = self.find_compiled_files()
if files:
self.announce(
"cleaning compiled python files in '{}' ...".format(self.root))
if not self.dry_run:
for filename in files:
self._clean_file(filename)
class CleanJythonClass(BaseFileSystemCleanerCommand):
description = """Clean root dir from complied files created by Jython"""
user_options = [("root=", "r", "path to root dir")]
@staticmethod
def default_extensions():
return ["$py.class"]
@staticmethod
def default_directories():
return []
def find_class_files(self):
"""Find compiled class files recursively in the root path
:return: list of absolute file paths
"""
files = self._find_files()
self.announce(
"found '{}' compiled class files in '{}'".format(
len(files), self.root
)
)
return files
def run(self):
files = self.find_class_files()
if files:
self.announce(
"cleaning compiled class files in '{}' ...".format(self.root))
if not self.dry_run:
for filename in files:
self._clean_file(filename)
class CleanAll(clean.clean, BaseFileSystemCleanerCommand):
description = "Clean root dir from temporary files (complied files, etc)"
user_options = [
("keep-build", None, "do not clean build directory"),
("keep-dist", None, "do not clean dist directory"),
("keep-egginfo", None, "do not clean egg info directory"),
("keep-extra", None, "do not clean extra files"),
]
boolean_options = ["keep-build", "keep-dist", "keep-egginfo", "keep-extra"]
@staticmethod
def default_extensions():
return CleanPyc.default_extensions() + \
CleanJythonClass.default_extensions()
@staticmethod
def default_directories():
return CleanPyc.default_directories() + \
CleanJythonClass.default_directories()
def initialize_options(self):
clean.clean.initialize_options(self)
BaseFileSystemCleanerCommand.initialize_options(self)
self.keep_build = None
self.keep_dist = None
self.keep_egginfo = None
self.keep_extra = None
def finalize_options(self):
clean.clean.finalize_options(self)
BaseFileSystemCleanerCommand.finalize_options(self)
self.all = True
def get_egginfo_dir(self):
return self.distribution.metadata.get_name() + ".egg-info"
def get_extra_paths(self):
"""Return list of extra files/directories to be removed"""
return []
def clean_egginfo(self):
"""Clean .egginfo directory"""
dir_name = os.path.join(self.root, self.get_egginfo_dir())
self._clean_directory(dir_name)
def clean_dist(self):
self._clean_directory(os.path.join(self.root, "dist"))
def clean_build(self):
self._clean_directory(os.path.join(self.root, "build"))
def clean_extra(self):
"""Clean extra files/directories specified by get_extra_paths()"""
extra_paths = self.get_extra_paths()
for path in extra_paths:
if not os.path.exists(path):
continue
if os.path.isdir(path):
self._clean_directory(path)
else:
self._clean_file(path)
def run(self):
clean.clean.run(self)
if not self.keep_build:
self.clean_build()
if not self.keep_egginfo:
self.clean_egginfo()
if not self.keep_dist:
self.clean_dist()
if not self.keep_extra:
self.clean_extra()
directories = self._find_directories()
if directories and not self.dry_run:
for dir_name in directories:
self._clean_directory(dir_name)
files = self._find_files()
if files and not self.dry_run:
for filename in files:
self._clean_file(filename)
clean_pyc = CleanPyc
clean_all = CleanAll
| mit | -5,143,906,356,079,670,000 | 29.559387 | 79 | 0.57836 | false | 4.29973 | false | false | false |
IMIO/django-fixmystreet | django_fixmystreet/middleware/smtpforward.py | 2 | 4934 | #-*- coding: utf-8 -*-
"""SMTP email backend class."""
import smtplib
import ssl
import threading
from django.conf import settings
from django.core.mail.backends.base import BaseEmailBackend
from django.core.mail.utils import DNS_NAME
from django.core.mail.message import sanitize_address
from django.utils.encoding import force_bytes
class EmailBackend(BaseEmailBackend):
"""
A wrapper that manages the SMTP network connection.
"""
def __init__(self, host=None, port=None, username=None, password=None,
use_tls=None, fail_silently=False, **kwargs):
super(EmailBackend, self).__init__(fail_silently=fail_silently)
self.host = host or settings.EMAIL_HOST
self.port = port or settings.EMAIL_PORT
if username is None:
self.username = settings.EMAIL_HOST_USER
else:
self.username = username
if password is None:
self.password = settings.EMAIL_HOST_PASSWORD
else:
self.password = password
if use_tls is None:
self.use_tls = settings.EMAIL_USE_TLS
else:
self.use_tls = use_tls
self.connection = None
self._lock = threading.RLock()
def open(self):
"""
Ensures we have a connection to the email server. Returns whether or
not a new connection was required (True or False).
"""
if self.connection:
# Nothing to do if the connection is already open.
return False
try:
# If local_hostname is not specified, socket.getfqdn() gets used.
# For performance, we use the cached FQDN for local_hostname.
self.connection = smtplib.SMTP(self.host, self.port,
local_hostname=DNS_NAME.get_fqdn())
if self.use_tls:
self.connection.ehlo()
self.connection.starttls()
self.connection.ehlo()
if self.username and self.password:
self.connection.login(self.username, self.password)
return True
except:
if not self.fail_silently:
raise
def close(self):
"""Closes the connection to the email server."""
if self.connection is None:
return
try:
try:
self.connection.quit()
except (ssl.SSLError, smtplib.SMTPServerDisconnected):
# This happens when calling quit() on a TLS connection
# sometimes, or when the connection was already disconnected
# by the server.
self.connection.close()
except:
if self.fail_silently:
return
raise
finally:
self.connection = None
def send_messages(self, email_messages):
"""
Sends one or more EmailMessage objects and returns the number of email
messages sent.
"""
if not email_messages:
return
with self._lock:
new_conn_created = self.open()
if not self.connection:
# We failed silently on open().
# Trying to send would be pointless.
return
num_sent = 0
for message in email_messages:
sent = self._send(message)
if sent:
num_sent += 1
if new_conn_created:
self.close()
return num_sent
def _send(self, email_message):
"""A helper method that does the actual sending."""
if not email_message.recipients():
return False
from_email = sanitize_address(email_message.from_email, email_message.encoding)
recipients = [sanitize_address(addr, email_message.encoding)
for addr in email_message.recipients()]
message = email_message.message()
charset = message.get_charset().get_output_charset() if message.get_charset() else 'utf-8'
# tricky-stuff
message = email_message.message().as_string()
message = message.replace('<p>Bonjour,</p>', '<p>ATTENTION, UN TRICKY STUFF EST UTILISÉ (smtpforward.EmailBackend).<br/>CET EMAIL ÉTAIT CENSÉ ÊTRE ENVOYÉ À : <strong>%s</strong></p><p>Bonjour,</p>' %recipients)
message = message.replace('Bonjour,\r\n\n\n', 'ATTENTION, UN TRICKY STUFF EST UTILISÉ (smtpforward.EmailBackend)\r\n\n\nCET EMAIL ÉTAIT CENSÉ ÊTRE ENVOYÉ À : %s\r\n\n\nBonjour,\r\n\n\n' %recipients)
to_list = settings.TO_LIST
try:
#self.connection.sendmail(from_email, recipients,
self.connection.sendmail(from_email, to_list,
force_bytes(message, charset))
except:
if not self.fail_silently:
raise
return False
return True
| agpl-3.0 | 2,438,398,094,984,649,700 | 38.063492 | 218 | 0.577001 | false | 4.265165 | false | false | false |
HPAC/TTC | ttc/transpose.py | 1 | 24710 | # This script generates high-performance C/C++ code for any given multi-dimensional transposition.
#
# Tensor-Contraction Compiler (TTC), copyright (C) 2015 Paul Springer ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import itertools
import copy
import time
import ttc_util
###################################
#
# This file generates transpositions of the form B_perm(I) = alpha * A_I + beta * B_perm(I)
#
###################################
class implementation:
def __init__(self, blocking, loopPerm, perm, size, alpha, beta, floatTypeA, floatTypeB,
optimization, scalar, prefetchDistance, microBlocking, reference,
architecture, parallelize):
self.registerSizeBits = microBlocking[0] * 8 * ttc_util.getFloatTypeSize(floatTypeA)
self.parallelize = parallelize
self.debug = 0
self.floatTypeA = floatTypeA
self.floatTypeB = floatTypeB
self.architecture = architecture
self.alpha = alpha
self.beta = beta
self.optimization = optimization #TODO remane optimization to streamingStores
self.prefetchDistance = prefetchDistance
self.microBlocking = microBlocking
self.numMicroBlocksPerBlock = blocking[0]/microBlocking[0] * blocking[1]/microBlocking[1]
self.reference = reference
self.size = copy.deepcopy(size)
self.scalar = scalar
if self.reference == 0:
self.blockA= blocking[0] #blocking in stride-1 indices of A
self.blockB= blocking[1] #blocking in stride-1 indices of B
else:
self.blockA= 1 #blocking in stride-1 indices of A
self.blockB= 1 #blocking in stride-1 indices of B
self.dim = len(perm)
self.perm = copy.deepcopy(perm)
self.loopPerm = copy.deepcopy(loopPerm)
self.indent = " "
self.cost = 0.0
self.code = ""
self.ldout = -1
for i in range(len(self.perm)):
if( self.perm[0] != 0 ):
if self.perm[i] == 0:
self.ldout = i
break;
else:
if self.perm[i] == 1:
self.ldout = i
break;
self.transposeMacroKernelname = "%sTranspose%dx%d"%(ttc_util.getFloatPrefix(self.floatTypeA, self.floatTypeB), self.blockA,self.blockB)
if( self.perm[0] == 0):
self.transposeMacroKernelname += "_0"
if( self.optimization != "" ):
self.transposeMacroKernelname += "_%s"%self.optimization
if( self.beta == 0 ):
self.transposeMacroKernelname += "_bz"
if( self.prefetchDistance > 0 ):
self.transposeMacroKernelname += "_prefetch_"+ str(self.prefetchDistance)
def getPrefetchDistance(self):
return self.prefetchDistance
def getLoopPerm(self):
return self.loopPerm
def getOffsetA(self,start = 0):
offset = ""
for i in range(start,self.dim):
offset += "i" + str(i)
if(i != 0):
offset += "*lda" + str(i)
if( i != self.dim-1):
offset += " + "
return offset
def getOffsetB(self, start = 0):
offset = ""
for i in range(start,self.dim):
#find idx idxPerm
invIdx = -1
for j in range(self.dim):
if self.perm[j] == i:
invIdx = j
offset += "i" + str(i)
if(invIdx != 0):
offset += "*ldb" + str(invIdx)
if( i != self.dim-1):
offset += " + "
return offset
def getUpdateString(self,indent):
outStr = "B[" + self.getOffsetB() + "]"
inStr = "A[" + self.getOffsetA() + "]"
ret = ""
if(self.beta != 0):
ret += "%s%s = alpha*%s + beta*%s;\n"%(indent + self.indent, outStr, inStr,outStr)
else:
ret += "%s%s = alpha*%s;\n"%(indent + self.indent, outStr, inStr)
return ret
def printScalarLoop(self, loopPerm, indent):
loopIdx = loopPerm[0]
increment = 1
if len(loopPerm) == 1:
if( self.optimization == "streamingstore" ):
self.code += "#pragma vector nontemporal\n"
self.code += "#pragma simd\n"
self.code += "%sfor(int i%d = 0; i%d < size%d; i%d += %d)\n"%(indent,loopIdx,loopIdx,loopIdx,loopIdx,increment)
if len(loopPerm) > 1:
self.printScalarLoop(loopPerm[1:], indent + self.indent)
else:#we reached the innermost loop, no recursion
#get input and output offsets correct
self.code += self.getUpdateString(indent)
def printRemainderLoop(self, loopPerm, indent, remainderIdx):
loopIdx = loopPerm[0]
increment = 1
if(loopIdx == remainderIdx):
self.code += "%sfor(int i%d = size%d - remainder%d; i%d < size%d; i%d += %d)\n"%(indent,loopIdx, loopIdx, loopIdx,loopIdx,loopIdx,loopIdx,increment)
else:
firstIdx = 0
if( self.perm[0] == 0 ):
firstIdx = 1
if( self.optimization == "streamingstore" and len(loopPerm) == 1 and self.perm[0] != 0):
self.code += "#pragma vector nontemporal\n"
if( remainderIdx == firstIdx and loopIdx == self.perm[firstIdx]):
self.code += "%sfor(int i%d = 0; i%d < size%d - remainder%d; i%d += %d)\n"%(indent,loopIdx,loopIdx,loopIdx,loopIdx,loopIdx,increment)
else:
self.code += "%sfor(int i%d = 0; i%d < size%d; i%d += %d)\n"%(indent,loopIdx,loopIdx,loopIdx,loopIdx,increment)
if len(loopPerm) > 1:
self.printRemainderLoop(loopPerm[1:], indent + self.indent, remainderIdx)
else:#we reached the innermost loop, no recursion
if( self.perm[0] == 0 ):
indent += self.indent
if( self.optimization == "streamingstore" ):
self.code += "#pragma vector nontemporal\n"
self.code += "#pragma simd\n"
self.code += "%sfor(int i0 = 0; i0 < size0; i0++)\n"%(indent)
#get input and output offsets correct
self.code += self.getUpdateString(indent)
def getBlocking(self):
return (self.blockA, self.blockB)
def getBroadcastVariables(self):
code = " "
if(self.perm[0]!=0 and self.scalar == 0):
code += " ,reg_alpha"
if(self.beta):
code += " ,reg_beta"
else:
code += " ,alpha"
if(self.beta):
code += " ,beta"
return code
def __printLoopBody(self, loopPerm, indent, clean):
loopIdx = loopPerm[0]
increment = 1
if( self.perm[0] != 0 ):
if loopIdx == 0:
increment = self.blockA
elif loopIdx == self.perm[0]:
increment = self.blockB
else:
#we block along the outer two dimensions if the first index doesn't change
if loopIdx == 1:
increment = self.blockA
elif loopIdx == self.perm[1]:
increment = self.blockB
if( increment > 1):
self.code += "%sfor(int i%d = 0; i%d < size%d - %d; i%d+= %d)\n"%(indent,loopIdx,loopIdx,loopIdx,increment-1,loopIdx,increment)
else:
self.code += "%sfor(int i%d = 0; i%d < size%d; i%d+= %d)\n"%(indent,loopIdx,loopIdx,loopIdx,loopIdx,increment)
if len(loopPerm) > 1: #we have not reached the inner most loop yet => recursion
self.__printLoopBody(loopPerm[1:], indent + " ", clean)
else: #we reached the innermost loop, no recursion
#if( clean ):
# cleanMacroTransposeName = self.transposeMacroKernelname + "_"
# for i in self.perm:
# cleanMacroTransposeName += str(i)
# cleanMacroTransposeName +="_"
# for idx in range(len(self.size)):
# cleanMacroTransposeName += "%d"%(self.size[idx])
# if(idx != len(self.size)-1):
# cleanMacroTransposeName +="x"
#else:
cleanMacroTransposeName = self.transposeMacroKernelname
if( self.prefetchDistance > 0):
indexStr = ""
indexPrintStr = "("
for i in range(self.dim):
indexStr += "i%d, "%i
indexPrintStr += "%d, "
indexPrintStr += ")"
self.code += "%s{\n"%indent
self.code += "%sint offsetA = %s;\n"%(indent + self.indent, self.getOffsetA())
self.code += "%sint offsetB = %s;\n"%(indent + self.indent, self.getOffsetB())
prefetchDistance = (self.prefetchDistance + self.numMicroBlocksPerBlock - 1) / self.numMicroBlocksPerBlock
self.code += "%sif( counter >= %d ){\n"%(indent + self.indent, prefetchDistance )
self.code += "%sconst Offset &task = tasks.back();\n"%(indent + self.indent + self.indent)
self.code += "%sint offsetAnext0 = task.offsetA;\n"%(indent + self.indent + self.indent)
self.code += "%sint offsetBnext0 = task.offsetB;\n"%(indent + self.indent + self.indent)
self.code += "%sconst Offset ¤tTask = tasks.front();\n"%(indent + self.indent + self.indent)
self.code += "%s%s(&A[currentTask.offsetA], lda%d, &B[currentTask.offsetB], ldb%d, &A[offsetAnext0], &B[offsetBnext0], &A[offsetA], &B[offsetB]%s);\n"%(indent + self.indent + self.indent, cleanMacroTransposeName, self.perm[0], self.ldout, self.getBroadcastVariables())
self.code += "%stasks.pop();\n"%(indent + self.indent + self.indent)
self.code += "%s}\n"%(indent + self.indent)
self.code += "%scounter++;\n"%(indent + self.indent)
self.code += "%sOffset offset; offset.offsetA = offsetA; offset.offsetB = offsetB;\n"%(indent + self.indent)
self.code += "%stasks.push( offset );\n"%(indent + self.indent)
#if self.debug:
# self.code += "%sif( offsetA != offsetAnext || offsetB != offsetBnext)\n"%(indent + self.indent)
# self.code += "%s printf(\"%%d: %s %s %%d %%d %%d %%d\\n\",omp_get_thread_num(), %soffsetA, offsetAnext1, offsetB, offsetBnext1);\n"%(indent + self.indent,self.getVersionName(), indexPrintStr, indexStr)
# self.code += "%soffsetAnext = offsetAnext1;\n"%(indent)
# self.code += "%soffsetBnext = offsetBnext1;\n"%(indent)
else:
if( self.perm[0] != 0):
self.code += "%s%s(&A[%s], lda%d, &B[%s], ldb%d%s);\n"%(indent + self.indent, cleanMacroTransposeName,self.getOffsetA(), self.perm[0], self.getOffsetB(),self.ldout, self.getBroadcastVariables())
else:
if( not clean) :
self.code += "%s%s(&A[%s], lda1, lda%d, &B[%s], ldb1, ldb%d%s);\n"%(indent + self.indent, cleanMacroTransposeName,self.getOffsetA(1), self.perm[1], self.getOffsetB(1),self.ldout, self.getBroadcastVariables())
else:
self.code += "%s%s<size0>(&A[%s], lda1, lda%d, &B[%s], ldb1, ldb%d%s);\n"%(indent + self.indent, cleanMacroTransposeName,self.getOffsetA(1), self.perm[1], self.getOffsetB(1),self.ldout, self.getBroadcastVariables())
if( self.prefetchDistance > 0 ):
self.code += "%s}\n"%indent
def getVersionName(self):
versionName = ""
if(self.reference != 0):
versionName += "reference"
else:
versionName += "v"
found0 = 0
for i in self.loopPerm:
if(i == 0):
found0 = 1
versionName += str(i)
if(self.perm[0] == 0 and not found0):
versionName += str(0) #0 is always the innermost loop in this case
versionName += "_%dx%d"%(self.blockA, self.blockB)
if( self.prefetchDistance > 0 ):
versionName += "_prefetch_" + str(self.prefetchDistance)
return versionName
def getTransposeName(self, clean = 0):
if(self.floatTypeA == "float"):
if(self.floatTypeB == "float"):
transposeName = "s"
else:
transposeName = "sd"
if(self.floatTypeA == "double"):
if(self.floatTypeB == "double"):
transposeName = "d"
else:
transposeName = "ds"
if(self.floatTypeA == "float complex"):
if(self.floatTypeB == "float complex"):
transposeName = "c"
else:
transposeName = "cz"
if(self.floatTypeA == "double complex"):
if(self.floatTypeB == "double complex"):
transposeName = "z"
else:
transposeName = "zs"
transposeName += "Transpose_"
for i in self.perm:
transposeName += str(i)
transposeName +="_"
for idx in range(len(self.size)):
transposeName += "%d"%(self.size[idx])
if(idx != len(self.size)-1):
transposeName +="x"
# transposeName +="_"
# for idx in range(len(self.lda)):
# transposeName += "%d"%(self.lda[idx])
# if(idx != len(self.lda)-1):
# transposeName +="x"
#
# transposeName +="_"
# for idx in range(len(self.ldb)):
# transposeName += "%d"%(self.ldb[idx])
# if(idx != len(self.ldb)-1):
# transposeName +="x"
if(clean == 0):
transposeName += "_"
transposeName += self.getVersionName()
if(self.parallelize == 1):
transposeName += "_par"
if( self.optimization != "" ):
transposeName += "_%s"%self.optimization
if(self.beta == 0):
transposeName += "_bz"
return transposeName
def getBroadcastKernel(self, name, value, floatType):
self.code += self.indent +"//broadcast %s\n"%name
if(self.architecture == "power"):
self.code += self.indent + "vector4double %s = vec_splats(%s);\n"%(name, value)
else:
if( value == "beta" and self.floatTypeA.find("double") != -1 and self.floatTypeB.find("float") != -1):
_floatType = "__m128"
functionName = "_mm_set1_ps"
elif( floatType == "float" or floatType == "float complex" ):
if( self.registerSizeBits == 128 ):
functionName = "_mm_set1_ps"
else:
functionName = "_mm%d_set1_ps"%self.registerSizeBits
_floatType = "__m%d"%self.registerSizeBits
elif( floatType == "double" or floatType == "double complex" ):
if( self.registerSizeBits == 128 ):
functionName = "_mm_set1_pd"
else:
functionName = "_mm%d_set1_pd"%self.registerSizeBits
_floatType = "__m%dd"%self.registerSizeBits
self.code += self.indent + "%s %s = %s(%s);\n"%(_floatType, name,functionName,value)
return self.code + "\n"
def getHeader(self, headerFlag = 1, clean = 0):
transposeName = self.getTransposeName(clean)
if headerFlag == 0:
trailingChar = "\n{\n"
else:
trailingChar = ";\n"
alphaFloatType = "float"
if( self.floatTypeA.find("double") != -1 ):
alphaFloatType = "double"
betaFloatType = "float"
if( self.floatTypeB.find("double") != -1 ):
betaFloatType = "double"
size_str = ""
for i in range(self.dim):
size_str += "int size%d, "%i
size_str = size_str[:-2]
Astr = ""
Bstr = ""
for i in range(len(self.perm)):
Astr += "i%d,"%i
Bstr += "i%d,"%self.perm[i]
Astr = Astr[:-1]
Bstr = Bstr[:-1]
if(self.beta != 0):
if( not clean ):
return "void %s( const %s* __restrict__ A, %s* __restrict__ B, const %s alpha, const %s beta, const int *size, const int *lda, const int *ldb)%s"% (transposeName, self.floatTypeA, self.floatTypeB, alphaFloatType,betaFloatType, trailingChar)
else:
ret = ""
ret += "/**\n"
ret += " * B(%s) <- alpha * A(%s) + beta * B(%s);\n"%(Bstr,Astr,Bstr)
ret += " */\n"
ret += "template<%s>\nvoid %s( const %s* __restrict__ A, %s* __restrict__ B, const %s alpha, const %s beta, const int *lda, const int *ldb)%s"% (size_str, transposeName, self.floatTypeA, self.floatTypeB, alphaFloatType,betaFloatType, trailingChar)
return ret
else:
if( not clean ):
return "void %s( const %s* __restrict__ A, %s* __restrict__ B, const %s alpha, const int *size, const int *lda, const int *ldb)%s"% (transposeName, self.floatTypeA, self.floatTypeB, alphaFloatType, trailingChar)
else:
ret = "/**\n"
ret += " * B(%s) <- alpha * A(%s);\n"%(Bstr,Astr)
ret += " */\n"
ret += "template<%s>\nvoid %s( const %s* __restrict__ A, %s* __restrict__ B, const %s alpha, const int *lda, const int *ldb)%s"% (size_str, transposeName, self.floatTypeA, self.floatTypeB, alphaFloatType, trailingChar)
return ret
def printHeader(self, headerFlag = 1, clean = 0):
self.code += self.getHeader(headerFlag, clean)
def declareVariables(self,clean):
if( not clean ):
for i in range(self.dim):
self.code += "%sconst int size%d = size[%d];\n"%(self.indent,i,i)
if(self.dim > 1):
#LDA
for i in range(1,self.dim):
self.code += "%sint lda%d;\n"%(self.indent,i)
self.code += "%sif( lda == NULL ){\n"%(self.indent)
self.code += "%s lda1 = size0;\n"%(self.indent)
for i in range(2,self.dim):
self.code += "%s lda%d = size%d * lda%d;\n"%(self.indent,i,i-1,i-1)
self.code += "%s}else{\n"%(self.indent)
self.code += "%s lda1 = lda[0];\n"%(self.indent)
for i in range(2,self.dim):
self.code += "%s lda%d = lda[%d] * lda%d;\n"%(self.indent,i,i-1,i-1)
self.code += "%s}\n"%(self.indent)
#LDB
for i in range(1,self.dim):
self.code += "%sint ldb%d;\n"%(self.indent,i)
self.code += "%sif( ldb == NULL ){\n"%(self.indent)
self.code += "%s ldb1 = size%d;\n"%(self.indent,self.perm[0])
for i in range(2,self.dim):
self.code += "%s ldb%d = size%d * ldb%d;\n"%(self.indent,i,self.perm[i-1],i-1)
self.code += "%s}else{\n"%(self.indent)
self.code += "%s ldb1 = ldb[0];\n"%(self.indent)
for i in range(2,self.dim):
self.code += "%s ldb%d = ldb[%d] * ldb%d;\n"%(self.indent,i,i-1,i-1)
self.code += "%s}\n"%(self.indent)
if( self.perm[0] != 0 ):
self.code += "%sconst int remainder0 = size0 %% %d;\n"%(self.indent,self.blockA)
self.code += "%sconst int remainder%d = size%d %% %d;\n"%(self.indent,self.perm[0],self.perm[0], self.blockB)
else:
self.code += "%sconst int remainder1 = size1 %% %d;\n"%(self.indent,self.blockA)
if(self.perm[1] != 1):
self.code += "%sconst int remainder%d = size%d %% %d;\n"%(self.indent,self.perm[1],self.perm[1], self.blockB)
if( self.prefetchDistance > 0 and self.debug ):
self.code += "%sint offsetAnext = 0, offsetBnext = 0;\n"%(self.indent)
def getCostLoop(self):
if( self.cost != 0.0 ):
return self.cost
self.cost = ttc_util.getCostLoop(self.loopPerm, self.perm, self.size)
return self.cost
def getImplementation(self, parallel = 1, clean = 0):
self.code = ""
self.printHeader(0,clean)
self.declareVariables(clean)
if(self.perm[0] != 0 and self.scalar ==0):
self.getBroadcastKernel("reg_alpha","alpha", self.floatTypeA)
if(self.beta != 0):
self.getBroadcastKernel("reg_beta","beta", self.floatTypeB)
if( self.reference == 0):
indent = self.indent
if( parallel ):
self.code += "#pragma omp parallel\n"
self.code += self.indent +"{\n"
indent += self.indent
if( self.prefetchDistance > 0 ):
self.code += indent + "int counter = 0;\n"
self.code += indent + "std::queue<Offset> tasks;\n"
if( parallel ):
self.code += "#pragma omp for collapse(%d) schedule(static)\n"%(len(self.loopPerm))
self.__printLoopBody(self.loopPerm, indent, clean)
if( self.prefetchDistance > 0 ):
self.code += indent + "while(tasks.size() > 0){\n"
self.code += indent + " const Offset &task = tasks.front();\n"
endPos = self.transposeMacroKernelname.find("prefetch")
if( endPos != -1):
endPos -= 1 #remove last '_'
cleanMacroTransposeName = self.transposeMacroKernelname[:endPos]#remove prefetch
#if( clean ):
# cleanMacroTransposeName += "_"
# for i in self.perm:
# cleanMacroTransposeName += str(i)
# cleanMacroTransposeName +="_"
# for idx in range(len(self.size)):
# cleanMacroTransposeName += "%d"%(self.size[idx])
# if(idx != len(self.size)-1):
# cleanMacroTransposeName +="x"
self.code += indent + " %s(&A[task.offsetA], lda%d, &B[task.offsetB], ldb%d %s);\n"%(cleanMacroTransposeName, self.perm[0], self.ldout, self.getBroadcastVariables())
self.code += indent + " tasks.pop();\n"
self.code += indent + "}\n"
#print remainder loops
indent = self.indent
if( parallel ):
indent += self.indent
if( self.perm[0] != 0 ):
self.code += indent + "//Remainder loop" + "\n"
if( parallel ):
self.code += "#pragma omp for collapse(%d) schedule(static)\n"%(self.dim-1)
self.printRemainderLoop(self.loopPerm, indent, 0)
self.code += indent + "//Remainder loop" + "\n"
if( parallel ):
self.code += "#pragma omp for collapse(%d) schedule(static)\n"%(self.dim-1)
self.printRemainderLoop(self.loopPerm, indent, self.perm[0])
else:
self.code += indent + "//Remainder loop" + "\n"
if( parallel ):
self.code += "#pragma omp for collapse(%d) schedule(static)\n"%(self.dim-1)
self.printRemainderLoop(self.loopPerm, indent, 1)
self.code += indent + "//Remainder loop" + "\n"
if( parallel ):
self.code += "#pragma omp for collapse(%d) schedule(static)\n"%(self.dim-1)
self.printRemainderLoop(self.loopPerm, indent, self.perm[1])
if( parallel ):
self.code += self.indent +"}\n"
else:
if( parallel ):
self.code += "#pragma omp parallel for collapse(%d)\n"%(max(1,len(self.loopPerm)-1))
self.printScalarLoop(self.loopPerm, self.indent)
self.code += "}\n"
return self.code
| gpl-3.0 | -5,242,585,203,455,404,000 | 41.824957 | 284 | 0.522177 | false | 3.664541 | false | false | false |
bmyerz/log2sqlite | projects/radish_paper/tpch_radish_experiments.py | 1 | 3558 | from grappa import GrappaExperiment, MPIRunGrappaExperiment
"""
A place to keep a bunch of example experiments.
Feel free to use this script while developing experiments.
But, when saving things for reproducibility, put each of your final experiments in a separate file
"""
# tpch
tpch_sampa = GrappaExperiment({
'trial': range(1, 3 + 1),
'qn': range(1, 22 + 1),
'exe': lambda qn: "grappa_tpc_q{0}.exe".format(qn),
'sf': 10,
'ppn': 12,
'nnode': 8,
'query': lambda qn: 'q{0}'.format(qn),
'vtag': 'v1',
'machine': 'sampa'
},
{
'shared_pool_memory_fraction': 0.5
})
tpch_pal = GrappaExperiment({
'trial': range(1, 3 + 1),
'qn': range(1, 22 + 1),
'exe': lambda qn: "grappa_tpc_q{0}.exe".format(qn),
'sf': 10,
'ppn': 16,
'nnode': 16,
'query': lambda qn: 'q{0}'.format(qn),
'vtag': 'v1',
'machine': 'pal'
},
{
'shared_pool_memory_fraction': 0.5
})
tpch_bigdatann = MPIRunGrappaExperiment({
'trial': range(1, 3 + 1),
'qn': range(1, 22 + 1),
'exe': lambda qn: "grappa_tpc_q{0}.exe".format(qn),
'sf': 10,
'ppn': 16,
'nnode': 16,
'np': lambda ppn, nnode: ppn*nnode,
'query': lambda qn: 'q{0}'.format(qn),
'vtag': 'v1',
'machine': 'bigdata',
'system': 'radish'
},
{
'shared_pool_memory_fraction': 0.5
})
tpch_iter_bigdatann = MPIRunGrappaExperiment({
'trial': range(1, 3 + 1),
#'qn': range(1, 22 + 1),
'qn': [6,11,12,14,15,17,19],
'exe': lambda qn: "grappa_tpc_iter_q{0}.exe".format(qn),
'sf': 10,
'ppn': 16,
'nnode': 16,
'np': lambda ppn, nnode: ppn*nnode,
'query': lambda qn: 'q{0}'.format(qn),
'vtag': 'v1',
'machine': 'bigdata',
'system': 'radish-iter'
},
{
'shared_pool_memory_fraction': 0.5
})
tpch_bigdatann_debug = MPIRunGrappaExperiment({
'trial': range(1, 3 + 1),
'qn': [18,19],
'exe': lambda qn: "grappa_tpc_q{0}.exe".format(qn),
'sf': 10,
'ppn': 16,
'nnode': 16,
'np': lambda ppn, nnode: ppn*nnode,
'query': lambda qn: 'q{0}'.format(qn),
'vtag': 'v2-debugmode',
'machine': 'bigdata',
'system': 'radish'
},
{
'shared_pool_memory_fraction': 0.5
})
#tpch_bigdatann.run()
#tpch_bigdatann_debug.run()
tpch_iter_bigdatann.run()
| mit | 1,643,706,453,897,373,200 | 35.306122 | 98 | 0.360596 | false | 3.975419 | false | false | false |
nismod/energy_demand | energy_demand/assumptions/fuel_shares.py | 1 | 13596 | """All fuel shares of the base year for the
different technologies are defined in this file.
"""
from energy_demand.initalisations import helpers
def assign_by_fuel_tech_p(
enduses,
sectors,
fueltypes,
fueltypes_nr
):
"""Assigning fuel share per enduse for different technologies
for the base year.
Arguments
----------
enduses : dict
Enduses
sectors : dict
Sectors per submodel
fueltypes : dict
Fueltypes lookup
fueltypes_nr : int
Number of fueltypes
Returns
-------
fuel_tech_p_by : dict
Residential fuel share percentages
Note
----
- In an enduse, either all fueltypes with assigned fuelsneed to be
assigned with technologies or none. No mixing possible
- Technologies can be defined for the following fueltypes:
'solid_fuel': 0,
'gas': 1,
'electricity': 2,
'oil': 3,
'biomass': 4,
'hydrogen': 5,
'heat': 6
- Not defined fueltypes will be assigned placholder technologies
"""
fuel_tech_p_by = {}
_fuel_tech_p_by = helpers.init_fuel_tech_p_by(
enduses['residential'], fueltypes_nr)
fuel_tech_p_by.update(_fuel_tech_p_by)
_fuel_tech_p_by = helpers.init_fuel_tech_p_by(
enduses['service'], fueltypes_nr)
fuel_tech_p_by.update(_fuel_tech_p_by)
_fuel_tech_p_by = helpers.init_fuel_tech_p_by(
enduses['industry'], fueltypes_nr)
fuel_tech_p_by.update(_fuel_tech_p_by)
# ====================
# Residential Submodel
# ====================
# ---------------
# rs_lighting
# Calculated on the basis of ECUK Table 3.08
# ---------------
fuel_tech_p_by['rs_lighting'][fueltypes['electricity']] = {
'standard_lighting_bulb': 0.04,
'halogen': 0.56,
'fluorescent_strip_lighting': 0.07,
'energy_saving_lighting_bulb': 0.32,
'LED': 0.01}
# ---------------
# rs_cold
# Calculated on the basis of ECUK Table 3.08
# ---------------
fuel_tech_p_by['rs_cold'][fueltypes['electricity']] = {
'chest_freezer': 0.087,
'fridge_freezer': 0.588,
'refrigerator': 0.143,
'upright_freezer': 0.182}
# ---------------
# rs_cooking
# Calculated on the basis of ECUK Table 3.08
# Calculated on the assumption that 5 to 10%
# of all households have induction hobs (https://productspy.co.uk/are-induction-hobs-safe/ (5-10%))
# ---------------
fuel_tech_p_by['rs_cooking'][fueltypes['electricity']] = {
'hob_electricity': 0.95,
'hob_induction_electricity': 0.05}
fuel_tech_p_by['rs_cooking'][fueltypes['gas']] = {
'hob_gas': 1.0}
fuel_tech_p_by['rs_cooking'][fueltypes['hydrogen']] = {
'hob_hydrogen': 1.0}
fuel_tech_p_by['rs_cooking'][fueltypes['biomass']] = {
'hob_biomass': 1.0}
# ---------------
# rs_wet
# calculated on the basis of EUCK Table 3.08
# ---------------
fuel_tech_p_by['rs_wet'][fueltypes['electricity']] = {
'washing_machine': 0.305,
'washer_dryer': 0.157,
'dishwasher': 0.220,
'tumble_dryer': 0.318}
# ---------------
# rs_space_heating
#
# According to the DCLG (2014) English Housing Survey. Energy Report. doi: 10.1017/CBO9781107415324.004.
# Annex Table 3.1, the following number of electric heating technologies can be found in the UK:
#
# storage heaters 5.5 % of all houses
# electric room heaters 2.0 % of all houses
# electric central heating 0.65 % of all houses
#
# As heat pumps were not accounted for, they are taken from OFGEM (2015),
# which states that there are about 0.1m heat pumps of about in total 27m
# households in the UK. This corresponds to about 0.4 %. (see also Hannon 2015).
# According to Hannon (2015), heat pumps account only for a tiny fraction of the UK.
# heat supply for buildings (approximately 0.2%). This percentage is substract from
# the storage heaters.
#
# storage heaters 5.1 % of all houses --> ~ 62% (100.0 / 8.15) * 5.1
# secondary_heater_electricity
# electric room heaters 2.0 % of all houses --> ~ 25% (100.0 / 8.15) * 2.0
# electric central heating 0.65 % of all houses --> ~ 8% (100.0 / 8.15) * 0.65
# heat pumps 0.4 % of all houses --> ~ 0.5% (100.0 / 8.15) * 0.4
#
# OFGEM (2015); Insights paper on households with electric and other non-gas heating,
# (December), 1–84.
#
# Hannon, M. J. (2015). Raising the temperature of the UK heat pump market:
# Learning lessons from Finland. Energy Policy, 85, 369–375.
# https://doi.org/10.1016/j.enpol.2015.06.016
# ---------------
fuel_tech_p_by['rs_space_heating'][fueltypes['solid_fuel']] = {
'boiler_solid_fuel': 1.0}
fuel_tech_p_by['rs_space_heating'][fueltypes['oil']] = {
'boiler_condensing_oil': 0.6,
'boiler_oil': 0.4}
# ---
# According to table 3.19, 59.7% (43.5% + 14.3%) have some form of condensing boiler.
# Todays share of district heating is about 2% of UK non-industraiyl demand
# http://fes.nationalgrid.com/media/1215/160712-national-grid-dh-summary-report.pdf
# ---
fuel_tech_p_by['rs_space_heating'][fueltypes['gas']] = {
'boiler_condensing_gas': 0.60,
'boiler_gas': 0.37,
'district_heating_CHP_gas': 0.03}
fuel_tech_p_by['rs_space_heating'][fueltypes['electricity']] = {
'district_heating_electricity' : 0,
'storage_heater_electricity': 0.62,
'secondary_heater_electricity':0.33,
'heat_pumps_electricity': 0.05}
fuel_tech_p_by['rs_space_heating'][fueltypes['biomass']] = {
'boiler_biomass': 1.0,
'district_heating_biomass': 0.0}
fuel_tech_p_by['rs_space_heating'][fueltypes['hydrogen']] = {
'fuel_cell_hydrogen': 0,
'district_heating_fuel_cell': 0,
'boiler_hydrogen': 1.0,
'heat_pumps_hydrogen': 0.0}
# -------------
# Residential water heating
# -------------
fuel_tech_p_by['rs_water_heating'][fueltypes['gas']] = {
'boiler_condensing_gas': 0.60,
'boiler_gas': 0.37,
'district_heating_CHP_gas': 0.03}
fuel_tech_p_by['rs_water_heating'][fueltypes['electricity']] = {
'storage_heater_electricity': 0.62,
'secondary_heater_electricity':0.33,
'heat_pumps_electricity': 0.05}
fuel_tech_p_by['rs_water_heating'][fueltypes['biomass']] = {
'boiler_biomass': 1.0,
'district_heating_biomass': 0.0}
fuel_tech_p_by['rs_water_heating'][fueltypes['hydrogen']] = {
'boiler_hydrogen': 1.0}
fuel_tech_p_by['rs_water_heating'][fueltypes['oil']] = {
'boiler_oil': 1.0}
fuel_tech_p_by['rs_water_heating'][fueltypes['solid_fuel']] = {
'boiler_solid_fuel': 1.0}
# ===================
# Service subModel
# ===================
# ss_lighting Simplified based on Table 5.09 (Office lighting)
fuel_tech_p_by['ss_lighting'][fueltypes['electricity']] = {
'halogen': 0.45,
'fluorescent_strip_lighting': 0.07,
'energy_saving_lighting_bulb': 0.47, #All different lighting next to halogen are summarised here ("non-halogen lighting")
'LED': 0.01}
# ----------------
# Service space heating (ss_space_heating)
# For ss_space heating the load profile is the same for all technologies
# ----------------
fuel_tech_p_by['ss_space_heating'][fueltypes['solid_fuel']] = {
'boiler_solid_fuel': 1.0}
fuel_tech_p_by['ss_space_heating'][fueltypes['gas']] = {
'district_heating_CHP_gas': 0.02,
'boiler_condensing_gas': 0.6,
'boiler_gas': 0.38}
fuel_tech_p_by['ss_space_heating'][fueltypes['electricity']] = {
'district_heating_electricity' : 0,
'secondary_heater_electricity': 0.95,
'heat_pumps_electricity': 0.05}
fuel_tech_p_by['ss_space_heating'][fueltypes['oil']] = {
'boiler_condensing_oil': 0.6,
'boiler_oil': 0.4}
fuel_tech_p_by['ss_space_heating'][fueltypes['biomass']] = {
'boiler_biomass': 1.0}
fuel_tech_p_by['ss_space_heating'][fueltypes['hydrogen']] = {
'fuel_cell_hydrogen': 0,
'boiler_hydrogen': 1.0,
'heat_pumps_hydrogen': 0.0,
'district_heating_fuel_cell': 0.0}
# -------------
# Service water heating
# -------------
fuel_tech_p_by['ss_water_heating'][fueltypes['gas']] = {
'boiler_condensing_gas': 0.60,
'boiler_gas': 0.37,
'district_heating_CHP_gas': 0.03}
fuel_tech_p_by['ss_water_heating'][fueltypes['electricity']] = {
'storage_heater_electricity': 0.62,
'secondary_heater_electricity':0.33,
'heat_pumps_electricity': 0.05}
fuel_tech_p_by['ss_water_heating'][fueltypes['biomass']] = {
'boiler_biomass': 1.0,
'district_heating_biomass': 0.0}
fuel_tech_p_by['ss_water_heating'][fueltypes['hydrogen']] = {
'boiler_hydrogen': 1.0}
fuel_tech_p_by['ss_water_heating'][fueltypes['oil']] = {
'boiler_oil': 1.0}
fuel_tech_p_by['ss_water_heating'][fueltypes['solid_fuel']] = {
'boiler_solid_fuel': 1.0}
# ------------------------------
# Cooling
# ECUK Table 5.09
# ------------------------------
fuel_tech_p_by['ss_cooling_humidification'][fueltypes['electricity']] = {
'central_air_conditioner_electricity': 0.64,
'decentral_air_conditioner_electricity': 0.36}
fuel_tech_p_by['ss_cooling_humidification'][fueltypes['gas']] = {
'central_air_conditioner_gas': 0.64,
'decentral_air_conditioner_gas': 0.36}
fuel_tech_p_by['ss_cooling_humidification'][fueltypes['oil']] = {
'central_air_conditioner_oil': 0.64,
'decentral_air_conditioner_oil': 0.36}
# Helper: Transfer all defined shares for every enduse to every sector
fuel_tech_p_by = helpers.copy_fractions_all_sectors(
fuel_tech_p_by,
sectors['service'],
affected_enduses=enduses['service'])
# ===================
# Industry subModel - Fuel shares of technologies in enduse
# ===================
# ----------------
# Industrial space heating (is_space_heating)
# ----------------
fuel_tech_p_by['is_space_heating'][fueltypes['solid_fuel']] = {
'boiler_solid_fuel': 1.0}
fuel_tech_p_by['is_space_heating'][fueltypes['gas']] = {
'district_heating_CHP_gas': 0.02,
'boiler_condensing_gas': 0.6,
'boiler_gas': 0.38}
fuel_tech_p_by['is_space_heating'][fueltypes['electricity']] = {
'district_heating_electricity' : 0,
'secondary_heater_electricity': 0.95,
'heat_pumps_electricity': 0.05,
'storage_heater_electricity': 0}
fuel_tech_p_by['is_space_heating'][fueltypes['oil']] = {
'boiler_condensing_oil': 0.6,
'boiler_oil': 0.4}
fuel_tech_p_by['is_space_heating'][fueltypes['biomass']] = {
'boiler_biomass': 1.0}
fuel_tech_p_by['is_space_heating'][fueltypes['hydrogen']] = {
'fuel_cell_hydrogen': 0,
'boiler_hydrogen': 1.0,
'heat_pumps_hydrogen': 0.0,
'district_heating_fuel_cell': 0.0}
# Helper: Transfer all defined shares for every enduse to every sector
fuel_tech_p_by = helpers.copy_fractions_all_sectors(
fuel_tech_p_by,
sectors=sectors['industry'],
affected_enduses=enduses['industry'])
# ----------------
# Industrial High temporal processes (is_high_temp_process)
# ----------------
# Todays share is about: 17% electric furnace, 82% basic oxygen (Key Statistics 2016, appea, EnergyQuest)
#-- basic_metals (sector)
fuel_tech_p_by['is_high_temp_process']['basic_metals'][fueltypes['solid_fuel']] = {
'basic_oxygen_furnace': 1.0}
fuel_tech_p_by['is_high_temp_process']['basic_metals'][fueltypes['electricity']] = {
'electric_arc_furnace': 1.0}
fuel_tech_p_by['is_high_temp_process']['basic_metals'][fueltypes['gas']] = {
'SNG_furnace': 1.0}
fuel_tech_p_by['is_high_temp_process']['basic_metals'][fueltypes['biomass']] = {
'biomass_furnace': 1.0}
fuel_tech_p_by['is_high_temp_process']['basic_metals'][fueltypes['hydrogen']] = {
'hydrogen_furnace': 1.0}
fuel_tech_p_by['is_high_temp_process']['basic_metals'][fueltypes['oil']] = {
'oil_furnace': 1.0}
#-- non_metallic_mineral_products
fuel_tech_p_by['is_high_temp_process']['non_metallic_mineral_products'][fueltypes['solid_fuel']] = {
'dry_kiln_coal': 0.9,
'wet_kiln_coal': 0.1}
fuel_tech_p_by['is_high_temp_process']['non_metallic_mineral_products'][fueltypes['oil']] = {
'dry_kiln_oil': 0.9,
'wet_kiln_oil': 0.1}
fuel_tech_p_by['is_high_temp_process']['non_metallic_mineral_products'][fueltypes['gas']] = {
'dry_kiln_gas': 0.9,
'wet_kiln_gas': 0.1}
fuel_tech_p_by['is_high_temp_process']['non_metallic_mineral_products'][fueltypes['electricity']] = {
'dry_kiln_electricity': 0.9,
'wet_kiln_electricity': 0.1}
fuel_tech_p_by['is_high_temp_process']['non_metallic_mineral_products'][fueltypes['biomass']] = {
'dry_kiln_biomass': 1.0}
fuel_tech_p_by['is_high_temp_process']['non_metallic_mineral_products'][fueltypes['hydrogen']] = {
'dry_kiln_hydrogen': 1.0}
return dict(fuel_tech_p_by)
| mit | -7,506,671,172,259,957,000 | 36.238356 | 129 | 0.576074 | false | 2.838171 | false | false | false |
drougge/wwwwellpapp | image.py | 1 | 1071 | #!/usr/bin/env python
from os.path import exists
from os import stat
from time import strftime, gmtime, time
from bottle import get, abort, response
from common import init
from wellpapp import RawWrapper, raw_exts
def fmttime(t):
return strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime(t))
def serve(fn, ext):
if not exists(fn):
abort(404)
if ext in raw_exts:
fh = RawWrapper(open(fn, "rb"), True)
fh.seek(0, 2)
z = fh.tell()
fh.seek(0)
ext = "jpeg"
else:
z = stat(fn).st_size
fh = open(fn, "rb")
response.content_type = "image/" + ext
response.set_header("Content-Length", str(z))
response.set_header("Expires", fmttime(time() + 60*60*24 * 10))
response.set_header("Date", fmttime(time()))
return fh
@get("/image/<z>/<m:re:[0-9a-z]{32}>")
def thumb(m, z):
client = init()
if z in ("normal", "large"):
return serve(client.pngthumb_path(m, z), "png")
else:
return serve(client.thumb_path(m, z), "jpeg")
@get("/image/<m:re:[0-9a-z]{32}>.<ext:re:[a-z]{3,4}>")
def r_image(m, ext):
client = init()
return serve(client.image_path(m), ext)
| mit | -7,243,784,597,464,014,000 | 23.906977 | 64 | 0.643324 | false | 2.496503 | false | false | false |
MewX/Psiphon3-for-Linux | pyclient/psi_api.py | 1 | 11970 | #!/usr/bin/python
#
# Copyright (c) 2012, Psiphon Inc.
# All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import sys
import httplib
import ssl
import binascii
import json
sys.path.insert(0, 'SocksiPy')
import socks
import socket
socket.socket = socks.socksocket
import urllib2
#
# Psiphon 3 Server API
#
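# A minimal usage sketch (not part of the original client): it shows the
# typical call sequence -- construct the API object from a hex-encoded server
# entry, check protocol support, perform the handshake, then read out the SSH
# connection parameters. The server entry contents, the IDs and the variable
# names below are illustrative assumptions only.
#
#   server = Psiphon3Server(
#       servers=[hex_encoded_server_entry],   # e.g. first line of a server list file
#       propagation_channel_id='FFFFFFFFFFFFFFFF',
#       sponsor_id='FFFFFFFFFFFFFFFF',
#       client_version='1',
#       client_platform='Python')
#   if not server.relay_not_supported('OSSH'):
#       handshake = server.handshake('OSSH')  # may extend server.servers (discovery)
#       host = server.get_ip_address()
#       port = server.get_obfuscated_ssh_port()
#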
class Psiphon3Server(object):
def __init__(self, servers, propagation_channel_id, sponsor_id, client_version, client_platform):
self.servers = servers
server_entry = binascii.unhexlify(servers[0]).split(" ")
(self.ip_address, self.web_server_port, self.web_server_secret,
self.web_server_certificate) = server_entry[:4]
        # Read the extended JSON config element of the server entry, if present.
self.extended_config = None
if len(server_entry) > 4:
try:
self.extended_config = json.loads(' '.join(server_entry[4:]))
except Exception:
pass
self.propagation_channel_id = propagation_channel_id
self.sponsor_id = sponsor_id
self.client_version = client_version
self.client_platform = client_platform
        self.handshake_response = None
        # The SSH session ID is only known after a successful handshake; the
        # locally generated client session ID is prepended to the SSH password
        # for authentication (see get_password_for_ssh_authentication).
        self.ssh_session_id = None
        self.client_session_id = os.urandom(16).encode('hex')
socks.setdefaultproxy()
handler = CertificateMatchingHTTPSHandler(self.web_server_certificate)
self.opener = urllib2.build_opener(handler)
def set_socks_proxy(self, proxy_port):
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, '127.0.0.1', proxy_port)
def _has_extended_config_key(self, key):
if not self.extended_config: return False
return key in self.extended_config
def _has_extended_config_value(self, key):
if not self._has_extended_config_key(key): return False
return ((type(self.extended_config[key]) == str and len(self.extended_config[key]) > 0) or
(type(self.extended_config[key]) == unicode and len(self.extended_config[key]) > 0) or
(type(self.extended_config[key]) == int and self.extended_config[key] != 0) or
(type(self.extended_config[key]) == list))
# This will return False if there is not enough information in the server entry to determine
# if the relay protocol is supported.
def relay_not_supported(self, relay_protocol):
if relay_protocol not in ['SSH', 'OSSH']: return True
if self._has_extended_config_value('capabilities'):
return relay_protocol not in self.extended_config['capabilities']
if relay_protocol == 'SSH':
if (self._has_extended_config_key('sshPort') and
not self._has_extended_config_value('sshPort')): return True
elif relay_protocol == 'OSSH':
if (self._has_extended_config_key('sshObfuscatedPort') and
not self._has_extended_config_value('sshObfuscatedPort')): return True
if (self._has_extended_config_key('sshObfuscatedKey') and
not self._has_extended_config_value('sshObfuscatedKey')): return True
else:
return True
return False
def can_attempt_relay_before_handshake(self, relay_protocol):
if relay_protocol not in ['SSH', 'OSSH']: return False
if not self._has_extended_config_value('sshUsername'): return False
if not self._has_extended_config_value('sshPassword'): return False
if not self._has_extended_config_value('sshHostKey'): return False
if relay_protocol == 'SSH':
if not self._has_extended_config_value('sshPort'): return False
elif relay_protocol == 'OSSH':
if not self._has_extended_config_value('sshObfuscatedPort'): return False
if not self._has_extended_config_value('sshObfuscatedKey'): return False
else:
return False
return True
# handshake
# Note that self.servers may be updated with newly discovered servers after a successful handshake
# TODO: upgrade the current server entry if not self.extended_config
# TODO: page view regexes
def handshake(self, relay_protocol):
request_url = (self._common_request_url(relay_protocol) % ('handshake',) + '&' +
'&'.join(['known_server=%s' % (binascii.unhexlify(server).split(" ")[0],) for server in self.servers]))
response = self.opener.open(request_url).read()
self.handshake_response = {'Upgrade': '',
'SSHPort': '',
'SSHUsername': '',
'SSHPassword': '',
'SSHHostKey': '',
'SSHSessionID': '',
'SSHObfuscatedPort': '',
'SSHObfuscatedKey': '',
'PSK': '',
'Homepage': []}
        for line in response.split('\n'):
            if ': ' not in line:
                continue  # skip blank or malformed lines in the response body
            key, value = line.split(': ', 1)
if key in self.handshake_response.keys():
if type(self.handshake_response[key]) == list:
self.handshake_response[key].append(value)
else:
self.handshake_response[key] = value
if key == 'Server':
# discovery
if value not in self.servers:
self.servers.insert(1, value)
if key == 'SSHSessionID':
self.ssh_session_id = value
return self.handshake_response
def get_ip_address(self):
return self.ip_address
def get_ssh_port(self):
if self.handshake_response:
return self.handshake_response['SSHPort']
if self._has_extended_config_value('sshPort'):
return self.extended_config['sshPort']
return None
def get_username(self):
if self.handshake_response:
return self.handshake_response['SSHUsername']
if self._has_extended_config_value('sshUsername'):
return self.extended_config['sshUsername']
return None
def get_password(self):
if self.handshake_response:
return self.handshake_response['SSHPassword']
if self._has_extended_config_value('sshPassword'):
return self.extended_config['sshPassword']
return None
def get_password_for_ssh_authentication(self):
return self.client_session_id + self.get_password()
def get_host_key(self):
if self.handshake_response:
return self.handshake_response['SSHHostKey']
if self._has_extended_config_value('sshHostKey'):
return self.extended_config['sshHostKey']
return None
def get_obfuscated_ssh_port(self):
if self.handshake_response:
return self.handshake_response['SSHObfuscatedPort']
if self._has_extended_config_value('sshObfuscatedPort'):
return self.extended_config['sshObfuscatedPort']
return None
def get_obfuscate_keyword(self):
if self.handshake_response:
return self.handshake_response['SSHObfuscatedKey']
if self._has_extended_config_value('sshObfuscatedKey'):
return self.extended_config['sshObfuscatedKey']
return None
# TODO: download
# connected
# For SSH and OSSH, SSHSessionID from the handshake response is used when session_id is None
# For VPN, the VPN IP Address should be used for session_id (ie. 10.0.0.2)
def connected(self, relay_protocol, session_id=None):
if not session_id and relay_protocol in ['SSH', 'OSSH']:
session_id = self.ssh_session_id
assert session_id is not None
request_url = (self._common_request_url(relay_protocol) % ('connected',) +
'&session_id=%s' % (session_id,))
self.opener.open(request_url)
# disconnected
# For SSH and OSSH, SSHSessionID from the handshake response is used when session_id is None
# For VPN, this should not be called
def disconnected(self, relay_protocol, session_id=None):
assert relay_protocol not in ['VPN']
if not session_id and relay_protocol in ['SSH', 'OSSH']:
session_id = self.ssh_session_id
assert session_id is not None
request_url = (self._common_request_url(relay_protocol) % ('status',) +
'&session_id=%s&connected=%s' % (session_id, '0'))
self.opener.open(request_url)
# TODO: failed
# TODO: status
def _common_request_url(self, relay_protocol):
assert relay_protocol in ['VPN','SSH','OSSH']
return 'https://%s:%s/%%s?server_secret=%s&propagation_channel_id=%s&sponsor_id=%s&client_version=%s&client_platform=%s&relay_protocol=%s&client_session_id=%s' % (
self.ip_address, self.web_server_port, self.web_server_secret,
self.propagation_channel_id, self.sponsor_id, self.client_version,
self.client_platform, relay_protocol, self.client_session_id)
#
# CertificateMatchingHTTPSHandler
#
# Adapted from CertValidatingHTTPSConnection and VerifiedHTTPSHandler
# http://stackoverflow.com/questions/1087227/validate-ssl-certificates-with-python
#
class InvalidCertificateException(httplib.HTTPException, urllib2.URLError):
def __init__(self, host, cert, reason):
httplib.HTTPException.__init__(self)
self.host = host
self.cert = cert
self.reason = reason
def __str__(self):
return ('Host %s returned an invalid certificate (%s) %s\n' %
(self.host, self.reason, self.cert))
class CertificateMatchingHTTPSConnection(httplib.HTTPConnection):
def __init__(self, host, expected_server_certificate, **kwargs):
httplib.HTTPConnection.__init__(self, host, **kwargs)
self.expected_server_certificate = expected_server_certificate
def connect(self):
sock = socket.create_connection((self.host, self.port))
self.sock = ssl.wrap_socket(sock)
cert = ssl.DER_cert_to_PEM_cert(self.sock.getpeercert(True))
# Remove newlines and -----BEGIN CERTIFICATE----- and -----END CERTIFICATE-----
cert = ''.join(cert.splitlines())[len('-----BEGIN CERTIFICATE-----'):-len('-----END CERTIFICATE-----')]
if cert != self.expected_server_certificate:
raise InvalidCertificateException(self.host, cert,
'server presented the wrong certificate')
class CertificateMatchingHTTPSHandler(urllib2.HTTPSHandler):
def __init__(self, expected_server_certificate):
urllib2.AbstractHTTPHandler.__init__(self)
self.expected_server_certificate = expected_server_certificate
def https_open(self, req):
def http_class_wrapper(host, **kwargs):
return CertificateMatchingHTTPSConnection(
host, self.expected_server_certificate, **kwargs)
try:
return self.do_open(http_class_wrapper, req)
except urllib2.URLError, e:
if type(e.reason) == ssl.SSLError and e.reason.args[0] == 1:
raise InvalidCertificateException(req.host, '',
e.reason.args[1])
raise
https_request = urllib2.HTTPSHandler.do_request_
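

# --- Illustrative usage sketch (not part of the original Psiphon client) ---
# The channel/sponsor IDs and version strings below are placeholders; a real
# client reads them from its embedded configuration, and the server entries
# are hex-encoded strings as expected by Psiphon3Server. The call order follows
# the comments above: handshake, then connected, then disconnected.
def _example_session(encoded_server_entries):
    server = Psiphon3Server(encoded_server_entries, '00', '00', '1', 'Python')
    if server.relay_not_supported('OSSH'):
        return None
    handshake = server.handshake('OSSH')   # may add newly discovered servers
    server.connected('OSSH')               # uses SSHSessionID from the handshake
    server.disconnected('OSSH')
    return handshake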
| gpl-2.0 | 6,429,732,307,235,011,000 | 40.5625 | 171 | 0.620718 | false | 4.103531 | true | false | false |
bitkeeper/python-opcua | opcua/server/address_space.py | 1 | 27000 | from threading import RLock
import logging
from datetime import datetime
import collections
import shelve
try:
import cPickle as pickle
except ImportError:
import pickle
from opcua import ua
from opcua.server.users import User
class AttributeValue(object):
def __init__(self, value):
self.value = value
self.value_callback = None
self.datachange_callbacks = {}
def __str__(self):
return "AttributeValue({0})".format(self.value)
__repr__ = __str__
class NodeData(object):
def __init__(self, nodeid):
self.nodeid = nodeid
self.attributes = {}
self.references = []
self.call = None
def __str__(self):
return "NodeData(id:{0}, attrs:{1}, refs:{2})".format(self.nodeid, self.attributes, self.references)
__repr__ = __str__
class AttributeService(object):
def __init__(self, aspace):
self.logger = logging.getLogger(__name__)
self._aspace = aspace
def read(self, params):
self.logger.debug("read %s", params)
res = []
for readvalue in params.NodesToRead:
res.append(self._aspace.get_attribute_value(readvalue.NodeId, readvalue.AttributeId))
return res
def write(self, params, user=User.Admin):
self.logger.debug("write %s as user %s", params, user)
res = []
for writevalue in params.NodesToWrite:
if user != User.Admin:
if writevalue.AttributeId != ua.AttributeIds.Value:
res.append(ua.StatusCode(ua.StatusCodes.BadUserAccessDenied))
continue
al = self._aspace.get_attribute_value(writevalue.NodeId, ua.AttributeIds.AccessLevel)
ual = self._aspace.get_attribute_value(writevalue.NodeId, ua.AttributeIds.UserAccessLevel)
if not ua.ua_binary.test_bit(al.Value.Value, ua.AccessLevel.CurrentWrite) or not ua.ua_binary.test_bit(ual.Value.Value, ua.AccessLevel.CurrentWrite):
res.append(ua.StatusCode(ua.StatusCodes.BadUserAccessDenied))
continue
res.append(self._aspace.set_attribute_value(writevalue.NodeId, writevalue.AttributeId, writevalue.Value))
return res
class ViewService(object):
def __init__(self, aspace):
self.logger = logging.getLogger(__name__)
self._aspace = aspace
def browse(self, params):
self.logger.debug("browse %s", params)
res = []
for desc in params.NodesToBrowse:
res.append(self._browse(desc))
return res
def _browse(self, desc):
res = ua.BrowseResult()
if desc.NodeId not in self._aspace:
res.StatusCode = ua.StatusCode(ua.StatusCodes.BadNodeIdInvalid)
return res
node = self._aspace[desc.NodeId]
for ref in node.references:
if not self._is_suitable_ref(desc, ref):
continue
res.References.append(ref)
return res
def _is_suitable_ref(self, desc, ref):
if not self._suitable_direction(desc.BrowseDirection, ref.IsForward):
self.logger.debug("%s is not suitable due to direction", ref)
return False
if not self._suitable_reftype(desc.ReferenceTypeId, ref.ReferenceTypeId, desc.IncludeSubtypes):
self.logger.debug("%s is not suitable due to type", ref)
return False
if desc.NodeClassMask and ((desc.NodeClassMask & ref.NodeClass) == 0):
self.logger.debug("%s is not suitable due to class", ref)
return False
self.logger.debug("%s is a suitable ref for desc %s", ref, desc)
return True
def _suitable_reftype(self, ref1, ref2, subtypes):
"""
"""
if not subtypes and ref2.Identifier == ua.ObjectIds.HasSubtype:
return False
if ref1.Identifier == ref2.Identifier:
return True
oktypes = self._get_sub_ref(ref1)
if not subtypes and ua.NodeId(ua.ObjectIds.HasSubtype) in oktypes:
oktypes.remove(ua.NodeId(ua.ObjectIds.HasSubtype))
return ref2 in oktypes
def _get_sub_ref(self, ref):
res = []
nodedata = self._aspace[ref]
if nodedata is not None:
for ref in nodedata.references:
if ref.ReferenceTypeId.Identifier == ua.ObjectIds.HasSubtype and ref.IsForward:
res.append(ref.NodeId)
res += self._get_sub_ref(ref.NodeId)
return res
def _suitable_direction(self, desc, isforward):
if desc == ua.BrowseDirection.Both:
return True
if desc == ua.BrowseDirection.Forward and isforward:
return True
if desc == ua.BrowseDirection.Inverse and not isforward:
return True
return False
def translate_browsepaths_to_nodeids(self, browsepaths):
self.logger.debug("translate browsepath: %s", browsepaths)
results = []
for path in browsepaths:
results.append(self._translate_browsepath_to_nodeid(path))
return results
def _translate_browsepath_to_nodeid(self, path):
self.logger.debug("looking at path: %s", path)
res = ua.BrowsePathResult()
if path.StartingNode not in self._aspace:
res.StatusCode = ua.StatusCode(ua.StatusCodes.BadNodeIdInvalid)
return res
current = path.StartingNode
for el in path.RelativePath.Elements:
nodeid = self._find_element_in_node(el, current)
if not nodeid:
res.StatusCode = ua.StatusCode(ua.StatusCodes.BadNoMatch)
return res
current = nodeid
target = ua.BrowsePathTarget()
target.TargetId = current
target.RemainingPathIndex = 4294967295
res.Targets = [target]
return res
def _find_element_in_node(self, el, nodeid):
nodedata = self._aspace[nodeid]
for ref in nodedata.references:
# FIXME: here we should check other arguments!!
if ref.BrowseName == el.TargetName:
return ref.NodeId
self.logger.info("element %s was not found in node %s", el, nodeid)
return None
class NodeManagementService(object):
def __init__(self, aspace):
self.logger = logging.getLogger(__name__)
self._aspace = aspace
def add_nodes(self, addnodeitems, user=User.Admin):
results = []
for item in addnodeitems:
results.append(self._add_node(item, user))
return results
def _add_node(self, item, user):
result = ua.AddNodesResult()
# If Identifier of requested NodeId is null we generate a new NodeId using
# the namespace of the nodeid, this is an extention of the spec to allow
# to requests the server to generate a new nodeid in a specified namespace
if item.RequestedNewNodeId.has_null_identifier():
self.logger.debug("RequestedNewNodeId has null identifier, generating Identifier")
nodedata = NodeData(self._aspace.generate_nodeid(item.RequestedNewNodeId.NamespaceIndex))
else:
nodedata = NodeData(item.RequestedNewNodeId)
if nodedata.nodeid in self._aspace:
self.logger.warning("AddNodesItem: Requested NodeId %s already exists", nodedata.nodeid)
result.StatusCode = ua.StatusCode(ua.StatusCodes.BadNodeIdExists)
return result
if item.ParentNodeId.is_null():
# self.logger.warning("add_node: creating node %s without parent", nodedata.nodeid)
# should return Error here, but the standard namespace define many nodes without parents...
pass
elif item.ParentNodeId not in self._aspace:
self.logger.warning("add_node: while adding node %s, requested parent node %s does not exists", nodedata.nodeid, item.ParentNodeId)
result.StatusCode = ua.StatusCode(ua.StatusCodes.BadParentNodeIdInvalid)
return result
if not user == User.Admin:
result.StatusCode = ua.StatusCode(ua.StatusCodes.BadUserAccessDenied)
return result
self._add_node_attributes(nodedata, item)
# now add our node to db
self._aspace[nodedata.nodeid] = nodedata
if not item.ParentNodeId.is_null():
self._add_ref_from_parent(nodedata, item)
self._add_ref_to_parent(nodedata, item, user)
# add type definition
if item.TypeDefinition != ua.NodeId():
self._add_type_definition(nodedata, item, user)
result.StatusCode = ua.StatusCode()
result.AddedNodeId = nodedata.nodeid
return result
def _add_node_attributes(self, nodedata, item):
# add common attrs
nodedata.attributes[ua.AttributeIds.NodeId] = AttributeValue(
ua.DataValue(ua.Variant(nodedata.nodeid, ua.VariantType.NodeId))
)
nodedata.attributes[ua.AttributeIds.BrowseName] = AttributeValue(
ua.DataValue(ua.Variant(item.BrowseName, ua.VariantType.QualifiedName))
)
nodedata.attributes[ua.AttributeIds.NodeClass] = AttributeValue(
ua.DataValue(ua.Variant(item.NodeClass, ua.VariantType.Int32))
)
# add requested attrs
self._add_nodeattributes(item.NodeAttributes, nodedata)
def _add_ref_from_parent(self, nodedata, item):
desc = ua.ReferenceDescription()
desc.ReferenceTypeId = item.ReferenceTypeId
desc.NodeId = nodedata.nodeid
desc.NodeClass = item.NodeClass
desc.BrowseName = item.BrowseName
desc.DisplayName = item.NodeAttributes.DisplayName
desc.TypeDefinition = item.TypeDefinition
desc.IsForward = True
self._aspace[item.ParentNodeId].references.append(desc)
def _add_ref_to_parent(self, nodedata, item, user):
addref = ua.AddReferencesItem()
addref.ReferenceTypeId = item.ReferenceTypeId
addref.SourceNodeId = nodedata.nodeid
addref.TargetNodeId = item.ParentNodeId
addref.TargetNodeClass = self._aspace[item.ParentNodeId].attributes[ua.AttributeIds.NodeClass].value.Value.Value
addref.IsForward = False
self._add_reference(addref, user)
def _add_type_definition(self, nodedata, item, user):
addref = ua.AddReferencesItem()
addref.SourceNodeId = nodedata.nodeid
addref.IsForward = True
addref.ReferenceTypeId = ua.NodeId(ua.ObjectIds.HasTypeDefinition)
addref.TargetNodeId = item.TypeDefinition
addref.TargetNodeClass = ua.NodeClass.DataType
self._add_reference(addref, user)
def delete_nodes(self, deletenodeitems, user=User.Admin):
results = []
for item in deletenodeitems.NodesToDelete:
results.append(self._delete_node(item, user))
return results
def _delete_node(self, item, user):
if user != User.Admin:
return ua.StatusCode(ua.StatusCodes.BadUserAccessDenied)
if item.NodeId not in self._aspace:
self.logger.warning("DeleteNodesItem: NodeId %s does not exists", item.NodeId)
return ua.StatusCode(ua.StatusCodes.BadNodeIdUnknown)
if item.DeleteTargetReferences:
for elem in self._aspace.keys():
                for rdesc in list(self._aspace[elem].references):  # copy: the list is mutated below
if rdesc.NodeId == item.NodeId:
self._aspace[elem].references.remove(rdesc)
self._delete_node_callbacks(self._aspace[item.NodeId])
        del self._aspace[item.NodeId]
return ua.StatusCode()
def _delete_node_callbacks(self, nodedata):
if ua.AttributeIds.Value in nodedata.attributes:
for handle, callback in nodedata.attributes[ua.AttributeIds.Value].datachange_callbacks.items():
try:
callback(handle, None, ua.StatusCode(ua.StatusCodes.BadNodeIdUnknown))
self._aspace.delete_datachange_callback(handle)
except Exception as ex:
self.logger.exception("Error calling delete node callback callback %s, %s, %s", nodedata, ua.AttributeIds.Value, ex)
def add_references(self, refs, user=User.Admin):
result = []
for ref in refs:
result.append(self._add_reference(ref, user))
return result
def _add_reference(self, addref, user):
if addref.SourceNodeId not in self._aspace:
return ua.StatusCode(ua.StatusCodes.BadSourceNodeIdInvalid)
if addref.TargetNodeId not in self._aspace:
return ua.StatusCode(ua.StatusCodes.BadTargetNodeIdInvalid)
if user != User.Admin:
return ua.StatusCode(ua.StatusCodes.BadUserAccessDenied)
rdesc = ua.ReferenceDescription()
rdesc.ReferenceTypeId = addref.ReferenceTypeId
rdesc.IsForward = addref.IsForward
rdesc.NodeId = addref.TargetNodeId
rdesc.NodeClass = addref.TargetNodeClass
bname = self._aspace.get_attribute_value(addref.TargetNodeId, ua.AttributeIds.BrowseName).Value.Value
if bname:
rdesc.BrowseName = bname
dname = self._aspace.get_attribute_value(addref.TargetNodeId, ua.AttributeIds.DisplayName).Value.Value
if dname:
rdesc.DisplayName = dname
self._aspace[addref.SourceNodeId].references.append(rdesc)
return ua.StatusCode()
def delete_references(self, refs, user=User.Admin):
result = []
for ref in refs:
result.append(self._delete_reference(ref, user))
return result
def _delete_reference(self, item, user):
if item.SourceNodeId not in self._aspace:
return ua.StatusCode(ua.StatusCodes.BadSourceNodeIdInvalid)
if item.TargetNodeId not in self._aspace:
return ua.StatusCode(ua.StatusCodes.BadTargetNodeIdInvalid)
if user != User.Admin:
return ua.StatusCode(ua.StatusCodes.BadUserAccessDenied)
        # Work on copies of the reference lists since entries may be removed while
        # iterating; NodeIds are compared by value.
        for rdesc in list(self._aspace[item.SourceNodeId].references):
            if rdesc.NodeId == item.TargetNodeId:
                if rdesc.ReferenceTypeId != item.ReferenceTypeId:
                    return ua.StatusCode(ua.StatusCodes.BadReferenceTypeIdInvalid)
                if rdesc.IsForward == item.IsForward or item.DeleteBidirectional:
                    self._aspace[item.SourceNodeId].references.remove(rdesc)
        for rdesc in list(self._aspace[item.TargetNodeId].references):
            if rdesc.NodeId == item.SourceNodeId:
                if rdesc.ReferenceTypeId != item.ReferenceTypeId:
                    return ua.StatusCode(ua.StatusCodes.BadReferenceTypeIdInvalid)
                if rdesc.IsForward == item.IsForward or item.DeleteBidirectional:
                    self._aspace[item.TargetNodeId].references.remove(rdesc)
return ua.StatusCode()
def _add_node_attr(self, item, nodedata, name, vtype=None):
if item.SpecifiedAttributes & getattr(ua.NodeAttributesMask, name):
dv = ua.DataValue(ua.Variant(getattr(item, name), vtype))
dv.ServerTimestamp = datetime.utcnow()
dv.SourceTimestamp = datetime.utcnow()
nodedata.attributes[getattr(ua.AttributeIds, name)] = AttributeValue(dv)
def _add_nodeattributes(self, item, nodedata):
self._add_node_attr(item, nodedata, "AccessLevel", ua.VariantType.Byte)
self._add_node_attr(item, nodedata, "ArrayDimensions", ua.VariantType.UInt32)
self._add_node_attr(item, nodedata, "BrowseName", ua.VariantType.QualifiedName)
self._add_node_attr(item, nodedata, "ContainsNoLoops", ua.VariantType.Boolean)
self._add_node_attr(item, nodedata, "DataType", ua.VariantType.NodeId)
self._add_node_attr(item, nodedata, "Description", ua.VariantType.LocalizedText)
self._add_node_attr(item, nodedata, "DisplayName", ua.VariantType.LocalizedText)
self._add_node_attr(item, nodedata, "EventNotifier", ua.VariantType.Byte)
self._add_node_attr(item, nodedata, "Executable", ua.VariantType.Boolean)
self._add_node_attr(item, nodedata, "Historizing", ua.VariantType.Boolean)
self._add_node_attr(item, nodedata, "InverseName", ua.VariantType.LocalizedText)
self._add_node_attr(item, nodedata, "IsAbstract", ua.VariantType.Boolean)
self._add_node_attr(item, nodedata, "MinimumSamplingInterval", ua.VariantType.Double)
self._add_node_attr(item, nodedata, "NodeClass", ua.VariantType.UInt32)
self._add_node_attr(item, nodedata, "NodeId", ua.VariantType.NodeId)
self._add_node_attr(item, nodedata, "Symmetric", ua.VariantType.Boolean)
self._add_node_attr(item, nodedata, "UserAccessLevel", ua.VariantType.Byte)
self._add_node_attr(item, nodedata, "UserExecutable", ua.VariantType.Boolean)
self._add_node_attr(item, nodedata, "UserWriteMask", ua.VariantType.Byte)
self._add_node_attr(item, nodedata, "ValueRank", ua.VariantType.Int32)
self._add_node_attr(item, nodedata, "WriteMask", ua.VariantType.UInt32)
self._add_node_attr(item, nodedata, "UserWriteMask", ua.VariantType.UInt32)
self._add_node_attr(item, nodedata, "Value")
class MethodService(object):
def __init__(self, aspace):
self.logger = logging.getLogger(__name__)
self._aspace = aspace
def call(self, methods):
results = []
for method in methods:
results.append(self._call(method))
return results
def _call(self, method):
res = ua.CallMethodResult()
if method.ObjectId not in self._aspace or method.MethodId not in self._aspace:
res.StatusCode = ua.StatusCode(ua.StatusCodes.BadNodeIdInvalid)
else:
node = self._aspace[method.MethodId]
if node.call is None:
res.StatusCode = ua.StatusCode(ua.StatusCodes.BadNothingToDo)
else:
try:
res.OutputArguments = node.call(method.ObjectId, *method.InputArguments)
for _ in method.InputArguments:
res.InputArgumentResults.append(ua.StatusCode())
except Exception:
self.logger.exception("Error executing method call %s, an exception was raised: ", method)
res.StatusCode = ua.StatusCode(ua.StatusCodes.BadUnexpectedError)
return res
class AddressSpace(object):
"""
The address space object stores all the nodes of the OPC-UA server
and helper methods.
The methods are thread safe
"""
def __init__(self):
self.logger = logging.getLogger(__name__)
self._nodes = {}
self._lock = RLock() # FIXME: should use multiple reader, one writter pattern
self._datachange_callback_counter = 200
self._handle_to_attribute_map = {}
self._default_idx = 2
self._nodeid_counter = {0: 20000, 1: 2000}
def __getitem__(self, nodeid):
with self._lock:
if nodeid in self._nodes:
return self._nodes.__getitem__(nodeid)
def __setitem__(self, nodeid, value):
with self._lock:
return self._nodes.__setitem__(nodeid, value)
def __contains__(self, nodeid):
with self._lock:
return self._nodes.__contains__(nodeid)
def __delitem__(self, nodeid):
with self._lock:
self._nodes.__delitem__(nodeid)
def generate_nodeid(self, idx=None):
if idx is None:
idx = self._default_idx
if idx in self._nodeid_counter:
self._nodeid_counter[idx] += 1
else:
self._nodeid_counter[idx] = 1
nodeid = ua.NodeId(self._nodeid_counter[idx], idx)
with self._lock: # OK since reentrant lock
while True:
if nodeid in self._nodes:
nodeid = self.generate_nodeid(idx)
else:
return nodeid
def keys(self):
with self._lock:
return self._nodes.keys()
def empty(self):
"""
Delete all nodes in address space
"""
with self._lock:
self._nodes = {}
def dump(self, path):
"""
Dump address space as binary to file; note that server must be stopped for this method to work
DO NOT DUMP AN ADDRESS SPACE WHICH IS USING A SHELF (load_aspace_shelf), ONLY CACHED NODES WILL GET DUMPED!
"""
# prepare nodes in address space for being serialized
for nodeid, ndata in self._nodes.items():
# if the node has a reference to a method call, remove it so the object can be serialized
if ndata.call is not None:
self._nodes[nodeid].call = None
with open(path, 'wb') as f:
pickle.dump(self._nodes, f, pickle.HIGHEST_PROTOCOL)
def load(self, path):
"""
Load address space from a binary file, overwriting everything in the current address space
"""
with open(path, 'rb') as f:
self._nodes = pickle.load(f)
def make_aspace_shelf(self, path):
"""
Make a shelf for containing the nodes from the standard address space; this is typically only done on first
start of the server. Subsequent server starts will load the shelf, nodes are then moved to a cache
by the LazyLoadingDict class when they are accessed. Saving data back to the shelf
is currently NOT supported, it is only used for the default OPC UA standard address space
Note: Intended for slow devices, such as Raspberry Pi, to greatly improve start up time
"""
s = shelve.open(path, "n", protocol=pickle.HIGHEST_PROTOCOL)
for nodeid, ndata in self._nodes.items():
s[nodeid.to_string()] = ndata
s.close()
def load_aspace_shelf(self, path):
"""
Load the standard address space nodes from a python shelve via LazyLoadingDict as needed.
The dump() method can no longer be used if the address space is being loaded from a shelf
Note: Intended for slow devices, such as Raspberry Pi, to greatly improve start up time
"""
class LazyLoadingDict(collections.MutableMapping):
"""
Special dict that only loads nodes as they are accessed. If a node is accessed it gets copied from the
shelve to the cache dict. All user nodes are saved in the cache ONLY. Saving data back to the shelf
is currently NOT supported
"""
def __init__(self, source):
self.source = source # python shelf
self.cache = {} # internal dict
def __getitem__(self, key):
# try to get the item (node) from the cache, if it isn't there get it from the shelf
try:
return self.cache[key]
except KeyError:
node = self.cache[key] = self.source[key.to_string()]
return node
def __setitem__(self, key, value):
# add a new item to the cache; if this item is in the shelf it is not updated
self.cache[key] = value
def __contains__(self, key):
return key in self.cache or key.to_string() in self.source
def __delitem__(self, key):
# only deleting items from the cache is allowed
del self.cache[key]
def __iter__(self):
# only the cache can be iterated over
return iter(self.cache.keys())
def __len__(self):
# only returns the length of items in the cache, not unaccessed items in the shelf
return len(self.cache)
self._nodes = LazyLoadingDict(shelve.open(path, "r"))
def get_attribute_value(self, nodeid, attr):
with self._lock:
self.logger.debug("get attr val: %s %s", nodeid, attr)
if nodeid not in self._nodes:
dv = ua.DataValue()
dv.StatusCode = ua.StatusCode(ua.StatusCodes.BadNodeIdUnknown)
return dv
node = self._nodes[nodeid]
if attr not in node.attributes:
dv = ua.DataValue()
dv.StatusCode = ua.StatusCode(ua.StatusCodes.BadAttributeIdInvalid)
return dv
attval = node.attributes[attr]
if attval.value_callback:
return attval.value_callback()
return attval.value
def set_attribute_value(self, nodeid, attr, value):
with self._lock:
self.logger.debug("set attr val: %s %s %s", nodeid, attr, value)
if nodeid not in self._nodes:
return ua.StatusCode(ua.StatusCodes.BadNodeIdUnknown)
node = self._nodes[nodeid]
if attr not in node.attributes:
return ua.StatusCode(ua.StatusCodes.BadAttributeIdInvalid)
if not value.SourceTimestamp:
value.SourceTimestamp = datetime.utcnow()
if not value.ServerTimestamp:
value.ServerTimestamp = datetime.utcnow()
attval = node.attributes[attr]
old = attval.value
attval.value = value
cbs = []
            if old.Value != value.Value:  # only invoke datachange callbacks when the value has actually changed
cbs = list(attval.datachange_callbacks.items())
for k, v in cbs:
try:
v(k, value)
except Exception as ex:
self.logger.exception("Error calling datachange callback %s, %s, %s", k, v, ex)
return ua.StatusCode()
def add_datachange_callback(self, nodeid, attr, callback):
with self._lock:
self.logger.debug("set attr callback: %s %s %s", nodeid, attr, callback)
if nodeid not in self._nodes:
return ua.StatusCode(ua.StatusCodes.BadNodeIdUnknown), 0
node = self._nodes[nodeid]
if attr not in node.attributes:
return ua.StatusCode(ua.StatusCodes.BadAttributeIdInvalid), 0
attval = node.attributes[attr]
self._datachange_callback_counter += 1
handle = self._datachange_callback_counter
attval.datachange_callbacks[handle] = callback
self._handle_to_attribute_map[handle] = (nodeid, attr)
return ua.StatusCode(), handle
def delete_datachange_callback(self, handle):
with self._lock:
if handle in self._handle_to_attribute_map:
nodeid, attr = self._handle_to_attribute_map.pop(handle)
self._nodes[nodeid].attributes[attr].datachange_callbacks.pop(handle)
def add_method_callback(self, methodid, callback):
with self._lock:
node = self._nodes[methodid]
node.call = callback
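

# --- Illustrative wiring sketch (not part of python-opcua itself) ---
# Shows how the services defined above are typically built around one shared
# AddressSpace instance; a real server would also populate the space from the
# standard nodeset before exposing these services to clients.
def _example_build_services():
    aspace = AddressSpace()
    services = {
        "attributes": AttributeService(aspace),
        "views": ViewService(aspace),
        "nodes": NodeManagementService(aspace),
        "methods": MethodService(aspace),
    }
    return aspace, services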
| lgpl-3.0 | -2,830,200,730,932,796,400 | 40.925466 | 165 | 0.619704 | false | 4.096495 | false | false | false |
guh/guh-cli | nymea/timedescriptor.py | 1 | 11990 | # -*- coding: UTF-8 -*-
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# #
# Copyright (C) 2016 - 2018 Simon Stuerz <[email protected]> #
# #
# This file is part of nymea-cli. #
# #
# nymea-cli is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, version 2 of the License. #
# #
# nymea-cli is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with nymea-cli. If not, see <http://www.gnu.org/licenses/>. #
# #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import nymea
import selector
import time
import datetime
def createTimeDescriptor():
print "\n========================================================"
print "Create time descriptor\n"
timeDescriptor = {}
enough = False
options = ["Create calendar items", "Create time event items"]
selection = nymea.get_selection("Which kind of time items do you want to create?", options)
if options[selection] == "Create calendar items":
calendarItems = []
while not enough:
calendarItems.append(createCalendarItem())
input = raw_input("Do you want to add another calendar item? (y/N): ")
if not input == "y":
enough = True
timeDescriptor['calendarItems'] = calendarItems
return timeDescriptor
else:
timeEventItems = []
while not enough:
timeEventItems.append(createTimeEventItem())
input = raw_input("Do you want to add another time event item? (y/N): ")
if not input == "y":
enough = True
timeDescriptor['timeEventItems'] = timeEventItems
return timeDescriptor
def createTimeEventItem():
print "\n========================================================"
print "Create time event item\n"
timeEventItem = {}
if selector.getYesNoSelection("Do you want a time event for a certain date and time?"):
timeString = raw_input("Please enter unix time for this time event (\"DD.MM.YYYY hh:mm\"): ")
timeEventItem['datetime'] = int(time.mktime(time.strptime(timeString, "%d.%m.%Y %H:%M")))
if selector.getYesNoSelection("Do you want to define a repeating option for this time event item?"):
timeEventItem['repeating'] = createRepeatingOption(True)
else:
timeEventItem['time'] = raw_input("Please enter the time for this time event (\"hh:mm\"): ")
if selector.getYesNoSelection("Do you want to define a repeating option for this time event item?"):
timeEventItem['repeating'] = createRepeatingOption()
return timeEventItem
def createCalendarItem():
print "\n========================================================"
print "Create calendar item\n"
calendarItem = {}
if selector.getYesNoSelection("Do you want a calendar entry for a certain date and time?"):
timeString = raw_input("Please enter unix time for this calendar item (\"DD.MM.YYYY hh:mm\"): ")
calendarItem['datetime'] = int(time.mktime(time.strptime(timeString, "%d.%m.%Y %H:%M")))
if selector.getYesNoSelection("Do you want to define a repeating option for this calendar item?"):
calendarItem['repeating'] = createRepeatingOption(True)
else:
calendarItem['startTime'] = raw_input("Please enter the start time of this calendar item (\"hh:mm\"): ")
if selector.getYesNoSelection("Do you want to define a repeating option for this calendar item?"):
calendarItem['repeating'] = createRepeatingOption()
print "\n========================================================"
calendarItem['duration'] = int(raw_input("duration of the calendar item (\"minutes\") = "))
print calendarItem
return calendarItem
def createRepeatingOption(forDateTime = False):
print "\n========================================================"
print "Create repeating option\n"
repeatingOption = {}
if forDateTime:
options = ["Repeat yearly"]
selection = nymea.get_selection("Please select the repeating mode:", options)
repeatingOption['mode'] = "RepeatingModeYearly"
print repeatingOption
return repeatingOption
options = ["0. Repeat hourly",
"1. Repeat daily",
"2. Repeat weekly",
"3. Repeat monthly",
"4. Repeat yearly"]
selection = nymea.get_selection("Please select the repeating mode:", options)
    if selection == 0:
        repeatingOption['mode'] = "RepeatingModeHourly"
    if selection == 1:
        repeatingOption['mode'] = "RepeatingModeDaily"
    if selection == 2:
        repeatingOption['mode'] = "RepeatingModeWeekly"
        weekDaysString = raw_input("Please enter the list of week days (space separated [1-7]): ")
        repeatingOption['weekDays'] = [int(weekDay) for weekDay in weekDaysString.split()]
    if selection == 3:
        repeatingOption['mode'] = "RepeatingModeMonthly"
        monthDaysString = raw_input("Please enter the list of month days (space separated [1-31]): ")
        repeatingOption['monthDays'] = [int(monthDay) for monthDay in monthDaysString.split()]
    if selection == 4:
        repeatingOption['mode'] = "RepeatingModeYearly"
print repeatingOption
return repeatingOption
def printTimeDescriptor(timeDescriptor):
if 'calendarItems' in timeDescriptor:
printCalendarItems(timeDescriptor['calendarItems'])
if 'timeEventItems' in timeDescriptor:
printTimeEventItems(timeDescriptor['timeEventItems'])
def printCalendarItems(calendarItems):
for i in range(len(calendarItems)):
calendarItem = calendarItems[i]
#############################################
if 'datetime' in calendarItem and calendarItem['datetime'] != 0:
timeStamp = int(calendarItem['datetime'])
if 'repeating' in calendarItem:
startTime = datetime.datetime.fromtimestamp(timeStamp).strftime("%d.%m %H:%M")
endTime = datetime.datetime.fromtimestamp(timeStamp + int(calendarItem['duration'])*60).strftime("%d.%m %H:%M")
print "%5s. -> Every year from %s \n %37s" % (i, startTime, endTime)
else:
startTime = datetime.datetime.fromtimestamp(timeStamp).strftime("%d.%m.%Y %H:%M")
endTime = datetime.datetime.fromtimestamp(timeStamp + int(calendarItem['duration'])*60).strftime("%d.%m.%Y %H:%M")
print "%5s. -> From %s \n %30s" % (i, startTime, endTime)
#############################################
elif 'startTime' in calendarItem:
if 'repeating' in calendarItem:
repeatingOption = calendarItem['repeating']
# Hourly
if repeatingOption['mode'] == "RepeatingModeHourly":
print "%5s. -> Every hour at %s for %s minutes." % (i, calendarItem['startTime'], calendarItem['duration'])
# Daily
if repeatingOption['mode'] == "RepeatingModeDaily":
print "%5s. -> Every day at %s for %s minutes." % (i, calendarItem['startTime'], calendarItem['duration'])
# Weekly
if repeatingOption['mode'] == "RepeatingModeWeekly":
print "%5s. -> Every week at %s for %s minutes on following week days:" % (i, calendarItem['startTime'], calendarItem['duration'])
printWeekDays(repeatingOption)
# Monthly
if repeatingOption['mode'] == "RepeatingModeMonthly":
print "%5s. -> Every month at %s for %s minutes on following month days:" % (i, calendarItem['startTime'], calendarItem['duration'])
print "%22s" % repeatingOption['monthDays']
else:
print "%5s. -> Every day at %s for %s minutes." % (i, calendarItem['startTime'], calendarItem['duration'])
else:
            print calendarItem
def printTimeEventItems(timeEventItems):
for i in range(len(timeEventItems)):
timeEventItem = timeEventItems[i]
#############################################
if 'datetime' in timeEventItem and timeEventItem['datetime'] != 0:
timeStamp = int(timeEventItem['datetime'])
if 'repeating' in timeEventItem:
eventTime = datetime.datetime.fromtimestamp(timeStamp).strftime("%d.%m %H:%M")
print "%5s. -> Every year at %s" % (i, eventTime)
else:
eventTime = datetime.datetime.fromtimestamp(timeStamp).strftime("%d.%m.%Y %H:%M")
print "%5s. -> Trigger at %s" % (i, eventTime)
#############################################
elif 'time' in timeEventItem:
if 'repeating' in timeEventItem:
repeatingOption = timeEventItem['repeating']
# Hourly
if repeatingOption['mode'] == "RepeatingModeHourly":
print "%5s. -> Every hour at %s." % (i, timeEventItem['time'])
# Daily
if repeatingOption['mode'] == "RepeatingModeDaily":
print "%5s. -> Every day at %s." % (i, timeEventItem['time'])
# Weekly
if repeatingOption['mode'] == "RepeatingModeWeekly":
print "%5s. -> Every week at %s on following week days:" % (i, timeEventItem['time'])
printWeekDays(repeatingOption)
# Monthly
if repeatingOption['mode'] == "RepeatingModeMonthly":
print "%5s. -> Every month at %s on following month days:" % (i, timeEventItem['time'])
print "%22s" % repeatingOption['monthDays']
else:
print "%5s. -> Every day at %s." % (i, timeEventItem['time'])
else:
print timeEventItem
def printWeekDays(repeatingOption):
weekString = ""
if 1 in repeatingOption['weekDays']:
weekString += "Mo[#] "
else:
weekString += "Mo[ ] "
if 2 in repeatingOption['weekDays']:
weekString += "Tu[#] "
else:
weekString += "Tu[ ] "
if 3 in repeatingOption['weekDays']:
weekString += "We[#] "
else:
weekString += "We[ ] "
if 4 in repeatingOption['weekDays']:
weekString += "Th[#] "
else:
weekString += "Th[ ] "
if 5 in repeatingOption['weekDays']:
weekString += "Fr[#] "
else:
weekString += "Fr[ ] "
if 6 in repeatingOption['weekDays']:
weekString += "Sa[#] "
else:
weekString += "Sa[ ] "
if 7 in repeatingOption['weekDays']:
weekString += "Su[#]"
else:
weekString += "Su[ ]"
print " %s" % (weekString)
| gpl-2.0 | -7,164,972,957,974,699,000 | 43.738806 | 153 | 0.525855 | false | 4.375912 | false | false | false |
ghtmtt/QGIS | python/plugins/db_manager/db_plugins/info_model.py | 33 | 18079 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS
Date : May 23, 2011
copyright : (C) 2011 by Giuseppe Sucameli
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from builtins import object
from qgis.PyQt.QtWidgets import QApplication
from .html_elems import HtmlContent, HtmlSection, HtmlParagraph, HtmlList, HtmlTable, HtmlTableHeader, HtmlTableCol
class DatabaseInfo(object):
def __init__(self, db):
self.db = db
def __del__(self):
self.db = None
def generalInfo(self):
info = self.db.connector.getInfo()
tbl = [
(QApplication.translate("DBManagerPlugin", "Server version: "), info[0])
]
return HtmlTable(tbl)
def connectionDetails(self):
tbl = [
(QApplication.translate("DBManagerPlugin", "Host:"), self.db.connector.host),
(QApplication.translate("DBManagerPlugin", "User:"), self.db.connector.user)
]
return HtmlTable(tbl)
def spatialInfo(self):
ret = []
info = self.db.connector.getSpatialInfo()
if info is None:
return
tbl = [
(QApplication.translate("DBManagerPlugin", "Library:"), info[0]),
("GEOS:", info[1]),
("Proj:", info[2])
]
ret.append(HtmlTable(tbl))
if not self.db.connector.has_geometry_columns:
ret.append(HtmlParagraph(
QApplication.translate("DBManagerPlugin", "<warning> geometry_columns table doesn't exist!\n"
"This table is essential for many GIS applications for enumeration of tables.")))
return ret
def privilegesDetails(self):
details = self.db.connector.getDatabasePrivileges()
lst = []
if details[0]:
lst.append(QApplication.translate("DBManagerPlugin", "create new schemas"))
if details[1]:
lst.append(QApplication.translate("DBManagerPlugin", "create temporary tables"))
return HtmlList(lst)
def toHtml(self):
if self.db is None:
return HtmlSection(QApplication.translate("DBManagerPlugin", 'Not connected')).toHtml()
ret = []
# connection details
conn_details = self.connectionDetails()
if conn_details is None:
pass
else:
ret.append(HtmlSection(QApplication.translate("DBManagerPlugin", 'Connection details'), conn_details))
# database information
general_info = self.generalInfo()
if general_info is None:
pass
else:
ret.append(HtmlSection(QApplication.translate("DBManagerPlugin", 'General info'), general_info))
# has spatial enabled?
spatial_info = self.spatialInfo()
if spatial_info is None:
pass
else:
typename = self.db.connection().typeNameString()
spatial_info = HtmlContent(spatial_info)
if not spatial_info.hasContents():
spatial_info = QApplication.translate("DBManagerPlugin", '<warning> {0} support not enabled!').format(typename)
ret.append(HtmlSection(typename, spatial_info))
# privileges
priv_details = self.privilegesDetails()
if priv_details is None:
pass
else:
priv_details = HtmlContent(priv_details)
if not priv_details.hasContents():
priv_details = QApplication.translate("DBManagerPlugin", '<warning> This user has no privileges!')
else:
priv_details = [QApplication.translate("DBManagerPlugin", "User has privileges:"), priv_details]
ret.append(HtmlSection(QApplication.translate("DBManagerPlugin", 'Privileges'), priv_details))
return HtmlContent(ret).toHtml()
class SchemaInfo(object):
def __init__(self, schema):
self.schema = schema
def __del__(self):
self.schema = None
def generalInfo(self):
tbl = [
# ("Tables:", self.schema.tableCount)
]
if self.schema.owner:
tbl.append((QApplication.translate("DBManagerPlugin", "Owner:"), self.schema.owner))
if self.schema.comment:
tbl.append((QApplication.translate("DBManagerPlugin", "Comment:"), self.schema.comment))
return HtmlTable(tbl)
def privilegesDetails(self):
details = self.schema.database().connector.getSchemaPrivileges(self.schema.name)
lst = []
if details[0]:
lst.append(QApplication.translate("DBManagerPlugin", "create new objects"))
if details[1]:
lst.append(QApplication.translate("DBManagerPlugin", "access objects"))
return HtmlList(lst)
def toHtml(self):
ret = []
general_info = self.generalInfo()
if general_info is None:
pass
else:
ret.append(HtmlSection(QApplication.translate("DBManagerPlugin", 'Schema details'), general_info))
priv_details = self.privilegesDetails()
if priv_details is None:
pass
else:
priv_details = HtmlContent(priv_details)
if not priv_details.hasContents():
priv_details = QApplication.translate("DBManagerPlugin",
'<warning> This user has no privileges to access this schema!')
else:
priv_details = [QApplication.translate("DBManagerPlugin", "User has privileges:"), priv_details]
ret.append(HtmlSection(QApplication.translate("DBManagerPlugin", 'Privileges'), priv_details))
return HtmlContent(ret).toHtml()
class TableInfo(object):
def __init__(self, table):
self.table = table
def __del__(self):
self.table = None
def generalInfo(self):
if self.table.rowCount is None:
# row count information is not displayed yet, so just block
# table signals to avoid double refreshing (infoViewer->refreshRowCount->tableChanged->infoViewer)
self.table.blockSignals(True)
self.table.refreshRowCount()
self.table.blockSignals(False)
tbl = [
(QApplication.translate("DBManagerPlugin", "Relation type:"),
QApplication.translate("DBManagerPlugin", "View") if self.table.isView else QApplication.translate(
"DBManagerPlugin", "Table")),
(QApplication.translate("DBManagerPlugin", "Rows:"),
self.table.rowCount if self.table.rowCount is not None else QApplication.translate("DBManagerPlugin",
'Unknown (<a href="action:rows/count">find out</a>)'))
]
if self.table.comment:
tbl.append((QApplication.translate("DBManagerPlugin", "Comment:"), self.table.comment))
return HtmlTable(tbl)
def spatialInfo(self): # implemented in subclasses
return None
def fieldsDetails(self):
tbl = []
# define the table header
header = (
"#", QApplication.translate("DBManagerPlugin", "Name"), QApplication.translate("DBManagerPlugin", "Type"),
QApplication.translate("DBManagerPlugin", "Null"), QApplication.translate("DBManagerPlugin", "Default"))
tbl.append(HtmlTableHeader(header))
# add table contents
for fld in self.table.fields():
is_null_txt = "N" if fld.notNull else "Y"
# make primary key field underlined
attrs = {"class": "underline"} if fld.primaryKey else None
name = HtmlTableCol(fld.name, attrs)
tbl.append((fld.num, name, fld.type2String(), is_null_txt, fld.default2String()))
return HtmlTable(tbl, {"class": "header"})
def constraintsDetails(self):
if self.table.constraints() is None or len(self.table.constraints()) <= 0:
return None
tbl = []
# define the table header
header = (QApplication.translate("DBManagerPlugin", "Name"), QApplication.translate("DBManagerPlugin", "Type"),
QApplication.translate("DBManagerPlugin", "Column(s)"))
tbl.append(HtmlTableHeader(header))
# add table contents
for con in self.table.constraints():
# get the fields the constraint is defined on
cols = [p[1].name if p[1] is not None else u"??? (#%d)" % p[0] for p in iter(list(con.fields().items()))]
tbl.append((con.name, con.type2String(), u'\n'.join(cols)))
return HtmlTable(tbl, {"class": "header"})
def indexesDetails(self):
if self.table.indexes() is None or len(self.table.indexes()) <= 0:
return None
tbl = []
# define the table header
header = (
QApplication.translate("DBManagerPlugin", "Name"), QApplication.translate("DBManagerPlugin", "Column(s)"))
tbl.append(HtmlTableHeader(header))
# add table contents
for idx in self.table.indexes():
# get the fields the index is defined on
cols = [p[1].name if p[1] is not None else u"??? (#%d)" % p[0] for p in iter(list(idx.fields().items()))]
tbl.append((idx.name, u'\n'.join(cols)))
return HtmlTable(tbl, {"class": "header"})
def triggersDetails(self):
if self.table.triggers() is None or len(self.table.triggers()) <= 0:
return None
tbl = []
# define the table header
header = (
QApplication.translate("DBManagerPlugin", "Name"), QApplication.translate("DBManagerPlugin", "Function"))
tbl.append(HtmlTableHeader(header))
# add table contents
for trig in self.table.triggers():
name = u'%(name)s (<a href="action:trigger/%(name)s/%(action)s">%(action)s</a>)' % {"name": trig.name,
"action": "delete"}
tbl.append((name, trig.function))
return HtmlTable(tbl, {"class": "header"})
def getViewDefinition(self):
if not self.table.isView:
return None
return self.table.database().connector.getViewDefinition((self.table.schemaName(), self.table.name))
def getTableInfo(self):
ret = []
general_info = self.generalInfo()
if general_info is None:
pass
else:
ret.append(HtmlSection(QApplication.translate("DBManagerPlugin", 'General info'), general_info))
# spatial info
spatial_info = self.spatialInfo()
if spatial_info is None:
pass
else:
spatial_info = HtmlContent(spatial_info)
if not spatial_info.hasContents():
spatial_info = QApplication.translate("DBManagerPlugin", '<warning> This is not a spatial table.')
ret.append(HtmlSection(self.table.database().connection().typeNameString(), spatial_info))
# fields
fields_details = self.fieldsDetails()
if fields_details is None:
pass
else:
ret.append(HtmlSection(QApplication.translate("DBManagerPlugin", 'Fields'), fields_details))
# constraints
constraints_details = self.constraintsDetails()
if constraints_details is None:
pass
else:
ret.append(HtmlSection(QApplication.translate("DBManagerPlugin", 'Constraints'), constraints_details))
# indexes
indexes_details = self.indexesDetails()
if indexes_details is None:
pass
else:
ret.append(HtmlSection(QApplication.translate("DBManagerPlugin", 'Indexes'), indexes_details))
# triggers
triggers_details = self.triggersDetails()
if triggers_details is None:
pass
else:
ret.append(HtmlSection(QApplication.translate("DBManagerPlugin", 'Triggers'), triggers_details))
return ret
def getViewInfo(self):
if not self.table.isView:
return []
ret = self.getTableInfo()
# view definition
view_def = self.getViewDefinition()
if view_def is None:
pass
else:
ret.append(HtmlSection(QApplication.translate("DBManagerPlugin", 'View definition'), view_def))
return ret
def toHtml(self):
if self.table.isView:
ret = self.getViewInfo()
else:
ret = self.getTableInfo()
return HtmlContent(ret).toHtml()
class VectorTableInfo(TableInfo):
def __init__(self, table):
TableInfo.__init__(self, table)
def spatialInfo(self):
ret = []
if self.table.geomType is None:
return ret
tbl = [
(QApplication.translate("DBManagerPlugin", "Column:"), self.table.geomColumn),
(QApplication.translate("DBManagerPlugin", "Geometry:"), self.table.geomType)
]
# only if we have info from geometry_columns
if self.table.geomDim:
tbl.append((QApplication.translate("DBManagerPlugin", "Dimension:"), self.table.geomDim))
srid = self.table.srid if self.table.srid not in (None, 0) else -1
sr_info = self.table.database().connector.getSpatialRefInfo(srid) if srid != -1 else QApplication.translate(
"DBManagerPlugin", "Undefined")
if sr_info:
tbl.append((QApplication.translate("DBManagerPlugin", "Spatial ref:"), u"%s (%d)" % (sr_info, srid)))
# estimated extent
if not self.table.isView:
if self.table.estimatedExtent is None:
# estimated extent information is not displayed yet, so just block
# table signals to avoid double refreshing (infoViewer->refreshEstimatedExtent->tableChanged->infoViewer)
self.table.blockSignals(True)
self.table.refreshTableEstimatedExtent()
self.table.blockSignals(False)
if self.table.estimatedExtent is not None and self.table.estimatedExtent[0] is not None:
if isinstance(self.table.estimatedExtent, list):
estimated_extent_str = ', '.join('%.5f' % e for e in self.table.estimatedExtent)
else:
estimated_extent_str = '%.5f, %.5f - %.5f, %.5f' % self.table.estimatedExtent
tbl.append((QApplication.translate("DBManagerPlugin", "Estimated extent:"), estimated_extent_str))
# extent
if self.table.extent is not None and self.table.extent[0] is not None:
if isinstance(self.table.extent, list):
extent_str = ', '.join('%.5f' % e for e in self.table.extent)
else:
extent_str = '%.5f, %.5f - %.5f, %.5f' % self.table.extent
else:
extent_str = QApplication.translate("DBManagerPlugin",
'(unknown) (<a href="action:extent/get">find out</a>)')
tbl.append((QApplication.translate("DBManagerPlugin", "Extent:"), extent_str))
ret.append(HtmlTable(tbl))
# is there an entry in geometry_columns?
if self.table.geomType.lower() == 'geometry':
ret.append(HtmlParagraph(
QApplication.translate("DBManagerPlugin", "<warning> There is no entry in geometry_columns!")))
# find out whether the geometry column has spatial index on it
if not self.table.isView:
if not self.table.hasSpatialIndex():
ret.append(HtmlParagraph(QApplication.translate("DBManagerPlugin",
'<warning> No spatial index defined (<a href="action:spatialindex/create">create it</a>)')))
return ret
class RasterTableInfo(TableInfo):
def __init__(self, table):
TableInfo.__init__(self, table)
def spatialInfo(self):
ret = []
if self.table.geomType is None:
return ret
tbl = [
(QApplication.translate("DBManagerPlugin", "Column:"), self.table.geomColumn),
(QApplication.translate("DBManagerPlugin", "Geometry:"), self.table.geomType)
]
# only if we have info from geometry_columns
srid = self.table.srid if self.table.srid is not None else -1
sr_info = self.table.database().connector.getSpatialRefInfo(srid) if srid != -1 else QApplication.translate(
"DBManagerPlugin", "Undefined")
if sr_info:
tbl.append((QApplication.translate("DBManagerPlugin", "Spatial ref:"), u"%s (%d)" % (sr_info, srid)))
# extent
if self.table.extent is not None and self.table.extent[0] is not None:
extent_str = '%.5f, %.5f - %.5f, %.5f' % self.table.extent
else:
extent_str = QApplication.translate("DBManagerPlugin",
'(unknown) (<a href="action:extent/get">find out</a>)')
tbl.append((QApplication.translate("DBManagerPlugin", "Extent:"), extent_str))
ret.append(HtmlTable(tbl))
return ret
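

# --- Illustrative sketch (assumes `table` is a DB Manager table wrapper) ---
# Picks the spatially-aware info class when the table is known to be a vector
# layer and renders the HTML report shown in the DB Manager info viewer; the
# is_vector flag is a stand-in for however the caller detects the table type.
def _example_table_report(table, is_vector=True):
    info = VectorTableInfo(table) if is_vector else TableInfo(table)
    return info.toHtml()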
| gpl-2.0 | -6,618,836,119,215,804,000 | 37.963362 | 156 | 0.57387 | false | 4.481656 | false | false | false |
JorgeDeLosSantos/nusa | nusa/io.py | 1 | 2320 | # ***********************************
# Author: Pedro Jorge De Los Santos
# E-mail: [email protected]
# Blog: numython.github.io
# License: MIT License
# ***********************************
"""
The purpose of this module is to provide tools to build
a model automatically from text files with coordinates
and connectivities.
"""
import numpy as np
import re
# Non-capturing group so re.findall returns the full (signed) match,
# not just the digits after an optional sign.
FLOATS = r"[-+]?(?:[0-9]*\.[0-9]+|[0-9]+)"
def read_file(filename):
mshfile = open(filename,"r")
msh = mshfile.readlines()
mshfile.close()
return msh
def parse_nodes(line):
p = re.compile(FLOATS)
nd = [float(k) for k in p.findall(line)]
return nd[1::]
def parse_elements(line):
p = re.compile(FLOATS)
elm = [int(k) for k in p.findall(line)]
if len(elm) < 8: return []
enum = elm[0]
etype = elm[1]
elmtags = elm[2:5]
econn = elm[5::]
if etype == 2: return econn
else: return []
def isempty(iterable):
return True if len(iterable)==0 else False
def read_msh(filename):
msh = read_file(filename)
APPEND_NODES = False
APPEND_ELEMENTS = False
nodes = []
elements = []
for line in msh:
if "$Nodes" in line: APPEND_NODES = True
if "$EndNodes" in line: APPEND_NODES = False
if "$Elements" in line: APPEND_ELEMENTS = True
if "$EndElements" in line: APPEND_ELEMENTS = False
if APPEND_NODES:
nc = parse_nodes(line)
if not isempty(nc): nodes.append(nc)
if APPEND_ELEMENTS:
ec = parse_elements(line)
if not isempty(ec): elements.append(ec)
return np.array(nodes), np.array(elements)
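

# --- Illustrative usage sketch (assumes a Gmsh v2 ASCII file is available; the
# path below is a placeholder). read_msh returns the node coordinates and the
# connectivity of 3-node triangles (element type 2) from the $Elements block.
def _example_read_msh(path="model.msh"):
    nodes, elements = read_msh(path)
    print("%d nodes, %d triangular elements" % (nodes.shape[0], elements.shape[0]))
    return nodes, elements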
def ModelFromFiles(nodesfile,elementsfile,model):
"""
Creates a model from ASCII files, where nodesfile contains
the coordinates X/Y and elementsfile contains the connectivity
of elements.
"""
pass
#~ dlm = "," # CSV values
#~ NC = np.loadtxt(nodesfile, delimiter=dlm)
#~ EC = np.loadtxt(elementsfile, delimiter=dlm)
#~ for nd in NC:
#~ cnode = Node( (nd[0], nd[1]) )
#~ model.addNode(cnode)
#~ for el in EC:
#~ na = model.nodes[el[0]]
#~ nb = model.nodes[el[1]]
#~ cel = Spring((na,nb))
#~ model.addElement(cel)
if __name__=='__main__':
pass
| mit | -7,657,294,035,896,943,000 | 25.976744 | 67 | 0.570259 | false | 3.253857 | false | false | false |
mlorant/webcrawler | robot/pagerank.py | 1 | 5684 | # -*- coding: utf-8 -*-
# Part of this file is produced by:
# Copyright (c) 2010 Pedro Matiello <[email protected]>
# Juarez Bochi <[email protected]>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""
PageRank algorithm
@sort: pagerank
"""
from models import Link


class AdditionError(Exception):
    """Raised when a node or an edge cannot be added to the digraph."""
    pass
class Digraph(object):
def __init__(self):
self.node_neighbors = {} # Pairing: Node -> Neighbors
self.node_incidence = {} # Pairing: Node -> Incident nodes
self.node_attrs = {}
def nodes(self):
return list(self.node_neighbors.keys())
def neighbors(self, node):
"""
Return all nodes that are directly accessible from given node.
"""
return self.node_neighbors[node]
def incidents(self, node):
"""
Return all nodes that are incident to the given node.
"""
return self.node_incidence[node]
def edges(self):
"""
Return all edges in the graph.
"""
return [a for a in self._edges()]
def _edges(self):
for n, neighbors in self.node_neighbors.items():
for neighbor in neighbors:
yield (n, neighbor)
def has_node(self, node):
return node in self.node_neighbors
def add_node(self, node, attrs={}):
if attrs is None:
attrs = []
if (node not in self.node_neighbors):
self.node_neighbors[node] = []
self.node_incidence[node] = []
self.node_attrs[node] = attrs
else:
raise AdditionError("Node %s already in digraph" % node)
def add_edge(self, edge, wt=1, label=""):
"""
Add an directed edge to the graph connecting two nodes.
An edge, here, is a pair of nodes like C{(n, m)}.
"""
u, v = edge
for n in [u, v]:
if not n in self.node_neighbors:
raise AdditionError("%s is missing from the node_neighbors" % n)
if not n in self.node_incidence:
raise AdditionError("%s is missing from the node_incidence" % n)
if v in self.node_neighbors[u] and u in self.node_incidence[v]:
return
else:
self.node_neighbors[u].append(v)
self.node_incidence[v].append(u)
def node_order(self, node):
"""
Return the order of the given node.
@rtype: number
@return: Order of the given node.
"""
return len(self.neighbors(node))
def __str__(self):
return "\n".join(
"(%s, %s)" % (k, v)
for k, v in self.node_neighbors.items() if v)
def pagerank(graph, dumping_factor=0.85, max_iter=100, min_delta=0.00001):
"""
    Compute and return the PageRank of every node in a directed graph.
@type graph: digraph
@param graph: Digraph.
@type dumping_factor: number
@param dumping_factor: PageRank dumping factor.
@type max_iter: number
@param max_iter: Maximum number of iterations.
@type min_delta: number
@param min_delta: Smallest variation required to have a new iteration.
@rtype: Dict
@return: Dict containing all the nodes PageRank.
"""
nodes = graph.nodes()
graph_size = len(nodes)
if graph_size == 0:
return {}
min_value = (1.0-dumping_factor)/graph_size # value for nodes without inbound links
    # initialize the PageRank dict with 1/N for all nodes
pagerank = dict.fromkeys(nodes, 1.0/graph_size)
for i in range(max_iter):
        diff = 0  # total difference compared to the last iteration
# computes each node PageRank based on inbound links
for node in nodes:
rank = min_value
for referring_page in graph.incidents(node):
rank += dumping_factor * pagerank[referring_page] / len(graph.neighbors(referring_page))
diff += abs(pagerank[node] - rank)
pagerank[node] = rank
#stop if PageRank has converged
if diff < min_delta:
break
return pagerank
def generate_graph():
""" Generate the directional graph needed to compute pageranks """
graph = Digraph()
links = Link.select()
for link in links:
if not graph.has_node(link.inbound.id):
graph.add_node(link.inbound.id, link.inbound)
if not graph.has_node(link.target.id):
graph.add_node(link.target.id, link.target)
graph.add_edge((link.inbound.id, link.target.id))
return graph
def compute_pagerank():
""" Compute and write the pagerank ranking into a file """
g = generate_graph()
import operator
pages = sorted(pagerank(g).iteritems(),
key=operator.itemgetter(1), reverse=True)
with open('logs/pagerank.txt', 'w') as f:
for idx, elem in enumerate(pages):
f.write(
("%6s & %s - %s\n" %
(idx, elem, g.node_attrs[elem[0]].url)).encode('utf8'))
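
# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal, hypothetical example of the Digraph/pagerank API defined above,
# using a tiny in-memory graph instead of the crawled Link records.
if __name__ == '__main__':
    demo = Digraph()
    for node in ('a', 'b', 'c'):
        demo.add_node(node)
    for edge in (('a', 'b'), ('b', 'c'), ('c', 'a'), ('a', 'c')):
        demo.add_edge(edge)
    # 'c' has two inbound links, so it should receive the highest rank.
    print(pagerank(demo))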
| gpl-3.0 | -816,333,319,626,065,700 | 29.403315 | 104 | 0.580049 | false | 3.917298 | false | false | false |
andela-ooshodi/codango-debug | codango/account/views.py | 1 | 10668 | from django.http import HttpResponse, Http404
from django.shortcuts import render, redirect
from django.views.generic import View, TemplateView
from django.contrib import messages
from django.contrib.auth import authenticate, login
from django.template.context_processors import csrf
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.template import RequestContext, loader
from account.hash import UserHasher
from emails import SendGrid
from resources.views import CommunityBaseView
from account.forms import LoginForm, RegisterForm, ResetForm, ContactUsForm
from userprofile.models import UserProfile
from codango.settings.base import ADMIN_EMAIL, CODANGO_EMAIL
class IndexView(TemplateView):
initial = {'key': 'value'}
template_name = 'account/index.html'
def dispatch(self, request, *args, **kwargs):
if request.user.is_authenticated():
messages.add_message(
request, messages.SUCCESS, 'Welcome back!')
return redirect(
'/home',
context_instance=RequestContext(request)
)
return super(IndexView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(IndexView, self).get_context_data(**kwargs)
context['loginform'] = LoginForm()
context['registerform'] = RegisterForm()
return context
class LoginView(IndexView):
form_class = LoginForm
def post(self, request, *args, **kwargs):
if self.request.is_ajax():
try:
userprofile = UserProfile.objects.get(
social_id=request.POST['id'])
user = userprofile.get_user()
user.backend = 'django.contrib.auth.backends.ModelBackend'
login(request, user)
return HttpResponse("success", content_type='text/plain')
except UserProfile.DoesNotExist:
return HttpResponse("register", content_type='text/plain')
form = self.form_class(request.POST)
if form.is_valid():
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if not request.POST.get('remember_me'):
request.session.set_expiry(0)
if user is not None:
if user.is_active:
login(request, user)
messages.add_message(
request, messages.SUCCESS, 'Logged in Successfully!')
return redirect(
'/home',
context_instance=RequestContext(request)
)
else:
messages.add_message(
request, messages.ERROR, 'Incorrect username or password!')
return redirect(
'/',
context_instance=RequestContext(request)
)
else:
context = super(LoginView, self).get_context_data(**kwargs)
context['loginform'] = form
return render(request, self.template_name, context)
class RegisterView(IndexView):
form_class = RegisterForm
def post(self, request, **kwargs):
form = self.form_class(request.POST)
if form.is_valid():
new_user = form.save()
new_user = authenticate(username=request.POST['username'],
password=request.POST['password'])
login(request, new_user)
messages.add_message(
request, messages.SUCCESS, 'Registered Successfully!')
new_profile = new_user.profile
new_profile.social_id = request.POST[
'social_id'] if 'social_id' in request.POST else None
new_profile.first_name = request.POST[
'first_name'] if 'first_name' in request.POST else None
new_profile.last_name = request.POST[
'last_name'] if 'last_name' in request.POST else None
new_profile.save()
return redirect(
'/user/' + self.request.user.username + '/edit',
context_instance=RequestContext(request)
)
else:
context = super(RegisterView, self).get_context_data(**kwargs)
context['registerform'] = form
return render(request, self.template_name, context)
class ContactUsView(TemplateView):
form_class = ContactUsForm
template_name = 'account/contact-us.html'
def get_context_data(self, **kwargs):
context = super(ContactUsView, self).get_context_data(**kwargs)
context['contactusform'] = ContactUsForm()
return context
def post(self, request, *args, **kwargs):
# get email data from form
form = self.form_class(request.POST)
if form.is_valid():
name = request.POST['name']
email = request.POST['email']
subject = request.POST['subject']
message = request.POST['message']
# compose the email
email_compose = SendGrid.compose(
sender='{0} <{1}>'.format(name, email),
recipient=ADMIN_EMAIL,
subject=subject,
text=message,
html=None
)
# send email
response = SendGrid.send(email_compose)
# inform the user if mail sent was successful or not
if response == 200:
messages.add_message(
request, messages.SUCCESS, 'Message sent successfully!')
return redirect(
'/contact-us',
context_instance=RequestContext(request)
)
else:
messages.add_message(
request, messages.ERROR,
'Message failed to send, please try again later')
return redirect(
'/contact-us',
context_instance=RequestContext(request)
)
else:
context = super(ContactUsView, self).get_context_data(**kwargs)
context['contactusform'] = form
return render(request, self.template_name, context)
class AboutUsView(TemplateView):
template_name = 'account/about-us.html'
class TeamView(TemplateView):
template_name = 'account/team.html'
class HomeView(CommunityBaseView):
pass
class ForgotPasswordView(TemplateView):
form_class = ResetForm
template_name = 'account/forgot-password.html'
def post(self, request, *args, **kwargs):
try:
# get the email inputted
email_inputted = request.POST.get("email")
# query the database if that email exists
user = User.objects.get(email=email_inputted)
# generate a recovery hash for that user
user_hash = UserHasher.gen_hash(user)
user_hash_url = request.build_absolute_uri(
reverse('reset_password', kwargs={'user_hash': user_hash}))
hash_email_context = RequestContext(
request, {'user_hash_url': user_hash_url})
# compose the email
email_compose = SendGrid.compose(
sender='Codango <{}>'.format(CODANGO_EMAIL),
recipient=user.email,
subject='Codango: Password Recovery',
text=loader.get_template(
'account/forgot-password-email.txt'
).render(hash_email_context),
html=loader.get_template(
'account/forgot-password-email.html'
).render(hash_email_context),
)
# send email
email_response = SendGrid.send(email_compose)
# inform the user if mail sent was successful
context = {
"email_status": email_response
}
return render(
request,
'account/forgot-password-status.html',
context
)
except ObjectDoesNotExist:
messages.add_message(
request, messages.ERROR,
'The email specified does not belong to any valid user.')
return render(request, 'account/forgot-password.html')
class ResetPasswordView(View):
def get(self, request, *args, **kwargs):
user_hash = kwargs['user_hash']
user = UserHasher.reverse_hash(user_hash)
if user is not None:
if user.is_active:
request.session['user_pk'] = user.pk
context = {
"password_reset_form": ResetForm(auto_id=True)
}
context.update(csrf(request))
return render(
request,
'account/forgot-password-reset.html',
context
)
else:
messages.add_message(
request, messages.ERROR, 'Account not activated!')
                return HttpResponse(
                    'Account not activated!',
                    status=403,
                    reason='You are not allowed to view this content '
                           'because your account is not activated!'
                )
else:
raise Http404("User does not exist")
def post(self, request, *args, **kwargs):
password_reset_form = ResetForm(request.POST, auto_id=True)
new_password = request.POST.get("password")
if password_reset_form.is_valid():
try:
user_pk = request.session['user_pk']
user = User.objects.get(pk=user_pk)
user.set_password(new_password)
user.save()
messages.add_message(
request, messages.INFO,
'Your password has been changed successfully!')
return redirect('/')
except ObjectDoesNotExist:
messages.add_message(
request, messages.ERROR,
'You are not allowed to perform this action!')
                return HttpResponse('Action not allowed!', status=403)
context = {
"password_reset_form": password_reset_form
}
context.update(csrf(request))
return render(request, 'account/forgot-password-reset.html', context)
| mit | -8,443,781,310,264,066,000 | 35.786207 | 79 | 0.559618 | false | 4.732919 | false | false | false |
kjagoo/wger_stark | wger/gym/forms.py | 2 | 3478 | # -*- coding: utf-8 -*-
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
from django.contrib.auth.models import User
from django import forms
from django.utils.translation import ugettext as _
from wger.core.forms import UserPersonalInformationForm
from wger.utils.widgets import BootstrapSelectMultiple
class GymUserPermisssionForm(forms.ModelForm):
'''
Form used to set the permission group of a gym member
'''
USER = 'user'
GYM_ADMIN = 'admin'
TRAINER = 'trainer'
MANAGER = 'manager'
# Empty default roles, they are always set at run time
ROLES = ()
class Meta:
model = User
fields = ('role',)
role = forms.MultipleChoiceField(choices=ROLES,
initial=USER)
def __init__(self, available_roles=[], *args, **kwargs):
'''
Custom logic to reduce the available permissions
'''
super(GymUserPermisssionForm, self).__init__(*args, **kwargs)
field_choices = [(self.USER, _('User'))]
if 'trainer' in available_roles:
field_choices.append((self.TRAINER, _('Trainer')))
if 'admin' in available_roles:
field_choices.append((self.GYM_ADMIN, _('Gym administrator')))
if 'manager' in available_roles:
field_choices.append((self.MANAGER, _('General manager')))
self.fields['role'] = forms.MultipleChoiceField(choices=field_choices,
                                                        initial=self.USER,
widget=BootstrapSelectMultiple())
class GymUserAddForm(GymUserPermisssionForm, UserPersonalInformationForm):
'''
Form used when adding a user to a gym
'''
class Meta:
model = User
widgets = {'role': BootstrapSelectMultiple()}
fields = ('first_name', 'last_name', 'username', 'email', 'role',)
username = forms.RegexField(label=_("Username"),
max_length=30,
regex=r'^[\w.@+-]+$',
help_text=_("Required. 30 characters or fewer. Letters, digits and "
"@/./+/-/_ only."),
error_messages={
'invalid': _("This value may contain only letters, numbers and "
"@/.//-/_ characters.")})
def clean_username(self):
'''
Since User.username is unique, this check is redundant,
but it sets a nicer error message than the ORM. See #13147.
'''
username = self.cleaned_data["username"]
try:
User._default_manager.get(username=username)
except User.DoesNotExist:
return username
raise forms.ValidationError(_("A user with that username already exists."))
| agpl-3.0 | 7,061,158,757,622,213,000 | 36.804348 | 100 | 0.589419 | false | 4.570302 | false | false | false |
CaptFrank/NetworkDeviceMonitor | NetworkMonitor/Base/Resource.py | 1 | 2937 | """
:Resource:
==========
:
This is the managed resource between processes.
Resources such as queues, locks and data are housed
here to allow for synchronization to occur.
:
:copyright: (c) 9/30/2015 by gammaRay.
:license: BSD, see LICENSE for more details.
Author: gammaRay
Version: :1.0:
Date: 9/30/2015
"""
"""
=============================================
Imports
=============================================
"""
import datetime
from uuid import uuid4
"""
=============================================
Constants
=============================================
"""
# Program Attributes
__author__ = "gammaRay"
__version__ = "1.0"
__date__ = "9/28/2015"
# ===========================================
# Types
RESOURCE_DEFAULT = 0
RESOURCE_TEST = 1
"""
=============================================
Source
=============================================
"""
RESOURCE_TYPES = [
"DEFAULT",
"TEST"
]
def add_type(type):
"""
Adds a type to monitor.
"""
RESOURCE_TYPES.append(type)
return
# ===========================================
# Managed Resource
class ManagedResource(object):
"""
This is the wrapper class that is used to combine all
resources into one cohesive object. In this case,
we attribute resources based on tasks and interfaces to the
application.
i.e. Ip motoring task, Arp monitoring task
"""
# Name of the resource
name = None
# Tag for the resource
tag = None
# Tracking
uuid = None
# The resource to manage
resource = None
# Time at which the resource is set
time = None
# The queue name where the resource will be published to.
__queue = None
def __init__(self, name=None, tag=None):
"""
This is the default constructor for the class object.
:param name: Name of the plugin
:param tag: Tag for the resource
:return:
"""
# Set the internals of the class
self.name = name
self.tag = tag
self.uuid = str(uuid4())
self.__queue = "{plugin}.{app}".format(
plugin = name,
app = tag
)
return
def setObj(self, obj):
"""
Sets the object in the resource.
:param obj: The object to manage.
:return:
"""
# Set the object
self.resource = obj
# Set the time at which the object is set
self.time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
return
def getObj(self):
"""
Gets the object within the resource.
:return:
"""
return self.resource
__obj = property(getObj, setObj)
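
# --- Usage sketch (added for illustration; not part of the original module) ---
# Shows how a ManagedResource wraps an arbitrary payload and records the time
# it was set; the plugin and application names below are hypothetical.
if __name__ == '__main__':
    resource = ManagedResource(name="IpMonitor", tag="NetworkMonitor")
    resource.setObj({"hosts": ["10.0.0.1", "10.0.0.2"]})
    print(resource.uuid)
    print(resource.time)
    print(resource.getObj())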
| gpl-2.0 | 4,229,579,164,839,813,600 | 19.829787 | 77 | 0.470548 | false | 4.456753 | false | false | false |
apavlenko/vmf | cmake-clean.py | 2 | 3100 | #!/usr/bin/env python
"""
Clean cmake generated files.
"""
import optparse
import os
import shutil
import subprocess
import sys
# Do not cleanup anything in these subdirectories.
PRUNE_DIRS = [".svn", ".git", "CVS"]
def make_clean(directory):
"""Run 'make clean' in directory.
Arguments:
- `directory`: target directory
"""
args = [
"make",
"--directory=%s" % directory,
"--quiet",
"clean"
]
process = subprocess.Popen(args)
return process.wait()
def clean(
directory
):
"""Clean cmake files.
Arguments:
- `directory`: target directory
"""
# Toplevel files.
for filename in [
"CMakeCache.txt",
"CPackConfig.cmake",
"CPackSourceConfig.cmake",
"install_manifest.txt"
]:
pathname = os.path.join(directory, filename)
if os.path.exists(pathname):
os.remove(pathname)
# Toplevel directories.
for dirname in ["_CPack_Packages"]:
pathname = os.path.join(directory, dirname)
if os.path.exists(pathname):
shutil.rmtree(pathname)
# CMakeFiles, Makefile, cmake_install.cmake.
for dirpath, dirnames, filenames in os.walk(directory):
# Prune subdirs.
for dirname in dirnames:
if dirname in PRUNE_DIRS:
dirnames.remove(dirname)
if "CMakeFiles" in dirnames:
for filename in ["Makefile", "cmake_install.cmake"]:
if filename in filenames:
pathname = os.path.join(dirpath, filename)
if os.path.exists(pathname):
os.remove(pathname)
shutil.rmtree(os.path.join(dirpath, "CMakeFiles"))
dirnames.remove("CMakeFiles")
# Remove empty directories. The "repeat" construct is needed
# because the dirnames list for the parent is generated before the
# parent is processed. When a directory is removed, there is no
# way to remove it from the parent's dirnames list. Note that
# setting topdown=False will not help here, and it complicates the
# pruning logic.
repeat = True
while repeat:
repeat = False
for dirpath, dirnames, filenames in os.walk(directory):
# We must check for emptiness before pruning. Otherwise
# we could try to remove a directory that contains only
# prunable subdirs.
if len(dirnames) == 0 and len(filenames) == 0:
os.rmdir(dirpath)
repeat = True
# Prune subdirs.
for dirname in dirnames:
if dirname in PRUNE_DIRS:
dirnames.remove(dirname)
def main():
"""main"""
option_parser = optparse.OptionParser(
usage="usage: %prog [DIR...]\n" +
" Clean cmake generated files."
)
(_, args) = option_parser.parse_args()
if len(args) == 0:
args.append(".")
for arg in args:
#make_clean(arg)
clean(arg)
return 0
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 | -3,668,471,455,453,956,600 | 25.724138 | 70 | 0.57871 | false | 4.229195 | false | false | false |
franklingu/leetcode-solutions | questions/design-search-autocomplete-system/Solution.py | 1 | 1350 | """
None
"""
from collections import defaultdict
from typing import List
class TrieNode:
def __init__(self):
self.children = {}
self.words = defaultdict(int)
class AutocompleteSystem:
def __init__(self, sentences: List[str], times: List[int]):
self.ss = ''
self.root = TrieNode()
for sentence, time in zip(sentences, times):
self.add_input(sentence, time)
def add_input(self, sentence, time):
curr = self.root
for ch in sentence:
if ch not in curr.children:
curr.children[ch] = TrieNode()
curr = curr.children[ch]
curr.words[sentence] += time
def input(self, c: str) -> List[str]:
if c != '#':
self.ss = self.ss + c
else:
self.add_input(self.ss, 1)
self.ss = ''
return []
curr = self.root
for ch in self.ss:
if ch not in curr.children:
curr = None
break
curr = curr.children[ch]
ret = []
if curr is not None:
ret = [x[0] for x in list(sorted(curr.words.items(), key=lambda x: (-x[1], x[0])))[:3]]
return ret
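
# --- Usage sketch (added for illustration; not part of the original solution) ---
# Mirrors the LeetCode 642 example: historical sentences with their frequencies,
# then characters typed one at a time; '#' commits the current input as a new sentence.
if __name__ == '__main__':
    ac = AutocompleteSystem(["i love you", "island", "ironman", "i love leetcode"],
                            [5, 3, 2, 2])
    print(ac.input('i'))   # ['i love you', 'island', 'i love leetcode']
    print(ac.input(' '))   # ['i love you', 'i love leetcode']
    print(ac.input('a'))   # []
    print(ac.input('#'))   # []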
# Your AutocompleteSystem object will be instantiated and called as such:
# obj = AutocompleteSystem(sentences, times)
# param_1 = obj.input(c) | mit | 195,136,025,544,930,530 | 24.980769 | 99 | 0.527407 | false | 3.824363 | false | false | false |
richarddunks/ll84 | ll84_proc.py | 1 | 3453 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
A script to process data from LL84 energy and water use benchmarking from
http://www.nyc.gov/html/gbee/html/plan/ll84_scores.shtml
Manually removed column 29 "OwnerName", column 62 "HistDist, and column 63 "Landmark" due to
formatting errors that caused the file to be parsed incorrectly. They aren't germane to this analysis
"""
import pandas as pd
# download file from internet
# open file
mnplutoFile = open('mn13v1.csv', 'r')
# lists to collect the parsed rows
mnplutoList = []
plutoLineLength = []
# remove duplicates
# remove outliers
# Fields of interest:
# BBL 65
# BldgFront 45
# BldgDepth 46
# NumFloors 40
# NumBldgs 39
# BldgArea 29
next(mnplutoFile)  # dump header
# iterate and extract: the file can only be iterated once, so record the
# concatenated BBL / column count and the columns of interest in a single pass
for line in mnplutoFile:
    lineSplit = line.split(",")
    cat_bbl = lineSplit[0].strip() + lineSplit[1].strip() + lineSplit[2].strip()
    plutoLineLength.append([cat_bbl, len(lineSplit)])
    mnplutoList.append([lineSplit[68].strip(), lineSplit[44].strip(),
                        lineSplit[45].strip(), lineSplit[52].strip()])
# Borough 0
# Block 1
# Lot 2
# CD 3
# CT2010 4
# CB2010 5
# SchoolDist 6
# Council 7
# ZipCode 8
# FireComp 9
# PolicePrct 10
# Address 11
# ZoneDist1 12
# ZoneDist2 13
# ZoneDist3 14
# ZoneDist4 15
# Overlay1 16
# Overlay2 17
# SPDist1 18
# SPDist2 19
# LtdHeight 20
# AllZoning1 21
# AllZoning2 22
# SplitZone 23
# BldgClass 24
# LandUse 25
# Easements 26
# OwnerType 27
# LotArea 28
# BldgArea 29
# ComArea 30
# ResArea 31
# OfficeArea 32
# RetailArea 33
# GarageArea 34
# StrgeArea 35
# FactryArea 36
# OtherArea 37
# AreaSource 38
# NumBldgs 39
# NumFloors 40
# UnitsRes 41
# UnitsTotal 42
# LotFront 43
# LotDepth 44
# BldgFront 45
# BldgDepth 46
# Ext 47
# ProxCode 48
# IrrLotCode 49
# LotType 50
# BsmtCode 51
# AssessLand 52
# AssessTot 53
# ExemptLand 54
# ExemptTot 55
# YearBuilt 56
# BuiltCode 57
# YearAlter1 58
# YearAlter2 59
# BuiltFAR 60
# ResidFAR 61
# CommFAR 62
# FacilFAR 63
# BoroCode 64
# BBL 65
# CondoNo 66
# Tract2010 67
# XCoord 68
# YCoord 69
# ZoneMap 70
# ZMCode 71
# Sanborn 72
# TaxMap 73
# EDesigNum 74
# APPBBL 75
# APPDate 76
# PLUTOMapID 77
# Version 78
#### Old List ####
# Borough 0
# Block 1
# Lot 2
# CD 3
# CT2010 4
# CB2010 5
# SchoolDist 6
# Council 7
# ZipCode 8
# FireComp 9
# PolicePrct 10
# Address 11
# ZoneDist1 12
# ZoneDist2 13
# ZoneDist3 14
# ZoneDist4 15
# Overlay1 16
# Overlay2 17
# SPDist1 18
# SPDist2 19
# LtdHeight 20
# AllZoning1 21
# AllZoning2 22
# SplitZone 23
# BldgClass 24
# LandUse 25
# Easements 26
# OwnerType 27
# OwnerName 28
# LotArea 29
# BldgArea 30
# ComArea 31
# ResArea 32
# OfficeArea 33
# RetailArea 34
# GarageArea 35
# StrgeArea 36
# FactryArea 37
# OtherArea 38
# AreaSource 39
# NumBldgs 40
# NumFloors 41
# UnitsRes 42
# UnitsTotal 43
# LotFront 44
# LotDepth 45
# BldgFront 46
# BldgDepth 47
# Ext 48
# ProxCode 49
# IrrLotCode 50
# LotType 51
# BsmtCode 52
# AssessLand 53
# AssessTot 54
# ExemptLand 55
# ExemptTot 56
# YearBuilt 57
# BuiltCode 58
# YearAlter1 59
# YearAlter2 60
# HistDist 61
# Landmark 62
# BuiltFAR 63
# ResidFAR 64
# CommFAR 65
# FacilFAR 66
# BoroCode 67
# BBL 68
# CondoNo 69
# Tract2010 70
# XCoord 71
# YCoord 72
# ZoneMap 73
# ZMCode 74
# Sanborn 75
# TaxMap 76
# EDesigNum 77
# APPBBL 78
# APPDate 79
# PLUTOMapID 80
# Version 81 | gpl-2.0 | 8,096,575,113,988,458,000 | 15.447619 | 118 | 0.692152 | false | 2.43169 | false | false | false |
Endika/c2c-rd-addons | account_financial_report_chricar/wizard/wizard_general_ledger_report.py | 5 | 7759 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# Copyright (c) 2009 Zikzakmedia S.L. (http://zikzakmedia.com) All Rights Reserved.
# Jordi Esteve <[email protected]>
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import wizard
import pooler
import locale
import time
from openerp.tools.translate import _
account_form = '''<?xml version="1.0"?>
<form string="Select parent account">
<field name="account_list" colspan="4"/>
</form>'''
account_fields = {
'account_list': {'string':'Account', 'type':'many2many', 'relation':'account.account', 'required':True ,'domain':[]},
}
period_form = '''<?xml version="1.0"?>
<form string="Select Date-Period">
<field name="company_id" colspan="4"/>
<newline/>
<field name="fiscalyear"/>
<label colspan="2" string="(Keep empty for all open fiscal years)" align="0.0"/>
<newline/>
<field name="display_account" required="True"/>
<field name="sortbydate" required="True"/>
<field name="landscape"/>
<field name="amount_currency"/>
<field name="initial_balance"/>
<newline/>
<separator string="Filters" colspan="4"/>
<field name="state" required="True"/>
<newline/>
<group attrs="{'invisible':[('state','=','none')]}" colspan="4">
<group attrs="{'invisible':[('state','=','byperiod')]}" colspan="4">
<separator string="Date Filter" colspan="4"/>
<field name="date_from"/>
<field name="date_to"/>
</group>
<group attrs="{'invisible':[('state','=','bydate')]}" colspan="4">
<separator string="Filter on Periods" colspan="4"/>
<field name="periods" colspan="4" nolabel="1"/>
</group>
</group>
</form>'''
period_fields = {
'company_id': {'string': 'Company', 'type': 'many2one', 'relation': 'res.company', 'required': True},
'state':{
'string':"Date/Period Filter",
'type':'selection',
'selection':[('bydate','By Date'),('byperiod','By Period'),('all','By Date and Period'),('none','No Filter')],
'default': lambda *a:'none'
},
'fiscalyear': {'string': 'Fiscal year', 'type': 'many2one', 'relation': 'account.fiscalyear',
'help': 'Keep empty for all open fiscal year'},
'periods': {'string': 'Periods', 'type': 'many2many', 'relation': 'account.period', 'help': 'All periods if empty'},
'sortbydate':{'string':"Sort by", 'type':'selection', 'selection':[('sort_date','Date'),('sort_mvt','Movement')]},
'display_account':{'string':"Display accounts ", 'type':'selection', 'selection':[('bal_mouvement','With movements'),('bal_all','All'),('bal_solde','With balance is not equal to 0')]},
'landscape':{'string':"Landscape Mode", 'type':'boolean'},
'initial_balance':{'string':"Show initial balances", 'type':'boolean'},
'amount_currency':{'string':"With Currency", 'type':'boolean'},
'date_from': {'string':"Start date", 'type':'date', 'required':True, 'default': lambda *a: time.strftime('%Y-01-01')},
'date_to': {'string':"End date", 'type':'date', 'required':True, 'default': lambda *a: time.strftime('%Y-%m-%d')},
}
class wizard_report(wizard.interface):
def _get_defaults(self, cr, uid, data, context={}):
user = pooler.get_pool(cr.dbname).get('res.users').browse(cr, uid, uid, context=context)
if user.company_id:
company_id = user.company_id.id
else:
company_id = pooler.get_pool(cr.dbname).get('res.company').search(cr, uid, [('parent_id', '=', False)])[0]
data['form']['company_id'] = company_id
fiscalyear_obj = pooler.get_pool(cr.dbname).get('account.fiscalyear')
data['form']['fiscalyear'] = fiscalyear_obj.find(cr, uid)
# Better allow users to set theirs defaults
#periods_obj=pooler.get_pool(cr.dbname).get('account.period')
#data['form']['periods'] =periods_obj.search(cr, uid, [('fiscalyear_id','=',data['form']['fiscalyear'])])
#data['form']['display_account']='bal_all'
#data['form']['sortbydate'] = 'sort_date'
#data['form']['landscape']=True
#data['form']['amount_currency'] = True
data['form']['context'] = context
return data['form']
def _check_path(self, cr, uid, data, context):
if data['model'] == 'account.account':
return 'checktype'
else:
return 'account_selection'
def _check(self, cr, uid, data, context):
if data['form']['landscape']==True:
return 'report_landscape'
else:
return 'report'
def _check_date(self, cr, uid, data, context):
sql = """SELECT f.id, f.date_start, f.date_stop
FROM account_fiscalyear f
WHERE '%s' between f.date_start and f.date_stop """ % (data['form']['date_from'])
cr.execute(sql)
res = cr.dictfetchall()
if res:
if (data['form']['date_to'] > res[0]['date_stop'] or data['form']['date_to'] < res[0]['date_start']):
raise wizard.except_wizard(_('UserError'),_('Date to must be set between %s and %s') % (str(res[0]['date_start']), str(res[0]['date_stop'])))
else:
return 'checkreport'
else:
raise wizard.except_wizard(_('UserError'),_('Date not in a defined fiscal year'))
def _check_state(self, cr, uid, data, context):
if data['form']['state'] == 'bydate':
self._check_date(cr, uid, data, context)
# data['form']['fiscalyear'] = 0
# else :
# data['form']['fiscalyear'] = 1
return data['form']
states = {
'init': {
'actions': [],
'result': {'type':'choice','next_state':_check_path}
},
'account_selection': {
'actions': [],
'result': {'type':'form', 'arch':account_form,'fields':account_fields, 'state':[('end','Cancel','gtk-cancel'),('checktype','Next','gtk-go-forward')]}
},
'checktype': {
'actions': [_get_defaults],
'result': {'type':'form', 'arch':period_form, 'fields':period_fields, 'state':[('end','Cancel','gtk-cancel'),('checkreport','Print','gtk-print')]}
},
'checkreport': {
'actions': [],
'result': {'type':'choice','next_state':_check}
},
'report_landscape': {
'actions': [_check_state],
'result': {'type':'print', 'report':'account.general.ledger.cumulative.landscape', 'state':'end'}
},
'report': {
'actions': [_check_state],
'result': {'type':'print', 'report':'account.general.ledger.cumulative', 'state':'end'}
}
}
wizard_report('account.general.ledger.cumulative.report')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -3,547,208,066,806,900,700 | 42.836158 | 188 | 0.566826 | false | 3.757385 | false | false | false |
andela/codango | codango/community/tests/test_models.py | 1 | 2484 | from django.core.exceptions import ObjectDoesNotExist
from django.test import TestCase
from community.models import AddOn, Community
from factories import AddOnFactory, CommunityFactory, UserFactory
class AddonModelTestSuite(TestCase):
def setUp(self):
self.addon = AddOnFactory.build()
self.community = CommunityFactory()
self.addon = AddOn.objects.create(name=self.addon.name)
def test_can_create_addon(self):
self.assertIsNotNone(self.addon.id)
self.assertIsNotNone(self.addon.name)
def test_can_read_addon(self):
addon = AddOn.objects.get(name=self.addon.name)
self.assertIsInstance(addon, AddOn)
def test_can_update_addon(self):
addon = AddOn.objects.get(name=self.addon.name)
addon.name = "analytics"
addon.save()
addon = AddOn.objects.get(name="analytics")
self.assertEqual(addon.id, self.addon.id)
self.assertEqual(addon.name, "analytics")
def test_can_delete_addon(self):
addon = AddOn.objects.get(name=self.addon.name)
addon.delete()
self.assertRaises(
ObjectDoesNotExist,
AddOn.objects.get,
pk=self.addon.id
)
def test_can_read_community_from_addon(self):
self.addon.communities.add(self.community)
addon = AddOn.objects.get(name=self.addon.name)
community = addon.communities.get(name=self.community.name)
self.assertIsInstance(community, Community)
def test_can_add_community_to_addon(self):
self.addon.communities.add(self.community)
community = self.addon.communities.get(name=self.community.name)
self.assertEqual(community, self.community)
def test_can_add_multiple_communities_to_addon(self):
self.addon.communities.add(self.community)
self.community2 = CommunityFactory.create(name='community2')
self.addon.communities.add(self.community2)
self.assertEqual(self.addon.communities.all().count(), 2)
def test_can_remove_addon_from_community(self):
self.addon.communities.add(self.community)
self.assertEqual(self.addon.communities.all().count(), 1)
self.addon.communities.remove(self.community)
self.assertEqual(self.addon.communities.all().count(), 0)
def test_add_invalid_object_to_addon(self):
self.assertRaises(
TypeError,
self.addon.communities.add,
UserFactory()
)
| mit | -36,654,392,462,678,070 | 36.074627 | 72 | 0.674316 | false | 3.798165 | true | false | false |
zfkl/analyses-de-donn-es | sendToVisuClient.py | 1 | 1596 | """
Handles websocket connections:
maintains the set of connected clients and
sends JSON messages to the web browser.
"""
import tornado.ioloop
import tornado.web
import tornado.websocket
from tornado.options import define, options, parse_command_line
define("port", default=443, help="run on the given port", type=int)
# connected websocket clients are stored in a set
clients = set()
class IndexHandler(tornado.web.RequestHandler):
@tornado.web.asynchronous
def get(self):
#self.write("This is your response")
self.render("visu.html")
print "MAIN HANDLER"
print self.request
class WebSocketHandler(tornado.websocket.WebSocketHandler):
def open(self, *args):
print"SOCKET HANDLER"
print self.request
clients.add(self)
print" clients size is {}".format(len(clients))
def on_message(self, message):
"""
when we receive some message we send json to visu client
"""
print "Client %s received a message : %s" % (self, message)
print "envoyer ceci pour les dashboards dynamiques"
for c in clients:
c.write_message(message)
#clients[self.id] = {"id": self.id, "object": self}
print message
def on_close(self):
print "close"
print" clients size is {}".format(len(clients))
if self in clients:
clients.remove(self)
app = tornado.web.Application([
(r'/', IndexHandler),
(r'/websocket', WebSocketHandler),
])
if __name__ == '__main__':
parse_command_line()
app.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
| gpl-3.0 | 7,353,359,219,174,872,000 | 24.741935 | 67 | 0.640351 | false | 3.864407 | false | false | false |
vagonbar/GNUnetwork | gwn/gwnevents/gwnevent_dc.py | 1 | 2850 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of GNUWiNetwork,
# Copyright (C) 2014 by
# Pablo Belzarena, Gabriel Gomez Sena, Victor Gonzalez Barbone,
# Facultad de Ingenieria, Universidad de la Republica, Uruguay.
#
# GNUWiNetwork is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GNUWiNetwork is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNUWiNetwork. If not, see <http://www.gnu.org/licenses/>.
#
'''Dictionary of valid nicknames and attribute values for each nickname.
This module provides a dictionary of valid nicknames for events, and the attributes corresponding to each nickname.
@var ev_dc_nicknames: a dictionary {nichname: (ev_type, ev_subtype, ev_class)} to determine type, subtype and event class corresponding to a certain nickname.
'''
from gwnevent import * #Event, EventNameException
timer_dc = { \
'TimerTOH' : ('Timer', 'TOH', EventTimer ), \
'TimerTOC' : ('Timer', 'TOC', EventTimer ), \
'TimerTOR1' : ('Timer', 'TOR1', EventTimer ), \
'TimerTOR2' : ('Timer', 'TOR2', EventTimer ), \
'TimerTimer' : ('Timer', 'Timer', EventTimer ), \
'TimerCTSTout' : ('Timer', 'CTSTout', EventTimer ), \
'TimerRTSAbort' : ('Timer', 'CTSTout', EventTimer ), \
'TimerACKTout' : ('Timer', 'ACKTout', EventTimer ), \
'TimerDataAbort' : ('Timer', 'ACKTout', EventTimer ) \
}
config_dc = { \
'TimerConfig' : ('Request', 'SetTimerConfig', EventConfig), \
'EventConsumerStatus' : ('Config', 'EventConsumerStatus', EventConfig) \
}
# change type to Config in all nicknames!
data_dc = { \
'DataIn' : ('Data', 'DataIn', EventComm ), \
'DataOut' : ('Data', 'DataOut', EventComm ) \
}
ctrl_dc = { \
#'Nickname' : ('Ctrl', 'SubType', Event ), \
#'Nickname' : ('Ctrl', 'SubType', Event ) \
}
mgmt_dc = { \
#'Nickname' : ('Mgmt', 'SubType', Event ), \
#'Nickname' : ('Mgmt', 'SubType', Event ) \
}
ev_dc_nicknames = {}
all_dics = [timer_dc, data_dc, ctrl_dc, mgmt_dc, config_dc]
for dic in all_dics:
ev_dc_nicknames.update(dic)
# TODO: write a function check_dics() to verify a nickname is unique,
# i.e. not in two different dictionaries in all_dics.
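
# --- Usage sketch (added for illustration; not part of the original module) ---
# A nickname lookup returns the (type, subtype, event class) triple collected
# from the dictionaries above.
if __name__ == '__main__':
    # e.g. ('Data', 'DataIn', EventComm)
    print(ev_dc_nicknames['DataIn'])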
| gpl-3.0 | -2,478,899,628,620,671,500 | 36.5 | 158 | 0.614035 | false | 3.177258 | true | false | false |
ocadotechnology/django-nuit | example_project/demo/views.py | 1 | 1347 | from django.shortcuts import render
from nuit.views import SearchableListView
from .models import Publisher
from .forms import PublisherForm
from django.contrib import messages
from django.contrib.auth.decorators import permission_required
class MyListView(SearchableListView):
model = Publisher
template_name = 'demo/list_view.html'
paginate_by = 15
search_fields = ('name', ('address', 'iexact'))
def kitchen_sink(request):
messages.set_level(request, messages.DEBUG)
messages.add_message(request, messages.DEBUG, 'Debug Message')
messages.add_message(request, messages.INFO, 'Info Message')
messages.add_message(request, messages.SUCCESS, 'Success Message')
messages.add_message(request, messages.WARNING, 'Warning Message')
messages.add_message(request, messages.ERROR, 'Error Message')
return render(request, 'demo/kitchensink.html', {})
def test_form(request):
if request.method == 'POST':
form = PublisherForm(request.POST)
if form.is_valid():
pass
else:
form = PublisherForm()
return render(request, 'demo/forms.html', {'form': form, 'data': 'Data'})
def error(request, code='400'):
    return render(request, 'nuit/generic/%s.html' % code, {}, status=int(code))
@permission_required('does.not.exist')
def no_access(request):
return 'Go Away'
| apache-2.0 | 8,994,372,609,238,851,000 | 35.405405 | 77 | 0.709725 | false | 3.700549 | false | false | false |
hammerlab/mhctools | mhctools/netmhc3.py | 1 | 1756 | # Copyright (c) 2015-2017. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
from .base_commandline_predictor import BaseCommandlinePredictor
from .parsing import parse_netmhc3_stdout
class NetMHC3(BaseCommandlinePredictor):
def __init__(
self,
alleles,
program_name="netMHC",
default_peptide_lengths=[9]):
BaseCommandlinePredictor.__init__(
self,
program_name=program_name,
alleles=alleles,
parse_output_fn=parse_netmhc3_stdout,
# NetMHC just expects the first arg to be an input FASTA
input_file_flag="",
# NetMHC doesn't have the ability to use a custom
# temporary directory
tempdir_flag=None,
length_flag="--peplen",
allele_flag="--mhc",
extra_flags=["--nodirect"],
supported_alleles_flag="-A",
# because we don't have a tempdir flag, can't run more than
# one predictor at a time
process_limit=1,
default_peptide_lengths=default_peptide_lengths,
group_peptides_by_length=True)
| apache-2.0 | -303,155,215,597,512,100 | 38.909091 | 74 | 0.646355 | false | 4.074246 | false | false | false |
damoguyan8844/fabric-bolt | fabric_bolt/web_hooks/views.py | 10 | 2284 | import datetime
from django.http import HttpResponseRedirect
from django.db.models.aggregates import Count
from django.contrib import messages
from django.views.generic import CreateView, UpdateView, DetailView, DeleteView
from django.core.urlresolvers import reverse_lazy, reverse
from django_tables2 import RequestConfig, SingleTableView
from fabric_bolt.core.mixins.views import MultipleGroupRequiredMixin
from fabric_bolt.web_hooks import forms, tables, models
class HookList(SingleTableView):
"""
Hook List page
"""
table_class = tables.HookTable
model = models.Hook
class HookCreate(MultipleGroupRequiredMixin, CreateView):
"""
    Create a new hook for a project
"""
group_required = ['Admin', 'Deployer', ]
model = models.Hook
form_class = forms.HookCreateForm
template_name_suffix = '_create'
def get_initial(self):
initial = super(HookCreate, self).get_initial()
initial['project'] = self.kwargs.get('project_id')
return initial
def form_valid(self, form):
"""After the form is valid lets let people know"""
ret = super(HookCreate, self).form_valid(form)
# Good to make note of that
messages.add_message(self.request, messages.SUCCESS, 'Hook %s created' % self.object.url)
return ret
class HookDetail(DetailView):
"""
    Display the detail page for a web hook
"""
model = models.Hook
class HookUpdate(MultipleGroupRequiredMixin, UpdateView):
"""
    Update a hook
"""
group_required = ['Admin', 'Deployer', ]
model = models.Hook
form_class = forms.HookUpdateForm
template_name_suffix = '_update'
# success_url = reverse_lazy('projects_project_list')
class HookDelete(MultipleGroupRequiredMixin, DeleteView):
"""
    Deletes a web hook.
"""
group_required = ['Admin', ]
model = models.Hook
def delete(self, request, *args, **kwargs):
self.success_url = self.get_object().get_absolute_url()
messages.add_message(request, messages.WARNING, 'Hook Successfully Deleted')
return super(HookDelete, self).delete(request, *args, **kwargs)
| mit | -2,128,097,468,239,109,400 | 25.252874 | 102 | 0.69352 | false | 4.064057 | false | false | false |
MeGotsThis/BotGotsThis | lib/cache/_bttv_api.py | 1 | 3085 | import asyncio
import json
from typing import Awaitable, Dict, List, Optional, Tuple # noqa: F401
from ._abc import AbcCacheStore
from ..api import bttv
class BetterTwitchTvApisMixin(AbcCacheStore):
def _bttvGlobalEmoteKey(self) -> str:
return 'emote:bttv'
async def bttv_load_global_emotes(self, *, background: bool=False) -> bool:
key: str = self._bttvGlobalEmoteKey()
ttl: int = await self.redis.ttl(key)
if ttl >= 30 and background:
return True
if ttl >= 0 and not background:
return True
emotes: Optional[Dict[str, str]]
emotes = await bttv.getGlobalEmotes()
if emotes is None:
return False
await self.bttv_save_global_emotes(emotes)
return True
async def bttv_save_global_emotes(self, emotes: Dict[str, str]) -> bool:
await self.redis.setex(self._bttvGlobalEmoteKey(), 3600,
json.dumps(emotes))
return True
async def bttv_get_global_emotes(self) -> Optional[Dict[str, str]]:
key: str = self._bttvGlobalEmoteKey()
value: Optional[str] = await self.redis.get(key)
if value is None:
return None
return json.loads(value)
def _bttvBroadcasterEmoteKey(self, broadcaster: str) -> str:
return f'emote:bttv:{broadcaster}'
async def bttv_load_broadcaster_emotes(self, broadcaster: str, *,
background: bool=False) -> bool:
key: str = self._bttvBroadcasterEmoteKey(broadcaster)
ttl: int = await self.redis.ttl(key)
if ttl >= 30 and background:
return True
if ttl >= 0 and not background:
return True
emotes: Optional[Dict[str, str]] = {}
emotes = await bttv.getBroadcasterEmotes(broadcaster)
if emotes is None:
return False
await self.bttv_save_broadcaster_emotes(broadcaster, emotes)
return True
async def bttv_save_broadcaster_emotes(self, broadcaster: str,
emotes: Dict[str, str]) -> bool:
await self.redis.setex(self._bttvBroadcasterEmoteKey(broadcaster),
3600, json.dumps(emotes))
return True
async def bttv_get_broadcaster_emotes(self, broadcaster: str
) -> Optional[Dict[str, str]]:
key: str = self._bttvBroadcasterEmoteKey(broadcaster)
value: Optional[str] = await self.redis.get(key)
if value is None:
return None
return json.loads(value)
async def bttv_get_cached_broadcasters(self) -> Dict[str, int]:
keys: List[str] = []
cur: str = '0'
ckeys: List[str]
while cur:
cur, ckeys = await self.redis.scan(cur, match='emote:bttv:*')
keys.extend(ckeys)
ttlValues: Tuple[int, ...] = await asyncio.gather(
*[self.redis.ttl(key) for key in keys]
)
        return {key[len('emote:bttv:'):]: ttl for key, ttl in zip(keys, ttlValues)}
| gpl-3.0 | -1,112,745,212,235,990,500 | 37.08642 | 79 | 0.585737 | false | 3.450783 | false | false | false |
spacecowboy/jkutils | jkutils/misc.py | 1 | 7628 | """
Functions which do not sort well under another name. Will all be imported
by __init__.py to package level.
"""
from __future__ import division
import numpy as np
import pandas as pd
def split_dataframe_class_columns(df, upper_lim=5, lower_lim=3, int_only=True):
'''
Splits columns of a dataframe where rows can only take a limited
amount of valid values, into seperate columns
for each observed value. The result is a number of columns which are
exclusive with each other: only one can be 1 at any time.
Parameters:
- df, pandas dataframe to work with
- upper_lim, only consider columns with less unique values (default 5)
- lower_lim, only consider equal or more unique values (default 3)
- int_only, if True only include columns with all integers
Returns:
A new pandas dataframe with the same columns as df, except those columns
which have been split.
Note: This function preserves NaNs.
'''
ndf = pd.DataFrame()
for col in df.columns:
uniques = np.unique(df[col])
# Dont count nans as unique values
nans = np.isnan(uniques)
uniques = uniques[~nans]
# If class variable
if ((len(uniques) >= lower_lim and len(uniques) < upper_lim) and
(not int_only or np.all(uniques.astype(int) == uniques))):
# Split it, one col for each unique value
for val in uniques:
# A human-readable name
ncol = "{}{}".format(col, val)
# Set values
ndf[ncol] = np.zeros_like(df[col])
ndf[ncol][df[col] == val] = 1
# Also transfer NaNs
ndf[ncol][df[col].isnull()] = np.nan
else:
# Not a class variable
ndf[col] = df[col]
return ndf
def replace_dataframe_nans(df, binary_median=False):
'''
Replaces the NaNs of a pandas dataframe with
the mean of the column, in case of continuous
values. If the column is binary, it can be replaced
with the median value if desired.
Parameters:
- df, the dataframe to replace NaNs in
'''
for col in df.columns:
uniques = np.unique(df[col])
# Dont count nans as unique values
nans = np.isnan(uniques)
uniques = uniques[~nans]
nans = np.isnan(df[col])
if binary_median and len(uniques) == 2:
# Binary, use median
df[col][nans] = df[col].median()
else:
# Use mean
df[col][nans] = df[col].mean()
def normalize_dataframe(dataframe, cols=None, binvals=None):
'''
Normalize a pandas dataframe. Binary values are
forced to (-1,1), and continuous (the rest) variables
are forced to zero mean and standard deviation = 1
Parameters:
- dataframe, the pandas dataframe to normalize column-wise
- cols, (optional iterable) the column names in the dataframe to normalize.
    - binvals, (default (-1, 1)) tuple giving the (min, max) binary values to use.
Note: this function preserves NaNs.
'''
if cols is None:
cols = dataframe.columns
if binvals is None:
binvals = (-1, 1)
for col in cols:
# Check if binary
uniques = np.unique(dataframe[col])
if len(uniques) == 2:
# Binary, force into 0 and 1
mins = dataframe[col] == np.min(uniques)
maxs = dataframe[col] == np.max(uniques)
dataframe[col][mins] = binvals[0]
dataframe[col][maxs] = binvals[1]
else:
# Can still be "binary"
if len(uniques) == 1 and (uniques[0] == 0 or uniques[0] == 1):
# Yes, single binary value
continue
# Continuous, zero mean with 1 standard deviation
mean = dataframe[col].mean()
std = dataframe[col].std()
dataframe[col] -= mean
# Can be single value
if std > 0:
dataframe[col] /= std
def normalize_array(array, cols):
'''
Normalize a numpy array. Binary values are
forced to 0-1, and continuous (the rest) variables
are forced to zero mean and standard deviation = 1
Parameters:
- array, the array to normalize column-wise
- cols, (iterable) the column indices in the array to normalize.
'''
for col in cols:
# Check if binary
uniques = np.unique(array[col])
if len(uniques) == 2:
# Binary, force into 0 and 1
mins = array[col] == np.min(uniques)
maxs = array[col] == np.max(uniques)
array[mins, col] = 0
array[maxs, col] = 1
else:
# Can still be "binary"
if len(uniques) == 1 and (uniques[0] == 0 or uniques[0] == 1):
# Yes, single binary value
continue
# Continuous, zero mean with 1 standard deviation
mean = array[col].mean()
std = array[col].std()
array[col] -= mean
# Can be single value
if std > 0:
array[col] /= std
def sample_wr(population, k):
'''Selects k random elements (with replacement) from a population.
Returns an array of indices.
'''
return np.random.randint(0, len(population), k)
def bagging(data, count=None):
    '''Samples count elements (with replacement, default count=len(data)) from data and returns the sampled rows.'''
if count is None:
count = len(data)
return data[np.random.randint(0, len(data), count)]
def bagging_stratified(data, column, count=None):
'''Samples with replacement from the data set but guarantees that
the ratio of values in column remains the same.
Column is expected to be a binary column (any two values)
'''
vals = np.unique(data[:, column])
if len(vals) != 2:
raise ValueError("Column {} is not a binary column. Number of values are: {}".format(column, len(vals)))
group1 = data[data[:, column] == vals[0]]
group2 = data[data[:, column] == vals[1]]
if count is None:
count = len(data)
count1 = int(round(len(group1)*count/len(data)))
count2 = int(round(len(group2)*count/len(data)))
retval = np.append(bagging(group1, count1), bagging(group2, count2), axis=0)
np.random.shuffle(retval)
return retval
def divide_stratified(data, column, frac):
'''Divides the data set in two pieces, one being frac*len(data).
Stratifies for the designated column to guarantee that the ratio
remains the same. Column must be binary but can have any values.
Returns (example frac=1/3) a tuple which has two lists of indices:
(subdata of size 2/3, subdata of size 1/3)
'''
if (frac <= 0 or frac >= 1):
raise ValueError("Frac must be a fraction between 0 and 1, not: {}".format(frac))
vals = np.unique(data[:, column])
if len(vals) != 2:
raise ValueError("Column {} is not a binary column. Number of values are: {}".format(column, len(vals)))
idx = np.arange(0, len(data))
np.random.shuffle(idx)
group1 = idx[data[:, column] == vals[0]]
group2 = idx[data[:, column] == vals[1]]
group1_num = int(round(frac*len(group1)))
group2_num = int(round(frac*len(group2)))
group1_test = group1[:group1_num]
group1_trn = group1[group1_num:]
group2_test = group2[:group2_num]
group2_trn = group2[group2_num:]
trn = np.append(group1_trn, group2_trn)
test = np.append(group1_test, group2_test)
np.random.shuffle(trn)
np.random.shuffle(test)
return (trn, test)
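
# --- Usage sketch (added for illustration; not part of the original module) ---
# Exercises the DataFrame helpers above on a tiny hypothetical data set; it
# assumes a pandas version contemporary with this module (the helpers rely on
# chained assignment, which copy-on-write pandas no longer honours).
if __name__ == '__main__':
    df = pd.DataFrame({'age': [30.5, np.nan, 52.1, 41.2],
                       'grade': [1, 2, 3, 2],
                       'sex': [0, 1, 1, 0]})
    df = split_dataframe_class_columns(df)  # 'grade' is split into one column per value
    replace_dataframe_nans(df)              # the NaN in 'age' is replaced by the column mean
    normalize_dataframe(df)                 # binary -> (-1, 1), continuous -> zero mean / unit std
    print(df)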
| gpl-2.0 | -533,927,163,518,691,100 | 31.459574 | 112 | 0.597929 | false | 3.789369 | true | false | false |
rickyrem/garrulous-api | model/Users.py | 1 | 6046 | # Garrulous API
# Authors: Michael Pierre and Richard Meyers
"""
Copyright (C) 2015
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import collections
import hashlib
import time
import logging
import pprint
from Database import Database
class Users(Database):
def __init__(self):
super(Users, self).__init__()
# Create user table is not exist.
def createIfNotExists(self):
self.write("""CREATE TABLE IF NOT EXISTS `users` (
`uid` INTEGER PRIMARY KEY AUTOINCREMENT,
`username` TEXT,
`first_name` TEXT,
`last_name` TEXT,
`email` TEXT,
`phone` TEXT,
`password` TEXT,
`date_joined` INTEGER
)""")
# Create
# Create New User
def createUser(self, user_name="", password="", first_name="", last_name="", email="", phone=""):
#This time is the date they joined
times = int(time.time())
hash = hashlib.md5(password)
hashword = hash.hexdigest()
if self.username_exists(user_name):
return "Username already taken"
if self.write("INSERT INTO users (username,first_name,last_name,email,password,phone,date_joined) "
"VALUES (?,?,?,?,?,?,?) ", (user_name, first_name, last_name, email, hashword,
phone, times)):
return True
return False
def updateUserByUid(self, uid, user_name=None, password=None, first_name=None, last_name=None, email=None,
phone=None):
# This needs to build the query out of the amount of parameters that exist. That way a all the existing
# data doesn't get overwritten.\
success = True
if email:
if not self.write('UPDATE users SET email=? '
'WHERE uid=?', (email, uid)):
success = False
        if last_name:
            if not self.write('UPDATE users SET last_name=? '
                              'WHERE uid=?', (last_name, uid)):
                success = False
        if first_name:
            if not self.write('UPDATE users SET first_name=? '
                              'WHERE uid=?', (first_name, uid)):
                success = False
        if password:
            hash = hashlib.md5(password)
            hashword = hash.hexdigest()
            if not self.write('UPDATE users SET password=? '
                              'WHERE uid=?', (hashword, uid)):
                success = False
        if user_name:
            if not self.write('UPDATE users SET username=? '
                              'WHERE uid=?', (user_name, uid)):
                success = False
return success
def authenticateUser(self, user_name="", password="", phone="", email=""):
hash = hashlib.md5(password)
hashword = hash.hexdigest()
# This gets the one row and returns only the first column
try:
rows = self.queryOne("SELECT uid FROM users WHERE username = ? and password = ?", (user_name, hashword))[0]
except TypeError:
return False
return rows
def username_exists(self, username):
"""
Check if this username exists already.
:param username:
:return:
"""
try:
ifexist = self.queryOne("SELECT username FROM users WHERE username = ?", (username,))[0]
except TypeError:
return False
if ifexist:
return True
return False
# Read All Users
def getUsers(self):
# We are not returning all the rows
# We definitely don't want to return the password column, that is only used for auth.
# There should be the option of passing in the row quantity.
rows = self.query("SELECT uid, username, first_name, last_name, email FROM users")
objects_list = []
for row in rows:
d = collections.OrderedDict()
d['uid'] = row[0]
d['username'] = row[1]
d['first_name'] = row[2]
d['last_name'] = row[3]
d['email'] = row[4]
objects_list.append(d)
return objects_list
def getUsersLike(self, search):
search = "%" + search + "%"
rows = self.query("SELECT uid, username, first_name, last_name FROM users WHERE username LIKE ? OR first_name LIKE ? OR last_name LIKE ? LIMIT 20", (search, search, search,))
objects_list = []
for row in rows:
d = collections.OrderedDict()
d['uid'] = row[0]
d['username'] = row[1]
d['first_name'] = row[2]
d['last_name'] = row[3]
objects_list.append(d)
return objects_list
# Read User Information By User ID.
def getUserByUID(self, uid):
uid = str(uid)
row = self.queryOne("SELECT uid, username, first_name, last_name, email FROM users WHERE uid=?", (uid))
d = {}
d['uid'] = row[0]
d['username'] = row[1]
d['first_name'] = row[2]
d['last_name'] = row[3]
d['email'] = row[4]
return d
# Read User Information By Username.
def getUserByUsername(self, username):
row = self.queryOne("SELECT uid, username, first_name, last_name, email FROM users WHERE username=%s" % username)
d = {}
d['uid'] = row[0]
d['username'] = row[1]
d['first_name'] = row[2]
d['last_name'] = row[3]
d['email'] = row[4]
return d | apache-2.0 | -2,604,886,100,791,911,000 | 34.781065 | 182 | 0.565002 | false | 4.172533 | false | false | false |
FBTUG/DevZone | FBTUG_Commander/dialog_PeripheralSetting.py | 1 | 6164 | import Tkinter
import tkMessageBox
import tkFont
#from Tkinter import *
import tkSimpleDialog
class PeripheralSetting(tkSimpleDialog.Dialog):
# ########################################
def __init__(self, master, arg_PinList=[('',0)]):
print 'init'
strFont= 'Arial'
self.__myfont12 = tkFont.Font(family=strFont, size=12)
self.__myfont12_Bold = tkFont.Font(family=strFont, size=12, weight= tkFont.BOLD)
self.__myfont10 = tkFont.Font(family=strFont, size=10)
self.__myfont10_Bold = tkFont.Font(family=strFont, size=10, weight= tkFont.BOLD)
self.__PinList= arg_PinList
self.__MaxRow= 7
self.__CurrentRow= len(arg_PinList)
self.__CurGridRow= self.__CurrentRow
self.__NumberList= range(0, self.__MaxRow+1)
self.__entries_Func= [0]
self.__entries_PinNumb= [0]
self.__btns_clear=[0]
#self.master= master
tkSimpleDialog.Dialog.__init__(self, master, "Peripherals")
# ########################################
def body(self, master):
print 'body of Dialog PERIPHERALS'
Tkinter.Label(master, text="Function", font= self.__myfont12_Bold).grid(row=0, column=0)
Tkinter.Label(master, text="Pin Number", font= self.__myfont12_Bold).grid(row=0, column=1)
Tkinter.Button(master, text= '+', font= self.__myfont12_Bold, command= self.btn_add_click, fg='white',activeforeground= 'white', bg= '#007700', activebackground= '#00aa00').grid(row=0,column=2)
for i in self.__NumberList:
if i==0:
continue
en_func = Tkinter.Entry(master)
#en_func.insert(Tkinter.END, '{0}'.format(i))
self.__entries_Func.append(en_func)
en_pinnumb= Tkinter.Entry(master)
self.__entries_PinNumb.append(en_pinnumb)
btn= Tkinter.Button(master, text= '-', font= self.__myfont12_Bold, command= lambda i=i: self.btn_clear_click(i),fg='white',activeforeground= 'white', bg= '#aa0000', activebackground= '#ee0000')
self.__btns_clear.append(btn)
'''
en_func.grid(row=i+1,column=0)
en_pinnumb.grid(row=i+1, column=1)
btn.grid(row=i+1,column=2)
'''
if i <= self.__CurrentRow:
en_func.insert(Tkinter.END, self.__PinList[i-1][0])
#en_func.insert(Tkinter.END, '{0}'.format(i))
en_pinnumb.insert(Tkinter.END, self.__PinList[i-1][1])
#'''
en_func.grid(row=i,column=0)
en_pinnumb.grid(row=i, column=1)
btn.grid(row=i,column=2)
'''
en_func.grid_remove()
en_pinnumb.grid_remove()
btn.grid_remove()
#'''
#self.add_Row( i)
return self.__entries_Func[0] # initial focus
def apply(self):
try:
self.result=[]
for i in range(1, len(self.__entries_Func)):
r1, r2= self.__entries_Func[i].get(), self.__entries_PinNumb[i].get()
if r1 != '' and r2 != '':
self.result.append([r1,int(r2)])
#print 'result:', self.result
print 'End of dialog' # or something
except ValueError:
tkMessageBox.showwarning("Bad input","Illegal values, please try again")
def btn_clear_click(self, arg_index):
clear_row= self.__NumberList.index(arg_index)
'''
print '============= CLEAR ============'
print 'Clear Row:', clear_row
print 'NumberLIst:', self.__NumberList
print 'clear_index', arg_index
gridInfo= self.__entries_Func[arg_index].grid_info()
#print gridInfo
print 'Clear Grid Row', gridInfo['row']
#'''
#'''
self.__entries_Func[arg_index].delete(0, 'end')
self.__entries_PinNumb[arg_index].delete(0, 'end')
self.__entries_Func[arg_index].grid_forget()
self.__entries_PinNumb[arg_index].grid_forget()
self.__btns_clear[arg_index].grid_forget()
'''
self.__entries_Func[arg_index].grid_remove()
self.__entries_PinNumb[arg_index].grid_remove()
self.__btns_clear[arg_index].grid_remove()
#'''
tmp= self.__NumberList[clear_row]
del self.__NumberList[clear_row]
self.__NumberList.append(tmp)
self.__CurrentRow= self.__CurrentRow-1
#print '__CurrentRow:', self.__CurrentRow
#'''
def btn_add_click(self):
'''
print '============= ADD ============'
print '### Current Row', self.__CurrentRow
print 'NumberLIst:', self.__NumberList
for i in range(1,len(self.__entries_Func)):
tmp= self.__NumberList[i]
gridInfo= self.__entries_Func[tmp].grid_info()
if len(gridInfo)!=0:
print 'Row ',str(i),' Entries List[', str(tmp),']: ', self.__entries_Func[tmp].grid_info()['row']
else:
print 'Row ',str(i),' empty'
#'''
if self.__CurrentRow < self.__MaxRow:
self.__CurrentRow= self.__CurrentRow+1
self.__CurGridRow= self.__CurGridRow+1
#self.__CurGridRow= self.__CurrentRow
add_index= self.__NumberList[self.__CurrentRow]
'''
print 'Added Row:', self.__CurrentRow
print 'add_index (NumberList[{0}]): {1}'.format(self.__CurrentRow,add_index)
print 'Grid Row:', self.__CurGridRow
#'''
self.__entries_Func[add_index].grid(row=self.__CurGridRow, column=0)
#self.__entries_Func[add_index].delete(0, 'end')
self.__entries_PinNumb[add_index].grid(row=self.__CurGridRow, column=1)
#self.__entries_PinNumb[add_index].delete(0, 'end')
self.__btns_clear[add_index].grid(row=self.__CurGridRow, column=2)
#print 'Row ',str(self.__CurrentRow),' Entries List[', str(add_index),']: ', self.__entries_Func[add_index].grid_info()['row']
elif self.__CurrentRow== self.__MaxRow:
print 'Max of Row is ', self.__MaxRow
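    # A minimal usage sketch (the pin list below is hypothetical; a Tk root
    # window is required before the dialog can be opened):
    #   root = Tkinter.Tk()
    #   dlg = PeripheralSetting(root, [('LED', 13), ('Relay', 7)])
    #   print dlg.result  # list of [function, pin] pairs filled in by apply() on OK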
| mit | -6,206,398,729,244,919,000 | 43.992701 | 205 | 0.542343 | false | 3.581639 | false | false | false |
whelan957/leetcode | python3/LinkedList/leetcode876. Middle of the Linked List.py | 1 | 1168 | # Given a non-empty, singly linked list with head node head, return a middle node of linked list.
# If there are two middle nodes, return the second middle node.
# Example 1:
# Input: [1,2,3,4,5]
# Output: Node 3 from this list (Serialization: [3,4,5])
# The returned node has value 3. (The judge's serialization of this node is [3,4,5]).
# Note that we returned a ListNode object ans, such that:
# ans.val = 3, ans.next.val = 4, ans.next.next.val = 5, and ans.next.next.next = NULL.
# Example 2:
# Input: [1,2,3,4,5,6]
# Output: Node 4 from this list (Serialization: [4,5,6])
# Since the list has two middle nodes with values 3 and 4, we return the second one.
# Note:
# The number of nodes in the given list will be between 1 and 100.
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
# time:O(n)
# spce:O(1)
# score:96.49(20ms)
def middleNode(self, head: ListNode) -> ListNode:
slow = head
fast = head
while fast and fast.next:
slow = slow.next
fast = fast.next.next
return slow
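    # Worked example (illustrative): for the list 1->2->3->4->5 built from
    # ListNode objects, middleNode returns the node holding 3; for 1->...->6 it
    # returns the node holding 4, i.e. the second of the two middle nodes.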
| gpl-3.0 | -9,035,342,479,006,186,000 | 27.487805 | 97 | 0.633562 | false | 3.081794 | false | false | false |
foobarbazblarg/stayclean | stayclean-2020-december/venv/lib/python3.8/site-packages/pip/_internal/utils/temp_dir.py | 5 | 8845 | from __future__ import absolute_import
import errno
import itertools
import logging
import os.path
import tempfile
from contextlib import contextmanager
from pip._vendor.contextlib2 import ExitStack
from pip._vendor.six import ensure_text
from pip._internal.utils.compat import WINDOWS
from pip._internal.utils.misc import enum, rmtree
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Any, Dict, Iterator, Optional, TypeVar, Union
_T = TypeVar('_T', bound='TempDirectory')
logger = logging.getLogger(__name__)
# Kinds of temporary directories. Only needed for ones that are
# globally-managed.
tempdir_kinds = enum(
BUILD_ENV="build-env",
EPHEM_WHEEL_CACHE="ephem-wheel-cache",
REQ_BUILD="req-build",
)
_tempdir_manager = None # type: Optional[ExitStack]
@contextmanager
def global_tempdir_manager():
# type: () -> Iterator[None]
global _tempdir_manager
with ExitStack() as stack:
old_tempdir_manager, _tempdir_manager = _tempdir_manager, stack
try:
yield
finally:
_tempdir_manager = old_tempdir_manager
class TempDirectoryTypeRegistry(object):
"""Manages temp directory behavior
"""
def __init__(self):
# type: () -> None
self._should_delete = {} # type: Dict[str, bool]
def set_delete(self, kind, value):
# type: (str, bool) -> None
"""Indicate whether a TempDirectory of the given kind should be
auto-deleted.
"""
self._should_delete[kind] = value
def get_delete(self, kind):
# type: (str) -> bool
"""Get configured auto-delete flag for a given TempDirectory type,
default True.
"""
return self._should_delete.get(kind, True)
_tempdir_registry = None # type: Optional[TempDirectoryTypeRegistry]
@contextmanager
def tempdir_registry():
# type: () -> Iterator[TempDirectoryTypeRegistry]
"""Provides a scoped global tempdir registry that can be used to dictate
whether directories should be deleted.
"""
global _tempdir_registry
old_tempdir_registry = _tempdir_registry
_tempdir_registry = TempDirectoryTypeRegistry()
try:
yield _tempdir_registry
finally:
_tempdir_registry = old_tempdir_registry
class _Default(object):
pass
_default = _Default()
class TempDirectory(object):
"""Helper class that owns and cleans up a temporary directory.
This class can be used as a context manager or as an OO representation of a
temporary directory.
Attributes:
path
Location to the created temporary directory
delete
Whether the directory should be deleted when exiting
(when used as a contextmanager)
Methods:
cleanup()
Deletes the temporary directory
When used as a context manager, if the delete attribute is True, on
exiting the context the temporary directory is deleted.
"""
def __init__(
self,
path=None, # type: Optional[str]
delete=_default, # type: Union[bool, None, _Default]
kind="temp", # type: str
globally_managed=False, # type: bool
):
super(TempDirectory, self).__init__()
if delete is _default:
if path is not None:
# If we were given an explicit directory, resolve delete option
# now.
delete = False
else:
# Otherwise, we wait until cleanup and see what
# tempdir_registry says.
delete = None
# The only time we specify path is in for editables where it
# is the value of the --src option.
if path is None:
path = self._create(kind)
self._path = path
self._deleted = False
self.delete = delete
self.kind = kind
if globally_managed:
assert _tempdir_manager is not None
_tempdir_manager.enter_context(self)
@property
def path(self):
# type: () -> str
assert not self._deleted, (
"Attempted to access deleted path: {}".format(self._path)
)
return self._path
def __repr__(self):
# type: () -> str
return "<{} {!r}>".format(self.__class__.__name__, self.path)
def __enter__(self):
# type: (_T) -> _T
return self
def __exit__(self, exc, value, tb):
# type: (Any, Any, Any) -> None
if self.delete is not None:
delete = self.delete
elif _tempdir_registry:
delete = _tempdir_registry.get_delete(self.kind)
else:
delete = True
if delete:
self.cleanup()
def _create(self, kind):
# type: (str) -> str
"""Create a temporary directory and store its path in self.path
"""
# We realpath here because some systems have their default tmpdir
# symlinked to another directory. This tends to confuse build
# scripts, so we canonicalize the path by traversing potential
# symlinks here.
path = os.path.realpath(
tempfile.mkdtemp(prefix="pip-{}-".format(kind))
)
logger.debug("Created temporary directory: %s", path)
return path
def cleanup(self):
# type: () -> None
"""Remove the temporary directory created and reset state
"""
self._deleted = True
if not os.path.exists(self._path):
return
# Make sure to pass unicode on Python 2 to make the contents also
        # use unicode, ensuring non-ASCII names can be represented.
# This is only done on Windows because POSIX platforms use bytes
# natively for paths, and the bytes-text conversion omission avoids
# errors caused by the environment configuring encodings incorrectly.
if WINDOWS:
rmtree(ensure_text(self._path))
else:
rmtree(self._path)
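# A minimal usage sketch (an illustration only, not pip documentation; the
# kind string is arbitrary):
#
#   with TempDirectory(kind="example") as tmp_dir:
#       ...  # create files under tmp_dir.path
#   # on exit the directory is removed unless delete=False was given or the
#   # global tempdir registry says otherwise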
class AdjacentTempDirectory(TempDirectory):
"""Helper class that creates a temporary directory adjacent to a real one.
Attributes:
original
The original directory to create a temp directory for.
path
After calling create() or entering, contains the full
path to the temporary directory.
delete
Whether the directory should be deleted when exiting
(when used as a contextmanager)
"""
# The characters that may be used to name the temp directory
# We always prepend a ~ and then rotate through these until
# a usable name is found.
# pkg_resources raises a different error for .dist-info folder
# with leading '-' and invalid metadata
LEADING_CHARS = "-~.=%0123456789"
def __init__(self, original, delete=None):
# type: (str, Optional[bool]) -> None
self.original = original.rstrip('/\\')
super(AdjacentTempDirectory, self).__init__(delete=delete)
@classmethod
def _generate_names(cls, name):
# type: (str) -> Iterator[str]
"""Generates a series of temporary names.
The algorithm replaces the leading characters in the name
with ones that are valid filesystem characters, but are not
valid package names (for both Python and pip definitions of
package).
"""
for i in range(1, len(name)):
for candidate in itertools.combinations_with_replacement(
cls.LEADING_CHARS, i - 1):
new_name = '~' + ''.join(candidate) + name[i:]
if new_name != name:
yield new_name
# If we make it this far, we will have to make a longer name
for i in range(len(cls.LEADING_CHARS)):
for candidate in itertools.combinations_with_replacement(
cls.LEADING_CHARS, i):
new_name = '~' + ''.join(candidate) + name
if new_name != name:
yield new_name
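    # A sketch of the output: for the name "foo" this yields "~oo" first, then
    # one-character variants such as "~-o" and "~~o", and finally names that
    # keep the whole string, starting with "~foo".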
def _create(self, kind):
# type: (str) -> str
root, name = os.path.split(self.original)
for candidate in self._generate_names(name):
path = os.path.join(root, candidate)
try:
os.mkdir(path)
except OSError as ex:
# Continue if the name exists already
if ex.errno != errno.EEXIST:
raise
else:
path = os.path.realpath(path)
break
else:
# Final fallback on the default behavior.
path = os.path.realpath(
tempfile.mkdtemp(prefix="pip-{}-".format(kind))
)
logger.debug("Created temporary directory: %s", path)
return path
| mit | 1,267,157,679,403,959,000 | 30.144366 | 79 | 0.596269 | false | 4.433584 | false | false | false |
adfernandes/pcp | build/ci/release-bintray.py | 1 | 7751 | #!/usr/bin/env python3
# pylint: disable=too-many-arguments
import sys
import os
import argparse
import yaml
import requests
PACKAGE_LICENSES = ['GPL-2.0']
PACKAGE_VCS_URL = 'https://github.com/performancecopilot/pcp'
class TimeoutHTTPAdapter(requests.adapters.HTTPAdapter):
"""
https://findwork.dev/blog/advanced-usage-python-requests-timeouts-retries-hooks/
"""
def __init__(self, *args, **kwargs):
if "timeout" in kwargs:
self.timeout = kwargs["timeout"]
del kwargs["timeout"]
else:
raise Exception('Please specify a timeout.')
super().__init__(*args, **kwargs)
def send(self, request, **kwargs):
timeout = kwargs.get("timeout")
if timeout is None:
kwargs["timeout"] = self.timeout
return super().send(request, **kwargs)
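# A standalone usage sketch of the adapter (illustrative; the URL is a
# placeholder and not part of this script's flow):
#   session = requests.Session()
#   session.mount("https://", TimeoutHTTPAdapter(timeout=30, max_retries=3))
#   session.get("https://example.org")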
class BintrayApi:
def __init__(self, subject: str, user: str, apikey: str, gpg_passphrase: str, endpoint='https://api.bintray.com',
timeout=20*60):
self.subject = subject
self.user = user
self.apikey = apikey
self.gpg_passphrase = gpg_passphrase
self.endpoint = endpoint
self.session = requests.Session()
retries = requests.packages.urllib3.util.retry.Retry(
total=3, backoff_factor=10, status_forcelist=[429, 500, 502, 503, 504])
self.session.mount(self.endpoint, TimeoutHTTPAdapter(timeout=timeout, max_retries=retries))
def setup_repository(self, repository, repository_type, repository_description):
r = self.session.get(
f"{self.endpoint}/repos/{self.subject}/{repository}",
auth=(self.user, self.apikey),
)
if r.status_code == 404:
print(f"Creating repository bintray.com/{self.subject}/{repository}")
r = self.session.post(
f"{self.endpoint}/repos/{self.subject}/{repository}",
auth=(self.user, self.apikey),
json={
'type': repository_type,
'desc': repository_description,
'gpg_use_owner_key': True,
},
)
print(r.text)
r.raise_for_status()
print()
def setup_package(self, repository, package):
r = self.session.get(
f"{self.endpoint}/packages/{self.subject}/{repository}/{package}",
auth=(self.user, self.apikey),
)
if r.status_code == 404:
print(f"Creating package bintray.com/{self.subject}/{repository}/{package}")
r = self.session.post(
f"{self.endpoint}/packages/{self.subject}/{repository}",
auth=(self.user, self.apikey),
json={
'name': package,
'licenses': PACKAGE_LICENSES,
'vcs_url': PACKAGE_VCS_URL
},
)
print(r.text)
r.raise_for_status()
print()
def upload(self, repository, package, version, params, path):
file_name = os.path.basename(path)
params = ';'.join([f"{k}={v}" for k, v in params.items()])
print(f"Uploading {file_name} to bintray.com/{self.subject}/{repository}/{package}/{version}")
with open(path, 'rb') as f:
r = self.session.put(
f"{self.endpoint}/content/{self.subject}/{repository}/{package}/{version}/{file_name};{params}",
auth=(self.user, self.apikey),
headers={'X-GPG-PASSPHRASE': self.gpg_passphrase},
data=f,
)
print(r.text)
if r.status_code not in [200, 409]:
# ignore HTTP 409: An artifact with the path ... already exists [under another version]
r.raise_for_status()
print()
def sign_version(self, repository, package, version):
print(f"Signing version bintray.com/{self.subject}/{repository}/{package}/{version}")
r = self.session.post(
f"{self.endpoint}/gpg/{self.subject}/{repository}/{package}/versions/{version}",
auth=(self.user, self.apikey),
headers={'X-GPG-PASSPHRASE': self.gpg_passphrase},
)
print(r.text)
r.raise_for_status()
print()
def sign_metadata(self, repository, package, version):
print(f"Signing metadata of bintray.com/{self.subject}/{repository}")
r = self.session.post(
f"{self.endpoint}/calc_metadata/{self.subject}/{repository}",
auth=(self.user, self.apikey),
headers={'X-GPG-PASSPHRASE': self.gpg_passphrase},
)
print(r.text)
r.raise_for_status()
print()
def publish(self, repository, package, version):
print(f"Publish version bintray.com/{self.subject}/{repository}/{package}/{version}")
r = self.session.post(
f"{self.endpoint}/content/{self.subject}/{repository}/{package}/{version}/publish",
auth=(self.user, self.apikey),
headers={'X-GPG-PASSPHRASE': self.gpg_passphrase},
)
print(r.text)
r.raise_for_status()
print()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--subject', default=os.environ.get('BINTRAY_SUBJECT', 'pcp'))
parser.add_argument('--package', default=os.environ.get('BINTRAY_PACKAGE', 'pcp'))
parser.add_argument('--user', default=os.environ.get('BINTRAY_USER'))
parser.add_argument('--apikey', default=os.environ.get('BINTRAY_APIKEY'))
parser.add_argument('--gpg_passphrase', default=os.environ.get('BINTRAY_GPG_PASSPHRASE'))
parser.add_argument('--version', required=True)
parser.add_argument('--source')
parser.add_argument('artifact', nargs='*')
args = parser.parse_args()
if not args.user or not args.apikey or not args.gpg_passphrase:
parser.print_help()
sys.exit(1)
bintray = BintrayApi(args.subject, args.user, args.apikey, args.gpg_passphrase)
repositories_to_publish = []
if args.source:
bintray.upload('source', args.package, args.version, {}, args.source)
repositories_to_publish.append('source')
for artifact_dir in args.artifact:
# ex. build-fedora31-container
artifact, platform_name, _runner = os.path.basename(artifact_dir).split('-')
if artifact != 'build':
continue
platform_def_path = os.path.join(os.path.dirname(__file__), f"platforms/{platform_name}.yml")
with open(platform_def_path) as f:
platform = yaml.safe_load(f)
if 'bintray' not in platform:
print(f"Skipping {platform_name}: bintray is not configured in {platform_name}.yml")
continue
bintray_params = platform['bintray'].get('params', {})
repository_params = platform['bintray']['repository']
repository = repository_params['name']
bintray.setup_repository(repository, repository_params['type'], repository_params['description'])
bintray.setup_package(repository, args.package)
for artifact_filename in os.listdir(artifact_dir):
artifact_filepath = os.path.join(artifact_dir, artifact_filename)
bintray.upload(repository, args.package, args.version, bintray_params, artifact_filepath)
bintray.sign_version(repository, args.package, args.version)
bintray.sign_metadata(repository, args.package, args.version)
repositories_to_publish.append(repository)
# publish new version for all distributions at the same time
for repository in repositories_to_publish:
bintray.publish(repository, args.package, args.version)
if __name__ == '__main__':
main()
| lgpl-2.1 | 8,498,716,302,200,754,000 | 38.146465 | 117 | 0.599278 | false | 3.930527 | false | false | false |
Openlights/firemix | patterns/stripes.py | 1 | 4028 | # This file is part of Firemix.
#
# Copyright 2013-2020 Jonathan Evans <[email protected]>
#
# Firemix is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Firemix is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Firemix. If not, see <http://www.gnu.org/licenses/>.
import colorsys
import random
import math
import numpy as np
import ast
from lib.pattern import Pattern
from lib.parameters import FloatParameter, IntParameter, StringParameter
from lib.color_fade import ColorFade
class StripeGradient(Pattern):
_fader = None
def setup(self):
self.add_parameter(FloatParameter('audio-brightness', 1.0))
self.add_parameter(FloatParameter('audio-stripe-width', 100.0))
self.add_parameter(FloatParameter('audio-speed', 0.0))
self.add_parameter(FloatParameter('speed', 0.01))
self.add_parameter(FloatParameter('angle-speed', 0.1))
self.add_parameter(FloatParameter('stripe-width', 20))
self.add_parameter(FloatParameter('center-orbit-distance', 200))
self.add_parameter(FloatParameter('center-orbit-speed', 0.1))
self.add_parameter(FloatParameter('hue-step', 0.1))
self.add_parameter(IntParameter('posterization', 8))
self.add_parameter(StringParameter('color-gradient', "[(0,0,1), (0,0,1), (0,1,1), (0,1,1), (0,0,1)]"))
self.add_parameter(FloatParameter('stripe-x-center', 0.5))
self.add_parameter(FloatParameter('stripe-y-center', 0.5))
self.hue_inner = random.random() + 100
self._center_rotation = random.random()
self.stripe_angle = random.random()
self.locations = self.scene().get_all_pixel_locations()
def parameter_changed(self, parameter):
fade_colors = ast.literal_eval(self.parameter('color-gradient').get())
self._fader = ColorFade(fade_colors, self.parameter('posterization').get())
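    # Note: 'color-gradient' is parsed with ast.literal_eval, so any literal
    # list of numeric 3-tuples can be supplied as the parameter string, e.g.
    # "[(0, 1, 1), (0.5, 1, 1)]" (hypothetical values; the meaning of each
    # component follows whatever convention ColorFade expects).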
def reset(self):
pass
def tick(self, dt):
super(StripeGradient, self).tick(dt)
dt *= 1.0 + self.parameter('audio-speed').get() * self._app.mixer.audio.getLowFrequency()
self.hue_inner += dt * self.parameter('speed').get()
self._center_rotation += dt * self.parameter('center-orbit-speed').get()
self.stripe_angle += dt * self.parameter('angle-speed').get()
def render(self, out):
if self._app.mixer.is_onset():
self.hue_inner = self.hue_inner + self.parameter('hue-step').get()
stripe_width = self.parameter('stripe-width').get() + self.parameter('audio-stripe-width').get() * self._app.mixer.audio.smoothEnergy
cx, cy = self.scene().center_point()
cx += math.cos(self._center_rotation) * self.parameter('center-orbit-distance').get()
cy += math.sin(self._center_rotation) * self.parameter('center-orbit-distance').get()
sx = self.parameter('stripe-x-center').get()
sy = self.parameter('stripe-y-center').get()
posterization = self.parameter('posterization').get()
x, y = self.locations.T
dx = x - cx
dy = y - cy
x = dx * math.cos(self.stripe_angle) - dy * math.sin(self.stripe_angle)
y = dx * math.sin(self.stripe_angle) + dy * math.cos(self.stripe_angle)
x = (x / stripe_width) % 1.0
y = (y / stripe_width) % 1.0
x = np.abs(x - sx)
y = np.abs(y - sy)
hues = np.int_(np.mod(x+y, 1.0) * posterization)
np.copyto(out, self._fader.color_cache[hues])
out['light'] += self._app.mixer.audio.getEnergy() * self.parameter('audio-brightness').get()
out['hue'] += self.hue_inner
| gpl-3.0 | -1,271,171,592,617,550,000 | 41.851064 | 141 | 0.654419 | false | 3.44568 | false | false | false |
shub0/algorithm-data-structure | python/generate_parenthesis.py | 1 | 1205 | #! /usr/bin/python
'''
Given n pairs of parentheses, write a function to generate all combinations of well-formed parentheses.
For example, given n = 3, a solution set is:
"((()))", "(()())", "(())()", "()(())", "()()()"
'''
class Solution:
def validParenthesis(self, string):
check = 0
for char in string:
if char == '(':
check += 1
continue
check -= 1
if check < 0:
return False
return check == 0
# @param {integer} n
# @return {string[]}
def generateParenthesis(self, n):
if n < 1:
return set()
string_dict = {'(': 1}
for index in range(1, 2 * n):
current_dict = dict()
for string in string_dict.keys():
check_num = string_dict[string]
current_dict[string+'('] = check_num + 1
if check_num > 0:
current_dict[string+')'] = check_num - 1
string_dict = current_dict
return filter(lambda x: string_dict[x] == 0, string_dict.keys())
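# Worked example: for n = 2 the loop runs 2*n - 1 = 3 times, growing '(' into
# longer prefixes such as '((' and '()', and the final filter keeps only the
# balanced strings '(())' and '()()'.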
if __name__ == '__main__':
solution = Solution()
print solution.generateParenthesis(4)
| bsd-3-clause | 813,236,506,331,725,800 | 28.390244 | 103 | 0.493776 | false | 3.95082 | false | false | false |
lexnederbragt/inverted_repeats | clip_inverted_repeats.py | 1 | 7738 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
# Some Illumina reads from ancient DNA samples were found to contain an inverted repeat
# with a different sequence inbetween
# in other words, the first x bases of a read are the reverse complement of the last x bases
# This script is meant to clip the 3' part of an inverted repeat when present
# A new fastq file is generated mentioning in the sequences ID which sequence was clipped, if any
# Two metrics files on the repeats found (and clipped) are produced as well
# When an entire sequence is its own reverse complement, this does not get clipped
# but a mention is made sequence identifier and
# these are also reported in the metrics file
#
# Written by Lex Nederbragt, with input from Bastiaan Star
# Version 1.0 release candidate 1, May 2013
#
# requires biopython, os and argparse modules
# on the UiO supercomputer "Abel", needs 'module load python2'
#
# run as 'python script_name.py -h' for instructions
# <codecell>
from Bio import Seq, SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
import os
import argparse
# <codecell>
# help text and argument parser
desc = '\n'.join(["Strips off the 3' copy of an terminal inverted repeat, when present.",
"Input: one fastq file.",
"Output:",
"1) a new fastq file with cleaned sequences: 'infile.fastq' gives 'infile.clean.fastq'",
"2) a file called 'infile.inv_reps.txt' with the stripped sequences and their counts",
"3) a file called 'infile.inv_rep_lengths.txt' with the length distribution of the stripped sequences.",
"An optional argument -s/--shortest_length can be used to set the minumum length of repeat to clip (default: 4 bp)"
])
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-i','--input', help='Input file name',required=True)
parser.add_argument('-s', '--shortest_length', help='Shortest repeat length to clip', type=int, default=4, required = False)
# <codecell>
def get_outfnames(infile):
"""Adds '.clean' to input filename (before the extension).
Example: 'infile.fastq' becomes 'infile.clean.fastq'"""
out_fnames = []
in_fname = os.path.basename(infile)
[in_fname, in_ext] = os.path.splitext(in_fname)
# fastq outfile
out_fnames.append(in_fname + '.clean' + in_ext)
# inv_rep sequences + counts
out_fnames.append(in_fname + '.inv_reps.txt')
# inv_rep lengths + counts
out_fnames.append(in_fname + '.inv_rep_lengths.txt')
return out_fnames
# <codecell>
def find_inv_repeat(seq, seq_rc):
"""Check for inverted repeat:
whether first x bases are the reverse complement of the last x bases
Returns starting position of the inverted repeat (or 0)"""
inv_rep_length = 0
# need to test whether seq is a reverse complement of itself
if seq == seq_rc:
inv_rep_length = len(seq)
else:
# check if first x bases are a reverse complement of the last x bases
for i in range(1, len(seq)):
if seq_rc[-i:] == seq[-i:]:
inv_rep_length = i
else:
break
return inv_rep_length
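# For example, in the first test sequence below the first 10 bases
# ('AGTCGTAGCT') are the reverse complement of the last 10 ('AGCTACGACT'),
# so find_inv_repeat returns 10.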
# <codecell>
def extract_inv_repeat(seq, shortest_length_to_clip):
"""After finding position of inverted repeat - if any -
returns Bio.SeqRecord with the inverted repeated part removed,
and the sequence of the removed part"""
assert shortest_length_to_clip > 0, "Shortest length to remove needs to be larger than zero, not %s" % shortest_length_to_clip
# expects a Bio.SeqRecord.SeqRecord
assert type(seq) == SeqRecord, "Not a sequence record: '%s'" % str(seq)
# get sequence and reverse complement as text format
seq_txt = str(seq.seq)
rc_seq_txt = str(seq.reverse_complement().seq)
# locate the inverted repeat - if any
inv_rep_length = find_inv_repeat(seq_txt, rc_seq_txt)
# process results
if inv_rep_length == len(seq_txt):
# sequence is its own reverse complement
new_seq = seq
inv_rep = seq_txt
new_seq.description += ' self_reverse_complement'
elif inv_rep_length >= shortest_length_to_clip:
# hit
new_seq = seq[:-inv_rep_length]
inv_rep = str(seq[-inv_rep_length:].seq)
new_seq.description += ' cleaned_off_' + inv_rep
else:
# either no hit, or hit shorter than minimum to report
new_seq = seq
inv_rep = ''
return [new_seq, inv_rep, inv_rep_length]
# <codecell>
def test(seq, shortest_length_to_clip):
"""Performs the 'unit tests'"""
result = extract_inv_repeat(SeqRecord(Seq(seq, IUPAC.unambiguous_dna)), shortest_length_to_clip)
return [str(result[0].seq), result[1], result[2]]
# <codecell>
# set of 'unit tests'
# first/last 10 RC of eachother
assert test('AGTCGTAGCTGATGCTTAGGGGCTTACTAGGCTTGAAGCTACGACT', 1) == ['AGTCGTAGCTGATGCTTAGGGGCTTACTAGGCTTGA', 'AGCTACGACT', 10]
# one base
assert test('AGTCGTAGCTGATGCTTAGGGGCTTACTAGGCTTGATGAGGATTAT', 1) == ['AGTCGTAGCTGATGCTTAGGGGCTTACTAGGCTTGATGAGGATTA', 'T', 1]
assert test('AGTCGTAGCTGATGCTTAGGGGCTTACTAGGCTTGATGAGGATTAT', 4) == ['AGTCGTAGCTGATGCTTAGGGGCTTACTAGGCTTGATGAGGATTAT', '', 1]
# no inv_rep
assert test('AGTCGTAGCTGATGCTTAGGGGCTTACTAGGCTTGATGAGGATTAA', 1) == ['AGTCGTAGCTGATGCTTAGGGGCTTACTAGGCTTGATGAGGATTAA', '', 0]
# entire sequence it's own reverse complement
assert test('ACACAGGCCTGTGT', 1) == ['ACACAGGCCTGTGT', 'ACACAGGCCTGTGT', 14]
# empty sequence
assert test('', 4) == ['', '', 0]
# <codecell>
def process(infile, shortest_length_to_clip):
""" Does the actual work:
Goes through the input file and streams the content through the inverted_repeat locator
Collects the new sequences and repeats found and reports them """
# test for existing inut file
assert os.path.exists(infile), "Input file '%s' appears not to exist." %infile
[out_fname, out_rname, out_lname] = get_outfnames(infile)
inv_reps = {}
lengths = {}
total_trimmed = 0
total_skipped = 0
processed = 0
max_rec_to_process = 1e30
print "Processing sequences..."
with open(out_fname, 'w') as out_fh:
for rec in SeqIO.parse(infile, "fastq"):
processed += 1
if len(rec) < 1:
# skip zero-length sequences
total_skipped += 1
continue
new_rec, inv_rep, inv_rep_length = extract_inv_repeat(rec, shortest_length_to_clip)
out_fh.write(new_rec.format("fastq"))
if inv_rep_length >= shortest_length_to_clip:
inv_reps[inv_rep] = inv_reps.get(inv_rep, 0) +1
total_trimmed += 1
lengths[inv_rep_length] = lengths.get(inv_rep_length, 0) +1
if processed == max_rec_to_process:
break
print "Writing summary files..."
with open(out_rname, "w") as p_out_fh:
p_out_fh.write("inverted_repeat\tcount\n")
for p in inv_reps.keys():
p_out_fh.write("%s\t%s\n" %(p, inv_reps[p]))
with open(out_lname, "w") as l_out_fh:
l_out_fh.write("repeat_length\tcount\n")
for l in sorted(lengths.iterkeys()):
l_out_fh.write("%s\t%s\n" %(l, lengths[l]))
print "\nProcessed %i records:\n- skipped %i (because of zero-length)\n- found %i inverted repeat(s)\n" % (processed, total_skipped, total_trimmed)
# <codecell>
if __name__ == "__main__":
args = parser.parse_args()
print ("Input file: %s" % args.input )
print ("Shortest length to clip: %s" % args.shortest_length)
process(args.input, args.shortest_length)
| unlicense | 329,393,044,782,642,940 | 37.884422 | 151 | 0.651848 | false | 3.417845 | true | false | false |
tarvitz/django-unity-asset-server-http-client | duashttp/router.py | 1 | 1729 | # coding: utf-8
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
DUAS_ENABLE_DB_WRITE = getattr(settings, 'DUAS_ENABLE_DB_WRITE', False)
DUAS_DB_ROUTE_PREFIX = getattr(settings, 'DUAS_DB_ROUTE_PREFIX',
'unity_asset_server')
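# A sketch of the settings this router expects (key names are taken from the
# getattr calls above; the values and the router module path are assumptions):
#
#   DATABASES = {"default": {...}, "unity_asset_server": {...}}
#   DATABASE_ROUTERS = ["duashttp.router.UnityAssetServerRouter"]
#   DUAS_ENABLE_DB_WRITE = False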
class UnityAssetServerRouter(object):
"""
Router for unity asset server data base
"""
def db_for_read(self, model, **hints):
"""
        Route reads of duashttp models to the unity asset server database.
"""
if model._meta.app_label == 'duashttp':
return DUAS_DB_ROUTE_PREFIX
return None
def db_for_write(self, model, **hints):
"""
        Route writes of duashttp models to the unity asset server database.
"""
if model._meta.app_label == 'duashttp':
if not DUAS_ENABLE_DB_WRITE:
raise ImproperlyConfigured(
"Set `DUAS_ENABLE_DB_WRITE` to True in your settings to enable "
"write operations on unity asset server database"
)
return DUAS_DB_ROUTE_PREFIX
return None
def allow_relation(self, obj1, obj2, **hints):
"""
        Allow relations if a model from the duashttp app is involved.
"""
if obj1._meta.app_label == 'duashttp' or \
obj2._meta.app_label == 'duashttp':
return True
return None
def allow_migrate(self, db, model):
"""
        Make sure the duashttp app's models only appear in the unity
        asset server database.
"""
if db == DUAS_DB_ROUTE_PREFIX:
return model._meta.app_label == 'duashttp'
elif model._meta.app_label == 'duashttp':
return False
return None
| mit | -6,610,124,107,798,775,000 | 31.018519 | 84 | 0.567958 | false | 3.920635 | false | false | false |
jaraco/hgtools | hgtools/tests/test_git.py | 1 | 1226 | import os
import pytest
from hgtools import managers
from hgtools.managers import cmd
from hgtools.managers import subprocess
def test_subprocess_manager_invalid_when_exe_missing():
"""
    If the git executable doesn't exist, the manager should report
False for .is_valid().
"""
non_existent_exe = '/non_existent_executable'
assert not os.path.exists(non_existent_exe)
mgr = subprocess.GitManager()
mgr.exe = non_existent_exe
assert not mgr.is_valid()
@pytest.mark.usefixtures("git_repo")
class TestTags:
def setup_method(self, method):
self.mgr = managers.GitManager('.')
def teardown_method(self, method):
del self.mgr
def test_single_tag(self):
assert self.mgr.get_tags() == set([])
self.mgr._invoke('tag', '-am', "Tagging 1.0", '1.0')
assert self.mgr.get_tags() == set(['1.0'])
self.mgr._invoke('checkout', '1.0')
assert self.mgr.get_tags() == set(['1.0'])
class TestParseVersion:
def test_simple(self):
assert cmd.Git._parse_version('git version 1.9.3') == '1.9.3'
def test_trailing_mess(self):
val = cmd.Git._parse_version('git version 1.9.3 (Mac OS X)')
assert val == '1.9.3'
| mit | -2,926,431,106,773,995,500 | 26.863636 | 69 | 0.632137 | false | 3.37741 | true | false | false |
googleads/google-ads-python | google/ads/googleads/v7/enums/types/batch_job_status.py | 1 | 1129 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v7.enums",
marshal="google.ads.googleads.v7",
manifest={"BatchJobStatusEnum",},
)
class BatchJobStatusEnum(proto.Message):
r"""Container for enum describing possible batch job statuses. """
class BatchJobStatus(proto.Enum):
r"""The batch job statuses."""
UNSPECIFIED = 0
UNKNOWN = 1
PENDING = 2
RUNNING = 3
DONE = 4
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | -4,685,226,113,718,705,000 | 28.710526 | 74 | 0.689105 | false | 3.947552 | false | false | false |
oelson/LittreParser | parse.py | 1 | 17906 | #-*- coding: utf-8 -*-
__all__ = ["parser", "entry"]
from textwrap import TextWrapper
import xml.etree.ElementTree as ET
from unidecode import unidecode
from LittreParser.error import EntryNotFound
def _xml2dict(root):
"""
    Convert an XML node to a dictionary.
Recursive.
"""
out = []
for child in root.iterfind("./*"):
out.append({
"tag": child.tag,
"text": child.text,
"attrib": child.attrib,
"children": _xml2dict(child)
})
return out
def _gettext(elem):
"""
    DOM equivalent of the "innerText" property
"""
return "".join(elem.itertext())
class parser:
"""
    A parser for the XMLittré dictionary.
"""
    # parser pool for the 26 letters of the alphabet
_ET_parsers = {}
def __init__(self, xml_directory):
self._xml_directory = xml_directory
def load_xml_file(self, letter):
"""
        The dictionary is split into 26 XML files, one per letter of the
        alphabet.
        Builds an Element node from the content of the file matching the
        given letter and stores it in a dictionary.
"""
if not isinstance(letter, str) or not len(letter) == 1:
raise ValueError("need a letter from the alphabet")
xml_path = "{}/{}.xml".format(
self._xml_directory,
letter
)
with open(xml_path, 'r') as xml_file:
xml_content = xml_file.read()
self._ET_parsers[letter] = ET.fromstring(xml_content)
def get_parser(self, letter):
"""
        Gets the right parser for a letter of the alphabet (instantiating it
        on the fly if necessary).
"""
letter = unidecode(letter.lower())
if letter not in self._ET_parsers:
self.load_xml_file(letter)
return self._ET_parsers[letter]
def get_entries(self, name):
"""
        Returns a list of <entry> XML nodes matching the given term.
        A term may correspond to several entries (multiple senses).
        TODO: return a list of close matches on failure (wrong accents, typos, etc...)
"""
name = name.upper()
        # fetch the right parser for this letter
        p = self.get_parser(name[0])
        # An entry can have several "senses" and may therefore
        # be duplicated
entries = []
for node in p.iterfind("./entree"):
terme = node.attrib["terme"]
            # Try to establish a match between the masculine and
            # feminine forms
mal, fem = self.build_female(terme)
if name == mal or name == fem:
entries.append(node)
if len(entries) == 0:
raise EntryNotFound("the entry \"{}\" does not exist".format(name))
return entries
def build_female(self, word):
"""
        Build the feminine form of a term from its masculine form and its
        feminine suffix.
        """
        # extract the stem (with the masculine marker) and the feminine suffix
        values = word.split(",")
        rad = values[0].strip()
        # the term is simple
        if len(values) == 1 or not values[1]:
            fem = ""
        # the term is double
else:
fem_suffix = values[1].strip()
            # the first letter of the feminine suffix must
            # match the one of the masculine suffix
            first_fem_letter = fem_suffix[0]
            # extract the bare stem (strip the masculine marker)
            masc_pos = rad.rfind(first_fem_letter)
            prefix = rad[:masc_pos]
            # build the feminine form
fem = prefix + fem_suffix
return rad, fem
def get_entries_as_dict(self,
name,
no_quotes=False,
no_synonyms=False,
no_history=False,
no_etymology=False):
"""
        Returns the different senses of a word as a dictionary whose keys are
        the sense indices and whose values are the entries formatted as a
        tree.
"""
name = name.upper()
definition = {
"terme": name,
"sens": {}
}
for i, node in enumerate(self.get_entries(name)):
e = entry(name, node)
            key = e.get_sens_id() or i
definition["sens"][key] = e.format_as_dict(
no_quotes,
no_synonyms,
no_history,
no_etymology
)
return definition
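# A minimal usage sketch (assumes a directory containing the XMLittré XML
# files, one per letter; the path and the looked-up word are placeholders):
#
#   p = parser("./xml")
#   senses = p.get_entries_as_dict("cheval")
#   print(entry_formatter(senses).format())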
class entry:
"""
    A dictionary entry produced by the parser.
    An entry corresponds to one definition.
"""
def __init__(self, mot, entry):
self.mot = mot
self.entry = entry
def get_sens_id(self):
"""
        Returns the sense index of the definition.
"""
return int(self.entry.attrib.get("sens") or 1)
def get_variante_text(self, v):
"""
        Returns the text defining a variant.
        This text may be spread over nodes interleaved with raw
        text chunks.
"""
text = v.text.replace("\n", "") if v.text else ""
# workaround: "find()" ne fonctionne pas, certainement à cause de
# l'imbrication de noeuds texte et non-texte au sein d'un même
# élément
for sem in v.iter("semantique"):
if sem.text:
text += sem.text.replace("\n", "")
if sem.tail:
text += sem.tail.replace("\n", "")
return text
def get_variantes(self, corps_, no_quotes=False):
"""
        Returns the variants contained in the body of the entry, as a
        list of dictionaries.
"""
variantes = []
for v_ in corps_.iter("variante"):
variante = {
"num": int(v_.attrib.get("num") or -1),
"text": self.get_variante_text(v_),
"indent": []
}
            # append any citations attached to this variant
if not no_quotes:
variante["cit"] = self.get_citations(v_)
            # look for sub-parts
for i_ in v_.iter("indent"):
#subtext = _gettext(i_).rstrip()
subtext = i_.text or ""
subtext = subtext.rstrip()
                # workaround
for s_ in i_.iter("semantique"):
s_text = s_.text or ""
s_text = s_text.rstrip()
subtext += s_text
                # sub-parts may contain citations
                citations = []
                if not no_quotes:
                    citations = self.get_citations(i_)
variante["indent"].append({
"text": subtext,
"cit": citations
})
variantes.append(variante)
return variantes
def get_citations(self, parent_):
"""
        Returns the citations contained in a node, as a list of
        dictionaries.
"""
citations = []
for c in parent_.iterfind("./cit"):
citation = {
"aut": c.attrib["aut"] or "aut. inc.",
"ref": c.attrib["ref"] or "ref. inc.",
"text": c.text
}
citations.append(citation)
return citations
def get_synonymes(self, entry_):
"""
        Returns the synonyms of an entry as a list.
"""
synonymes = []
for synonymes_ in entry_.iterfind("./rubrique[@nom='SYNONYME']"):
for syn in synonymes_.iter("indent"):
synonymes.append(syn.text.rstrip())
return synonymes
def get_historique(self, entry_):
"""
        Returns the history section of an entry as a list of
        dictionaries.
"""
historique = []
rubrique_ = entry_.find("./rubrique[@nom='HISTORIQUE']")
if not rubrique_:
return
for indent in rubrique_.iter("indent"):
            # century
            date = indent.text.rstrip()
            # citations associated with that century
            citations = self.get_citations(indent)
historique.append({
"date":date,
"cit": citations
})
return historique
def get_etymologie(self, entry_):
"""
        Returns the etymology of an entry as a list.
"""
etymologies = []
rubrique_ = entry_.find("./rubrique[@nom='ÉTYMOLOGIE']")
for indent in rubrique_.iter("indent"):
etymologies.append(_gettext(indent).rstrip())
return etymologies
def format_as_dict(self,
no_quotes=False,
no_synonyms=False,
no_history=False,
no_etymology=False):
"""
        Walks the entry and returns it as a dictionary.
"""
entete_ = self.entry.find("./entete")
corps_ = self.entry.find("./corps")
prononciation_ = entete_.find("./prononciation")
nature_ = entete_.find("./nature")
e = {
"entete": {
"prononciation": prononciation_.text,
"nature": nature_.text
}
}
# Variantes
variantes = self.get_variantes(corps_, no_quotes)
if variantes:
e["variantes"] = variantes
# Synonymes
if not no_synonyms:
synonymes = self.get_synonymes(self.entry)
if synonymes:
e["synonymes"] = synonymes
# Historique
if not no_history:
historique = self.get_historique(self.entry)
if historique:
e["historique"] = historique
# Étymologie
if not no_etymology:
etymologies = self.get_etymologie(self.entry)
if etymologies:
e["etymologie"] = etymologies
return e
class entry_formatter:
"""
    Formatter class for an entry.
    Supported formats are:
    * plain text
    * HTML
"""
    # Plain-text formatting elements
    _nbsp = u"\u00A0"
    _bullet = u"\u2219\u25E6"
    _q = u"\u201C\u201D"
    # Separator between the parts of the definition
    _subpart_separator = "\u2015"*24
    # Citation format
    _citation_format = "{} ({}): "+_q[0]+"{}"+_q[1]
    # Number of spaces per indentation level
    _indent_factor = 2
    # Width used to wrap the text cleanly
    _display_width = 78
def __init__(self, entries, fit_text=True):
self.entries = entries
self.fit_text = fit_text
self.tw = TextWrapper(
width=self._display_width,
            # the initial indentation will always be produced by /list_item/
initial_indent = ""
)
def fill(self, text, subsequent_indent=0):
"""
        Returns the given text wrapped at word boundaries to the width
        defined by the /_display_width/ constant.
        The /subsequent_indent/ parameter is the length of the text prefix,
        typically the length of the bullet item (leading spaces included).
"""
self.tw.subsequent_indent = self._nbsp*subsequent_indent
return self.tw.fill(text)
def list_item(self, level=2, li_type=0, li_count=-1):
"""
        Formats a bullet list item.
        If /li_type/ is -1, the list is numbered and the bullet takes the
        value of the /li_count/ variable.
"""
if li_type == -1:
            # the list item is a number
            bullet = str(li_count)+"."
        else:
            # the list item is a "bullet"
bullet = self._bullet[li_type]
return self._nbsp * level * self._indent_factor + bullet + self._nbsp
def format_entete(self, entete):
"""
        Formats a definition header as plain text.
"""
text = "PRONONCIATION: '{}'\nNATURE: {}".format(
entete["prononciation"],
entete["nature"]
)
return text
def format_citation(self, cit, level=0, li_style=0):
"""
        Formats a citation as plain text.
"""
li = self.list_item(level, li_style)
cit = self._citation_format.format(
cit["aut"],
cit["ref"],
cit["text"]
)
text = li + cit
        # Wrap the text cleanly if it is too long
if self.fit_text:
text = self.fill(text, len(li))
return text + "\n"
def format_variantes(self, variantes, base_indent_level=1):
"""
        Formats the variants as plain text.
"""
paragraph = ""
for li_count, v_ in enumerate(variantes):
            # Build a numbered list item
if v_["num"] == -1:
li_index = li_count+1
else:
li_index = v_["num"]
li = self.list_item(base_indent_level, -1, li_index)
text = li + v_["text"]
            # Wrap the text cleanly if it is too long
if self.fit_text:
text = self.fill(text, len(li))
text += "\n"
            # Append any citations
if "cit" in v_:
for c_ in v_["cit"]:
text += self.format_citation(c_, base_indent_level+1, 0)
            # Append any sub-parts
for ind in v_["indent"]:
li = self.list_item(base_indent_level+1, 0)
_text = li + ind["text"]
                # Wrap the text cleanly if it is too long
if self.fit_text:
_text = self.fill(_text, len(li))
text += _text + "\n"
                # citations attached to the sub-part
for cit in ind["cit"]:
text += self.format_citation(cit, base_indent_level+2, 1)
paragraph += text
return paragraph
def format_synonymes(self, synonymes, base_indent_level=1):
"""
        Formats a list of synonyms as plain text.
"""
paragraph = ""
for syn in synonymes:
li = self.list_item(base_indent_level, 1)
text = li + syn
            # Wrap the text cleanly if it is too long
if self.fit_text:
text = self.fill(text, len(li))
paragraph += text + "\n"
return paragraph
def format_historique(self, historique, base_indent_level=1):
"""
        Formats the history of a definition as plain text.
"""
paragraph = ""
for his in historique:
li = self.list_item(base_indent_level, 0)
text = li + his["date"]
            # Wrap the text cleanly if it is too long
if self.fit_text:
text = self.fill(text, len(li))
text += "\n"
for cit in his["cit"]:
text += self.format_citation(cit, base_indent_level+1, 1)
paragraph += text
return paragraph
def format_etymologies(self, etymologie, base_indent_level=1):
"""
        Formats a list of etymologies as plain text.
"""
paragraph = ""
for ety in etymologie:
li = self.list_item(base_indent_level, 0)
text = li + ety
            # Wrap the text cleanly if it is too long
if self.fit_text:
text = self.fill(text, len(li))
paragraph += text + "\n"
return paragraph
def format(self):
"""
        Formats the entry as plain text.
"""
text = "TERME: {}\n".format(self.entries["terme"])
        # the different senses of an entry are indexed by a numeric index
if len(self.entries["sens"]) == 1:
initial_indent_level = 0
print_sens_separator = False
else:
initial_indent_level = 1
print_sens_separator = True
for sens_id, definition in self.entries["sens"].items():
if print_sens_separator:
text += "{}\nSENS #{}\n".format(
self._subpart_separator,
sens_id
)
# Variantes du terme
if "variantes" in definition:
text += "\nVARIANTES:\n"
text += self.format_variantes(
definition["variantes"],
initial_indent_level
)
# Synonymes
if "synonymes" in definition:
text += "\nSYNONYMES:\n"
text += self.format_synonymes(
definition["synonymes"],
initial_indent_level
)
# Historique
if "historique" in definition:
text += "\nHISTORIQUE:\n"
text += self.format_historique(
definition["historique"],
initial_indent_level
)
# Étymologie
if "etymologie" in definition:
text += "\nÉTYMOLOGIE:\n"
text += self.format_etymologies(
definition["etymologie"],
initial_indent_level
)
return text
def __repr__(self):
return self.__str__()
def __str__(self):
return str(self.format())
| gpl-2.0 | -451,669,834,033,869,900 | 31.054054 | 107 | 0.515346 | false | 3.64176 | false | false | false |
openEduConnect/eduextractor | eduextractor/sis/powerschool/powerschool_exporter.py | 1 | 1716 | import pandas as pd
from ...config import _load_secrets
import sqlalchemy
import os
from tqdm import tqdm
class PowerSchoolSQLInterface:
"""A class, representing a interface to the Powerschool frontend
which most teachers/students/admins have access to.
"""
def __init__(self, secrets=None):
if secrets is None:
secrets = _load_secrets()
try:
SECRETS = secrets['powerschool']
self.username = SECRETS['username']
self.password = SECRETS['password']
self.host = SECRETS['host']
self.dbname = SECRETS['dbname']
except KeyError:
print("Please check the configuration of your config file")
engine = sqlalchemy.create_engine('oracle://' + self.username +
':' + self.password +
'@' + self.host + ':' +
"1521" + '/' +
self.dbname)
self.conn = engine.connect()
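    # A usage sketch (assumes the powerschool secrets are configured; the SQL
    # below is a placeholder query, not part of this package):
    #   psi = PowerSchoolSQLInterface()
    #   df = psi.query_to_df("SELECT * FROM students")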
    def query_to_df(self, query):
        """Executes a SQL query and returns the result as a pandas DataFrame."""
        df = pd.read_sql(query, self.conn)
        return df
    def _list_queries(self, file_dir='./sql'):
return os.listdir(file_dir)
    def download_files(self):
files = self._list_queries()
for file_name in tqdm(files):
with open('./sql/' + file_name, 'r') as filebuf:
data = filebuf.read()
            df = self.query_to_df(data)
file_name = file_name.replace('.sql','.csv')
df.to_csv('/tmp/' + file_name)
if __name__ == '__main__':
PowerSchoolSQLInferface.download_files()
| mit | -6,560,301,399,092,877,000 | 34.020408 | 72 | 0.515734 | false | 4.247525 | false | false | false |
ysabel31/Python | Dialogues/dialogues/migrations/versions/3528db092ec4_.py | 1 | 2683 | """empty message
Revision ID: 3528db092ec4
Revises:
Create Date: 2017-10-22 17:53:34.149039
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3528db092ec4'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('categories',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=80), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('creators',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('firstname', sa.String(length=80), nullable=True),
sa.Column('lastname', sa.String(length=80), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('media',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('category', sa.String(length=80), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=80), nullable=True),
sa.Column('password', sa.String(length=25), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('items',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('media_id', sa.Integer(), nullable=True),
sa.Column('category_id', sa.Integer(), nullable=True),
sa.Column('EAN', sa.Integer(), nullable=True),
sa.Column('ASIN', sa.Integer(), nullable=True),
sa.Column('ASIN_LINK_AMAZON', sa.String(length=1024), nullable=True),
sa.Column('name', sa.String(length=80), nullable=True),
sa.Column('synopsys', sa.String(length=1024), nullable=True),
sa.Column('creation_date', sa.DateTime(), nullable=True),
sa.Column('modification_date', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['category_id'], ['categories.id'], ),
sa.ForeignKeyConstraint(['media_id'], ['media.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('item_creator',
sa.Column('item_id', sa.Integer(), nullable=False),
sa.Column('creator_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['creator_id'], ['creators.id'], ),
sa.ForeignKeyConstraint(['item_id'], ['items.id'], ),
sa.PrimaryKeyConstraint('item_id', 'creator_id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('item_creator')
op.drop_table('items')
op.drop_table('users')
op.drop_table('media')
op.drop_table('creators')
op.drop_table('categories')
# ### end Alembic commands ###
| mit | -6,968,749,891,703,271,000 | 34.302632 | 73 | 0.654491 | false | 3.430946 | false | false | false |
python-20161108/temp | wanghn/python_js/ids/ids_health.py | 2 | 5614 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = "wanghn"
import httplib
import socket
import time
import os
import ConfigParser
import json
import urllib2
import re
def monitorwebapp(ip_result):
webcip_state = {}
timenow = time.strftime( "%Y-%m-%d %H:%M:%S", time.localtime( time.time( ) ) )
for m in xrange( len( ip_result ) ):
ping_cmd = os.popen( 'ping %s -c 1 | grep -c time=' % ip_result[m] ).read( )
if ping_cmd != '0\n':
webcip_state[ip_result[m]] = True
else:
webcip_state[ip_result[m]] = False
# print 'monitorwebapp result:',webcip_state
return webcip_state
def conport(ip_result,port_result):
webcp_state = {}
for n in xrange( len( port_result ) ):
ip_port = (ip_result[n], int( port_result[n] ))
sk = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
sk.settimeout( 1 )
# print ip_port
try:
sk.connect(ip_port)
if ip_result[n] in webcp_state:
webcp_state[ip_result[n]].update({port_result[n]:True})
else:
webcp_state[ip_result[n]]=({port_result[n]:True})
except Exception:
if ip_result[n] in webcp_state:
webcp_state[ip_result[n]].update({port_result[n]:False})
else:
webcp_state[ip_result[n]]=({port_result[n]:False})
sk.close( )
# print 'conport result:', webcp_state
return webcp_state
def servicestate(service_result):
ser = {}
for i in xrange( len( service_result ) ):
ret = os.popen( 'ps -ef|grep %s|grep -v grep' % service_result[i] ).readlines( )
if len( ret ) > 0:
ser[service_result[i]]=True
else:
ser[service_result[i]]=False
# print 'servicestate resut:',ser
return ser
def urlhealthcheck():
urlhealthcheckresult = {}
try:
responsecheck = urllib2.urlopen( "http://ids122.avict.com:8080/nidp/app/heartbeat" ).read( )
if 'Success' in responsecheck:
urlhealthcheckresult['heartbeat'] = True
return urlhealthcheckresult
# print '[+]responsecheck:',responsecheck
except Exception, e:
urlhealthcheckresult['heartbeat'] = False
return urlhealthcheckresult
def nldapcheck():
nldapcheckresult = {}
try:
check_cmd = os.popen( 'nldap_check' ).read( ).strip( ).replace( '\n', '' ).split( '.' )
matchtcp=re.match(".*is listening(.*)TCP.*",check_cmd[0])
matchtls=re.match(".*is listening(.*)TLS.*",check_cmd[1])
# print '[+]matchtcp:',matchtcp.group()
# print '[+]matchtls:',matchtls.group()
if matchtcp and matchtls:
nldapcheckresult['TCP'] = True
nldapcheckresult['TLS'] = True
elif not matchtcp and matchtls:
nldapcheckresult['TCP'] = False
nldapcheckresult['TLS'] = True
elif matchtcp and not matchtls:
nldapcheckresult['TCP'] = True
nldapcheckresult['TLS'] = False
else:
nldapcheckresult['TCP'] = False
nldapcheckresult['TLS'] = False
return nldapcheckresult
except Exception,e:
nldapcheckresult['TCP'] = False
nldapcheckresult['TLS'] = False
return nldapcheckresult
if __name__ == '__main__':
cf = ConfigParser.RawConfigParser( )
cf.read( "/root/python/ids_health_config.ini" )
smmodifytime = os.stat( r"/root/python/ids_health_config.ini" ).st_mtime
ipaddr = cf.get( "HostAgent", "ipaddr" )
port = cf.get( "HostAgent", "port" )
servi = cf.get( "HostAgent", "services" )
url = cf.get( "HostAgent", "url" )
datetime = cf.get( "HostAgent", "datetime" )
servstate = cf.get( "HostAgent", "servstate" )
webaddress = cf.get( "HostAgent", "webaddress" )
webport = cf.get( "HostAgent", "webport" )
webcipstatus = cf.get( "HostAgent", "webcipstatus" )
webcpstatus = cf.get( "HostAgent", "webcpstatus" )
nldapstatus = cf.get( "HostAgent", "nldapstatus" )
urlcheckstatus = cf.get( "HostAgent", "urlcheckstatus" )
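    # Sketch of the expected [HostAgent] section of ids_health_config.ini
    # (key names are taken from the cf.get() calls above; values are placeholders):
    #   ipaddr = 10.0.0.1
    #   port = 8443
    #   services = sshd,crond
    #   webaddress = 10.0.0.2,10.0.0.3
    #   webport = 80,443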
service_result = servi.split( ',' )
ip_result = webaddress.split( ',' )
port_result = webport.split( ',' )
ctrlags = 1
num = True
ser = servicestate( service_result )
webcip_state = monitorwebapp( ip_result )
webcp_state = conport( ip_result, port_result )
nldap_check_state = nldapcheck()
url_health_check_state = urlhealthcheck()
time_nu = time.strftime( "%Y-%m-%d %H:%M:%S", time.localtime( time.time( ) ) )
params = {
'ids_health':{
servstate: ser,
webcipstatus: webcip_state,
webcpstatus: webcp_state,
nldapstatus: nldap_check_state,
urlcheckstatus: url_health_check_state
}
}
# print "params::::",params
data = json.dumps(params)
print "result:",data
try:
# headers = {"Content-type": "application/json"}
# middletime = time.time( )
# httpClient = httplib.HTTPSConnection( ipaddr, port, timeout=None )
# httpClient.request( "POST", url, data, headers )
# response = httpClient.getresponse( )
# print 'response:',response.read( )
url1 = 'https://%s:%s%s' % (ipaddr, port, url)
request = os.popen(
r"curl -k -H 'Content-type:application/json' -X POST --data '%s' '%s' 2>/dev/null" % (data, url1) )
print '[+]request:', request.read( )
except Exception, e:
print 'err',e
# finally:
# if httpClient:
# httpClient.close( ) | gpl-3.0 | 8,447,014,818,641,653,000 | 34.764331 | 111 | 0.579622 | false | 3.446286 | false | false | false |
clstoulouse/motu-client-python | src/python/motu_utils/utils_stream.py | 1 | 2638 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Python motu client
#
# Motu, a high efficient, robust and Standard compliant Web Server for Geographic
# Data Dissemination.
#
# http://cls-motu.sourceforge.net/
#
# (C) Copyright 2009-2010, by CLS (Collecte Localisation Satellites) -
# http://www.cls.fr - and Contributors
#
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import io, sys, logging
def copy(sourceHandler, destHandler, callback = None, blockSize = 65535 ):
"""Copy the available content through the given handler to another one. Process
can be monitored with the (optional) callback function.
sourceHandler: the handler through witch downloading content
destHandler: the handler into which writing data
callback: the callback function called for each block read. Signature: f: sizeRead -> void
blockSize: the size of the block used to read data
returns the total size read
"""
read = 0
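    # Typical usage (sketch): copy an HTTP response into an in-memory buffer,
    # reporting progress through the optional callback:
    #   buf = io.StringIO()
    #   copy(responseHandler, buf, callback=lambda n: None, blockSize=65535)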
while 1:
block = sourceHandler.read(blockSize)
if(isinstance(block, bytes)):
exit_condition = b''
else:
exit_condition = ''
if block == exit_condition:
break
read += len(block)
try:
if type(destHandler) == io.StringIO:
if sys.version_info > (3, 0):
destHandler.write( str(block, 'utf-8') )
else:
destHandler.write( unicode(block, 'utf-8') )
else:
destHandler.write(block)
except Exception as inst:
log = logging.getLogger("utils_stream:copy")
log.error("Exception while copying remote data")
log.error(" - Type = %s", type(inst)) # the exception instance
if hasattr(inst, 'args'):
log.error(" - Attributes = %s", inst.args) # arguments stored in .args
log.error(" - Full exception = %s", inst)
        if callback is not None:
            callback(read)
return read | lgpl-3.0 | 5,978,035,052,498,620,000 | 37.246377 | 94 | 0.650114 | false | 4.009119 | false | false | false |
yshalenyk/openprocurement.ocds.export | openprocurement/ocds/export/contrib/client.py | 2 | 2609 | import requests
import requests.adapters
import requests.exceptions
from gevent.pool import Pool
import logging
logger = logging.getLogger(__name__)
VERSION = 'X-Revision-N'
VERSION_HASH = 'X-Revision-Hash'
PREV_VERSION = 'X-Revision-Hash'
class APIClient(object):
def __init__(self, api_key, api_host, api_version, **options):
self.base_url = "{}/api/{}".format(api_host, api_version)
self.session = requests.Session()
if api_key:
self.session.auth = (api_key, '')
        self.session.headers = {
            "Accept": "application/json",
            "Content-type": "application/json"
}
self.historical = options.get('historical', False)
resourse = options.get('resourse', 'tenders')
self.resourse_url = '{}/{}'.format(self.base_url, resourse)
APIAdapter = requests.adapters.HTTPAdapter(max_retries=5,
pool_connections=50,
pool_maxsize=50)
self.session.mount(self.resourse_url, APIAdapter)
        # retrieve a server cookie
resp = self.session.head("{}/{}".format(self.base_url, 'spore'))
resp.raise_for_status()
def get_tenders(self, params=None):
if not params:
            params = {'feed': 'changes'}
resp = self.session.get(self.resourse_url, params=params)
if resp.ok:
return resp.json()
def get_tender(self, tender_id, version=''):
args = dict()
url = '{}/{}'.format(self.resourse_url, tender_id)
if self.historical:
url += '/historical'
args.update(dict(headers={VERSION: version}))
args.update(url=url)
try:
resp = self.session.get(**args)
if resp.ok:
#if self.historical and version and version != resp.headers.get(VERSION, ''):
# import pdb;pdb.set_trace()
# raise requests.exceptions.HTTPError
data = resp.json().get('data', '')
if data:
return resp.headers.get(VERSION, ''), data
except requests.exceptions.HTTPError as e:
logger.warn('Request failed. Error: {}'.format(e))
return '', {}
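# Example (sketch): paging through the tenders feed with an APIClient
# instance. It assumes the usual response layout of
# {'data': [...], 'next_page': {'offset': ...}}, which is not guaranteed here.
def example_fetch_tenders(client):
    page = client.get_tenders()
    while page and page.get('data'):
        for item in page['data']:
            yield item
        offset = page.get('next_page', {}).get('offset')
        if not offset:
            break
        page = client.get_tenders(params={'feed': 'changes', 'offset': offset})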
def get_retreive_clients(api_key, api_host, api_version, **kw):
forward = APIClient(api_key, api_host, api_version, **kw)
backward = APIClient(api_key, api_host, api_version, **kw)
origin_cookie = forward.session.cookies
backward.session.cookies = origin_cookie
return origin_cookie, forward, backward
| apache-2.0 | 4,707,716,879,600,359,000 | 36.271429 | 93 | 0.57225 | false | 3.888227 | false | false | false |
yosinski/python-mirametrix-network-client | calibClient.py | 1 | 1305 | #! /usr/bin/env python
import sys
from socket import *
from time import sleep
from helper import getLocalIP
'''
Research code... may not work
'''
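# The commands sent below are XML records of the form
#   <SET ID="CALIBRATE_SHOW" STATE="1" />
# terminated by CRLF; the tracker answers with XML records on the same
# socket, which the receive loop at the bottom simply prints.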
def main():
if len(sys.argv) > 1:
HOST = sys.argv[1]
else:
HOST = getLocalIP()
PORT = 4242
BUFSIZ = 1024
ADDR = (HOST, PORT)
print 'Attempting to connect to %s...' % repr(ADDR),
sys.stdout.flush()
tcpCliSock = socket(AF_INET, SOCK_STREAM)
tcpCliSock.connect(ADDR)
print 'connected!'
# Eye-tracker API specific
tcpCliSock.sendall(str.encode('<SET ID="CALIBRATE_SHOW" STATE="0" />\r\n"'))
sleep(1)
tcpCliSock.sendall(str.encode('<SET ID="CALIBRATE_SHOW" STATE="1" />\r\n"'))
sleep(1)
tcpCliSock.sendall(str.encode('<SET ID="CALIBRATE_START" STATE="1" />\r\n"'))
#tcpCliSock.sendall(str.encode('\r\n"'))
#tcpCliSock.sendall(str.encode('\r\n"'))
#
#import pdb; pdb.set_trace()
#tcpCliSock.sendall(str.encode('<SET ID="ENABLE_SEND_POG_FIX" STATE="1" />\r\n"'))
#tcpCliSock.sendall(str.encode('<SET ID="ENABLE_SEND_DATA" STATE="1" />\r\n"'))
# Loop forever
while True:
data = tcpCliSock.recv(1024)
foo = bytes.decode(data)
print 'got something', foo
tcpCliSock.close()
if __name__ == '__main__':
main()
| gpl-3.0 | -4,517,076,597,275,788,000 | 21.118644 | 86 | 0.602299 | false | 2.965909 | false | false | false |
jtacoma/geometriki | geometriki/controllers/error.py | 1 | 2494 | # This file is part of geometriki.
#
# geometriki is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# geometriki is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with geometriki, in a file named COPYING. If not,
# see <http://www.gnu.org/licenses/>.
import cgi
from paste.urlparser import PkgResourcesParser
from pylons import request
from pylons import tmpl_context as c
from pylons.controllers.util import forward
from pylons.middleware import error_document_template
from webhelpers.html.builder import literal
from geometriki.lib.base import BaseController, render
class ErrorController(BaseController):
"""Generates error documents as and when they are required.
The ErrorDocuments middleware forwards to ErrorController when error
related status codes are returned from the application.
This behaviour can be altered by changing the parameters to the
ErrorDocuments middleware in your config/middleware.py file.
"""
def document(self):
"""Render the error document"""
resp = request.environ.get('pylons.original_response')
code = cgi.escape(request.GET.get('code', ''))
message = cgi.escape(request.GET.get('message', ''))
if resp:
code = code or cgi.escape(str(resp.status_int))
message = literal(resp.status) or message
if not code:
raise Exception('No status code was found.')
c.code = code
c.message = message
return render('/derived/error/document.mako')
def img(self, id):
"""Serve Pylons' stock images"""
return self._serve_file('/'.join(['media/img', id]))
def style(self, id):
"""Serve Pylons' stock stylesheets"""
return self._serve_file('/'.join(['media/style', id]))
def _serve_file(self, path):
"""Call Paste's FileApp (a WSGI application) to serve the file
at the specified path
"""
request.environ['PATH_INFO'] = '/%s' % path
return forward(PkgResourcesParser('pylons', 'pylons'))
| agpl-3.0 | -8,312,929,771,086,351,000 | 36.787879 | 72 | 0.69567 | false | 4.129139 | false | false | false |
Spirals-Team/librepair | src/scripts/patched_builds.py | 1 | 4289 | """
This is a script to create an HTML file for every build repairnator has patched
with the following info:
Build URL
Commit URL
All patches along with the name of the tool that generated them
Requirements:
selenium webdriver for python
pymongo
geckodriver (for linux most likely)
To use:
Fill in the constants below with the appropriate info
"""
import pymongo
from pymongo import MongoClient
from selenium import webdriver
"""
Constants used in the mongodb connection
"""
user="" # Username for the database
pwd="" # Password for the above used
db="" # Name of the authentication database (may be left empty)
ip="" # Ip-address of the database
port="" # Port of the database
"""
If one wishes to specify dates, fill these in. By default it will run
for every patched build
"""
dateFrom=None # From which date to look for patched builds
dateTo=None # To which date to look for patched builds
"""
Query for each document in inspector and write to a file.
"""
def patches_query(mongoDB, inspectorJson):
# Fetch the info from patches
return mongoDB.patches.find({"buildId" : inspectorJson['buildId']})
"""
We will be embedding the diffs in HTML, so we need to escape the <, >, & and quote characters
"""
def replace_spec_chars(string):
string = string.replace("&", "&")
string = string.replace("<", "<")
string = string.replace(">", ">")
string = string.replace("\"", """)
string = string.replace("\'", "'")
return string
"""
Query the inspector once and return all documents
"""
def inspector_query(mongoDB):
# Filter depends on whether we want it to filter dates or not
global dateFrom
global dateTo
inspectorFilter = {"status" : "PATCHED"}
if(dateFrom != None and dateTo != None):
inspectorFilter = {"buildFinishedDate" : { "$gt" : dateFrom, "$lt" : dateTo}, "status" : "PATCHED"}
elif(dateFrom != None and dateTo == None):
inspectorFilter = {"buildFinishedDate" : { "$gt" : dateFrom}, "status" : "PATCHED"}
elif(dateFrom == None and dateTo != None):
inspectorFilter = {"buildFinishedDate" : { "$lt" : dateTo}, "status" : "PATCHED"}
return mongoDB.inspector.find(inspectorFilter).batch_size(50)
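# For reference, with both dates set the filter built above is
#   {"buildFinishedDate": {"$gt": dateFrom, "$lt": dateTo}, "status": "PATCHED"}
# so dateFrom/dateTo should use the same type/format as the stored
# buildFinishedDate values (e.g. ISO date strings).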
"""
Build the HTML file that is written for a single patched build
patchDocs - all docs with the same buildId, so that we gather all patches
inspectorJson - the inspector document for that build (gives buildId and travisURL)
"""
def file_builder(patchDocs, inspectorJson):
global driver
buildURL = inspectorJson['travisURL']
driver.get(buildURL)
links = driver.find_elements_by_xpath("//a[contains(@href, 'github') and contains(@href, 'commit')]")
if(len(links) == 1):
commitURL = links[0].get_attribute("href")
else:
return None
# Where we do have a commit url we build the html file
f = open(str(inspectorJson['buildId']) + ".html", "w")
# Write commit and travis url
f.write("<html>\n<body>\n")
f.write("<p><a href=\"" + buildURL + "\" id=\"travis-url\">" + buildURL + "</a></p>\n")
f.write("<p><a href=\"" + commitURL + "\" id=\"commit-url\">" + commitURL + "</a></p>\n")
index = 0
for json in patchDocs:
diff = json['diff']
tool = json ['toolname']
diff = replace_spec_chars(diff)
if diff != None and diff != "" and isinstance(diff, str) and tool != None:
f.write("<pre>" + tool +
"<code id=\" " + str(index) + "\" class=\"patch\" title=\"" + tool + "\">\n"
+ diff +
"</code></pre>\n")
index += 1
f.write("</body>\n</html>\n")
f.close()
return 0
"""
Fetch info and write a file for each build found
"""
def main():
global db, ip, port, user, pwd
# Connect by the connection String URI
client = MongoClient("mongodb://" + user + ":" + pwd + "@" +ip + ":" + port + "/" + db)
mongoDB = client.repairnator
for inspectorJson in inspector_query(mongoDB):
patchesDocs = patches_query(mongoDB, inspectorJson)
file_builder(patchesDocs, inspectorJson)
print(inspectorJson['buildId'])
# Start a webdriver to make sure we can fetch the correct url
driver = webdriver.Firefox()
driver.implicitly_wait(5)
main()
| agpl-3.0 | -5,967,810,801,157,670,000 | 28.57931 | 107 | 0.62672 | false | 3.72309 | false | false | false |
kilico/KBotCartero | KComEmilio.py | 1 | 13308 | # This python file uses the following encoding: utf-8
import random
import os
import re
import imaplib
import smtplib
import email
import email.errors
from email.mime.text import MIMEText
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
import ConfigParser
class KComEmilio():
    class Excepcion(Exception):
        """ General-purpose exception and base class for the others """
pass
class Error(Excepcion):
"""
        For errors that make it impossible to complete
        the action at the engine level
"""
pass
def __init__(self, FichConfig):
cfg = ConfigParser.SafeConfigParser()
cfg.read(FichConfig)
seccion = 'Correo'
self._DireccionServidorIMAP4 = cfg.get(seccion,
'DireccionServidorIMAP4')
self._DireccionServidorSMTP = cfg.get(seccion,
'DireccionServidorSMTP')
self._UserIMAP4 = cfg.get(seccion, 'UserIMAP4')
self._UserSMTP = cfg.get(seccion, 'UserSMTP')
self._PassIMAP4 = cfg.get(seccion, 'PassIMAP4')
self._PassSMTP = cfg.get(seccion, 'PassSMTP')
self._MailboxDepuracion = cfg.get(seccion, 'MailboxDepuracion')
self._SoloEnvio = cfg.getboolean(seccion, 'SoloEnvio')
self.IMAP4 = None
self.SSMTP = None
self.m_MensajesPendientes = []
self.m_IdxMensajesAEliminar = []
self._SubjectPorDefecto = cfg.get(seccion, 'Subject')
        # compile the regex for FROMaIDP
        # Matches [email protected] or [email protected]
self.m_FaI = re.compile(r'\d\d\d\d\d\d@(celes\.)?unizar\.es')
self.m_IaF = re.compile(r'\d\d\d\d\d\d')
        # Variable to store the namespace
self._Namespace = ''
def Conectado(self):
Res = self.SSMTP is not None
if not self._SoloEnvio:
Res = Res and self.IMAP4 is not None
return Res
def IniciaConexion(self):
self._IniciaConexionSMTP()
if not self._SoloEnvio:
self._IniciaConexionIMAP()
def _IniciaConexionSMTP(self):
try:
            Txt = u'No se ha podido establecer correctamente la conexión '
Txt += u'con el servidor SMTP'
            # The constructor calls connect
self.SSMTP = smtplib.SMTP()
Res = self.SSMTP.connect(self._DireccionServidorSMTP)
Res = self.SSMTP.ehlo()
Res = self.SSMTP.starttls()
Res = self.SSMTP.ehlo
Res = self.SSMTP.login(self._UserSMTP, self._PassSMTP)
except smtplib.SMTPException as e:
self.SSMTP = None
Txt += '\n' + e.message
raise self.Error(Txt)
except Exception as e:
self.SSMTP = None
Txt += '\n' + e.message
raise self.Error(Txt)
def _IniciaConexionIMAP(self):
try:
Txt = 'No se ha podido establecer correctamente la conexion '
Txt += 'con el servidor IMAP'
self.IMAP4 = imaplib.IMAP4_SSL(self._DireccionServidorIMAP4)
if self.IMAP4.login(self._UserIMAP4,
self._PassIMAP4)[0] != 'OK':
raise self.Error(Txt)
            # get the namespace
typ, dat = self.IMAP4.namespace()
if typ == 'OK':
aux = dat[0][dat[0].find('((')+2:dat[0].find('))')]
ini = aux.find('"')
fin = aux.find('"', ini + 1)
self._Namespace = aux[ini+1:fin]
else:
raise self.Error(Txt)
            # select INBOX
typ, dat = self.IMAP4.select()
if typ != 'OK':
raise self.Error(Txt)
except imaplib.IMAP4.error as e:
Txt += '\n' + e.message
raise self.Error(Txt)
except Exception as e:
Txt += '\n' + e.message
raise self.Error(Txt)
def TerminaConexion(self):
if self.SSMTP is not None:
self.SSMTP.close()
self.SSMTP = None
if self.IMAP4 is not None:
self.IMAP4.expunge()
self.IMAP4.close()
            self.IMAP4.logout()  # close the connection
self.IMAP4 = None
    # Functions to turn the class into a context manager
def __enter__(self):
self.IniciaConexion()
return self
def __exit__(self, ext, exv, trb):
self.TerminaConexion()
        return False  # so that any raised exceptions are propagated
def _CreaBandejaParaDepuracion(self):
"""
        Checks whether the debugging mailbox exists and, if it does not,
        creates it
"""
if self._SoloEnvio:
raise self.Error('Instanciado como "Solo Envio"')
NamesMailBox = self._Namespace + self._MailboxDepuracion
        # check whether it already exists
typ, dat = self.IMAP4.list()
if typ == 'OK':
lfold = []
for k in dat:
ult = k.rfind('"')
lfold.append(k[k[:ult].rfind('"')+1:ult])
if NamesMailBox not in lfold:
typ, dat = self.IMAP4.create(NamesMailBox)
if typ == 'OK':
typ, dat = self.IMAP4.subscribe(NamesMailBox)
def _ObtenListaDeIdentificadoresDeMensaje(self):
"""
        Returns a list with the UIDs as strings,
        or an empty list on failure
"""
if self._SoloEnvio:
raise self.Error('Instanciado como "Solo Envio"')
luid = []
typ, dat = self.IMAP4.uid('SEARCH', 'ALL')
if typ == 'OK':
luid = dat[0].split()
return luid
def _ObtenCuerpo(self, Mes):
if not Mes.is_multipart():
pl = Mes.get_payload(decode = True)
# if Mes.get_content_type() == 'text/plain':
codec = Mes.get_content_charset()
try:
if codec is None:
aux = pl
else:
aux = pl.decode(codec)
except:
aux = pl
Salida = (True, aux)
else:
pl = Mes.get_payload()
Salida = (False, '')
for submes in pl:
Salida = self._ObtenCuerpo(submes)
if Salida[0]:
break
return Salida
def ObtenMensajes(self, luid = None):
if self._SoloEnvio:
raise self.Error('Instanciado como "Solo Envio"')
if luid is None:
luid = self._ObtenListaDeIdentificadoresDeMensaje()
        # Download them in random order so that a message raising an
        # (unhandled) exception does not block all the others
random.shuffle(luid)
DMen = {}
for it in luid:
typ, dat = self.IMAP4.uid('FETCH', it, '(RFC822)')
if typ == 'OK':
Dit = {}
Mes = email.message_from_string(dat[0][1])
From = Mes['From']
Dit['FROM'] = From
ini, fin = From.find('<'), From.find('>')
if ini != -1 and fin != -1:
Dit['TAG_PAX'] = self._FROMaIDP(From[ini+1:fin])
else:
Dit['TAG_PAX'] = self._FROMaIDP(From)
Salida = self._ObtenCuerpo(Mes)
Dit['BODY'] = '--NO SE HA INTERPRETADO EL MENSAJE--'
if Salida[0]:
Dit['BODY'] = Salida[1]
DMen[it] = Dit
return DMen
def MarcaMensajesParaBorrar(self, luid):
if self._SoloEnvio:
raise self.Error('Instanciado como "Solo Envio"')
for it in luid:
typ, dat = self.IMAP4.uid('STORE', it, '+FLAGS', '(\\Deleted)')
def MueveMensajesADepuracion(self, luid):
if self._SoloEnvio:
raise self.Error('Instanciado como "Solo Envio"')
luidBorrar = []
for it in luid:
MBDepuracion = self._Namespace + self._MailboxDepuracion
typ, dat = self.IMAP4.uid('COPY', it, MBDepuracion)
if typ == 'OK': luidBorrar.append(it)
self.MarcaMensajesParaBorrar(luidBorrar)
def _FROMaIDP(self, FROM):
if self.m_FaI.match(FROM) is not None: IDP = FROM[0:6]
else: IDP = FROM
return IDP
def _IDPaFROM(self, IDP):
#if self.m_IaF.match(IDP) is not None: FROM = IDP + '@celes.unizar.es'
if self.m_IaF.match(IDP) is not None: FROM = IDP + '@unizar.es'
else: FROM = IDP
return FROM
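    # For reference, the two helpers above map between the short NIP form and
    # the full address, e.g. '123456' <-> '[email protected]'; any value that
    # does not match the six-digit pattern is passed through unchanged.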
def EnviaCorreo(self, Dest, Txt, Adjuntos = (), Subject = None):
"""
        Sends a reply e-mail
        Dest -> recipient (in short or long form)
        Txt -> text of the main message
        Adjuntos -> collection of attachments in the form
((NombreFich, (Blow, FMT)), ...)
"""
Exito, Inf = True, u''
        # if there is no recipient, return
if Dest.strip() == '':
Exito, Inf = False, 'No se han establecido destinatarios'
        # Build the message
if Exito:
try:
OMenTxt = MIMEText(Txt, _subtype = 'plain', _charset = 'utf-8')
                if Adjuntos is None or len(Adjuntos) == 0:  # no attachments?
OMen = OMenTxt
                else:  # the message has attachments
OMen = MIMEMultipart()
OMen.attach(OMenTxt)
for nom, (blow, fmt) in Adjuntos:
if fmt.upper() == 'PDF':
OMenPdf = MIMEApplication( blow, _subtype = 'pdf')
OMenPdf.add_header('Content-Disposition',
'attachment', filename = nom + '.pdf')
OMen.attach(OMenPdf)
elif fmt.upper() == 'ODS':
OMenOds = MIMEApplication( blow, _subtype = 'ods')
OMenOds.add_header('Content-Disposition',
'attachment', filename = nom + '.ods')
OMen.attach(OMenOds)
elif fmt.upper() == 'XLS':
OMenXls = MIMEApplication( blow, _subtype = 'xls')
OMenXls.add_header('Content-Disposition',
'attachment', filename = nom + '.xls')
OMen.attach(OMenXls)
elif fmt.upper() == 'TXT':
OMenTxt = MIMEText(blow,
'plain', 'utf-8')
OMenTxt.add_header('Content-Disposition',
'attachment', filename = nom + '.txt')
OMen.attach(OMenTxt)
else:
Txt = 'no se como hacer un attach del formato'
Txt += ' %s'%fmt
raise self.Error(Txt)
OMen['To'] = self._IDPaFROM(Dest)
OMen['From'] = self._UserSMTP
if Subject is None:
Subject = self._SubjectPorDefecto
OMen['Subject'] = Subject
except email.errors.MessageError as e:
Inf = 'Error en la composición del mensaje: %s'%e
Exito = False
        # Send the message
if Exito:
try:
self.SSMTP.sendmail(self._UserSMTP, OMen['To'],
OMen.as_string())
except Exception as e:
Inf = 'Error en el envío del mensaje: %s'%e
Exito = False
return Exito, Inf
if __name__ == "__main__":
MEN = u"""
ACCION = CORRECCION
TAREA = TAR_01
PIG = 3.2
"""
with KComEmilio() as KCE:
#KCE.EnviaMensajeTexto('[email protected]', MEN)
#KCE.EnviaMensajeTexto('[email protected]', MEN)
#KCE.EnviaMensajeTexto('[email protected]', MEN)
#KCE._CreaBandejaParaDepuracion()
luid = KCE._ObtenListaDeIdentificadoresDeMensaje()
#Dit = KCE._ObtenMensajes(luid)
#print Dit
        KCE.MarcaMensajesParaBorrar(luid)
#KCE._MueveMensajesADepuracion(luid)
"""
Dest = '[email protected]'
Txt = 'yeeepa'
Subj = 'probando yepa'
with open('DatosParaPruebas/PE01_ENU.pdf', 'rb') as fp:
blw = fp.read()
Adj = (('yepa1.pdf', (blw, 'PDF')),
('yepa2.pdf', (blw, 'PDF')),
('yepa3.pdf', (blw, 'PDF')),
('yepa4.pdf', (blw, 'PDF')),
('yepa5.pdf', (blw, 'PDF')),
('yepa6.pdf', (blw, 'PDF')))
print KCE.EnviaCorreoRespuesta(Dest, Txt, Subject = Subj, Adjuntos = Adj)
"""
| gpl-3.0 | 1,054,293,651,292,944,900 | 34.63807 | 81 | 0.494095 | false | 3.534432 | false | false | false |
florensacc/snn4hrl | bonus_evaluators/grid_bonus_evaluator.py | 1 | 17262 | import numpy as np
import gc
import os.path as osp
from rllab.misc import logger
from rllab.misc import tensor_utils
import collections
from functools import reduce
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from sandbox.snn4hrl.bonus_evaluators.base import BonusEvaluator
class GridBonusEvaluator(BonusEvaluator):
def __init__(self,
env_spec=None,
mesh_density=50,
visitation_bonus=0,
snn_H_bonus=0,
virtual_reset=False, # the paths are split by latents and every switch gets the robot to 0 (xy,ori)
switch_lat_every=0,
survival_bonus=0,
dist_from_reset_bonus=0,
start_bonus_after=0):
self.mesh_density = mesh_density
self.furthest = 0
self.visitation_all = np.zeros((1, 1), dtype=int)
self.num_latents = 0 # this will simply not be used if there are no latents (the same for the following 2)
self.dict_visit = collections.OrderedDict() # keys: latents (int), values: np.array with number of visitations
self.visitation_by_lat = np.zeros((1, 1), dtype=int) # used to plot: matrix with a number for each lat/rep
self.visitation_bonus = visitation_bonus
self.snn_H_bonus = snn_H_bonus
self.virtual_reset = virtual_reset
self.switch_lat_every = switch_lat_every
self.survival_bonus = survival_bonus
self.dist_from_reset_bonus = dist_from_reset_bonus
self.start_bonus_after = start_bonus_after
# in case I'm gridding all the obs_dim (not just the com) --> for this I should use hashing, or too high dim
if env_spec:
obs_dim = env_spec.observation_space.flat_dim
def fit_before_process_samples(self, paths):
if 'env_infos' in paths[0].keys() and 'full_path' in paths[0]['env_infos'].keys():
paths = [tensor_utils.flatten_first_axis_tensor_dict(path['env_infos']['full_path']) for path in paths]
if 'env_infos' in list(paths[0].keys()) and 'com' in list(paths[0]['env_infos'].keys()):
coms_xy = [np.array(path['env_infos']['com'][:, 0:2]) for path in paths] # no z coord
else:
coms_xy = [np.array(path['observations'][:, -3:-1])[:, [1, 0]] for path in paths]
if self.virtual_reset: # change the com according to switch_lat_every or resets
for k, com_xy in enumerate(coms_xy):
i = self.start_bonus_after
while i < len(com_xy):
start = i
ori = paths[k]['env_infos']['ori'][i - self.start_bonus_after]
c = np.float(np.cos(ori))
s = np.float(np.sin(ori))
R = np.matrix('{} {}; {} {}'.format(c, -s, s, c))
while i < len(com_xy) and i - start < self.switch_lat_every - self.start_bonus_after:
i += 1
com_xy[start:i] = np.dot(R, com_xy[start:i].T).T
xy = com_xy[start]
com_xy[start:i] -= xy
while i < len(com_xy) and i - start < self.switch_lat_every: # skip some! compare to above
i += 1
self.furthest = np.ceil(np.max(np.abs(np.concatenate(coms_xy))))
# now translate and scale the coms!
coms = [np.ceil((com_xy + self.furthest) * self.mesh_density).astype(int) for com_xy in coms_xy]
if 'agent_infos' in list(paths[0].keys()) and (('latents' in list(paths[0]['agent_infos'].keys())
and np.size(paths[0]['agent_infos']['latents'])) or
('selectors' in list(paths[0]['agent_infos'].keys())
and np.size(paths[0]['agent_infos']['selectors']))):
selectors_name = 'selectors' if 'selectors' in list(paths[0]['agent_infos'].keys()) else 'latents'
self.num_latents = np.size(paths[0]["agent_infos"][selectors_name][0])
# set all the labels for the latents and initialize the entries of dict_visit
size_grid = int(2 * self.furthest * self.mesh_density + 1)
for i in range(self.num_latents): # use integer to define the latents
self.dict_visit[i] = np.zeros((size_grid, size_grid))
lats = [[np.nonzero(lat)[0][0] for lat in path['agent_infos'][selectors_name]]
for path in paths] # list of all lats by idx
for k, com in enumerate(coms): # this iterates through paths
start = 0
for i, xy in enumerate(com):
if i - start == self.switch_lat_every:
start = i
if i - start < self.start_bonus_after:
pass
else:
self.dict_visit[lats[k][i]][tuple(xy)] += 1
self.visitation_all = reduce(np.add, [visit for visit in self.dict_visit.values()])
else: # If I don't have latents. I also assume no virtual reset and no start_bonus_after!!
            size_grid = int(2 * self.furthest * self.mesh_density + 1)
            self.visitation_all = np.zeros((size_grid, size_grid))
for com in np.concatenate(coms):
self.visitation_all[tuple(com)] += 1
def predict_count(self, path):
if 'env_infos' in path.keys() and 'full_path' in path['env_infos'].keys():
path = tensor_utils.flatten_first_axis_tensor_dict(path['env_infos']['full_path'])
if 'env_infos' in list(path.keys()) and 'com' in list(path['env_infos'].keys()):
com_xy = np.array(path['env_infos']['com'][:, 0:2])
else:
com_xy = np.array(path['observations'][:, -3:-1])[:, [1, 0]]
if self.virtual_reset: # change the com according to switch_lat_every or resets
i = self.start_bonus_after
while i < len(com_xy):
start = i
ori = path['env_infos']['ori'][i - self.start_bonus_after]
c = np.float(np.cos(ori))
s = np.float(np.sin(ori))
R = np.matrix('{} {}; {} {}'.format(c, -s, s, c))
while i < len(com_xy) and i - start < self.switch_lat_every - self.start_bonus_after:
i += 1
com_xy[start:i] = np.dot(R, com_xy[start:i].T).T
xy = com_xy[start]
com_xy[start:i] -= xy
while i < len(com_xy) and i - start < self.switch_lat_every: # skip some! compare to above
i += 1
# now translate and scale the coms!
coms = np.ceil((com_xy + self.furthest) * self.mesh_density).astype(int)
counts = []
start = 0
for i, com in enumerate(coms):
if i - start == self.switch_lat_every:
start = i
if i - start < self.start_bonus_after:
counts.append(np.inf) # this is the way of zeroing out the reward for the first steps
else:
counts.append(self.visitation_all[tuple(com)])
return 1. / np.maximum(1., np.sqrt(counts))
def predict_entropy(self, path):
if 'env_infos' in path.keys() and 'full_path' in path['env_infos'].keys():
path = tensor_utils.flatten_first_axis_tensor_dict(path['env_infos']['full_path'])
if 'env_infos' in list(path.keys()) and 'com' in list(path['env_infos'].keys()):
com_xy = np.array(path['env_infos']['com'][:, 0:2])
else:
com_xy = np.array(path['observations'][:, -3:-1])[:, [1, 0]]
if self.virtual_reset: # change the com according to switch_lat_every or resets
i = self.start_bonus_after
while i < len(com_xy):
start = i
ori = path['env_infos']['ori'][i - self.start_bonus_after]
c = np.float(np.cos(ori))
s = np.float(np.sin(ori))
R = np.matrix('{} {}; {} {}'.format(c, -s, s, c))
while i < len(com_xy) and i - start < self.switch_lat_every - self.start_bonus_after:
i += 1
com_xy[start:i] = np.dot(R, com_xy[start:i].T).T
xy = com_xy[start]
com_xy[start:i] -= xy
while i < len(com_xy) and i - start < self.switch_lat_every: # skip some! compare to above
i += 1
# now translate and scale the coms!
coms = np.ceil((com_xy + self.furthest) * self.mesh_density).astype(int)
freqs = []
lats = [np.nonzero(lat)[0][0] for lat in path['agent_infos']['latents']]
start = 0
for i, com in enumerate(coms):
if i - start == self.switch_lat_every:
start = i
if i - start < self.start_bonus_after:
freqs.append(
                    1.) # this is tricky because it will be higher than the other rewards!! (negatives) -> at least bonus for staying alive until the transition
else:
freqs.append(self.dict_visit[lats[i]][tuple(com)] / self.visitation_all[tuple(com)])
return np.log(freqs)
def predict_dist_from_reset(self, path):
if 'env_infos' in path.keys() and 'full_path' in path['env_infos'].keys():
path = tensor_utils.flatten_first_axis_tensor_dict(path['env_infos']['full_path'])
if 'env_infos' in list(path.keys()) and 'com' in list(path['env_infos'].keys()):
com_xy = np.array(path['env_infos']['com'][:, 0:2])
else:
com_xy = np.array(path['observations'][:, -3:-1])[:, [1, 0]]
if self.virtual_reset: # change the com according to switch_lat_every or resets
i = self.start_bonus_after
while i < len(com_xy):
start = i
ori = path['env_infos']['ori'][i - self.start_bonus_after]
c = np.float(np.cos(ori))
s = np.float(np.sin(ori))
R = np.matrix('{} {}; {} {}'.format(c, -s, s, c))
while i < len(com_xy) and i - start < self.switch_lat_every - self.start_bonus_after:
i += 1
com_xy[start:i] = np.dot(R, com_xy[start:i].T).T
xy = com_xy[start]
com_xy[start:i] -= xy
while i < len(com_xy) and i - start < self.switch_lat_every: # skip some! compare to above
i += 1
# now translate and scale the coms!
coms = np.ceil((com_xy + self.furthest) * self.mesh_density).astype(int)
dists_from_reset = []
start = 0
for i, com in enumerate(coms):
if i - start == self.switch_lat_every:
start = i
if i - start < self.start_bonus_after:
dists_from_reset.append(
                    0.) # this is tricky because it will be higher than the other rewards!! (negatives) -> at least bonus for staying alive until the transition
else:
dists_from_reset.append(np.linalg.norm(com - coms[start + self.start_bonus_after]))
return np.array(dists_from_reset)
def predict(self, path):
if 'env_infos' in path.keys() and 'full_path' in path['env_infos'].keys():
expanded_path = tensor_utils.flatten_first_axis_tensor_dict(path['env_infos']['full_path'])
else: # when it comes from log_diagnostics it's already expanded (or if it was never aggregated)
expanded_path = path
bonus = self.visitation_bonus * self.predict_count(expanded_path) + \
self.dist_from_reset_bonus * self.predict_dist_from_reset(expanded_path)
if self.snn_H_bonus: # I need the if because the snn bonus is only available when there are latents
bonus += self.snn_H_bonus * self.predict_entropy(expanded_path)
total_bonus = bonus + self.survival_bonus * np.ones_like(bonus)
if 'env_infos' in path.keys() and 'full_path' in path['env_infos'].keys():
aggregated_bonus = []
full_path_rewards = path['env_infos']['full_path']['rewards']
total_steps = 0
for sub_rewards in full_path_rewards:
aggregated_bonus.append(np.sum(total_bonus[total_steps:total_steps + len(sub_rewards)]))
total_steps += len(sub_rewards)
total_bonus = aggregated_bonus
return np.array(total_bonus)
def fit_after_process_samples(self, samples_data):
pass
def log_diagnostics(self, paths):
if 'env_infos' in paths[0].keys() and 'full_path' in paths[0]['env_infos'].keys():
paths = [tensor_utils.flatten_first_axis_tensor_dict(path['env_infos']['full_path']) for path in paths]
fig, ax = plt.subplots()
overlap = 0 # keep track of the overlap
delta = 1. / self.mesh_density
y, x = np.mgrid[-self.furthest:self.furthest + delta:delta, -self.furthest:self.furthest + delta:delta]
if 'agent_infos' in list(paths[0].keys()) and (('latents' in list(paths[0]['agent_infos'].keys())
and np.size(paths[0]['agent_infos']['latents'])) or
('selectors' in list(paths[0]['agent_infos'].keys())
and np.size(paths[0]['agent_infos']['selectors']))):
# fix the colors for each latent
num_colors = self.num_latents + 2 # +2 for the 0 and Repetitions NOT COUNTING THE WALLS
# create a matrix with entries corresponding to the latent that was there (or other if several/wall/nothing)
size_grid = int(2 * self.furthest * self.mesh_density + 1)
self.visitation_by_lat = np.zeros(
(size_grid, size_grid))
for i, visit in self.dict_visit.items():
lat_visit = np.where(visit == 0, visit, i + 1) # transform the map into 0 or i+1
self.visitation_by_lat += lat_visit
overlap += np.sum(np.where(self.visitation_by_lat > lat_visit)) # add the overlaps of this latent
self.visitation_by_lat = np.where(self.visitation_by_lat <= i + 1, self.visitation_by_lat,
num_colors - 1) # mark overlaps
cmap = plt.get_cmap('nipy_spectral', num_colors)
map_plot = ax.pcolormesh(x, y, self.visitation_by_lat, cmap=cmap, vmin=0.1,
vmax=self.num_latents + 1) # before 1 (will it affect when no walls?)
color_len = (num_colors - 1.) / num_colors
ticks = np.arange(color_len / 2., num_colors - 1, color_len)
cbar = fig.colorbar(map_plot, ticks=ticks)
latent_tick_labels = ['latent: ' + str(i) for i in list(self.dict_visit.keys())]
cbar.ax.set_yticklabels(
['No visitation'] + latent_tick_labels + ['Repetitions']) # horizontal colorbar
else:
plt.pcolormesh(x, y, self.visitation_all, vmax=self.mesh_density)
overlap = np.sum(
np.where(self.visitation_all > 1, self.visitation_all, 0)) # sum of all visitations larger than 1
ax.set_xlim([x[0][0], x[0][-1]])
ax.set_ylim([y[0][0], y[-1][0]])
log_dir = logger.get_snapshot_dir()
exp_name = log_dir.split('/')[-1] if log_dir else '?'
ax.set_title('visitation_Bonus: ' + exp_name)
plt.savefig(osp.join(log_dir, 'visitation_Gbonus.png')) # this saves the current figure, here f
plt.close()
plt.cla()
plt.clf()
plt.close('all')
# del fig, ax, cmap, cbar, map_plot
gc.collect()
visitation_different = np.count_nonzero(self.visitation_all)
logger.record_tabular('VisitationDifferents', visitation_different)
logger.record_tabular('VisitationOverlap', overlap)
logger.record_tabular('VisitationMin', np.min(self.visitation_all))
logger.record_tabular('VisitationMax', np.max(self.visitation_all))
if self.snn_H_bonus:
avg_grid_entropy_bonus = np.mean([np.sum(self.predict_entropy(path)) for path in paths])
logger.record_tabular('AvgPath_Grid_EntropyBonus', avg_grid_entropy_bonus)
# if self.visitation_bonus:
avg_grid_count_bonus = np.mean([np.sum(self.predict_count(path)) for path in paths])
logger.record_tabular('AvgPath_Grid_CountBonus', avg_grid_count_bonus)
# if self.visitation_bonus:
avg_grid_dist_bonus = np.mean([np.sum(self.predict_dist_from_reset(path)) for path in paths])
logger.record_tabular('AvgPath_Grid_DistBonus', avg_grid_dist_bonus)
# if self.survival_bonus:
avg_survival_bonus = np.mean([len(path['rewards']) for path in paths])
logger.record_tabular('AvgPath_SurviBonus', avg_survival_bonus)
avg_grid_bonus = np.mean([np.sum(self.predict(path)) for path in paths])
logger.record_tabular('AvgPathGridBonus', avg_grid_bonus)
| mit | -7,675,450,768,337,061,000 | 51.468085 | 161 | 0.550805 | false | 3.478839 | false | false | false |
modlinltd/ModelAdmin-Mixins | modeladmin_utils/mixins/limited_inlines.py | 2 | 1256 | from django.conf import settings
from django.forms.models import BaseInlineFormSet
class LimitedInlineFormset(BaseInlineFormSet):
"""
A specialized subclass of BaseInlineFormSet which limits the queryset
to a maximum (specified in settings: default to 15).
"""
limiting_inlines = True
def get_queryset(self):
if not hasattr(self, "_queryset"):
qs = super(LimitedInlineFormset, self).get_queryset()
limit = getattr(settings, "INLINES_MAX_LIMIT", 15)
self.total_count = qs.count()
self._queryset = qs[:limit]
self.limited_count = self._queryset.count()
return self._queryset
class LimitInlinesAdminMixin(object):
"""
Set ModelAdmin.limit_inlines to a tuple of InlineModelAdmin
classes you wish to be limited.
Overrides the inline formset with `LimitedInlineFormset`.
"""
def get_formsets(self, request, obj=None):
limit_inlines = getattr(self, "limit_inlines", [])
for inline in self.get_inline_instances(request, obj):
kwargs = {}
if inline.__class__ in limit_inlines:
kwargs['formset'] = LimitedInlineFormset
yield inline.get_formset(request, obj, **kwargs)
| mit | 2,485,422,627,217,576,000 | 34.885714 | 73 | 0.650478 | false | 4.331034 | false | false | false |
degoldschmidt/pytrack-analysis | scripts/run_posttracking.py | 1 | 18972 | import argparse
import subprocess, os
import os.path as op
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pytrack_analysis import Multibench
from pytrack_analysis.dataio import VideoRawData
from pytrack_analysis.profile import get_profile, get_scriptname, show_profile
from pytrack_analysis.image_processing import ShowOverlay, WriteOverlay, PixelDiff
import pytrack_analysis.preprocessing as prp
import pytrack_analysis.plot as plot
from pytrack_analysis.yamlio import write_yaml
from scipy import signal
from scipy.signal import hilbert
def nan_helper(y):
"""Helper to handle indices and logical indices of NaNs.
Input:
- y, 1d numpy array with possible NaNs
Output:
- nans, logical indices of NaNs
- index, a function, with signature indices= index(logical_indices),
to convert logical indices of NaNs to 'equivalent' indices
Example:
>>> # linear interpolation of NaNs
>>> nans, x= nan_helper(y)
>>> y[nans]= np.interp(x(nans), x(~nans), y[~nans])
"""
return np.isnan(y), lambda z: z.nonzero()[0]
def remove_mistrack(x, y, ma, mi, thr=100.*0.0333, forced=False):
xnew, ynew = x.copy(), y.copy()
dx, dy = np.append(0, np.diff(x)), np.append(0, np.diff(y))
displ = np.sqrt(dx**2 + dy**2)
area = np.multiply(ma,mi)
xnew[area > 10] = np.nan
ynew[area > 10] = np.nan
xnew[area < 2] = np.nan
ynew[area < 2] = np.nan
#print(displ)
ides = np.where(displ > thr)[0]
#print(ides)
"""
for jj, each in enumerate(ides):
if jj == 0:
print(each)
if len(ides) > 1:
xnew[ides[jj]:ides[jj+1]] = np.nan
ynew[ides[jj]:ides[jj+1]] = np.nan
else:
xnew[ides[jj]:] = np.nan
ynew[ides[jj]:] = np.nan
if jj < len(ides)-1:
print(jj, np.mean(ma[ides[jj]:ides[jj+1]])*np.mean(mi[ides[jj]:ides[jj+1]]), ma[each]*mi[each])
if forced or np.mean(ma[ides[jj]:ides[jj+1]])*np.mean(mi[ides[jj]:ides[jj+1]]) > 10 or np.mean(ma[ides[jj]:ides[jj+1]])*np.mean(mi[ides[jj]:ides[jj+1]]) < 2:
xnew[ides[jj]:ides[jj+1]] = np.nan
ynew[ides[jj]:ides[jj+1]] = np.nan
"""
ma[np.isnan(xnew)] = np.mean(ma)
mi[np.isnan(xnew)] = np.mean(mi)
nans, xind = nan_helper(xnew)
xnew[nans]= np.interp(xind(nans), xind(~nans), xnew[~nans])
nans, yind = nan_helper(ynew)
ynew[nans]= np.interp(yind(nans), yind(~nans), ynew[~nans])
return xnew, ynew, ma, mi
### TODO: move this to signal processing module
def gaussian_filter(_X, _len=16, _sigma=1.6):
norm = np.sqrt(2*np.pi)*_sigma ### Scipy's gaussian window is not normalized
window = signal.gaussian(_len+1, std=_sigma)/norm
convo = np.convolve(_X, window, "same")
## eliminate boundary effects
convo[:_len] = _X[:_len]
convo[-_len:] = _X[-_len:]
return convo
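# Minimal usage sketch for the filter above (illustrative only, not called
# anywhere in the pipeline): smooth a noisy 1-D signal with the default
# 16-sample Gaussian window.
def _gaussian_filter_example():
    noisy = np.sin(np.linspace(0, 2 * np.pi, 200)) + 0.1 * np.random.randn(200)
    return gaussian_filter(noisy, _len=16, _sigma=1.6)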
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('basedir', metavar='basedir', type=str, help='directory where your data files are')
parser.add_argument('--option', action='store', type=str)
parser.add_argument('--overwrite', action='store_true')
BASEDIR = parser.parse_args().basedir
OVERWRITE = parser.parse_args().overwrite
if parser.parse_args().option is None:
OPTION = 'all'
else:
OPTION = parser.parse_args().option
return BASEDIR, OPTION, OVERWRITE
def main():
BASEDIR, OPTION, OVERWRITE = get_args()
### Define raw data structure
colnames = ['datetime', 'elapsed_time', 'frame_dt', 'body_x', 'body_y', 'angle', 'major', 'minor']
if not op.isdir(op.join(BASEDIR, 'pytrack_res')):
os.mkdir(op.join(BASEDIR, 'pytrack_res'))
RESULT = op.join(BASEDIR, 'pytrack_res')
if not op.isdir(op.join(RESULT,'post_tracking')):
os.mkdir(op.join(RESULT,'post_tracking'))
if not op.isdir(op.join(RESULT,'pixeldiff')):
os.mkdir(op.join(RESULT,'pixeldiff'))
if not op.isdir(op.join(RESULT,'jumps')):
os.mkdir(op.join(RESULT,'jumps'))
raw_data = VideoRawData(BASEDIR, VERBOSE=(OPTION == 'registration'))
if OPTION == 'registration':
return 1
### go through all session
for iv, video in enumerate(raw_data.videos):
if iv > 28:
continue
Nflies = 4
print('\n{}: {}'.format(iv, video.name))
### arena + food spots
video.load_arena()
### trajectory data
video.load_data()
### rename columns
video.data.reindex(colnames)
### data to timestart
video.data.to_timestart(video.timestart)
### calculate displacements
x, y, tx, ty, bx, by = [], [], [], [], [], []
jumps, dr, dddr, thr, flipped = [], [], [], [], []
wo = WriteOverlay(video.fullpath, outfolder=op.join(RESULT,'jumps'))
### plotting speed, major/minor, decision points etc
f, axes = plt.subplots(12, figsize=(6,10)) ### TODO
print('extract trajectories...')
for i in range(Nflies):
"""
Extract some kinematics
"""
ff = int(video.data.dfs[i].index[0])
lf = int(video.data.dfs[i].index[-1])
st = 0
en = min(lf-ff, 108100)
xpos = video.data.dfs[i]['body_x'].interpolate().fillna(method='ffill').fillna(method='bfill')
ypos = video.data.dfs[i]['body_y'].interpolate().fillna(method='ffill').fillna(method='bfill')
m = video.data.dfs[i]['major'].interpolate().fillna(method='ffill').fillna(method='bfill')
angle = video.data.dfs[i]['angle'].interpolate().fillna(method='ffill').fillna(method='bfill')
x.append(xpos+0.5*m*np.cos(angle))
y.append(ypos+0.5*m*np.sin(angle))
tx.append(xpos-0.5*m*np.cos(angle))
ty.append(ypos-0.5*m*np.sin(angle))
bx.append(xpos)
by.append(ypos)
"""
PixelDiff Algorithm
"""
print('pixeldiff...')
_ofile = op.join(RESULT,'pixeldiff','pixeldiff_{}.csv'.format(video.timestr))
if op.isfile(_ofile):
pxd_data = pd.read_csv(_ofile, index_col='frame')
else:
pxdiff = PixelDiff(video.fullpath, start_frame=video.data.first_frame)
px, tpx = pxdiff.run((x,y), (tx,ty), en, show=False)
pxd_data = pd.DataFrame({ 'headpx_fly1': px[:,0], 'tailpx_fly1': tpx[:,0],
'headpx_fly2': px[:,1], 'tailpx_fly2': tpx[:,1],
'headpx_fly3': px[:,2], 'tailpx_fly3': tpx[:,2],
'headpx_fly4': px[:,3], 'tailpx_fly4': tpx[:,3],})
pxd_data.to_csv(_ofile, index_label='frame')
print('head detection...')
for i in range(Nflies):
ff = int(video.data.dfs[i].index[0])
lf = int(video.data.dfs[i].index[-1])
st = 0
en = min(lf-ff, 108100)
xpos = video.data.dfs[i]['body_x'].interpolate().fillna(method='ffill').fillna(method='bfill')
ypos = video.data.dfs[i]['body_y'].interpolate().fillna(method='ffill').fillna(method='bfill')
m = video.data.dfs[i]['major'].interpolate().fillna(method='ffill').fillna(method='bfill')
angle = video.data.dfs[i]['angle'].interpolate().fillna(method='ffill').fillna(method='bfill')
mi = video.data.dfs[i]['minor'].interpolate().fillna(method='ffill').fillna(method='bfill')
if np.any(np.isnan(xpos)) or np.any(np.isnan(ypos)) or np.any(np.isnan(m)) or np.any(np.isnan(angle)):
print(np.any(np.isnan(xpos)), np.any(np.isnan(ypos)), np.any(np.isnan(m)), np.any(np.isnan(angle)))
dt = video.data.dfs[i]['frame_dt']
dx, dy = np.append(0, np.diff(xpos)), np.append(0, np.diff(-ypos))
dx, dy = np.divide(dx, dt), np.divide(dy, dt)
theta = np.arctan2(dy, dx)
### pixel data from pixeldiff
hpx = np.array(pxd_data['headpx_fly{}'.format(i+1)])
wlen = 36
hpx = gaussian_filter(hpx, _len=wlen, _sigma=0.1*wlen)
tpx = np.array(pxd_data['tailpx_fly{}'.format(i+1)])
tpx = gaussian_filter(tpx, _len=wlen, _sigma=0.1*wlen)
"""
diff of diff of displacements (spikes are more pronounced)
"""
dr.append(np.sqrt(dx*dx+dy*dy)/float(video.arena[i]['scale']))
ddr = np.append(0, np.diff(dr[-1]))
dddr.append(np.append(0, np.diff(ddr)))
#wlen = 36
#dr_sm = gaussian_filter(np.array(dr), _len=wlen, _sigma=0.1*wlen)
wlen = 120
dddr_sm = gaussian_filter(np.array(np.abs(dddr[-1])), _len=wlen, _sigma=0.5*wlen)
"""
Thresholding
"""
threshold = 10.*dddr_sm
low, high = 10., 30.
threshold[threshold<low] = low
threshold[threshold>high] = high
thr.append(threshold)
#### TODO
jumps.append(np.array(np.array(dddr[-1])[st:en] > threshold[st:en]))
mistrack_inds = np.where(np.array(dddr[-1])[st:en] > threshold[st:en])[0]
"""
Rolling mean of pixeldiff for flips (window = 10 secs)
"""
pxthr = np.array(tpx[st:en] < hpx[st:en])
pxavg = np.zeros(pxthr.shape)
for frm in range(pxavg.shape[0]):
e = frm + 300
if e >= pxavg.shape[0]:
e = pxavg.shape[0]-1
if frm == e:
pxavg[frm] = pxthr[frm]
else:
pxavg[frm] = np.mean(pxthr[frm:e])
### plot
axes[3*i].plot(dddr[-1][st:en], 'k-', lw=0.5)
axes[3*i].plot(threshold[st:en], '--', color='#fa6800', lw=0.5)
axes[3*i].plot(mistrack_inds, 50.*np.ones(len(mistrack_inds)), 'o', color='#d80073', markersize=2)
axes[3*i].set_ylim([-5,55])
axes[3*i].set_yticks(np.arange(0,60,25))
### plot 2nd
axes[3*i+1].plot(hpx[st:en], '-', color='#fa0078', lw=0.5)
axes[3*i+1].plot(tpx[st:en], '-', color='#00fa64', lw=0.5)
axes[3*i+1].plot(100.*pxthr, '--', color='#6e6e6e', lw=0.5)
axes[3*i+1].plot(100.*pxavg, '-', color='#000000', lw=0.5)
axes[3*i+1].set_ylim([0,255])
axes[3*i+2].plot(m[st:en]/video.arena[i]['scale'], '-', color='#ff2f2f', lw=0.5)
axes[3*i+2].plot(mi[st:en]/video.arena[i]['scale'], '-', color='#008dff', lw=0.5)
axes[3*i+2].plot((m[st:en]*mi[st:en])/video.arena[i]['scale'], '--', color='#6f6f6f', lw=0.5)
axes[3*i+2].set_ylim([-1,6])
axes[3*i+2].set_yticks(np.arange(0,7,2))
####
view = (video.arena[i]['x']-260, video.arena[i]['y']-260, 520, 520)
sf, ef = st+ff, en+ff
total_dur = int((video.data.dfs[i].loc[lf,'elapsed_time'] - video.data.dfs[i].loc[ff,'elapsed_time'])/60.)
secs = int(round(video.data.dfs[i].loc[lf,'elapsed_time'] - video.data.dfs[i].loc[ff,'elapsed_time']))%60
if OPTION == 'jump_detection':
print("fly {}:\tstart@ {} ({} >= {}) total: {}:{:02d} mins ({} frames)".format(i+1, ff, video.data.dfs[i].loc[ff,'datetime'], video.timestart, total_dur, secs, en-st))
thrs = np.array(np.array(dddr[i])[st:en] > threshold[st:en])
flip = np.zeros(thrs.shape)
flipped.append(flip)
thr_ix = np.append(np.append(0, np.where(thrs)[0]), len(flip)+ff)
if OPTION == 'jump_detection':
print('found {} detection points (start, jumps, mistracking, etc.).'.format(len(thr_ix)-1))
count = 0
if len(thr_ix) > 0:
for jj,ji in enumerate(thr_ix[:-1]):
fromfr = thr_ix[jj] + ff
tofr = thr_ix[jj+1] + ff - 1
flip[thr_ix[jj]:thr_ix[jj+1]] = np.mean(pxthr[thr_ix[jj]:thr_ix[jj+1]])>0.5
if flip[thr_ix[jj]] == 1:
x[i].loc[fromfr:tofr], tx[i].loc[fromfr:tofr] = tx[i].loc[fromfr:tofr], x[i].loc[fromfr:tofr]
y[i].loc[fromfr:tofr], ty[i].loc[fromfr:tofr] = ty[i].loc[fromfr:tofr], y[i].loc[fromfr:tofr]
clip_st, clip_en = fromfr-60, fromfr+60
if clip_st < int(video.data.dfs[i].index[0]):
clip_st = int(video.data.dfs[i].index[0])
if clip_en > int(video.data.dfs[i].index[-1]):
clip_en = int(video.data.dfs[i].index[-1])
if clip_en - clip_st < 30:
continue
count += 1
_ofile = op.join(RESULT,'jumps','{}'.format(video.name[:-4]), 'fly{}_{:06d}.avi'.format(i+1, fromfr))
if not op.isfile(_ofile):
wo.run((bx[i].loc[clip_st:clip_en], by[i].loc[clip_st:clip_en]), (x[i].loc[clip_st:clip_en], y[i].loc[clip_st:clip_en]), clip_st, clip_en, fromfr, view, i, bool=[thr, flip])
video.data.dfs[i].loc[:, 'head_x'] = x[i]
video.data.dfs[i].loc[:, 'head_y'] = y[i]
if OPTION == 'jump_detection':
print('wrote {} videos.'.format(count))
mistracked = np.sum(dr[-1] > 100)
print('Mistracked frames:', mistracked)
window_len = 36
if not op.isdir(op.join(RESULT,'plots')):
os.mkdir(op.join(RESULT,'plots'))
if not op.isdir(op.join(RESULT,'plots', 'posttracking')):
os.mkdir(op.join(RESULT,'plots', 'posttracking'))
f.savefig(op.join(RESULT,'plots', 'posttracking','speed_{}.png'.format(video.timestr)), dpi=600)
if OPTION == 'jump_detection':
continue
labels = ['topleft', 'topright', 'bottomleft', 'bottomright']
print('pack data...')
for i in range(Nflies):
df = video.data.dfs[i].loc[sf:ef-1]
df.is_copy = False
df.loc[:, ('flipped')] = np.array(flipped[i])
df.loc[:, 'jumps'] = jumps[i]
df.loc[:, 'dr'] = dr[i][st:en]
df.loc[:, 'dddr'] = dddr[i][st:en]
df.loc[:, 'threshold'] = thr[i][st:en]
dx, dy = df['head_x'] - df['body_x'], df['body_y'] - df['head_y']
df.loc[:, 'angle'] = np.arctan2(dy, dx)
df.loc[:, 'body_x'] -= video.arena[i]['x']
df.loc[:, 'body_y'] -= video.arena[i]['y']
df.loc[:, 'body_x'] /= video.arena[i]['scale']
df.loc[:, 'body_y'] /= -video.arena[i]['scale']
df.loc[:, 'major'] /= video.arena[i]['scale']
df.loc[:, 'minor'] /= video.arena[i]['scale']
print('x: ', np.amax(df['body_x']), np.amin(df['body_x']))
print('y: ', np.amax(df['body_y']), np.amin(df['body_y']))
print('major/minor: ', np.mean(df['major']), np.mean(df['minor']))
outdf = df[['datetime', 'elapsed_time', 'frame_dt', 'body_x', 'body_y', 'angle', 'major', 'minor', 'flipped']]
outfile = op.join(RESULT,'post_tracking','{}_{:03d}.csv'.format(raw_data.experiment['ID'], i+iv*4))
print('saving to ', outfile)
outdf.to_csv(outfile, index_label='frame')
### metadata
meta = {}
meta['arena'] = video.arena[i]
meta['arena']['layout'] = '6-6 radial'
meta['arena']['name'] = labels[i]
meta['condition'] = ' '.join([v[i] for k,v in raw_data.experiment['Conditions'][video.name].items()])
meta['datafile'] = outfile
meta['datetime'] = video.time
meta['flags'] = {}
meta['flags']['mistracked_frames'] = int(mistracked)
meta['fly'] = {}
meta['fly']['mating'] = raw_data.experiment['Constants']['mating']
meta['fly']['metabolic'] = raw_data.experiment['Constants']['metabolic']
meta['fly']['sex'] = raw_data.experiment['Constants']['sex']
meta['fly']['genotype'] = raw_data.experiment['Conditions'][video.name]['genotype'][i]
meta['fly']['temperature'] = raw_data.experiment['Conditions'][video.name]['temperature'][i]
#meta['fly']['genetic manipulation'] = raw_data.experiment['Conditions'][video.name]['genetic manipulation'][i] === Kir
meta['food_spots'] = video.spots[i]
meta['setup'] = {}
meta['setup']['humidity'] = raw_data.experiment['Constants']['humidity']
meta['setup']['light'] = raw_data.experiment['Constants']['light']
meta['setup']['n_per_arena'] = raw_data.experiment['Constants']['n_per_arena']
meta['setup']['room'] = raw_data.experiment['Constants']['room']
meta['setup']['temperature'] = raw_data.experiment['Conditions'][video.name]['temperature'][i] # raw_data.experiment['Constants']['temperature']
meta['video'] = {}
meta['video']['dir'] = video.dir
meta['video']['file'] = video.fullpath
meta['video']['first_frame'] = int(outdf.index[0])
meta['video']['start_time'] = video.timestart
yamlfile = op.join(RESULT,'post_tracking','{}_{:03d}.yaml'.format(raw_data.experiment['ID'], i+iv*4))
write_yaml(yamlfile, meta)
### plot trajectory
plotfile = op.join(RESULT,'plots','{}_{:03d}.png'.format(raw_data.experiment['ID'], i+iv*4))
f, ax = plt.subplots(figsize=(10,10))
ax = plot.arena(video.arena[i], video.spots[i], ax=ax)
x, y, jumps, major, minor = np.array(df['body_x']), np.array(df['body_y']), np.array(df['jumps']), np.array(df['major']), np.array(df['minor'])
#ax.plot(x, y, c='#595959', zorder=1, lw=.5, alpha=0.5)
xnew, ynew, major, minor = remove_mistrack(x, y, major, minor)
xnew, ynew, major, minor = remove_mistrack(xnew, ynew, major, minor, thr=300.*0.0333, forced=True)
ends = 108100
ax.plot(x[0], y[0], '.', c='#00ff4f', alpha=0.75, zorder=10)
ax.plot(x[ends-1], y[ends-1], '.', c='#ff3d00', alpha=0.75, zorder=10)
#ax.plot(x[:ends], y[:ends], '-', c='#00e0ff', lw=1, alpha=0.5)
ax.plot(xnew[:ends], ynew[:ends], '-', c='#ff00ff', lw=1, alpha=0.5)
color = jumps
color[jumps==1] = '#ff0000'
color[jumps==0] = '#b1b1b1'
#ax.scatter(x, y, c=displ, s=5, cmap=plt.get_cmap('YlOrRd'), alpha=0.9, edgecolors='none', linewidths=0)
f.savefig(plotfile, dpi=300)
###
video.unload_data()
if __name__ == '__main__':
# runs as benchmark test
test = Multibench("", SILENT=False, SLIM=True)
test(main)
del test
| gpl-3.0 | -8,736,967,817,184,372,000 | 48.535248 | 197 | 0.531467 | false | 3.110674 | false | false | false |
liuzzfnst/tp-libvirt | libvirt/tests/src/virsh_cmd/pool/virsh_find_storage_pool_sources_as.py | 6 | 3322 | import logging
import os
from virttest import virsh, utils_test, utils_misc
from autotest.client import utils, lv_utils
from autotest.client.shared import error
def run(test, params, env):
"""
Test command: virsh find-storage-pool-sources-as
1. Prepare env to provide source storage:
1). For 'netfs' source type, setup nfs server
2). For 'iscsi' source type, setup iscsi server
       3). For 'logical' type pool, setup iscsi storage to create vg
2. Find the pool source by running virsh cmd
"""
source_type = params.get("source_type", "")
source_host = params.get("source_host", "127.0.0.1")
source_port = params.get("source_port", "")
options = params.get("extra_options", "")
vg_name = params.get("vg_name", "virttest_vg_0")
ro_flag = "yes" == params.get("readonly_mode", "no")
status_error = "yes" == params.get("status_error", "no")
if not source_type:
raise error.TestFail("Command requires <type> value")
cleanup_nfs = False
cleanup_iscsi = False
cleanup_logical = False
if source_host == "127.0.0.1":
if source_type == "netfs":
# Set up nfs
res = utils_test.libvirt.setup_or_cleanup_nfs(True)
selinux_bak = res["selinux_status_bak"]
cleanup_nfs = True
if source_type in ["iscsi", "logical"]:
# Set up iscsi
try:
iscsi_device = utils_test.libvirt.setup_or_cleanup_iscsi(True)
# If we got nothing, force failure
if not iscsi_device:
raise error.TestFail("Did not setup an iscsi device")
cleanup_iscsi = True
if source_type == "logical":
# Create VG by using iscsi device
lv_utils.vg_create(vg_name, iscsi_device)
cleanup_logical = True
except Exception, detail:
if cleanup_iscsi:
utils_test.libvirt.setup_or_cleanup_iscsi(False)
raise error.TestFail("iscsi setup failed:\n%s" % detail)
# Run virsh cmd
options = "%s %s " % (source_host, source_port) + options
if ro_flag:
logging.debug("Readonly mode test")
try:
cmd_result = virsh.find_storage_pool_sources_as(
source_type,
options,
ignore_status=True,
debug=True,
readonly=ro_flag)
output = cmd_result.stdout.strip()
err = cmd_result.stderr.strip()
status = cmd_result.exit_status
if not status_error:
if status:
raise error.TestFail(err)
else:
                logging.debug("Command output:\n%s", output)
elif status_error and status == 0:
raise error.TestFail("Expect fail, but run successfully")
finally:
# Clean up
if cleanup_logical:
cmd = "pvs |grep %s |awk '{print $1}'" % vg_name
pv_name = utils.system_output(cmd)
lv_utils.vg_remove(vg_name)
utils.run("pvremove %s" % pv_name)
if cleanup_iscsi:
utils_test.libvirt.setup_or_cleanup_iscsi(False)
if cleanup_nfs:
utils_test.libvirt.setup_or_cleanup_nfs(
False, restore_selinux=selinux_bak)
| gpl-2.0 | -5,908,163,533,441,761,000 | 36.325843 | 78 | 0.569235 | false | 3.926714 | true | false | false |
iwanders/gps_track_pod | gpspod/debug.py | 1 | 5513 | #!/usr/bin/env python3
# The MIT License (MIT)
#
# Copyright (c) 2016 Ivor Wanders
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
from .usb_pdml import USBPDML
from .protocol import USBPacket, USBPacketFeed, load_msg
from . import protocol
from . import pmem
import pickle
import json
import gzip
import base64
def load_pdml_usb(path):
# check if we have a cached version available
if (path.endswith(".pickle3")):
with open(path, "rb") as f:
interactions = pickle.load(f)
else:
conversation = USBPDML(path)
conversation.parse_file()
interactions = conversation.interaction()
# write the cached version
with open(path + ".pickle3", "wb") as f:
pickle.dump(interactions, f)
entries = {"incoming": [], "outgoing": []}
start_time = None
index = 0
for msg in interactions:
index += 1
if (start_time is None):
start_time = msg["time"]
t = msg["time"] - start_time
if "data" in msg:
data = bytes(msg["data"])
direction = msg["direction"]
if direction == "<":
entries["incoming"].append((t, data))
else:
entries["outgoing"].append((t, data))
return entries
def load_json_usb(path):
opener = gzip.open if path.endswith(".gz") else open
with opener(path, "rt") as f:
rawentries = json.load(f)
entries = {"incoming": [], "outgoing": []}
for d in entries.keys():
for t, v in rawentries[d]:
entries[d].append((t, base64.b64decode(v)))
return entries
def order_entries_and_combine(entries):
one_list = []
for d in entries.keys():
for z in entries[d]:
one_list.append((z[0], d, z[1]))
return sorted(one_list, key=lambda d: d[0])
def load_usb_transactions(path):
if (path.count(".xml") != 0):
data = load_pdml_usb(path)
return data
if (path.count(".json")):
data = load_json_usb(path)
return data
def reconstruct_filesystem(path, output_file):
data = load_usb_transactions(path)
fs_bytes = bytearray(pmem.FILESYSTEM_SIZE)
touched_fs = bytearray(pmem.FILESYSTEM_SIZE)
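    # touched_fs marks which bytes of the reconstructed image were actually
    # observed in the capture, so missing ranges can be reported below.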
feed = USBPacketFeed()
for t, v in data["incoming"]:
usb_packet = USBPacket.read(v)
res = feed.packet(usb_packet)
if (res):
msg = load_msg(res)
if (type(msg) == protocol.DataReply):
pos = msg.position()
length = msg.length()
fs_bytes[pos:pos+length] = bytes(msg.content())
touched_fs[pos:pos+length] = bytearray(
[1 for i in range(length)])
missing = False
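    # Walk the touched map and report contiguous byte ranges that never
    # appeared in the capture.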
for i in range(len(touched_fs)):
v = touched_fs[i]
if (v == 0):
if (missing is False):
print("Missing from 0x{:0>4X}".format(i), end="")
missing = True
else:
if (missing is True):
print(" up to 0x{:0>4X}".format(i))
missing = False
if (missing is True):
print(" up to 0x{:0>4X}".format(i))
with open(output_file, "wb") as f:
f.write(fs_bytes)
def print_interaction(path):
dir_specific = {
"incoming": {
"feed": USBPacketFeed(),
"color": "\033[1;32m{0}\033[00m",
},
"outgoing": {
"feed": USBPacketFeed(),
"color": "\033[1;34m{0}\033[00m",
}
}
data = load_usb_transactions(path)
# lets just start with outgoing always.
combined_entries = order_entries_and_combine(data)
start_time = combined_entries[0][0]
packet_counter = 0
for time, direction, data in combined_entries:
packet_counter += 1
reltime = time - start_time
usb_packet = USBPacket.read(data)
res = dir_specific[direction]["feed"].packet(usb_packet)
if (res):
# print(" ".join(["{:0>2X}".format(a) for a in bytes(res)]))
message = load_msg(res)
if (not message):
print("Something is very wrong, message: {}".format(message))
continue
print(dir_specific[direction]["color"].format(
"#{:0>5d}, t: {:0>6.3f} {:r}".format(packet_counter,
reltime, message)))
# print(" ".join(["{:0>2X}".format(a) for a in bytes(message)]))
| mit | -6,124,315,373,774,978,000 | 31.815476 | 79 | 0.583711 | false | 3.86335 | false | false | false |
mread/buck | src/com/facebook/buck/parser/buck.py | 1 | 15641 | from __future__ import with_statement
import __future__
import copy
import fnmatch
import functools
import glob as glob_module
import imp
import inspect
import itertools
import json
from pathlib import Path
import optparse
import os
import os.path
import re
import sys
# When build files are executed, the functions in this file tagged with
# @provide_for_build will be provided in the build file's local symbol table.
#
# When these functions are called from a build file, they will be passed
# a keyword parameter, build_env, which is a object with information about
# the environment of the build file which is currently being processed.
# It contains the following attributes:
#
# "dirname" - The directory containing the build file.
#
# "base_path" - The base path of the build file.
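#
# For illustration, a build file evaluated by this module could call the
# provided functions directly (target names below are hypothetical):
#
#   srcs = glob(['*.java'], excludes=['Test*.java'])
#   add_deps('util', deps=['//lib:guava'])
#
# Each such call is routed through LazyBuildEnvPartial.invoke(), which injects
# the current build_env keyword argument automatically.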
BUILD_FUNCTIONS = []
BUILD_RULES_FILE_NAME = 'BUCK'
class BuildContextType(object):
"""
Identifies the type of input file to the processor.
"""
BUILD_FILE = 'build_file'
INCLUDE = 'include'
class BuildFileContext(object):
"""
The build context used when processing a build file.
"""
type = BuildContextType.BUILD_FILE
def __init__(self, base_path, dirname):
self.globals = {}
self.includes = set()
self.base_path = base_path
self.dirname = dirname
self.rules = {}
class IncludeContext(object):
"""
The build context used when processing an include.
"""
type = BuildContextType.INCLUDE
def __init__(self):
self.globals = {}
self.includes = set()
class LazyBuildEnvPartial(object):
"""Pairs a function with a build environment in which it will be executed.
Note that while the function is specified via the constructor, the build
environment must be assigned after construction, for the build environment
currently being used.
To call the function with its build environment, use the invoke() method of
this class, which will forward the arguments from invoke() to the
underlying function.
"""
def __init__(self, func):
self.func = func
self.build_env = None
def invoke(self, *args, **kwargs):
"""Invokes the bound function injecting 'build_env' into **kwargs."""
updated_kwargs = kwargs.copy()
updated_kwargs.update({'build_env': self.build_env})
return self.func(*args, **updated_kwargs)
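    # Illustrative use (names hypothetical): partial = LazyBuildEnvPartial(glob);
    # partial.build_env = build_env; partial.invoke(['*.java']) then calls
    # glob(['*.java'], build_env=build_env).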
def provide_for_build(func):
BUILD_FUNCTIONS.append(func)
return func
def add_rule(rule, build_env):
assert build_env.type == BuildContextType.BUILD_FILE, (
"Cannot use `{}()` at the top-level of an included file."
.format(rule['type']))
# Include the base path of the BUILD file so the reader consuming this
# JSON will know which BUILD file the rule came from.
if 'name' not in rule:
raise ValueError(
'rules must contain the field \'name\'. Found %s.' % rule)
rule_name = rule['name']
if rule_name in build_env.rules:
raise ValueError('Duplicate rule definition found. Found %s and %s' %
(rule, build_env.rules[rule_name]))
rule['buck.base_path'] = build_env.base_path
build_env.rules[rule_name] = rule
@provide_for_build
def glob(includes, excludes=[], include_dotfiles=False, build_env=None):
assert build_env.type == BuildContextType.BUILD_FILE, (
"Cannot use `glob()` at the top-level of an included file.")
search_base = Path(build_env.dirname)
return glob_internal(includes, excludes, include_dotfiles, search_base)
def glob_internal(includes, excludes, include_dotfiles, search_base):
# Ensure the user passes lists of strings rather than just a string.
assert not isinstance(includes, basestring), \
"The first argument to glob() must be a list of strings."
assert not isinstance(excludes, basestring), \
"The excludes argument must be a list of strings."
def includes_iterator():
for pattern in includes:
for path in search_base.glob(pattern):
# TODO(user): Handle hidden files on Windows.
if path.is_file() and (include_dotfiles or not path.name.startswith('.')):
yield path.relative_to(search_base)
def is_special(pat):
return "*" in pat or "?" in pat or "[" in pat
non_special_excludes = set()
match_excludes = set()
for pattern in excludes:
if is_special(pattern):
match_excludes.add(pattern)
else:
non_special_excludes.add(pattern)
def exclusion(path):
if str(path) in non_special_excludes:
return True
for pattern in match_excludes:
result = path.match(pattern, match_entire=True)
if result:
return True
return False
return sorted(set([str(p) for p in includes_iterator() if not exclusion(p)]))
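# Illustrative example (directory contents hypothetical): if search_base holds
# Foo.java, Bar.java and .hidden.java, then
#   glob_internal(['*.java'], ['Bar.java'], False, search_base)
# returns ['Foo.java'] -- dotfiles are skipped unless include_dotfiles is True,
# and excludes without wildcards are matched against the exact relative path.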
@provide_for_build
def get_base_path(build_env=None):
"""Get the base path to the build file that was initially evaluated.
This function is intended to be used from within a build defs file that
likely contains macros that could be called from any build file.
Such macros may need to know the base path of the file in which they
are defining new build rules.
Returns: a string, such as "java/com/facebook". Note there is no
trailing slash. The return value will be "" if called from
the build file in the root of the project.
"""
assert build_env.type == BuildContextType.BUILD_FILE, (
"Cannot use `get_base_path()` at the top-level of an included file.")
return build_env.base_path
@provide_for_build
def add_deps(name, deps=[], build_env=None):
assert build_env.type == BuildContextType.BUILD_FILE, (
"Cannot use `add_deps()` at the top-level of an included file.")
if name not in build_env.rules:
raise ValueError(
'Invoked \'add_deps\' on non-existent rule %s.' % name)
rule = build_env.rules[name]
if 'deps' not in rule:
raise ValueError(
'Invoked \'add_deps\' on rule %s that has no \'deps\' field'
% name)
rule['deps'] = rule['deps'] + deps
class BuildFileProcessor(object):
def __init__(self, project_root, implicit_includes=[]):
self._cache = {}
self._build_env_stack = []
self._project_root = project_root
self._implicit_includes = implicit_includes
lazy_functions = {}
for func in BUILD_FUNCTIONS:
func_with_env = LazyBuildEnvPartial(func)
lazy_functions[func.__name__] = func_with_env
self._functions = lazy_functions
def _merge_globals(self, src, dst):
"""
Copy the global definitions from one globals dict to another.
Ignores special attributes and attributes starting with '_', which
typically denote module-level private attributes.
"""
hidden = set([
'include_defs',
])
for key, val in src.iteritems():
if not key.startswith('_') and key not in hidden:
dst[key] = val
def _update_functions(self, build_env):
"""
Updates the build functions to use the given build context when called.
"""
for function in self._functions.itervalues():
function.build_env = build_env
def _install_functions(self, namespace):
"""
Installs the build functions, by their name, into the given namespace.
"""
for name, function in self._functions.iteritems():
namespace[name] = function.invoke
def _get_include_path(self, name):
"""
Resolve the given include def name to a full path.
"""
# Find the path from the include def name.
if not name.startswith('//'):
raise ValueError(
'include_defs argument "%s" must begin with //' % name)
        relative_path = name[2:]
        return os.path.join(self._project_root, relative_path)
def _include_defs(self, name, implicit_includes=[]):
"""
Pull the named include into the current caller's context.
This method is meant to be installed into the globals of any files or
includes that we process.
"""
# Grab the current build context from the top of the stack.
build_env = self._build_env_stack[-1]
# Resolve the named include to its path and process it to get its
# build context and module.
path = self._get_include_path(name)
inner_env, mod = self._process_include(
path,
implicit_includes=implicit_includes)
# Look up the caller's stack frame and merge the include's globals
        # into its symbol table.
frame = inspect.currentframe()
while frame.f_globals['__name__'] == __name__:
frame = frame.f_back
self._merge_globals(mod.__dict__, frame.f_globals)
# Pull in the include's accounting of its own referenced includes
# into the current build context.
build_env.includes.add(path)
build_env.includes.update(inner_env.includes)
def _push_build_env(self, build_env):
"""
Set the given build context as the current context.
"""
self._build_env_stack.append(build_env)
self._update_functions(build_env)
def _pop_build_env(self):
"""
Restore the previous build context as the current context.
"""
self._build_env_stack.pop()
if self._build_env_stack:
self._update_functions(self._build_env_stack[-1])
def _process(self, build_env, path, implicit_includes=[]):
"""
Process a build file or include at the given path.
"""
# First check the cache.
cached = self._cache.get(path)
if cached is not None:
return cached
# Install the build context for this input as the current context.
self._push_build_env(build_env)
# The globals dict that this file will be executed under.
default_globals = {}
# Install the implicit build functions and adding the 'include_defs'
# functions.
self._install_functions(default_globals)
default_globals['include_defs'] = functools.partial(
self._include_defs,
implicit_includes=implicit_includes)
# If any implicit includes were specified, process them first.
for include in implicit_includes:
include_path = self._get_include_path(include)
inner_env, mod = self._process_include(include_path)
self._merge_globals(mod.__dict__, default_globals)
build_env.includes.add(include_path)
build_env.includes.update(inner_env.includes)
# Build a new module for the given file, using the default globals
# created above.
module = imp.new_module(path)
module.__file__ = path
module.__dict__.update(default_globals)
with open(path) as f:
contents = f.read()
# Enable absolute imports. This prevents the compiler from trying to
# do a relative import first, and warning that this module doesn't
# exist in sys.modules.
future_features = __future__.absolute_import.compiler_flag
code = compile(contents, path, 'exec', future_features, 1)
exec(code, module.__dict__)
# Restore the previous build context.
self._pop_build_env()
self._cache[path] = build_env, module
return build_env, module
def _process_include(self, path, implicit_includes=[]):
"""
Process the include file at the given path.
"""
build_env = IncludeContext()
return self._process(
build_env,
path,
implicit_includes=implicit_includes)
def _process_build_file(self, path, implicit_includes=[]):
"""
Process the build file at the given path.
"""
# Create the build file context, including the base path and directory
# name of the given path.
relative_path_to_build_file = os.path.relpath(
path, self._project_root).replace('\\', '/')
len_suffix = -len('/' + BUILD_RULES_FILE_NAME)
base_path = relative_path_to_build_file[:len_suffix]
dirname = os.path.dirname(path)
build_env = BuildFileContext(base_path, dirname)
return self._process(
build_env,
path,
implicit_includes=implicit_includes)
def process(self, path):
"""
        Process a build file, returning a list of its rules plus an entry recording its includes.
"""
build_env, mod = self._process_build_file(
os.path.join(self._project_root, path),
implicit_includes=self._implicit_includes)
values = build_env.rules.values()
values.append({"__includes": [path] + sorted(build_env.includes)})
return values
# Inexplicably, this script appears to run faster when the arguments passed
# into it are absolute paths. However, we want the "buck.base_path" property
# of each rule to be printed out to be the base path of the build target that
# identifies the rule. That means that when parsing a BUILD file, we must know
# its path relative to the root of the project to produce the base path.
#
# To that end, the first argument to this script must be an absolute path to
# the project root. It must be followed by one or more absolute paths to
# BUILD files under the project root. If no paths to BUILD files are
# specified, then it will traverse the project root for BUILD files, excluding
# directories of generated files produced by Buck.
#
# All of the build rules that are parsed from the BUILD files will be printed
# to stdout by a JSON parser. That means that printing out other information
# for debugging purposes will likely break the JSON parsing, so be careful!
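#
# Example invocation (paths are illustrative):
#   python buck.py --project_root /abs/project /abs/project/foo/BUCK
# With --server, additional BUCK file paths are read from stdin one per line
# and a JSON list of rules is printed for each.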
def main():
parser = optparse.OptionParser()
parser.add_option(
'--project_root',
action='store',
type='string',
dest='project_root')
parser.add_option(
'--include',
action='append',
dest='include')
parser.add_option(
'--server',
action='store_true',
dest='server',
help='Invoke as a server to parse individual BUCK files on demand.')
(options, args) = parser.parse_args()
# Even though project_root is absolute path, it may not be concise. For
# example, it might be like "C:\project\.\rule".
project_root = os.path.abspath(options.project_root)
buildFileProcessor = BuildFileProcessor(
project_root,
implicit_includes=options.include or [])
for build_file in args:
values = buildFileProcessor.process(build_file)
if options.server:
print json.dumps(values)
else:
for value in values:
print json.dumps(value)
if options.server:
# "for ... in sys.stdin" in Python 2.x hangs until stdin is closed.
for build_file in iter(sys.stdin.readline, ''):
values = buildFileProcessor.process(build_file.rstrip())
print json.dumps(values)
# Python tries to flush/close stdout when it quits, and if there's a dead
# pipe on the other end, it will spit some warnings to stderr. This breaks
# tests sometimes. Prevent that by explicitly catching the error.
try:
sys.stdout.close()
except IOError:
pass
| apache-2.0 | 680,049,148,045,231,000 | 32.709052 | 90 | 0.630778 | false | 4.238753 | false | false | false |
big-max/ansible | playbooks/deploy/callback_plugins/callback.py | 1 | 7031 | import os
import time
import json
import datetime
import pdb
import ConfigParser
from pymongo import MongoClient
from ansible.plugins.callback import CallbackBase
from websocket import create_connection,WebSocket
#coding=utf8
def get_conf(cls,key):
cf=ConfigParser.ConfigParser()
cf.read('/opt/tornado/conf/app.ini')
retData=cf.get(cls,key)
return retData
TIME_FORMAT='%Y-%m-%d %H:%M:%S'
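# Numeric task/playbook states that get written to MongoDB and pushed over the websocket.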
statuscode = {'started':0, 'ongoing':1, 'ok':2,'skipped':4, 'unreachable':3, 'failed':3}
mongoinfo = {"host":"127.0.0.1","port":"27017","user":"","password":"","dbname":"ams"}
class CallbackModule(CallbackBase):
def __init__(self):
self.completed_task = 0
self.playbookuuid = None
self.task = None
self.res=None
self.iplist=None
        self.errip=None # the IP on which an error occurred
self.ws=None # websocket
def db(self):
dbhost = mongoinfo['host']
dbport = mongoinfo['port']
dbuser = mongoinfo['user']
dbpwd = mongoinfo['password']
dbname = mongoinfo['dbname']
uri = 'mongodb://%s:%s'%(dbhost,dbport)
client = MongoClient(uri)
db = client.ams
return db
# create ws
def ws_create_server(self):
ip=get_conf('websocket','host')
if self.ws == None:
self.ws = create_connection("ws://"+ip+"/itoa/updatePlaybookStatus")
#update the uuid's status
def ws_send_status(self,uuid,status):
last_status = str(uuid) +','+str(status)
self.ws.send(last_status)
def ws_receive_status(self):
result=self.ws.recv()
# destroy the server
def ws_close_server(self):
if self.ws != None:
self.ws.close()
self.ws = None
def v2_playbook_on_start(self, playbook):
now=time.time()
self.playbook=playbook
if self.playbook._entries[0]._variable_manager.extra_vars['playbook-uuid']:
self.playbookuuid=self.playbook._entries[0]._variable_manager.extra_vars['playbook-uuid']
iplist=self.playbook._entries[0]._variable_manager.extra_vars['ip_list']
self.iplist=iplist
hostnamelist=self.playbook._entries[0]._variable_manager.extra_vars['hostname_list']
newdict=dict(zip(iplist,hostnamelist))
for (key,value) in newdict.items():
self.db().servers.update({'ip':key},{'$set':{'name':value}})
uuids= self.playbookuuid.encode('gbk')
self.db().playbooks.update({"uuid":uuids},{'$set':{"status":statuscode.get('ongoing'),"updated_at":now}})
self.ws_create_server()
#time.sleep(3) # sleep for page lazy load show
self.ws_send_status(uuids,statuscode.get('ongoing'))
self.ws_receive_status()
self.ws_close_server()
def v2_playbook_on_play_start(self, play):
pass
def v2_playbook_on_task_start(self, task, is_conditional):
now = time.time()
self.task=task
allips=self.iplist
if self.errip:
if self.errip in allips:
allips.remove(self.errip)
for okip in allips:
self.db().tasks.update({"name":task.get_name(),"host":okip,"playbook_uuid":self.playbookuuid},{'$set':{"status":statuscode.get('ongoing'),"updated_at":now}},\
upsert=False,multi=False)
def v2_playbook_on_stats(self, stats):
self.stats = stats
self.ws_create_server()
if self.stats.dark or self.stats.failures :
self.playbook_final_status(self.playbookuuid,'failed')
self.ws_send_status(self.playbookuuid,statuscode.get('failed'))
self.ws_receive_status()
else:
self.playbook_final_status(self.playbookuuid,'ok')
self.ws_send_status(self.playbookuuid,statuscode.get('ok'))
self.ws_receive_status()
self.ws_close_server()
def playbook_final_status(self,playbookuuid,status):
now = time.time()
self.db().playbooks.update({'uuid':playbookuuid},{'$set':{"status":statuscode.get(status),"updated_at":now}},upsert=False,multi=False)
def v2_on_any(self, *args, **kwargs):
pass
    def v2_runner_on_ok(self, result):
        self.res=result
        # Changed and unchanged results are recorded the same way.
        self.UpdateLog(self.res,self.playbookuuid,'ok')
def v2_runner_on_unreachable(self, result):
self.res = result
self.UpdateLog(self.res,self.playbookuuid, 'unreachable')
def v2_runner_on_failed(self, result, ignore_errors=False):
self.res = result
self.UpdateLog(self.res,self.playbookuuid, 'failed')
def v2_runner_on_skipped(self, result):
self.res=result
self.UpdateLog(self.res,self.playbookuuid, 'skipped')
# method nameMapIP is used to translate hostname to ip
def nameMapIP(self,namelist,iplist):
dict={}
namelen=len(namelist)
iplen = len(iplist)
if namelen == iplen:
i=0
while i < namelen:
dict[namelist[i]]=iplist[i]
i = i+1
return dict
    def UpdateLog(self, values, playbook_uuid, status, type=None):
now = time.time()
if status == 'started' or str(self.task) == 'TASK: setup':
pass
else:
hostsdict=dict(zip(self.task.get_variable_manager().extra_vars['hostname_list'],self.task.get_variable_manager().extra_vars['ip_list']))
if self.errip:
for (key,value) in hostsdict.items(): #judge if exists errip
if value == self.errip:
hostsdict.pop(key)
host=None
if values._host.get_name() =='localhost' or values._host.get_name() =='127.0.0.1':
host='127.0.0.1'
else:
host=str(hostsdict[values._host.get_name()])
#print playbook_uuid + host + self.task.get_name() +'xxx'+ str(statuscode.get(status))
self.db().tasks.update({"playbook_uuid":playbook_uuid, "host":host, "name":self.task.get_name()},{'$set':{"status":statuscode.get(status),"updated_at":now}})
if status == 'failed' or status == 'unreachable':
self.errip=host # where ip has error ,save to errip
self.completed_task = self.completed_task + 1
if values._result.has_key('msg'):
self.db().playbooks.update({'uuid':playbook_uuid},{'$set':{'msg':values._result['msg']}})
if values._result.has_key('stderr'):
self.db().playbooks.update({'uuid':playbook_uuid},{'$set':{'stderr':values._result['stderr']}})
if values._result.has_key('stdout'):
self.db().playbooks.update({'uuid':playbook_uuid},{'$set':{'stdout':values._result['stdout']}})
elif status == 'ok' or status == 'skipped':
self.completed_task = self.completed_task + 1
self.db().playbooks.update({"uuid":playbook_uuid},{'$set':{"completed":self.completed_task, "updated_at":now}})
else:
pass
| gpl-3.0 | 8,567,472,610,853,746,000 | 38.723164 | 170 | 0.602759 | false | 3.455037 | false | false | false |
eduNEXT/edunext-ecommerce | ecommerce/extensions/voucher/tests/test_utils.py | 1 | 33039 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import uuid
import ddt
import httpretty
from django.core.exceptions import ValidationError
from django.db import IntegrityError
from django.test import override_settings
from django.utils.translation import ugettext_lazy as _
from factory.fuzzy import FuzzyText
from oscar.templatetags.currency_filters import currency
from oscar.test.factories import (
BenefitFactory,
ConditionalOfferFactory,
OrderFactory,
OrderLineFactory,
RangeFactory,
VoucherFactory,
datetime,
get_model
)
from six.moves import range
from ecommerce.core.url_utils import get_ecommerce_url
from ecommerce.coupons.tests.mixins import CouponMixin, DiscoveryMockMixin
from ecommerce.courses.tests.factories import CourseFactory
from ecommerce.entitlements.utils import create_or_update_course_entitlement
from ecommerce.extensions.api import exceptions
from ecommerce.extensions.catalogue.tests.mixins import DiscoveryTestMixin
from ecommerce.extensions.fulfillment.modules import CouponFulfillmentModule
from ecommerce.extensions.fulfillment.status import LINE
from ecommerce.extensions.offer.models import OFFER_PRIORITY_VOUCHER
from ecommerce.extensions.test.factories import create_order, prepare_voucher
from ecommerce.extensions.voucher.utils import (
create_vouchers,
generate_coupon_report,
get_voucher_and_products_from_code,
get_voucher_discount_info,
update_voucher_offer
)
from ecommerce.tests.factories import UserFactory
from ecommerce.tests.mixins import LmsApiMockMixin
from ecommerce.tests.testcases import TestCase
Basket = get_model('basket', 'Basket')
Benefit = get_model('offer', 'Benefit')
Catalog = get_model('catalogue', 'Catalog')
CouponVouchers = get_model('voucher', 'CouponVouchers')
Order = get_model('order', 'Order')
Product = get_model('catalogue', 'Product')
ProductCategory = get_model('catalogue', 'ProductCategory')
ProductClass = get_model('catalogue', 'ProductClass')
StockRecord = get_model('partner', 'StockRecord')
Voucher = get_model('voucher', 'Voucher')
VOUCHER_CODE = "XMASC0DE"
VOUCHER_CODE_LENGTH = 1
@ddt.ddt
@httpretty.activate
class UtilTests(CouponMixin, DiscoveryMockMixin, DiscoveryTestMixin, LmsApiMockMixin, TestCase):
course_id = 'edX/DemoX/Demo_Course'
certificate_type = 'test-certificate-type'
provider = None
def setUp(self):
super(UtilTests, self).setUp()
self.user = self.create_user(full_name="Tešt Ušer", is_staff=True)
self.client.login(username=self.user.username, password=self.password)
self.course = CourseFactory(id='course-v1:test-org+course+run', partner=self.partner)
self.verified_seat = self.course.create_or_update_seat('verified', False, 100)
self.catalog = Catalog.objects.create(partner=self.partner)
self.stock_record = StockRecord.objects.filter(product=self.verified_seat).first()
self.seat_price = self.stock_record.price_excl_tax
self.catalog.stock_records.add(self.stock_record)
self.coupon = self.create_coupon(
title='Tešt product',
catalog=self.catalog,
note='Tešt note',
quantity=1,
max_uses=1,
voucher_type=Voucher.MULTI_USE
)
self.coupon.history.all().update(history_user=self.user)
self.coupon_vouchers = CouponVouchers.objects.filter(coupon=self.coupon)
self.entitlement = create_or_update_course_entitlement(
'verified', 100, self.partner, 'foo-bar', 'Foo Bar Entitlement'
)
self.entitlement_stock_record = StockRecord.objects.filter(product=self.entitlement).first()
self.entitlement_catalog = Catalog.objects.create(partner=self.partner)
self.entitlement_catalog.stock_records.add(self.entitlement_stock_record)
self.entitlement_coupon = self.create_coupon(
title='Tešt Entitlement product',
catalog=self.entitlement_catalog,
note='Tešt Entitlement note',
quantity=1,
max_uses=1,
voucher_type=Voucher.MULTI_USE
)
self.entitlement_coupon_vouchers = CouponVouchers.objects.filter(coupon=self.entitlement_coupon)
self.partner_sku = 'test_sku'
self.data = {
'benefit_type': Benefit.PERCENTAGE,
'benefit_value': 100.00,
'catalog': self.catalog,
'coupon': self.coupon,
'end_datetime': datetime.datetime.now() + datetime.timedelta(days=1),
'enterprise_customer': None,
'enterprise_customer_catalog': None,
'name': "Test voucher",
'quantity': 10,
'start_datetime': datetime.datetime.now() - datetime.timedelta(days=1),
'voucher_type': Voucher.SINGLE_USE
}
def create_benefits(self):
"""
Create all Benefit permutations
- Benefit type: Percentage, Benefit value: 100%
- Benefit type: Percentage, Benefit value: 50%
- Benefit type: Value, Benefit value: seat price
- Benefit type: Value, Benefit value: half the seat price
"""
_range = RangeFactory(products=[self.verified_seat, ])
benefit_percentage_all = BenefitFactory(type=Benefit.PERCENTAGE, range=_range, value=100.00)
benefit_percentage_half = BenefitFactory(type=Benefit.PERCENTAGE, range=_range, value=50.00)
benefit_value_all = BenefitFactory(type=Benefit.FIXED, range=_range, value=self.seat_price)
benefit_value_half = BenefitFactory(type=Benefit.FIXED, range=_range, value=self.seat_price / 2)
return [benefit_percentage_all, benefit_percentage_half, benefit_value_all, benefit_value_half]
def setup_coupons_for_report(self):
""" Create specific coupons to test report generation """
self.data.update({
'benefit_value': 50.00,
'code': VOUCHER_CODE,
'max_uses': 1,
'name': 'Discount',
'quantity': 1,
'voucher_type': Voucher.ONCE_PER_CUSTOMER
})
vouchers = create_vouchers(**self.data)
self.coupon_vouchers.first().vouchers.add(*vouchers)
del self.data['code']
del self.data['max_uses']
self.data.update({
'benefit_type': Benefit.FIXED,
'benefit_value': 100.00,
'voucher_type': Voucher.SINGLE_USE
})
vouchers = create_vouchers(**self.data)
self.coupon_vouchers.first().vouchers.add(*vouchers)
def create_catalog_coupon(
self,
coupon_title='Query coupon',
quantity=1,
catalog_query='*:*',
course_seat_types='verified'
):
self.mock_course_runs_endpoint(self.site_configuration.discovery_api_url)
return self.create_coupon(
title=coupon_title,
quantity=quantity,
catalog_query=catalog_query,
course_seat_types=course_seat_types
)
def create_course_catalog_coupon(self, coupon_title, quantity, course_catalog, course_seat_types):
return self.create_coupon(
title=coupon_title,
quantity=quantity,
course_catalog=course_catalog,
course_seat_types=course_seat_types,
)
def use_voucher(self, order_num, voucher, user, add_entitlement=False, product=None):
"""
Mark voucher as used by provided users
Args:
order_num (string): Order number
voucher (Voucher): voucher to be marked as used
users (list): list of users
"""
order = OrderFactory(number=order_num)
if add_entitlement:
order_line = OrderLineFactory(product=self.entitlement, partner_sku=self.partner_sku)
order.lines.add(order_line)
product = product if product else self.verified_seat
order_line = OrderLineFactory(product=product, partner_sku=self.partner_sku)
order.lines.add(order_line)
voucher.record_usage(order, user)
voucher.offers.first().record_usage(discount={'freq': 1, 'discount': 1})
def validate_report_of_redeemed_vouchers(self, row, username, order_num):
""" Helper method for validating coupon report data for when a coupon was redeemed. """
self.assertEqual(row['Status'], _('Redeemed'))
self.assertEqual(row['Redeemed By Username'], username)
self.assertEqual(row['Order Number'], order_num)
def test_create_vouchers(self):
"""
Test voucher creation
"""
email_domains = 'edx.org,example.com'
self.data.update({
'email_domains': email_domains,
'name': 'Tešt voučher',
'site': self.site
})
vouchers = create_vouchers(**self.data)
self.assertEqual(len(vouchers), 10)
voucher = vouchers[0]
voucher_offer = voucher.offers.first()
coupon_voucher = CouponVouchers.objects.get(coupon=self.coupon)
coupon_voucher.vouchers.add(*vouchers)
self.assertEqual(voucher_offer.benefit.type, Benefit.PERCENTAGE)
self.assertEqual(voucher_offer.benefit.max_affected_items, None)
self.assertEqual(voucher_offer.benefit.value, 100.00)
self.assertEqual(voucher_offer.benefit.range.catalog, self.catalog)
self.assertEqual(voucher_offer.email_domains, email_domains)
self.assertEqual(voucher_offer.priority, OFFER_PRIORITY_VOUCHER)
self.assertEqual(voucher_offer.partner, self.partner)
self.assertEqual(len(coupon_voucher.vouchers.all()), 11)
self.assertEqual(voucher.end_datetime, self.data['end_datetime'])
self.assertEqual(voucher.start_datetime, self.data['start_datetime'])
self.assertEqual(voucher.usage, Voucher.SINGLE_USE)
def test_create_voucher_with_long_name(self):
self.data.update({
'name': (
'This Is A Really Really Really Really Really Really Long '
'Voucher Name That Needs To Be Trimmed To Fit Into The Name Column Of The DB'
)
})
trimmed = (
'This Is A Really Really Really Really Really Really Long '
'Voucher Name That Needs To Be Trimmed To Fit Into The Name Column Of Th'
)
vouchers = create_vouchers(**self.data)
voucher = vouchers[0]
self.assertEqual(voucher.name, trimmed)
@ddt.data(
{'end_datetime': ''},
{'end_datetime': 3},
{'end_datetime': 'nonumbers'},
{'start_datetime': ''},
{'start_datetime': 3},
{'start_datetime': 'nonumbers'},
)
def test_create_vouchers_with_incorrect_datetime_value(self, data):
""" Test calling create vouchers with incorrect start/end datetime value raises exception. """
self.data.update(data)
with self.assertRaises(ValidationError):
create_vouchers(**self.data)
@override_settings(VOUCHER_CODE_LENGTH=VOUCHER_CODE_LENGTH)
def test_regenerate_voucher_code(self):
"""
Test that voucher code will be regenerated if it already exists
"""
self.data.update({
'benefit_value': 90.00,
'quantity': 1
})
for code in 'BCDFGHJKL':
self.data['code'] = code
create_vouchers(**self.data)
del self.data['code']
for __ in range(20):
voucher = create_vouchers(**self.data)
self.assertTrue(Voucher.objects.filter(code__iexact=voucher[0].code).exists())
@override_settings(VOUCHER_CODE_LENGTH=0)
def test_nonpositive_voucher_code_length(self):
"""
Test that setting a voucher code length to a nonpositive integer value
raises a ValueError
"""
with self.assertRaises(ValueError):
create_vouchers(**self.data)
def test_create_discount_coupon(self):
"""
Test discount voucher creation with specified code
"""
self.data.update({
'benefit_value': 25.00,
'code': VOUCHER_CODE,
'quantity': 1
})
discount_vouchers = create_vouchers(**self.data)
self.assertEqual(len(discount_vouchers), 1)
self.assertEqual(discount_vouchers[0].code, VOUCHER_CODE)
with self.assertRaises(IntegrityError):
create_vouchers(**self.data)
def test_create_course_catalog_coupon(self):
"""
Test course catalog coupon voucher creation with specified catalog id.
"""
coupon_title = 'Course catalog coupon'
quantity = 1
course_catalog = 1
course_catalog_coupon = self.create_course_catalog_coupon(
coupon_title=coupon_title,
quantity=quantity,
course_catalog=course_catalog,
course_seat_types='verified',
)
self.assertEqual(course_catalog_coupon.title, coupon_title)
course_catalog_vouchers = course_catalog_coupon.attr.coupon_vouchers.vouchers.all()
self.assertEqual(course_catalog_vouchers.count(), quantity)
course_catalog_voucher_range = course_catalog_vouchers.first().offers.first().benefit.range
self.assertEqual(course_catalog_voucher_range.course_catalog, course_catalog)
def test_create_program_coupon(self):
"""
Test program coupon voucher creation with specified program uuid.
"""
coupon_title = 'Program coupon'
quantity = 1
program_uuid = uuid.uuid4()
program_coupon = self.create_coupon(
title=coupon_title,
quantity=quantity,
program_uuid=program_uuid,
course_seat_types='verified',
)
self.assertEqual(program_coupon.title, coupon_title)
program_vouchers = program_coupon.attr.coupon_vouchers.vouchers.all()
program_voucher_offer = program_vouchers.first().offers.first()
self.assertEqual(program_vouchers.count(), quantity)
self.assertEqual(program_voucher_offer.condition.program_uuid, program_uuid)
def assert_report_first_row(self, row, coupon, voucher):
"""
Verify that the first row fields contain the right data.
Args:
row (list): First row in report
coupon (Product): Coupon for which the report is generated
voucher (Voucher): Voucher associated with the Coupon
"""
offer = voucher.offers.first()
if offer.condition.range.catalog:
discount_data = get_voucher_discount_info(
offer.benefit,
offer.condition.range.catalog.stock_records.first().price_excl_tax
)
coupon_type = _('Discount') if discount_data['is_discounted'] else _('Enrollment')
discount_percentage = _('{percentage} %').format(percentage=discount_data['discount_percentage'])
discount_amount = currency(discount_data['discount_value'])
else:
if offer.benefit.type == Benefit.PERCENTAGE:
coupon_type = _('Discount') if offer.benefit.value < 100 else _('Enrollment')
else:
coupon_type = None
discount_amount = None
discount_percentage = _('{percentage} %').format(
percentage=offer.benefit.value) if offer.benefit.type == Benefit.PERCENTAGE else None
self.assertEqual(row['Coupon Type'], coupon_type)
self.assertEqual(row['Category'], ProductCategory.objects.get(product=coupon).category.name)
self.assertEqual(row['Discount Percentage'], discount_percentage)
self.assertEqual(row['Discount Amount'], discount_amount)
self.assertEqual(row['Client'], coupon.client.name)
self.assertEqual(row['Note'], coupon.attr.note)
self.assertEqual(row['Create Date'], coupon.date_updated.strftime("%b %d, %y"))
self.assertEqual(row['Coupon Start Date'], voucher.start_datetime.strftime("%b %d, %y"))
self.assertEqual(row['Coupon Expiry Date'], voucher.end_datetime.strftime("%b %d, %y"))
def assert_report_row(self, row, voucher):
"""
Verify that the row fields contain the right data.
Args:
row (list): Non first row in report
coupon (Product): Coupon for which the report is generated
voucher (Voucher): Voucher associated with the Coupon
"""
offer = voucher.offers.first()
if voucher.usage == Voucher.SINGLE_USE:
max_uses_count = 1
elif voucher.usage != Voucher.SINGLE_USE and offer.max_global_applications is None:
max_uses_count = 10000
else:
max_uses_count = offer.max_global_applications
self.assertEqual(row['Maximum Coupon Usage'], max_uses_count)
self.assertEqual(row['Code'], voucher.code)
self.assertEqual(
row['URL'],
get_ecommerce_url() + self.REDEMPTION_URL.format(voucher.code)
)
def test_generate_coupon_report_for_entitlement(self):
""" Verify the coupon report is generated properly in case of entitlements. """
self.data['coupon'] = self.entitlement_coupon
self.data['catalog'] = self.entitlement_catalog
self.coupon_vouchers = self.entitlement_coupon_vouchers
self.setup_coupons_for_report()
client = UserFactory()
basket = Basket.get_basket(client, self.site)
basket.add_product(self.entitlement_coupon)
vouchers = self.coupon_vouchers.first().vouchers.all()
self.use_voucher('TESTORDER1', vouchers[1], self.user, add_entitlement=True)
self.mock_course_api_response(course=self.course)
try:
generate_coupon_report(self.coupon_vouchers)
except TypeError:
            self.fail("TypeError raised unexpectedly!")
def test_generate_coupon_report(self):
""" Verify the coupon report is generated properly. """
self.setup_coupons_for_report()
client = UserFactory()
basket = Basket.get_basket(client, self.site)
basket.add_product(self.coupon)
vouchers = self.coupon_vouchers.first().vouchers.all()
self.use_voucher('TESTORDER1', vouchers[1], self.user)
user2 = UserFactory()
self.use_voucher('TESTORDER2', vouchers[2], self.user)
self.use_voucher('TESTORDER3', vouchers[2], user2)
self.mock_course_api_response(course=self.course)
field_names, rows = generate_coupon_report(self.coupon_vouchers)
self.assertEqual(field_names, [
'Code',
'Coupon Name',
'Maximum Coupon Usage',
'Redemption Count',
'Coupon Type',
'URL',
'Course ID',
'Organization',
'Client',
'Category',
'Note',
'Price',
'Invoiced Amount',
'Discount Percentage',
'Discount Amount',
'Status',
'Order Number',
'Redeemed By Username',
'Create Date',
'Coupon Start Date',
'Coupon Expiry Date',
'Email Domains',
])
voucher = Voucher.objects.get(name=rows[0]['Coupon Name'])
self.assert_report_first_row(rows.pop(0), self.coupon, voucher)
for row in rows:
voucher = Voucher.objects.get(code=row['Code'])
self.assert_report_row(row, voucher)
self.assertNotIn('Catalog Query', field_names)
self.assertNotIn('Course Seat Types', field_names)
self.assertNotIn('Redeemed For Course ID', field_names)
def test_report_for_dynamic_coupon_with_fixed_benefit_type(self):
""" Verify the coupon report contains correct data for coupon with fixed benefit type. """
dynamic_coupon = self.create_coupon(
benefit_type=Benefit.FIXED,
benefit_value=50,
catalog_query='*:*',
course_seat_types='verified',
max_uses=1,
note='Tešt note',
quantity=1,
title='Tešt product',
voucher_type=Voucher.MULTI_USE
)
coupon_voucher = CouponVouchers.objects.get(coupon=dynamic_coupon)
__, rows = generate_coupon_report([coupon_voucher])
voucher = coupon_voucher.vouchers.first()
self.assert_report_first_row(rows[0], dynamic_coupon, voucher)
def test_generate_coupon_report_with_deleted_product(self):
""" Verify the coupon report contains correct data for coupon with fixed benefit type. """
course = CourseFactory(id='course-v1:del-org+course+run', partner=self.partner)
professional_seat = course.create_or_update_seat('professional', False, 100)
query_coupon = self.create_catalog_coupon(catalog_query='course:*')
vouchers = query_coupon.attr.coupon_vouchers.vouchers.all()
first_voucher = vouchers.first()
self.use_voucher('TESTORDER1', first_voucher, self.user, product=professional_seat)
professional_seat.delete()
__, rows = generate_coupon_report([query_coupon.attr.coupon_vouchers])
self.assert_report_first_row(rows[0], query_coupon, first_voucher)
self.assertDictContainsSubset({'Redeemed For Course ID': 'Unknown'}, rows[2])
def test_report_for_inactive_coupons(self):
""" Verify the coupon report show correct status for inactive coupons. """
self.data.update({
'name': self.coupon.title,
'end_datetime': datetime.datetime.now() - datetime.timedelta(days=1)
})
vouchers = create_vouchers(**self.data)
self.coupon_vouchers.first().vouchers.add(*vouchers)
__, rows = generate_coupon_report(self.coupon_vouchers)
# The data that is the same for all vouchers like Coupon Name, Coupon Type, etc.
# are only shown in row[0]
# The data that is unique among vouchers like Code, Url, Status, etc.
# starts from row[1]
self.assertEqual(rows[0]['Coupon Name'], self.coupon.title)
self.assertEqual(rows[2]['Status'], _('Inactive'))
def test_generate_coupon_report_for_query_coupons(self):
""" Verify empty report fields for query coupons. """
catalog_query = 'course:*'
self.mock_course_runs_endpoint(self.site_configuration.discovery_api_url)
query_coupon = self.create_catalog_coupon(catalog_query=catalog_query)
field_names, rows = generate_coupon_report([query_coupon.attr.coupon_vouchers])
empty_fields = (
'Discount Amount',
'Price',
)
for field in empty_fields:
self.assertIsNone(rows[0][field])
self.assertNotIn('Course ID', field_names)
self.assertNotIn('Organization', field_names)
self.assertNotIn('Program UUID', field_names)
self.assertIn('Catalog Query', field_names)
self.assertEqual(rows[0]['Catalog Query'], catalog_query)
self.assertIn('Course Seat Types', field_names)
self.assertEqual(rows[0]['Course Seat Types'], 'verified')
self.assertIn('Redeemed For Course ID', field_names)
self.assertNotIn('Redeemed For Course ID', rows[0])
self.assertIn('Redeemed For Course IDs', field_names)
self.assertNotIn('Redeemed For Course IDs', rows[0])
def test_get_voucher_discount_info(self):
""" Verify that get_voucher_discount_info() returns correct info. """
benefits = self.create_benefits()
for benefit in benefits:
discount_info = get_voucher_discount_info(benefit, self.seat_price)
if (benefit.type == "Percentage" and benefit.value == 100.00) or \
(benefit.type == "Absolute" and benefit.value == self.seat_price):
self.assertEqual(discount_info['discount_percentage'], 100.00)
self.assertEqual(discount_info['discount_value'], 100.00)
self.assertFalse(discount_info['is_discounted'])
else:
self.assertEqual(discount_info['discount_percentage'], 50.00)
self.assertEqual(discount_info['discount_value'], 50.00)
self.assertTrue(discount_info['is_discounted'])
discount_info = get_voucher_discount_info(benefit, 0.0)
self.assertEqual(discount_info['discount_percentage'], 0.00)
self.assertEqual(discount_info['discount_value'], 0.00)
self.assertFalse(discount_info['is_discounted'])
discount_info = get_voucher_discount_info(None, 0.0)
self.assertEqual(discount_info['discount_percentage'], 0.00)
self.assertEqual(discount_info['discount_value'], 0.00)
self.assertFalse(discount_info['is_discounted'])
discount_info = get_voucher_discount_info(None, self.seat_price)
self.assertEqual(discount_info['discount_percentage'], 0.00)
self.assertEqual(discount_info['discount_value'], 0.00)
self.assertFalse(discount_info['is_discounted'])
discount_info = get_voucher_discount_info(benefits[-1], 20.00)
self.assertEqual(discount_info['discount_percentage'], 100.00)
self.assertEqual(discount_info['discount_value'], 20.00)
self.assertFalse(discount_info['is_discounted'])
def test_multiple_usage_coupon(self):
"""Test that multiple-usage coupon is created and the usage number decreased on usage."""
# Verify that the created voucher has two possible applications.
voucher = self.coupon.attr.coupon_vouchers.vouchers.first()
self.assertEqual(voucher.offers.first().get_max_applications(), 1)
# Verify that the voucher now has been applied and usage number decreased.
basket = self.apply_voucher(self.user, self.site, voucher)
order = create_order(basket=basket, user=self.user)
lines = order.lines.all()
order, completed_lines = CouponFulfillmentModule().fulfill_product(order, lines)
self.assertEqual(completed_lines[0].status, LINE.COMPLETE)
self.assertEqual(len(basket.applied_offers()), 1)
self.assertEqual(voucher.offers.first().get_max_applications(), 0)
# Verify that the voucher with now 0 usage number wasn't applied to the basket.
new_basket = self.apply_voucher(self.user, self.site, voucher)
self.assertEqual(len(new_basket.applied_offers()), 0)
def test_single_use_redemption_count(self):
"""Verify redemption count does not increment for other, unused, single-use vouchers."""
coupon = self.create_coupon(
title='Test single use',
catalog=self.catalog,
quantity=2
)
vouchers = coupon.attr.coupon_vouchers.vouchers.all()
self.use_voucher('TEST', vouchers[0], self.user)
__, rows = generate_coupon_report([coupon.attr.coupon_vouchers])
# rows[0] - This row is different from other rows
# rows[1] - first voucher header row
# rows[2] - first voucher row with usage information
# rows[3] - second voucher header row
self.assertEqual(len(rows), 4)
self.assertEqual(rows[1]['Redemption Count'], 1)
self.assertEqual(rows[2]['Redeemed By Username'], self.user.username)
self.assertEqual(rows[3]['Redemption Count'], 0)
def test_generate_coupon_report_for_used_query_coupon(self):
"""Test that used query coupon voucher reports which course was it used for."""
catalog_query = '*:*'
self.mock_course_runs_endpoint(
self.site_configuration.discovery_api_url, query=catalog_query, course_run=self.course
)
self.mock_course_runs_contains_endpoint(
course_run_ids=[self.verified_seat.course_id], query=catalog_query,
discovery_api_url=self.site_configuration.discovery_api_url
)
query_coupon = self.create_catalog_coupon(catalog_query=catalog_query)
voucher = query_coupon.attr.coupon_vouchers.vouchers.first()
voucher.offers.first().condition.range.add_product(self.verified_seat)
self.use_voucher('TESTORDER4', voucher, self.user)
field_names, rows = generate_coupon_report([query_coupon.attr.coupon_vouchers])
self.assertIn('Redeemed For Course ID', field_names)
self.assertIn('Redeemed By Username', field_names)
self.assertEqual(rows[-1]['Redeemed By Username'], self.user.username)
self.assertEqual(rows[-1]['Redeemed For Course ID'], self.course.id)
def test_generate_coupon_report_for_query_coupon_with_multi_line_order(self):
"""
Test that coupon report for a query coupon that was used on multi-line order
contains ids from all courses in that order.
"""
course1 = CourseFactory()
course2 = CourseFactory()
order = OrderFactory(number='TESTORDER')
order.lines.add(
OrderLineFactory(
product=course1.create_or_update_seat('verified', False, 101),
partner_sku=self.partner_sku
)
)
order.lines.add(
OrderLineFactory(
product=course2.create_or_update_seat('verified', False, 110),
partner_sku=self.partner_sku
)
)
query_coupon = self.create_catalog_coupon(catalog_query='*:*')
voucher = query_coupon.attr.coupon_vouchers.vouchers.first()
voucher.record_usage(order, self.user)
field_names, rows = generate_coupon_report([query_coupon.attr.coupon_vouchers])
        expected_redeemed_course_ids = '{}, {}'.format(course1.id, course2.id)
        self.assertEqual(rows[-1]['Redeemed For Course IDs'], expected_redeemed_course_ids)
self.assertEqual(rows[-1].get('Redeemed For Course ID'), None)
self.assertIn('Redeemed For Course ID', field_names)
self.assertIn('Redeemed For Course IDs', field_names)
def test_update_voucher_offer(self):
"""Test updating a voucher."""
self.data['email_domains'] = 'example.com'
vouchers = create_vouchers(**self.data)
voucher = vouchers[0]
voucher_offer = voucher.offers.first()
self.assertEqual(voucher_offer.benefit.type, Benefit.PERCENTAGE)
self.assertEqual(voucher_offer.benefit.value, 100.00)
self.assertEqual(voucher_offer.benefit.range.catalog, self.catalog)
new_email_domains = 'example.org'
new_offer = update_voucher_offer(
voucher_offer, 50.00, Benefit.PERCENTAGE,
email_domains=new_email_domains
)
self.assertEqual(new_offer.benefit.type, Benefit.PERCENTAGE)
self.assertEqual(new_offer.benefit.value, 50.00)
self.assertEqual(new_offer.benefit.range.catalog, self.catalog)
self.assertEqual(new_offer.email_domains, new_email_domains)
def test_get_voucher_and_products_from_code(self):
""" Verify that get_voucher_and_products_from_code() returns products and voucher. """
original_voucher, original_product = prepare_voucher(code=VOUCHER_CODE)
voucher, products = get_voucher_and_products_from_code(code=VOUCHER_CODE)
self.assertIsNotNone(voucher)
self.assertEqual(voucher, original_voucher)
self.assertEqual(voucher.code, VOUCHER_CODE)
self.assertEqual(len(products), 1)
self.assertEqual(products[0], original_product)
def test_no_product(self):
""" Verify that an exception is raised if there is no product. """
voucher = VoucherFactory()
offer = ConditionalOfferFactory()
voucher.offers.add(offer)
with self.assertRaises(exceptions.ProductNotFoundError):
get_voucher_and_products_from_code(code=voucher.code)
def test_get_non_existing_voucher(self):
""" Verify that get_voucher_and_products_from_code() raises exception for a non-existing voucher. """
with self.assertRaises(Voucher.DoesNotExist):
get_voucher_and_products_from_code(code=FuzzyText().fuzz())
def test_generate_coupon_report_for_program_coupon(self):
""" Only program coupon applicable fields should be shown. """
program_uuid = uuid.uuid4()
program_coupon = self.create_coupon(
title='Program Coupon Report',
program_uuid=program_uuid,
)
field_names, rows = generate_coupon_report([program_coupon.attr.coupon_vouchers])
for field in ('Discount Amount', 'Price'):
self.assertIsNone(rows[0][field])
removed_fields = ('Catalog Query', 'Course ID', 'Course Seat Types', 'Organization', 'Redeemed For Course ID',)
for field_name in removed_fields:
self.assertNotIn(field_name, field_names)
self.assertIn('Program UUID', field_names)
self.assertEqual(rows[0]['Program UUID'], program_uuid)
| agpl-3.0 | -5,427,811,836,126,914,000 | 42.231675 | 119 | 0.640619 | false | 3.857627 | true | false | false |
CONSOLNY/rglk | first_app/admin.py | 1 | 2963 | from django import forms
from django.contrib import admin
from django.contrib.auth.models import Group
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from first_app.models import *
# Register your models here.
class CellAdmin(admin.ModelAdmin):
list_display = ("x", "y")
class CharacterAdmin(admin.ModelAdmin):
list_display = ("cell", "hp", "name", "defense", "attack")
class MonsterAdmin(admin.ModelAdmin):
list_display = ("cell", "hp", "name", "defense", "attack")
class LootTypeAdmin(admin.ModelAdmin):
list_display = ("item",)
class LootAdmin(admin.ModelAdmin):
list_display = ("item_type", "item_name", "item_stats")
class InventoryCellAdmin(admin.ModelAdmin):
list_display = ("inv_coord", "inv_content",)
class InventoryCharacterAdmin(admin.ModelAdmin):
list_display = ("char", "content")
class SlotCharacterAdmin(admin.ModelAdmin):
list_display = ("slot", "slot_type")
class UserCreationForm(forms.ModelForm):
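    """A form for creating new Player users; asks for the password twice and
    stores it hashed via set_password()."""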
password1 = forms.CharField(label='Password', widget=forms.PasswordInput)
password2 = forms.CharField(label='Password confirmation', widget=forms.PasswordInput)
class Meta:
model = Player
fields = ('name',)
def clean_password2(self):
password1 = self.cleaned_data.get('password1')
password2 = self.cleaned_data.get('password2')
if password1 and password2 and password1 != password2:
raise forms.ValidationError("Passwords don't match")
return password2
def save(self, commit=True):
user = super(UserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data['password1'])
if commit:
user.save()
return user
class UserChangeForm(forms.ModelForm):
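    """A form for updating Player users; the password is exposed only as a
    read-only hash and left unchanged on save."""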
password = ReadOnlyPasswordHashField()
class Meta:
model = Player
fields = ('name', 'password', 'is_active', 'is_admin')
def clean_password(self):
return self.initial['password']
class UserAdmin(BaseUserAdmin):
form = UserChangeForm
add_form = UserCreationForm
list_display = ('name', 'char_list', 'is_admin')
list_filter = ('is_admin',)
fieldsets = ((None, {'fields': ('name', 'password')}), ('Permissions', {'fields': ('is_admin',)}),)
add_fieldsets = ((None, {'classes': ('wide',), 'fields': ('name', 'password1', 'password2')}),)
search_fields = ('name',)
ordering = ('name',)
filter_horizontal = ()
admin.site.register(Cell, CellAdmin)
admin.site.register(Character, CharacterAdmin)
admin.site.register(Monster, MonsterAdmin)
admin.site.register(LootType, LootTypeAdmin)
admin.site.register(Loot, LootAdmin)
admin.site.register(InventoryCell, InventoryCellAdmin)
admin.site.register(InventoryCharacter, InventoryCharacterAdmin)
admin.site.register(SlotCharacter, SlotCharacterAdmin)
admin.site.register(Player, UserAdmin)
admin.site.unregister(Group)
| mit | -6,099,835,934,507,987,000 | 35.580247 | 103 | 0.697604 | false | 3.736444 | false | false | false |
janbrohl/SimpleTAL | tests/TALESTests/VariablePathTest.py | 1 | 4398 | #!/usr/bin/python
# -*- coding: iso-8859-1 -*-
# Copyright (c) 2016, Jan Brohl <[email protected]>
# All rights reserved.
# See LICENSE.txt
# Copyright (c) 2004 Colin Stewart (http://www.owlfish.com/)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# If you make any bug fixes or feature enhancements please let me know!
"""
Unit test cases.
"""
from __future__ import unicode_literals
import unittest
import os
import io
import logging
import logging.config
from simpletal import simpleTAL, simpleTALES
if (os.path.exists("logging.ini")):
logging.config.fileConfig("logging.ini")
else:
logging.basicConfig()
def simpleFunction():
return "Hello World"
def nestedFunction():
return {'nest': simpleFunction}
def pathFunction(thePath):
return thePath
class PathTests(unittest.TestCase):
def setUp(self):
self.context = simpleTALES.Context()
self.context.addGlobal(
'colours', {
'blue': 'The sea is blue',
'red': 'The ball is red',
'green': 'The grass is green'
})
self.context.addGlobal('aList', ['blue', 'green'])
self.context.addGlobal('goodColour', 'goodColourPath')
self.context.addGlobal('goodColourPath', 'Black is good')
self.context.addGlobal('noSuchColour', 'pink')
def _runTest_(self, txt, result, errMsg="Error"):
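        # Compile the template, expand it against the shared context and compare
        # the rendered output with the expected markup.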
template = simpleTAL.compileHTMLTemplate(txt)
file = io.StringIO()
template.expand(self.context, file)
realResult = file.getvalue()
self.assertEqual(
realResult, result,
"%s - \npassed in: %s \ngot back %s \nexpected %s\n\nTemplate: %s"
% (errMsg, txt, realResult, result, template))
def testRepeatVariablePath(self):
self._runTest_(
'<html><ul><li tal:repeat="colour aList" tal:content="colours/?colour">List</li></ul></html>',
'<html><ul><li>The sea is blue</li><li>The grass is green</li></ul></html>',
'Path variable during repeat failed.')
def testLocalVariablePath(self):
self._runTest_(
'<html><p tal:define="one string:red">It is red: <b tal:content="colours/?one"></b></p></html>',
'<html><p>It is red: <b>The ball is red</b></p></html>',
'Local variable path failed.')
def testGlobalVariablePath(self):
self._runTest_('<html><p tal:content="?goodColour"></p></html>',
'<html><p>Black is good</p></html>',
'Global variable path failed.')
def testNoSuchVariablePath(self):
self._runTest_('<html><p tal:content="?badColour"></p></html>',
'<html><p></p></html>', 'No such variable failed.')
def testNoSuchVariablePath2(self):
self._runTest_(
'<html><p tal:content="colours/?noSuchColour"></p></html>',
'<html><p></p></html>', 'No such variable2 failed.')
| bsd-3-clause | 1,952,199,184,770,537,700 | 37.243478 | 108 | 0.644156 | false | 3.944395 | true | false | false |
MediffRobotics/DeepRobotics | DeepLearnMaterials/tutorials/tensorflowTUT/tf15_tensorboard/full_code.py | 1 | 2777 | # View more python learning tutorial on my Youtube and Youku channel!!!
# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg
# Youku video tutorial: http://i.youku.com/pythontutorial
"""
Please note, this code is only for python 3+. If you are using python 2+, please modify the code accordingly.
"""
from __future__ import print_function
import tensorflow as tf
import numpy as np
def add_layer(inputs, in_size, out_size, n_layer, activation_function=None):
# add one more layer and return the output of this layer
layer_name = 'layer%s' % n_layer
with tf.name_scope(layer_name):
with tf.name_scope('weights'):
Weights = tf.Variable(tf.random_normal([in_size, out_size]), name='W')
tf.histogram_summary(layer_name + '/weights', Weights)
with tf.name_scope('biases'):
biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name='b')
tf.histogram_summary(layer_name + '/biases', biases)
with tf.name_scope('Wx_plus_b'):
Wx_plus_b = tf.add(tf.matmul(inputs, Weights), biases)
if activation_function is None:
outputs = Wx_plus_b
else:
outputs = activation_function(Wx_plus_b, )
tf.histogram_summary(layer_name + '/outputs', outputs)
return outputs
# Make up some real data
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise
# define placeholder for inputs to network
with tf.name_scope('inputs'):
xs = tf.placeholder(tf.float32, [None, 1], name='x_input')
ys = tf.placeholder(tf.float32, [None, 1], name='y_input')
# add hidden layer
l1 = add_layer(xs, 1, 10, n_layer=1, activation_function=tf.nn.relu)
# add output layer
prediction = add_layer(l1, 10, 1, n_layer=2, activation_function=None)
# the error between prediciton and real data
with tf.name_scope('loss'):
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
reduction_indices=[1]))
tf.scalar_summary('loss', loss)
with tf.name_scope('train'):
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
sess = tf.Session()
merged = tf.merge_all_summaries()
writer = tf.train.SummaryWriter("logs/", sess.graph)
# tf.initialize_all_variables() is no longer valid from
# 2017-03-02 if using tensorflow >= 0.12
sess.run(tf.global_variables_initializer())
for i in range(1000):
sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
if i % 50 == 0:
result = sess.run(merged,
feed_dict={xs: x_data, ys: y_data})
writer.add_summary(result, i)
# direct to the local dir and run this in terminal:
# $ tensorboard --logdir=logs | gpl-3.0 | -4,064,351,649,322,628,600 | 37.583333 | 109 | 0.652143 | false | 3.255569 | false | false | false |
namecoin/namecoin-core | test/functional/p2p_addr_relay.py | 7 | 8564 | #!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Test addr relay
"""
from test_framework.messages import (
CAddress,
NODE_NETWORK,
NODE_WITNESS,
msg_addr,
msg_getaddr
)
from test_framework.p2p import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
)
import time
class AddrReceiver(P2PInterface):
num_ipv4_received = 0
test_addr_contents = False
def __init__(self, test_addr_contents=False):
super().__init__()
self.test_addr_contents = test_addr_contents
def on_addr(self, message):
for addr in message.addrs:
self.num_ipv4_received += 1
if(self.test_addr_contents):
# relay_tests checks the content of the addr messages match
# expectations based on the message creation in setup_addr_msg
assert_equal(addr.nServices, 9)
if not 8333 <= addr.port < 8343:
raise AssertionError("Invalid addr.port of {} (8333-8342 expected)".format(addr.port))
assert addr.ip.startswith('123.123.123.')
def addr_received(self):
return self.num_ipv4_received != 0
def getaddr_received(self):
return self.message_count['getaddr'] > 0
class AddrTest(BitcoinTestFramework):
counter = 0
mocktime = int(time.time())
def set_test_params(self):
self.num_nodes = 1
def run_test(self):
self.oversized_addr_test()
self.relay_tests()
self.getaddr_tests()
self.blocksonly_mode_tests()
def setup_addr_msg(self, num):
addrs = []
for i in range(num):
addr = CAddress()
addr.time = self.mocktime + i
addr.nServices = NODE_NETWORK | NODE_WITNESS
addr.ip = f"123.123.123.{self.counter % 256}"
addr.port = 8333 + i
addrs.append(addr)
self.counter += 1
msg = msg_addr()
msg.addrs = addrs
return msg
def send_addr_msg(self, source, msg, receivers):
source.send_and_ping(msg)
# pop m_next_addr_send timer
self.mocktime += 10 * 60
self.nodes[0].setmocktime(self.mocktime)
for peer in receivers:
peer.sync_send_with_ping()
def oversized_addr_test(self):
self.log.info('Send an addr message that is too large')
addr_source = self.nodes[0].add_p2p_connection(P2PInterface())
msg = self.setup_addr_msg(1010)
with self.nodes[0].assert_debug_log(['addr message size = 1010']):
addr_source.send_and_ping(msg)
self.nodes[0].disconnect_p2ps()
def relay_tests(self):
self.log.info('Test address relay')
self.log.info('Check that addr message content is relayed and added to addrman')
addr_source = self.nodes[0].add_p2p_connection(P2PInterface())
num_receivers = 7
receivers = []
for _ in range(num_receivers):
receivers.append(self.nodes[0].add_p2p_connection(AddrReceiver(test_addr_contents=True)))
# Keep this with length <= 10. Addresses from larger messages are not
# relayed.
num_ipv4_addrs = 10
msg = self.setup_addr_msg(num_ipv4_addrs)
with self.nodes[0].assert_debug_log(
[
'Added {} addresses from 127.0.0.1: 0 tried'.format(num_ipv4_addrs),
'received: addr (301 bytes) peer=1',
]
):
self.send_addr_msg(addr_source, msg, receivers)
total_ipv4_received = sum(r.num_ipv4_received for r in receivers)
# Every IPv4 address must be relayed to two peers, other than the
# originating node (addr_source).
ipv4_branching_factor = 2
assert_equal(total_ipv4_received, num_ipv4_addrs * ipv4_branching_factor)
self.nodes[0].disconnect_p2ps()
self.log.info('Check relay of addresses received from outbound peers')
inbound_peer = self.nodes[0].add_p2p_connection(AddrReceiver(test_addr_contents=True))
full_outbound_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=0, connection_type="outbound-full-relay")
msg = self.setup_addr_msg(2)
self.send_addr_msg(full_outbound_peer, msg, [inbound_peer])
self.log.info('Check that the first addr message received from an outbound peer is not relayed')
# Currently, there is a flag that prevents the first addr message received
        # from a new outbound peer from being relayed to others. Originally meant to prevent
# large GETADDR responses from being relayed, it now typically affects the self-announcement
# of the outbound peer which is often sent before the GETADDR response.
assert_equal(inbound_peer.num_ipv4_received, 0)
self.log.info('Check that subsequent addr messages sent from an outbound peer are relayed')
msg2 = self.setup_addr_msg(2)
self.send_addr_msg(full_outbound_peer, msg2, [inbound_peer])
assert_equal(inbound_peer.num_ipv4_received, 2)
self.log.info('Check address relay to outbound peers')
block_relay_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=1, connection_type="block-relay-only")
msg3 = self.setup_addr_msg(2)
self.send_addr_msg(inbound_peer, msg3, [full_outbound_peer, block_relay_peer])
self.log.info('Check that addresses are relayed to full outbound peers')
assert_equal(full_outbound_peer.num_ipv4_received, 2)
self.log.info('Check that addresses are not relayed to block-relay-only outbound peers')
assert_equal(block_relay_peer.num_ipv4_received, 0)
self.nodes[0].disconnect_p2ps()
def getaddr_tests(self):
self.log.info('Test getaddr behavior')
self.log.info('Check that we send a getaddr message upon connecting to an outbound-full-relay peer')
full_outbound_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=0, connection_type="outbound-full-relay")
full_outbound_peer.sync_with_ping()
assert full_outbound_peer.getaddr_received()
self.log.info('Check that we do not send a getaddr message upon connecting to a block-relay-only peer')
block_relay_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=1, connection_type="block-relay-only")
block_relay_peer.sync_with_ping()
assert_equal(block_relay_peer.getaddr_received(), False)
self.log.info('Check that we answer getaddr messages only from inbound peers')
inbound_peer = self.nodes[0].add_p2p_connection(AddrReceiver())
inbound_peer.sync_with_ping()
# Add some addresses to addrman
for i in range(1000):
first_octet = i >> 8
second_octet = i % 256
a = f"{first_octet}.{second_octet}.1.1"
self.nodes[0].addpeeraddress(a, 8333)
full_outbound_peer.send_and_ping(msg_getaddr())
block_relay_peer.send_and_ping(msg_getaddr())
inbound_peer.send_and_ping(msg_getaddr())
self.mocktime += 5 * 60
self.nodes[0].setmocktime(self.mocktime)
inbound_peer.wait_until(lambda: inbound_peer.addr_received() is True)
assert_equal(full_outbound_peer.num_ipv4_received, 0)
assert_equal(block_relay_peer.num_ipv4_received, 0)
assert inbound_peer.num_ipv4_received > 100
self.nodes[0].disconnect_p2ps()
def blocksonly_mode_tests(self):
self.log.info('Test addr relay in -blocksonly mode')
self.restart_node(0, ["-blocksonly"])
self.mocktime = int(time.time())
self.log.info('Check that we send getaddr messages')
full_outbound_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=0, connection_type="outbound-full-relay")
full_outbound_peer.sync_with_ping()
assert full_outbound_peer.getaddr_received()
self.log.info('Check that we relay address messages')
addr_source = self.nodes[0].add_p2p_connection(P2PInterface())
msg = self.setup_addr_msg(2)
self.send_addr_msg(addr_source, msg, [full_outbound_peer])
assert_equal(full_outbound_peer.num_ipv4_received, 2)
self.nodes[0].disconnect_p2ps()
if __name__ == '__main__':
AddrTest().main()
| mit | 8,815,426,499,682,522,000 | 39.396226 | 136 | 0.64269 | false | 3.549109 | true | false | false |
sadig/DC2 | components/dc2-appserver/dc2/appserver/rpcmethods/macaddresses.py | 1 | 4532 | # -*- coding: utf-8 -*-
###############################################################################
#
# (DC)² - DataCenter Deployment Control
# Copyright (C) 2010, 2011, 2012, 2013, 2014 Stephan Adig <[email protected]>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
###############################################################################
#
# Std. Python Libs
#
import sys
import types
import xmlrpclib
try:
from dc2.lib.db.mongo import Table
from dc2.appserver.helpers import check_record
from dc2.appserver.rpc import rpcmethod
except ImportError:
print "You don't have DC² correctly installed"
sys.exit(1)
try:
from settings import MONGOS
except ImportError:
print "You don't have a settings file"
sys.exit(1)
tbl_server = Table(MONGOS["dc2db"]["database"].get_table("mac_addresses"))
MACADDR_RECORD = {
"server_id": True,
"mac_addr": True,
"device_name": False
}
@rpcmethod(
name="dc2.inventory.servers.macaddr.list",
returns={"list mac_addr_rec": "List of MAC Address Records for a server"},
params={}, is_xmlrpc=True, is_jsonrpc=True)
def dc2_servers_macaddr_list(search):
if search is not None and type(search) is types.DictType:
result = tbl_server.find(search)
else:
result = tbl_server.find()
return result
@rpcmethod(
name="dc2.inventory.servers.macaddr.add",
returns={"string doc_id": "Document ID of new added record"},
params={"dict rec_macaddr": "Record Dictionary"},
is_xmlrpc=True, is_jsonrpc=True)
def dc2_servers_macaddr_add(rec_mac=None):
if rec_mac is not None and type(rec_mac) is types.DictType:
if (check_record(rec_mac, MACADDR_RECORD) and
tbl_server.find_one(
{"mac_addr": rec_mac["mac_addr"]}) is None):
doc_id = tbl_server.save(rec_mac)
return doc_id
return xmlrpclib.Fault(-32501, "Record couldn't be added")
@rpcmethod(
name="dc2.inventory.servers.macaddr.update",
returns={"string doc_id": "Document ID of new added record"},
params={"dict rec_macaddr": "Record Dictionary"},
is_xmlrpc=True, is_jsonrpc=True)
def dc2_servers_macaddr_update(rec_mac=None):
if rec_mac is not None and type(rec_mac) is types.DictType:
if (check_record(rec_mac, MACADDR_RECORD) and
tbl_server.find_one({
"_id": rec_mac["_id"],
"server_id": rec_mac["server_id"]}) is not None):
doc_id = tbl_server.save(rec_mac)
return doc_id
return xmlrpclib.Fault(-32504, "Record couldn't be updated")
@rpcmethod(
name="dc2.inventory.servers.macaddr.delete",
returns={"bool success": "True if action was successful"},
params={"dict rec_macaddr": "Prefilled record dictionary with key _id,"
" or server_id to delete all mac_addr records attached "
"to a server"},
is_xmlrpc=True, is_jsonrpc=True)
def dc2_servers_macaddr_delete(rec_mac=None):
if rec_mac is not None and type(rec_mac) is types.DictType:
if '_id' in rec_mac or 'server_id' in rec_mac:
response = tbl_server.remove(rec_mac)
if response is False:
return xmlrpclib.Fault(-32503, "Record(s) couldn't be deleted")
return True
return xmlrpclib.Fault(-32503, "Record(s) couldn't be deleted")
@rpcmethod(
name="dc2.inventory.servers.macaddr.find",
returns={"bool success": "True if action was successful"},
params={"dict rec_server": "Pre-Filled record dictionary with key _id"},
is_xmlrpc=True, is_jsonrpc=True)
def dc2_servers_macaddr_find(rec_mac=None):
if rec_mac is not None and type(rec_mac) is types.DictType:
response = tbl_server.find(rec_mac)
return response
return xmlrpclib.Fault(-32502, "Record wasn't found!")
| gpl-2.0 | 4,016,057,753,600,655,400 | 36.438017 | 80 | 0.639073 | false | 3.656174 | false | false | false |
charmoniumQ/Surprise | examples/load_custom_dataset_predefined_folds.py | 1 | 1136 | """
This module describes how to load a custom dataset when folds (for
cross-validation) are predefined by train and test files.
As a custom dataset we will actually use the movielens-100k dataset, but act as
if it were not built-in.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
from surprise import BaselineOnly
from surprise import Dataset
from surprise import evaluate
from surprise import Reader
# path to dataset folder
files_dir = os.path.expanduser('~/.surprise_data/ml-100k/ml-100k/')
# This time, we'll use the built-in reader.
reader = Reader('ml-100k')
# folds_files is a list of tuples containing file paths:
# [(u1.base, u1.test), (u2.base, u2.test), ... (u5.base, u5.test)]
train_file = files_dir + 'u%d.base'
test_file = files_dir + 'u%d.test'
folds_files = [(train_file % i, test_file % i) for i in (1, 2, 3, 4, 5)]
data = Dataset.load_from_folds(folds_files, reader=reader)
# We'll use an algorithm that predicts baseline estimates.
algo = BaselineOnly()
# Evaluate performances of our algorithm on the dataset.
evaluate(algo, data)
| bsd-3-clause | -2,546,405,243,802,962,000 | 30.555556 | 79 | 0.715669 | false | 3.21813 | true | false | false |
liminspace/django-mjml | testprj/tests_tcpserver.py | 1 | 1892 | # coding=utf-8
from __future__ import absolute_import
from django.test import TestCase
from mjml import settings as mjml_settings
from testprj.tools import safe_change_mjml_settings, MJMLServers, MJMLFixtures, render_tpl
class TestMJMLTCPServer(MJMLFixtures, MJMLServers, TestCase):
SERVER_TYPE = 'tcpserver'
_settings_manager = None
@classmethod
def setUpClass(cls):
cls._settings_manager = safe_change_mjml_settings()
cls._settings_manager.__enter__()
mjml_settings.MJML_BACKEND_MODE = cls.SERVER_TYPE
super(TestMJMLTCPServer, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(TestMJMLTCPServer, cls).tearDownClass()
cls._settings_manager.__exit__(None, None, None)
def test_simple(self):
html = render_tpl(self.TPLS['simple'])
self.assertIn('<html ', html)
self.assertIn('<body', html)
self.assertIn('20px ', html)
self.assertIn('Test title', html)
self.assertIn('Test button', html)
with self.assertRaises(RuntimeError):
render_tpl("""
{% mjml %}
123
{% endmjml %}
""")
def test_large_tpl(self):
html = render_tpl(self.TPLS['with_text_context'], {
'text': '[START]' + ('1 2 3 4 5 6 7 8 9 0 ' * 410 * 1024) + '[END]',
})
self.assertIn('<html ', html)
self.assertIn('<body', html)
self.assertIn('[START]', html)
self.assertIn('[END]', html)
def test_unicode(self):
html = render_tpl(self.TPLS['with_text_context_and_unicode'], {'text': self.TEXTS['unicode']})
self.assertIn('<html ', html)
self.assertIn('<body', html)
self.assertIn(u'Український текст', html)
self.assertIn(self.TEXTS['unicode'], html)
self.assertIn(u'©', html)
| mit | -7,038,980,208,716,193,000 | 33.722222 | 102 | 0.596267 | false | 3.598848 | true | false | false |
david672orford/pykarta | pykarta/server/app.py | 1 | 2461 | #! /usr/bin/python3
# pykarta/server/app.py
# Server for use by PyKarta applications.
# Provides geocoding and vector map tiles.
# Last modified: 17 October 2019
import re, os
try:
import pykarta
except ImportError:
# During testing we may run this script from its own directory
import sys
sys.path.insert(1, "../..")
# Import data data provider modules
from pykarta.server.modules.not_found import app as app_not_found
from pykarta.server.modules.geocoder_parcel import app as app_geocoder_parcel
from pykarta.server.modules.geocoder_openaddresses import app as app_geocoder_openaddresses
from pykarta.server.modules.tiles_parcels import app as app_tiles_parcels
from pykarta.server.modules.tiles_osm_vec import app as app_tiles_osm_vec
# Map paths to data provider modules
routes = {
'geocoders/parcel': app_geocoder_parcel,
'geocoders/openaddresses': app_geocoder_openaddresses,
'tiles/parcels': app_tiles_parcels,
'tiles': app_tiles_osm_vec,
None: app_not_found,
}
# The WSGI app
def app(environ, start_response):
stderr = environ['wsgi.errors']
# In production the server administrator will have set DATADIR.
if not 'DATADIR' in environ:
# During testing we use this.
environ['DATADIR'] = os.environ['HOME'] + "/geo_data/processed"
# /tiles/<tileset>/
# /geocoders/<geocoder>/
m = re.match(r'^/([^/]+)/([^/]+)(.*)$', environ['PATH_INFO'])
if not m:
stderr.write("Parse failed: %s\n" % environ['PATH_INFO'])
app = routes[None]
else:
# Level 2 mounts such as /tiles/parcels/
app = routes.get("%s/%s" % (m.group(1), m.group(2)))
if app is not None:
environ['SCRIPT_NAME'] += ("/%s/%s" % (m.group(1), m.group(2)))
environ['PATH_INFO'] = m.group(3)
else:
# Level 1 mounts such as /tiles/
app = routes.get(m.group(1))
if app is not None:
environ['SCRIPT_NAME'] += ("/%s" % m.group(1))
environ['PATH_INFO'] = ("/%s%s" % (m.group(2), m.group(3)))
else:
app = routes[None]
return app(environ, start_response)
# Standalone server for testing
# Start it up and run:
# PYKARTA_SERVER_URL=http://localhost:5000 gpx-trip-planner
if __name__ == "__main__":
import sys
from werkzeug.serving import run_simple
class EnvInsert(object):
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
environ['DATADIR'] = os.environ['DATADIR']
return self.app(environ, start_response)
app = EnvInsert(app)
run_simple('0.0.0.0', 5000, app, threaded=False)
| gpl-2.0 | -7,287,627,514,957,235,000 | 30.551282 | 91 | 0.687525 | false | 2.861628 | false | false | false |
Varbin/EEH | _vendor/ldap3/extend/novell/partition_entry_count.py | 1 | 2031 | """
"""
# Created on 2014.08.05
#
# Author: Giovanni Cannata
#
# Copyright 2014, 2015, 2016, 2017 Giovanni Cannata
#
# This file is part of ldap3.
#
# ldap3 is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ldap3 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with ldap3 in the COPYING and COPYING.LESSER files.
# If not, see <http://www.gnu.org/licenses/>.
from pyasn1.type.univ import Integer
from ...core.exceptions import LDAPExtensionError
from ..operation import ExtendedOperation
from ...protocol.rfc4511 import LDAPDN
from ...utils.asn1 import decoder
from ...utils.dn import safe_dn
class PartitionEntryCount(ExtendedOperation):
def config(self):
self.request_name = '2.16.840.1.113719.1.27.100.13'
self.response_name = '2.16.840.1.113719.1.27.100.14'
self.request_value = LDAPDN()
self.response_attribute = 'entry_count'
def __init__(self, connection, partition_dn, controls=None):
ExtendedOperation.__init__(self, connection, controls) # calls super __init__()
if connection.check_names:
partition_dn = safe_dn(partition_dn)
self.request_value = LDAPDN(partition_dn)
def populate_result(self):
substrate = self.decoded_response
try:
decoded, substrate = decoder.decode(substrate, asn1Spec=Integer())
self.result['entry_count'] = int(decoded)
except Exception:
raise LDAPExtensionError('unable to decode substrate')
if substrate:
raise LDAPExtensionError('unknown substrate remaining')
| bsd-2-clause | -3,472,395,893,567,100,000 | 34.631579 | 88 | 0.702117 | false | 3.839319 | false | false | false |
YaleDHLab/lab-workshops | machine-learning/helpers.py | 1 | 7948 | from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import ListedColormap
import numpy as np
import matplotlib.pyplot as plt
import warnings
# quiet scipy future warnings
warnings.filterwarnings('ignore')
# decision boundary grid colors
grid_colors = ListedColormap([
'#ff8585',
'#6db4f3',
])
# decision boundary point colors
point_colors = ListedColormap([
'#ff0000',
'#0000ff',
])
def plot_decision_boundary(clf, X, labels, margin=0.2, mesh_unit=0.01, proba=False):
'''
Plot the classification decision for each point in a quantized grid
From: http://scikit-learn.org/stable/auto_examples/neighbors/plot_classification.html
@args:
{class} clf: a class that has a method .predict() that takes as input
an array of k dimensional values and returns an array with shape n,1
where n = the number of observations in the input array. This returned
array of values should contain class predictions--one per input element.
nb: if proba=True, the class should contain a method `.decision_function()`
that should return an array with shape n,1 that contains probability
values for a given class prediction. See scikit classifiers for examples
of both methods.
@returns:
void
'''
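  # Hypothetical usage sketch (not part of the original module) -- any estimator with
  # a .predict() method works here, e.g. a scikit-learn classifier:
  #   from sklearn.neighbors import KNeighborsClassifier
  #   clf = KNeighborsClassifier().fit(X, labels)
  #   plot_decision_boundary(clf, X, labels, proba=False)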
# find the min value in the first column and subtract `margin`
x_min = X[:, 0].min() - margin
# find the max value in the first column and add `margin`
x_max = X[:, 0].max() + margin
# find the minimum value in the second column and subtract `margin`
y_min = X[:, 1].min() - margin
# find the minimum value in the second column and add `margin`
y_max = X[:, 1].max() + margin
# get a list of values from min to max, counting by `mesh_unit`
x_range = np.arange(x_min, x_max, mesh_unit)
y_range = np.arange(y_min, y_max, mesh_unit)
# create a dense grid with one row for each value in x_range and
# one column for each value in y_range
xx, yy = np.meshgrid(x_range, y_range)
# `np.ravel` flattens a multidimensional array to a single dimension.
# `np.c_` makes its first and second args the first and second columns in a 2D
# array, so np.c_[xx.ravel(), yy.ravel()] has one 2D observation per grid unit
grid_vals = np.c_[xx.ravel(), yy.ravel()]
# plot continuous predictions if proba == True, else discrete classifications
if proba:
# some classifiers use decision_function to return continuous probabilities
# while others use predict_proba
if hasattr(clf, 'decision_function'):
Z = clf.decision_function(grid_vals)
else:
Z = clf.predict_proba(grid_vals)[:,1]
else:
Z = clf.predict(grid_vals)
# reshape Z (a 1D array of classification decisions) to a 2D x by y grid
Z = Z.reshape(xx.shape)
# plot the background decision boundary
cmap = plt.cm.RdBu if proba else grid_colors
plt.contourf(xx, yy, Z, cmap=cmap, alpha=0.8)
# plot the observations
plt.scatter(X[:,0], X[:,1], s=30, c=labels, cmap=point_colors, edgecolors='#000000')
def plot_distance(arr):
'''
Given `arr` with two arrays, each of two or three elements,
plot the points at positions `arr[0]` and `arr[1]`
and plot lines between those two points
@args:
arr [arr]: an array composed of 2d or 3d arrays
@returns:
void
'''
if len(arr[0]) == 2:
plot_distance_2d(arr)
elif len(arr[0]) == 3:
plot_distance_3d(arr)
def plot_distance_2d(arr):
'''
Given `arr` with two 2-element arrays, plot the points
at positions `arr[0]` and `arr[1]` and plot lines between
those two points
@args:
arr [arr]: an array composed of 2d arrays
@returns:
void
'''
a, b = arr
df = np.array([a, b])
# point data: pattern for drawing points is:
# ax.scatter(x_vals, y_vals, z_vals)
plt.scatter(df[:,0], df[:,1], s=100, c=['blue', 'orange'], alpha=1.0, edgecolors='#000000')
# add point labels
plt.text(0.05, 0.05, 'a', fontsize=20, horizontalalignment='center')
plt.text(0.95, 0.95, 'b', fontsize=20, horizontalalignment='center')
# line data: pattern for drawing lines is:
# ax.plot([x_start, x_end], [y_start, y_end], zs=[z_start, z_end])
plt.plot( [a[0], b[0]], [a[1], a[1]], c='red' ) # x-line
plt.plot( [b[0], b[0]], [a[1], b[1]], c='purple' ) # y-line
plt.plot( [a[0], b[0]], [a[1], b[1]], c='gray', linestyle=':' ) # direct line
# add axis labels
plt.xlabel('X')
plt.ylabel('Y')
plt.show()
def plot_distance_3d(arr):
'''
Given `arr` with two 3-element arrays, plot the points
at positions `arr[0]` and `arr[1]` and plot lines between
those two points.
@args:
arr [arr]: an array composed of 3d arrays
@returns:
void
'''
a, b = arr
df = np.array([a, b])
fig = plt.figure()
ax = fig.gca(projection='3d')
# point data: pattern for drawing points is:
# ax.scatter(x_vals, y_vals, z_vals)
ax.scatter(df[:,0], df[:,1], df[:,2], s=100, c=['blue', 'orange'], alpha=1.0)
# label points
ax.text(0.1, 0.1, 0, 'a', fontsize=20, horizontalalignment='center')
ax.text(0.9, 0.9, 1.0, 'b', fontsize=20, horizontalalignment='center')
# line data: pattern for drawing lines is:
# ax.plot([x_start, x_end], [y_start, y_end], zs=[z_start, z_end])
  ax.plot( [a[0], b[0]], [a[1], a[1]], zs=[a[2], a[2]], c='red' ) # x-line
  ax.plot( [b[0], b[0]], [a[1], b[1]], zs=[a[2], a[2]], c='purple' ) # y-line
  ax.plot( [b[0], b[0]], [b[1], b[1]], zs=[a[2], b[2]], c='green' ) # z-line
  ax.plot( [a[0], b[0]], [a[1], b[1]], zs=[a[2], b[2]], c='gray', linestyle=':' ) # direct line
# add axis labels
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show()
def plot_iforest_decision_boundary(*args, **kwargs):
'''
Create and display the decision boundary for an isolation forest.
'''
clf = args[0] # the isolation forest classifier
X = args[1] # the input array of observations used to train the classifier
new_vals = args[2] # the array of observations classified by the classifier
result = args[3] # the classification results from the classifier
margin = kwargs.get('margin', 6) # margin around the plot
mesh = kwargs.get('grid_x', 0.5) # the size of each colormesh grid unit
x_lims = kwargs.get('x_lims', (-13, 12)) # the min max x values to display
y_lims = kwargs.get('y_lims', (-13, 5)) # the min max y values to display
# get the x and y grid domains
x_domain = [ X[:, 0].min() - margin, X[:, 0].max() + margin ]
y_domain = [ X[:, 1].min() - margin, X[:, 1].max() + margin ]
# get a list of values from min to max, counting by `mesh`
x_range = np.arange(x_domain[0], x_domain[1], mesh)
y_range = np.arange(y_domain[0], y_domain[1], mesh)
# create the data with which to color the background grid
xx, yy = np.meshgrid(x_range, y_range)
# classify each unit of the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
# reshape Z into a 2D grid
Z = Z.reshape(xx.shape)
# fill in the grid values
plt.contourf(xx, yy, Z, cmap=plt.cm.YlGn)
# add the training points; edgecolors='k' is short for 'edgecolors'='black'
train_p = plt.scatter(X[:,0], X[:,1], c='green', edgecolors='k', alpha=0.4)
# separate new_vals into outliers and "inliers" based on result
outliers = []
inliers = []
for idx, i in enumerate(result):
if result[idx] == 1:
inliers.append(new_vals[idx])
else:
outliers.append(new_vals[idx])
outliers = np.array(outliers)
inliers = np.array(inliers)
# plot the inliers and outliers
in_p = plt.scatter(inliers[:,0], inliers[:,1], c='white', edgecolors='k')
out_p = plt.scatter(outliers[:,0], outliers[:,1], c='red', edgecolors='k')
# limit the axis ranges
plt.xlim(x_lims)
plt.ylim(y_lims)
# add a title to the plot
plt.title('Isolation Forests Decision Boundary')
# add a legend to the plot
plt.legend([train_p, in_p, out_p], [
'training observation',
'classified as non-outlier',
'classified as outlier',
], loc=[0.025, 0.05], framealpha=0.97)
plt.show() | mit | -3,475,078,968,114,607,000 | 35.296804 | 96 | 0.646955 | false | 3.074662 | false | false | false |
little-dude/simple-ostinato | simple_ostinato/constants.py | 1 | 2116 | """
This module contains constants that are for internal use only.
"""
from . import utils
from ostinato.core import ost_pb
class _Protocols(utils.Enum):
"""
Enum for the available protocols
"""
# Layer 1 protocols
MAC = ost_pb.Protocol.kMacFieldNumber
# Layer 2 protocols
ETHERNET_II = ost_pb.Protocol.kEth2FieldNumber
ETHERNET_802_DOT_3 = ost_pb.Protocol.kDot3FieldNumber
LLC = ost_pb.Protocol.kLlcFieldNumber
SNAP = ost_pb.Protocol.kSnapFieldNumber
SVLAN = ost_pb.Protocol.kSvlanFieldNumber
VLAN = ost_pb.Protocol.kVlanFieldNumber
VLAN_STACK = ost_pb.Protocol.kVlanStackFieldNumber
ETHERNET_802_DOT_2_LLC = ost_pb.Protocol.kDot2LlcFieldNumber
ETHERNET_802_DOT_2_SNAP = ost_pb.Protocol.kDot2SnapFieldNumber
# Layer 3 protocols
ARP = ost_pb.Protocol.kArpFieldNumber
IP4 = ost_pb.Protocol.kIp4FieldNumber
IP6 = ost_pb.Protocol.kIp6FieldNumber
IP4_OVER_IP4 = ost_pb.Protocol.kIp4over4FieldNumber
IP4_OVER_IP6 = ost_pb.Protocol.kIp4over6FieldNumber
IP6_OVER_IP4 = ost_pb.Protocol.kIp6over4FieldNumber
IP6_OVER_IP6 = ost_pb.Protocol.kIp6over6FieldNumber
# Layer 4 protocols
TCP = ost_pb.Protocol.kTcpFieldNumber
UDP = ost_pb.Protocol.kUdpFieldNumber
ICMP = ost_pb.Protocol.kIcmpFieldNumber
IGMP = ost_pb.Protocol.kIgmpFieldNumber
MLD = ost_pb.Protocol.kMldFieldNumber
# Layer 5 protocols
TEXT_PROTOCOL = ost_pb.Protocol.kTextProtocolFieldNumber
# Layer independant "protocols"
PAYLOAD = ost_pb.Protocol.kPayloadFieldNumber
SAMPLE = ost_pb.Protocol.kSampleFieldNumber
USER_SCRIPT = ost_pb.Protocol.kUserScriptFieldNumber
HEX_DUMP = ost_pb.Protocol.kHexDumpFieldNumber
| gpl-3.0 | -705,805,642,238,411,100 | 41.32 | 70 | 0.600189 | false | 3.485997 | false | false | false |
mattkenney/feedsquish | update.py | 1 | 2664 | #!/usr/bin/env python
#
# Copyright 2012 Matt Kenney
#
# This file is part of Feedsquish.
#
# Feedsquish is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# Feedsquish is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Feedsquish. If not, see <http://www.gnu.org/licenses/>.
#
import calendar
import datetime
import logging
import time
import feedparser
import feeds
import filters
def updateFeed(feedUrl, now, cutoff):
print 'parsing ', feedUrl
parser = feedparser.parse(feedUrl)#, agent='Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 6.0)')
print 'status ', str(parser.status)
# if parser.status == 500:
# print news.escape_xml(parser.data)
feedid = "feed/" + filters.encode_segment(feedUrl)
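    # redis key of the sorted set holding this feed's articles (scored by article date below)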
for entry in parser.entries:
link = entry.get('link', '')
if not link:
continue;
artid = "art/" + filters.encode_segment(link)
if feeds.redis.exists(artid):
print 'skipping', link
continue;
print 'saving', link
art = {}
art['name'] = entry.get('title', '')
art['guid'] = entry.get('guid', '')
art['date'] = now
if entry.has_key('published_parsed') and entry.published_parsed:
art['date'] = calendar.timegm(entry.published_parsed)
elif entry.has_key('date_parsed') and entry.date_parsed:
art['date'] = calendar.timegm(entry.date_parsed)
art['category'] = entry.get('category', '')
feeds.redis.hmset(artid, art)
feeds.redis.zadd(feedid, art['date'], artid)
print 'purging ', feedUrl
for artid in feeds.redis.zrangebyscore(feedid, "-inf", cutoff):
feeds.redis.delete(artid)
feeds.redis.zremrangebyscore(feedid, "-inf", cutoff)
def updateAll():
now = int(time.time())
print now
cutoff = now - (60 * 24 * 60 * 60)
feeds.redis.zremrangebyscore("feeds", "-inf", cutoff)
for feedUrl in feeds.redis.zrange("feeds", 0, -1):
try:
updateFeed(feedUrl, now, cutoff)
except Exception, e:
print e
# sleep to spread the server load out over time
time.sleep(30)
if __name__ == '__main__':
updateAll()
| agpl-3.0 | -5,369,418,050,483,782,000 | 31.888889 | 101 | 0.642267 | false | 3.614654 | false | false | false |
pythonindia/wye | wye/reports/views.py | 1 | 8745 | import datetime
import csv
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.auth.models import User
from django_pandas.io import read_frame, pd
# from wye.base.constants import WorkshopStatus
from wye.workshops.models import Workshop
from wye.profiles.models import Profile
@login_required
def index(request):
context_dict = {}
if not request.user.is_staff:
template_name = '403.html'
return render(request, template_name, context_dict)
workshops = Workshop.objects.filter(is_active=True)
dataframe = read_frame(workshops, fieldnames=[
'requester__location__state__name',
'presenter__id',
'presenter__first_name',
'presenter__last_name',
'workshop_level',
'no_of_participants',
'expected_date'])
# State Based workshop plot
location = dataframe
location.dropna(subset=['presenter__id'], inplace=True)
# location['presenter__id'] = location['presenter__id'].astype(int)
location_based_sum = location.requester__location__state__name.value_counts()
location_list = []
for loc, count in location_based_sum.to_dict().items():
location_list.append(
{"label": loc, "values": count})
context_dict['location'] = location_list
# Top 10 tutors
top_tutor_data = dataframe
presenter_count = top_tutor_data.groupby('presenter__id').count()
top_tutor_data.drop_duplicates(subset=['presenter__id'], inplace=True)
top_tutor_data.index = top_tutor_data.presenter__id
top_tutor_data.drop(["presenter__id"], axis=1, inplace=True)
presenter_count.drop([
'presenter__last_name', 'workshop_level',
'requester__location__state__name',
'no_of_participants',
'expected_date'], axis=1, inplace=True)
presenter_count.rename(columns={
'presenter__first_name': 'conducted_workshop_count'}, inplace=True)
t = top_tutor_data.join(presenter_count)
top_ten_tutors = t.groupby('workshop_level')[
'conducted_workshop_count'].nlargest(10)
top_ten_tutors = dataframe.join(top_ten_tutors)
top_ten_tutors.rename(
columns={'presenter__first_name': 'first_name',
'presenter__last_name': 'last_name'
}, inplace=True)
# Create list of dict as required by nd3 library
d = {}
data = []
for index, row in top_ten_tutors.iterrows():
d.setdefault(row.workshop_level, [])
d[row.workshop_level].append(
{'x': '{} {}'.format(row.first_name, row.last_name),
'y': row.conducted_workshop_count})
for k, v in d.items():
data.append({'key': k, 'values': v})
context_dict['workshop_tutor'] = data
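    # Participants over time: sum participants per expected date, resample to
    # monthly totals, then regroup by (year, month) for the time-series chart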
time_series = read_frame(workshops, fieldnames=[
'no_of_participants', 'expected_date'])
# print(time_series)
time_series['no_of_participants'] = pd.to_numeric(
time_series['no_of_participants'])
time_series = time_series.groupby(
'expected_date')[['no_of_participants']].agg('sum')
time_series.fillna(0, inplace=True)
time_series.index = pd.to_datetime(time_series.index)
resampled = time_series.resample('M').sum()
resampled.fillna(0, inplace=True)
# month_list = []
t = resampled.groupby([(resampled.index.year),
(resampled.index.month)]).sum()
d = {}
month_dict = {
1: 'Jan', 2: 'Feb', 3: 'Mar', 4: 'Apr',
5: 'May', 6: 'Jun', 7: 'Jul', 8: 'Aug',
9: 'Sept', 10: 'Oct', 11: 'Nov', 12: 'Dec'}
for index, row in t.to_dict()['no_of_participants'].items():
d.setdefault(index[0], [])
d[index[0]].insert(
index[1] - 1, {'x': month_dict.get(index[1]), 'y': row})
ret = []
for index, row in d.items():
ret.append({"key": index, "values": row})
context_dict['line_graph'] = ret
years = [('all', 'All')]
for y in range(2016, int(datetime.datetime.today().strftime('%Y')) + 1):
years.append((y, y))
context_dict['years'] = years
template_name = 'reports/index.html'
return render(request, template_name, context_dict)
# @login_required
# def index_old(request):
# context_dict = {}
# if not request.user.is_staff:
# template_name = '403.html'
# return render(request, template_name, context_dict)
# workshops = Workshop.objects.filter(is_active=True)
# context_dict['workshops'] = {
# 'completed': workshops.filter(status=WorkshopStatus.COMPLETED).count(),
# 'drafted': workshops.filter(status=WorkshopStatus.DRAFT).count(),
# 'hold': workshops.filter(status=WorkshopStatus.HOLD).count(),
# 'feedback_pending': workshops.filter(
# status=WorkshopStatus.FEEDBACK_PENDING).count(),
# }
# workshop_finished = workshops.filter(
# status__in=[WorkshopStatus.COMPLETED,
# WorkshopStatus.FEEDBACK_PENDING])
# tutors_dict = {}
# tutors = [
# user for w in workshop_finished for user in w.presenter.all()]
# for tutor in tutors:
# tutors_dict[tutor.id] = [
# tutor.username,
# tutor.first_name,
# tutor.last_name,
# tutor.profile.get_workshop_completed_count]
# context_dict['tutors'] = tutors_dict
# org_dict = {}
# orgs = [
# w.requester for w in workshop_finished]
# for org in orgs:
# if org.id in org_dict:
# count = org_dict[org.id][1] + 1
# else:
# count = 1
# org_dict[org.id] = [org.name, count, org.location.name]
# context_dict['orgs'] = org_dict
# template_name = 'reports/index.html'
# years = [('all', 'All')]
# for y in range(2016, int(datetime.datetime.today().strftime('%Y')) + 1):
# years.append((y, y))
# context_dict['years'] = years
# return render(request, template_name, context_dict)
@login_required
def get_tutor_college_poc_csv(request):
# if not request.user.is_staff:
# template_name = '403.html'
# return render(request, template_name, {})
usertype = request.POST['usertype']
year = request.POST['years']
workshops = Workshop.objects.filter(is_active=True)
if year != 'all':
workshops = workshops.filter(expected_date__year=year)
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="workshops.csv"'
writer = csv.writer(response)
    csv_titles = ['Workshop Id', 'Workshop Date', 'Location', 'College']
if usertype == 'tutor':
csv_titles.extend(['Presenter Name', 'Presenter Email'])
elif usertype == 'poc':
csv_titles.extend(['College POC Name', 'College POC Email'])
else:
csv_titles.extend(['Presenter Name', 'Presenter Email'])
csv_titles.extend(['College POC Name', 'College POC Email'])
writer.writerow(csv_titles)
for obj in workshops:
row = [
obj.id, obj.expected_date,
obj.location.name, obj.requester.name]
if usertype == 'tutor':
for u in obj.presenter.all():
row.append("{} {}".format(u.first_name, u.last_name))
row.append("{}".format(u.email))
elif usertype == 'poc':
for u in obj.requester.user.all():
row.append("{} {}".format(u.first_name, u.last_name))
row.append("{}".format(u.email))
else:
for u in obj.presenter.all():
row.append("{} {}".format(u.first_name, u.last_name))
row.append("{}".format(u.email))
for u in obj.requester.user.all():
row.append("{} {}".format(u.first_name, u.last_name))
row.append("{}".format(u.email))
writer.writerow(row)
return response
@login_required
def get_all_user_info(request):
users = User.objects.all()
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="all_users.csv"'
writer = csv.writer(response)
csv_titles = [
'User Id', 'First Name', 'Last Name', 'Email', 'Is Active',
'Is Presenter', 'Is POC', 'Is Organiser']
writer.writerow(csv_titles)
for obj in users:
try:
row = [
obj.id,
obj.first_name,
obj.last_name,
obj.email,
obj.is_active,
Profile.is_presenter(obj),
Profile.is_coordinator(obj),
Profile.is_organiser(obj)]
writer.writerow(row)
except Exception:
pass
return response
| mit | -7,042,324,506,256,734,000 | 37.355263 | 81 | 0.594282 | false | 3.4348 | false | false | false |
rabrahm/ceres | coralie/coraliepipe.py | 1 | 59702 | import sys
import matplotlib
matplotlib.use("Agg")
base = '../'
sys.path.append(base+"utils/Continuum")
sys.path.append(base+"utils/Correlation")
sys.path.append(base+"utils/GLOBALutils")
sys.path.append(base+"utils/OptExtract")
baryc_dir= base+'utils/SSEphem/'
sys.path.append(baryc_dir)
ephemeris='DEc403'
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from pylab import *
# ceres modules
import coralieutils
import continuum
import correlation
import GLOBALutils
import Marsh
import fabryperot
# other useful modules
import argparse
import ephem
import jplephem
from math import radians as rad
from astropy.io import fits as pyfits
import pickle
import os
import scipy
import scipy.interpolate
from scipy import interpolate
import statsmodels.api as sm
lowess = sm.nonparametric.lowess
# Recive input parameters
parser = argparse.ArgumentParser()
parser.add_argument('directorio')
parser.add_argument('-avoid_plot', action="store_true", default=False)
parser.add_argument('-dirout',default='default')
parser.add_argument('-do_class', action="store_true", default=False)
parser.add_argument('-just_extract', action="store_true", default=False)
parser.add_argument('-npools', default=1)
parser.add_argument('-o2do',default='all')
parser.add_argument('-reffile',default='default')
args = parser.parse_args()
dirin = args.directorio
avoid_plot = args.avoid_plot
dirout = args.dirout
DoClass = args.do_class
JustExtract = args.just_extract
npools = int(args.npools)
object2do = args.o2do
reffile = args.reffile
if dirin[-1] != '/':
dirin = dirin + '/'
if dirout == 'default':
dirout = dirin[:-1]+'_red/'
if not os.access(dirout,os.F_OK):
os.system('mkdir '+dirout)
if os.access(dirout+'proc',os.F_OK):
os.system('rm -r '+dirout+'proc')
os.system('mkdir '+dirout+'proc')
f_res = open(dirout+'proc/'+'results.txt','w')
if reffile == 'default':
reffile = dirin+'reffile.txt'
####### GLOBAL VARIABLES #####
force_pre_process = False
force_flat_extract = False
force_thar_extract = False
force_thfp_extract = False
force_tharxc = False
force_thar_wavcal = False
force_thfp_wavcal = False
force_sci_extract = False
force_spectral_file_build = True
force_stellar_pars = False
dumpargon = False
minlines_glob_ob = 700
minlines_glob_co = 500
Inverse_m = True
use_cheby = True
MRMS = 100 # max rms in m/s, global wav solution
trace_degree = 4
Marsh_alg = 0
ext_aperture = 3
NSigma_Marsh = 5
NCosmic_Marsh = 10
S_Marsh = 0.4
N_Marsh = 3 # polynomial degree
min_extract_col = 50
max_extract_col = 2000
n_useful = 70 # up to which order do we care?
# Number of coefficients for the global wavelength solution
ncoef_x = 4
ncoef_m = 6
npar_wsol = (min(ncoef_x,ncoef_m) + 1) * (2*max(ncoef_x,ncoef_m) - min(ncoef_x,ncoef_m) + 2) / 2
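# e.g. with ncoef_x=4 and ncoef_m=6 this gives (4+1)*(2*6-4+2)/2 = 25 free parameters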
models_path = base+"data/COELHO_MODELS/R_40000b/" # path to the synthetic models
order_dir = base+"coralie/wavcals/" # path to reference files for the wavelength solution
#############################
# file containing the log
log = dirout+'night.log'
print "\n\n\tCoralie Euler1.2m PIPELINE\n"
print "\tRAW data is in ",dirin
print "\tProducts of reduction will be in",dirout
print '\n'
# classification of input images according to header info
biases, ob_flats, co_flats, ob_loc, co_loc, ThAr_ref, ThFP_ref,\
simThAr_sci,sim_FP_sci,ThAr_ref_dates,ThFP_ref_dates,obnames,\
obnames_FP,exptimes, exptimes_FP, flats = coralieutils.FileClassify(dirin,log)
# Pre-process
if ( (( len(ob_flats) > 0) and (os.access(dirout+'FlatOb.fits',os.F_OK) == False)) or \
(( len(co_flats) > 0) and (os.access(dirout+'FlatCo.fits',os.F_OK) == False)) or \
(( len(flats) > 0) and (os.access(dirout+'Flat.fits',os.F_OK) == False)) or \
(os.access(dirout+'trace.pkl',os.F_OK) == False) or \
(os.access(dirout+'MasterBias.fits',os.F_OK) == False) or \
(force_pre_process) ):
print "\tNo previous pre-processing files or found"
pre_process = 1
else:
print "\tPre-processing files found, going straight to extraction"
pre_process = 0
if (pre_process == 1):
print "\tGenerating Master calibration frames..."
# median combine Biases
MasterBias, RO_bias, GA_bias = coralieutils.MedianCombine(biases,ZF=0.)
hdu = pyfits.PrimaryHDU( MasterBias )
if (os.access(dirout+'MasterBias.fits',os.F_OK)):
os.remove(dirout+'MasterBias.fits')
hdu.writeto(dirout+'MasterBias.fits')
print "\t\t-> Masterbias: done!"
if len(flats) > 0:
        # median combine the full list of flats
Flat,RO_flat,GA_flat = coralieutils.MedianCombine(flats,ZF=MasterBias)
hdu = pyfits.PrimaryHDU(Flat)
if (os.access(dirout+'Flat.fits',os.F_OK)):
os.remove(dirout+'Flat.fits')
hdu.writeto(dirout+'Flat.fits')
if len(ob_flats) > 0:
# median combine list of ob flats
Flat_ob, RO_ob, GA_ob = coralieutils.MedianCombine(ob_flats,ZF=MasterBias)
# save this file for later reference
hdu = pyfits.PrimaryHDU( Flat_ob )
if (os.access(dirout+'FlatOb.fits',os.F_OK)):
os.remove(dirout+'FlatOb.fits')
hdu.writeto(dirout+'FlatOb.fits')
else:
Flat_ob = Flat
if len(co_flats) > 0:
# median combine list of co flats
Flat_co,RO_co,GA_co = coralieutils.MedianCombine(co_flats,ZF=MasterBias)
hdu = pyfits.PrimaryHDU(Flat_co)
if (os.access(dirout+'FlatCo.fits',os.F_OK)):
os.remove(dirout+'FlatCo.fits')
hdu.writeto(dirout+'FlatCo.fits')
else:
Flat_co = Flat
print "\t\t-> Masterflats: done!"
# Find orders & traces
print "\tTracing echelle orders..."
if len(ob_flats)>0 and len(co_flats)>0:
c_ob, nord_ob = GLOBALutils.get_them(Flat_ob, 8, trace_degree,maxords=-1,mode=1)
c_co, nord_co = GLOBALutils.get_them(Flat_co, 8, trace_degree,maxords=-1,startfrom=300,mode=1)
else:
c_all, nord_all = GLOBALutils.get_them(Flat, 5, trace_degree,maxords=-1,mode=1,nsigmas=3)
GA_co,GA_ob = GA_flat, GA_flat
RO_co,RO_ob = RO_flat, RO_flat
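        # single-flat case: the first 22 traces belong to the object fibre only,
        # and from trace 22 onwards object and comparison traces alternate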
c_ob = c_all[:22]
c_co = c_all[22]
i = 23
while i < len(c_all)-1:
c_ob = np.vstack((c_ob,c_all[i]))
c_co = np.vstack((c_co,c_all[i+1]))
i+=2
nord_co, nord_ob = len(c_co),len(c_ob)
print '\t', nord_ob, 'object orders found...'
print '\t', nord_co, 'comparison orders found...'
if len(ob_flats)>0 and len(co_flats)>0:
trace_dict = {'c_ob':c_ob,'c_co':c_co,
'nord_ob':nord_ob, 'nord_co':nord_co,
'GA_ob': GA_ob, 'RO_ob': RO_ob,
'GA_co': GA_co, 'RO_co': RO_co}
else:
trace_dict = {'c_all':c_all,'c_ob':c_ob,'c_co':c_co,
'nord_ob':nord_ob, 'nord_co':nord_co,'nord_all':nord_all,
'GA_ob': GA_ob, 'RO_ob': RO_ob,
'GA_co': GA_co, 'RO_co': RO_co}
pickle.dump( trace_dict, open( dirout+"trace.pkl", 'w' ) )
else:
trace_dict = pickle.load( open( dirout+"trace.pkl", 'r' ) )
c_co = trace_dict['c_co']
c_ob = trace_dict['c_ob']
nord_ob = trace_dict['nord_ob']
nord_co = trace_dict['nord_co']
if 'c_all' in trace_dict.keys():
c_all = trace_dict['c_all']
nord_all = trace_dict['nord_all']
# recover GA*, RO*
GA_ob = trace_dict['GA_ob']
RO_ob = trace_dict['RO_ob']
GA_co = trace_dict['GA_co']
RO_co = trace_dict['RO_co']
# recover flats & master bias
if len(ob_flats)>0:
h = pyfits.open(dirout+'FlatOb.fits')
Flat_ob = h[0].data
else:
h = pyfits.open(dirout+'Flat.fits')
Flat_ob = h[0].data
if len(co_flats)>0:
h = pyfits.open(dirout+'Flat.fits')
Flat_co = h[0].data
else:
h = pyfits.open(dirout+'Flat.fits')
Flat_co = h[0].data
h = pyfits.open(dirout+'MasterBias.fits')
MasterBias = h[0].data
if len(ob_flats)>0 and len(co_flats)>0:
c_all = GLOBALutils.Mesh(c_ob,c_co)
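    # interleave object and comparison traces into one set; used further down to
    # locate all orders when estimating the scattered-light background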
print '\n\tExtraction of Flat calibration frames:'
# Extract flat spectra, object
P_ob_fits = dirout + 'P_ob.fits'
S_flat_ob_fits = dirout +'S_flat_ob.fits'
P_ob = np.zeros( Flat_ob.shape )
S_flat_ob = np.zeros((nord_ob, 3, Flat_ob.shape[1]) )
if ( os.access(P_ob_fits,os.F_OK) == False ) or ( os.access(S_flat_ob_fits,os.F_OK) == False ) or \
(force_flat_extract):
print "\t\tNo extracted flat object spectra found or extraction forced, extracting and saving..."
print "\t\t\tWill extract",nord_ob,"orders for object fibre..."
P_ob = GLOBALutils.obtain_P(Flat_ob,c_ob,ext_aperture,RO_ob,\
GA_ob,NSigma_Marsh, S_Marsh, \
N_Marsh, Marsh_alg, min_extract_col,\
max_extract_col, npools)
S_flat_ob = GLOBALutils.optimal_extraction(Flat_ob,P_ob,c_ob,ext_aperture,\
RO_ob,GA_ob,S_Marsh,NCosmic_Marsh,\
min_extract_col,max_extract_col,npools)
# write P_on and S_flat_ob as fits files
if (os.access(P_ob_fits,os.F_OK)):
os.remove( P_ob_fits )
if (os.access(S_flat_ob_fits,os.F_OK)):
os.remove( S_flat_ob_fits )
hdu = pyfits.PrimaryHDU( P_ob )
hdu.writeto( P_ob_fits )
hdu = pyfits.PrimaryHDU( S_flat_ob )
hdu.writeto( S_flat_ob_fits )
else:
print "\t\tExtracted flat object spectra found, loading..."
P_ob = pyfits.getdata( P_ob_fits )
S_flat_ob = pyfits.getdata( S_flat_ob_fits )
# Extract flat spectra, comparison
P_co_fits = dirout + 'P_co.fits'
S_flat_co_fits = dirout +'S_flat_co.fits'
P_co = np.zeros( Flat_co.shape )
S_flat_co = np.zeros((nord_co, 3, Flat_co.shape[1]) )
if ( os.access(P_co_fits,os.F_OK) == False ) or ( os.access(S_flat_co_fits,os.F_OK) == False ) or (force_flat_extract):
print "\t\tNo extracted flat comparison spectra found or extraction forced, extracting and saving..."
print "\t\t\tWill extract",nord_co,"orders for comparison fibre"
P_co = GLOBALutils.obtain_P(Flat_co,c_co,ext_aperture,RO_co,\
GA_co,NSigma_Marsh, S_Marsh, \
N_Marsh, Marsh_alg, min_extract_col,\
max_extract_col, npools)
S_flat_co = GLOBALutils.optimal_extraction(Flat_co,P_co,c_co,ext_aperture,RO_co,GA_co,\
S_Marsh,NCosmic_Marsh,min_extract_col,\
max_extract_col,npools)
# write P_on and S_flat_co as fits files
if (os.access(P_co_fits,os.F_OK)):
os.remove( P_co_fits )
if (os.access(S_flat_co_fits,os.F_OK)):
os.remove( S_flat_co_fits )
hdu = pyfits.PrimaryHDU( P_co )
hdu.writeto( P_co_fits )
hdu = pyfits.PrimaryHDU( S_flat_co )
hdu.writeto( S_flat_co_fits )
else:
print "\t\tExtracted flat comparison spectra found, loading..."
P_co = pyfits.getdata( P_co_fits )
S_flat_co = pyfits.getdata( S_flat_co_fits )
# Normalize flat field spectra.
S_flat_ob_n, maxvals_ob = GLOBALutils.FlatNormalize_single( S_flat_ob, mid=int(0.5*S_flat_ob.shape[2]))
S_flat_co_n, maxvals_co = GLOBALutils.FlatNormalize_single( S_flat_co, mid=int(0.5*S_flat_co.shape[2]))
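# each flat order is normalised (mid = central column); maxvals_* store the per-order scaling factors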
print '\n\tExtraction of ThAr calibration frames:'
# Extract all ThAr files
for fsim in ThAr_ref:
hthar = pyfits.open( fsim )
dthar = coralieutils.OverscanTrim( pyfits.getdata( fsim ) )
ron = hthar[0].header['HIERARCH ESO CORA CCD RON']
gain = hthar[0].header['HIERARCH ESO CORA CCD GAIN']
thar_fits_ob = dirout + fsim.split('/')[-1][:-8]+'spec.ob.fits.S'
thar_fits_co = dirout + fsim.split('/')[-1][:-8]+'spec.co.fits.S'
if ( os.access(thar_fits_ob,os.F_OK) == False ) or \
( os.access(thar_fits_co,os.F_OK) == False ) or \
(force_thar_extract):
print "\t\tNo previous extraction or extraction forced for ThAr file", fsim, "extracting..."
thar_S_ob = GLOBALutils.optimal_extraction(dthar,P_ob,c_ob,ext_aperture,ron,gain,\
S_Marsh,100.,min_extract_col,max_extract_col,npools)
thar_S_co = GLOBALutils.optimal_extraction(dthar,P_co,c_co,ext_aperture,ron,gain,\
S_Marsh,100.,min_extract_col,max_extract_col,npools)
# save as fits file
if (os.access(thar_fits_ob,os.F_OK)):
os.remove( thar_fits_ob )
if (os.access(thar_fits_co,os.F_OK)):
os.remove( thar_fits_co )
hdu = pyfits.PrimaryHDU( thar_S_ob )
hdu.writeto( thar_fits_ob )
hdu = pyfits.PrimaryHDU( thar_S_co )
hdu.writeto( thar_fits_co )
else:
print "\t\tThAr file", fsim, "all ready extracted, loading..."
print "\n\tWavelength solution of ThAr calibration spectra:"
# compute wavelength calibration files
sorted_ThAr_dates = np.argsort( ThAr_ref_dates )
p0_array = np.zeros( (len(ThAr_ref_dates), npar_wsol) )
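# loop over the ThAr frames in chronological order; each global solution's coefficients
# are stored in p0_array so a median nightly solution can be formed afterwards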
for i in range(len(sorted_ThAr_dates)):
index = sorted_ThAr_dates[i]
wavsol_pkl = dirout + ThAr_ref[index].split('/')[-1][:-8]+'wavsolpars.pkl'
thar_fits_ob = dirout + ThAr_ref[index].split('/')[-1][:-8]+'spec.ob.fits.S'
thar_fits_co = dirout + ThAr_ref[index].split('/')[-1][:-8]+'spec.co.fits.S'
if ( os.access(wavsol_pkl,os.F_OK) == False ) or (force_thar_wavcal):
print "\t\tComputing wavelength solution of ThAr file", ThAr_ref[index]
hthar = pyfits.open( ThAr_ref[index] )
mjd, mjd0 = coralieutils.mjd_fromheader( hthar )
thar_S_ob = pyfits.getdata( thar_fits_ob )
thar_S_co = pyfits.getdata( thar_fits_co )
lines_thar_ob = thar_S_ob[:,1,:]
iv_thar_ob = thar_S_ob[:,2,:]
lines_thar_co = thar_S_co[:,1,:]
iv_thar_co = thar_S_co[:,2,:]
All_Pixel_Centers = np.array([])
All_Wavelengths = np.array([])
All_Orders = np.array([])
All_Centroids = np.array([])
All_Sigmas = np.array([])
All_Intensities = np.array([])
for order in range(n_useful):
order_s = str(order)
if (order < 10):
order_s = '0'+str(order)
thar_order_orig = lines_thar_ob[order,:]
IV = iv_thar_ob[order,:]
wei = np.sqrt( IV )
bkg = GLOBALutils.Lines_mBack(thar_order_orig, IV, thres_rel=3)
thar_order = thar_order_orig - bkg
coeffs_pix2wav, coeffs_pix2sigma, pixel_centers, wavelengths,\
rms_ms, residuals, centroids, sigmas, intensities \
= GLOBALutils.Initial_Wav_Calibration(order_dir+'order_'+order_s+'o.iwdat',\
thar_order,order,wei,rmsmax=5000000,\
minlines=10,FixEnds=True,Dump_Argon=dumpargon,\
Dump_AllLines=True, Cheby=use_cheby)
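                # the central wavelength of (relative) order 35 is kept as a zero point
                # and seeds p0[0] of the global 2D wavelength fit below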
if (order == 35):
if (use_cheby):
Global_ZP = GLOBALutils.Cheby_eval( coeffs_pix2wav, 1023, len(thar_order) )
else:
Global_ZP = scipy.polyval( coeffs_pix2wav, 0.0 )
All_Pixel_Centers = np.append( All_Pixel_Centers, pixel_centers )
All_Wavelengths = np.append( All_Wavelengths, wavelengths )
All_Orders = np.append( All_Orders, np.zeros( len(pixel_centers) ) + order )
All_Centroids = np.append( All_Centroids, centroids)
All_Sigmas = np.append( All_Sigmas, sigmas)
All_Intensities = np.append( All_Intensities, intensities )
p0 = np.zeros( npar_wsol )
p0[0] = (35+89) * Global_ZP
p1, G_pix, G_ord, G_wav, II, rms_ms, G_res = \
GLOBALutils.Fit_Global_Wav_Solution(All_Pixel_Centers, All_Wavelengths, All_Orders,\
np.ones(All_Intensities.shape), p0, Cheby=use_cheby,\
maxrms=MRMS, Inv=Inverse_m,minlines=700,order0=89, \
ntotal=n_useful,npix=len(thar_order),nx=ncoef_x,nm=ncoef_m)
# Now calibrate COMPARISON orders. Use p1 above as p0
All_Pixel_Centers_co = np.array([])
All_Wavelengths_co = np.array([])
All_Orders_co = np.array([])
All_Centroids_co = np.array([])
All_Sigmas_co = np.array([])
All_Intensities_co = np.array([])
for order in range(22,n_useful):
order_s = str(order)
if (order < 10):
order_s = '0'+str(order)
thar_order_orig = lines_thar_co[order-22,:]
IV = iv_thar_co[order-22,:]
wei = np.sqrt( IV )
bkg = GLOBALutils.Lines_mBack(thar_order_orig, IV, thres_rel=3)
thar_order = thar_order_orig - bkg
coeffs_pix2wav, coeffs_pix2sigma, pixel_centers, wavelengths, rms_ms, residuals, centroids, sigmas, intensities \
= GLOBALutils.Initial_Wav_Calibration( order_dir+'order_'+order_s+'o.iwdat', thar_order, order, wei, \
rmsmax=5000000, minlines=10,FixEnds=True,Dump_Argon=dumpargon, \
Dump_AllLines=True, Cheby=use_cheby)
All_Pixel_Centers_co = np.append( All_Pixel_Centers_co, pixel_centers )
All_Wavelengths_co = np.append( All_Wavelengths_co, wavelengths )
All_Orders_co = np.append( All_Orders_co, np.zeros( len(pixel_centers) ) + order )
All_Centroids_co = np.append( All_Centroids_co, centroids)
All_Sigmas_co = np.append( All_Sigmas_co, sigmas)
All_Intensities_co = np.append( All_Intensities_co, intensities )
p1_co, G_pix_co, G_ord_co, G_wav_co, II_co, rms_ms_co, G_res_co = \
GLOBALutils.Fit_Global_Wav_Solution(All_Pixel_Centers_co, All_Wavelengths_co, All_Orders_co,\
np.ones(All_Intensities_co.shape), p1, Cheby=use_cheby,\
maxrms=MRMS, Inv=Inverse_m,minlines=500,order0=89,ntotal=n_useful,npix=len(thar_order),nx=ncoef_x,nm=ncoef_m)
# end COMPARISON orders.
pdict = {'p1':p1,'mjd':mjd, 'G_pix':G_pix, 'G_ord':G_ord, 'G_wav':G_wav, 'II':II, 'rms_ms':rms_ms,\
'G_res':G_res, 'All_Centroids':All_Centroids, 'All_Orders':All_Orders, 'All_Sigmas':All_Sigmas,
'p1_co':p1_co, 'G_pix_co':G_pix_co, 'G_ord_co':G_ord_co, 'G_wav_co':G_wav_co, 'II_co':II_co, 'rms_ms_co':rms_ms_co,\
'G_res_co':G_res_co, 'All_Centroids_co':All_Centroids_co}
pickle.dump( pdict, open( wavsol_pkl, 'w' ) )
#print "Median sigma:", np.median( All_Sigmas )
p0_array[i,:] = p1
else:
print "\t\tUsing previously computed wavelength solution in file",wavsol_pkl
pdict = pickle.load(open(wavsol_pkl,'r'))
p0_array[i,:] = pdict['p1']
p0_G = np.median(p0_array,axis=0)
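# robust nightly reference solution: median of the individual ThAr wavelength solutions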
if len(ThFP_ref) > 0:
print '\n\tExtraction of Fabry-Perot calibration frames:'
else:
print '\n\tNo Fabry-Perot calibration images found, moving on'
# Now extract ThAr-FP images
for fsim in ThFP_ref:
hthfp = pyfits.open( fsim )
thfp_fits_ob = dirout + fsim.split('/')[-1][:-8]+'spec.ob.fits.S'
thfp_fits_co = dirout + fsim.split('/')[-1][:-8]+'spec.co.fits.S'
if ( os.access(thfp_fits_ob,os.F_OK) == False ) or \
( os.access(thfp_fits_co,os.F_OK) == False ) or \
(force_thfp_extract):
print "\t\tNo previous extraction or extraction forced for ThFP file", fsim, "extracting..."
dthfp = coralieutils.OverscanTrim( pyfits.getdata( fsim ) )
Centers = np.zeros((len(c_all),dthfp.shape[1]))
for i in range(c_all.shape[0]):
Centers[i,:]=scipy.polyval(c_all[i,:],np.arange(len(Centers[i,:])))
bac = GLOBALutils.get_scat(dthfp,Centers,span=5)
dthfp -= bac
thfp_S_ob = GLOBALutils.optimal_extraction(dthfp,P_ob,c_ob,ext_aperture,\
hthfp[0].header['HIERARCH ESO CORA CCD RON'],\
hthfp[0].header['HIERARCH ESO CORA CCD GAIN'],\
S_Marsh,100.,min_extract_col,max_extract_col,npools)
thfp_S_co = GLOBALutils.optimal_extraction(dthfp,P_co,c_co,ext_aperture,\
hthfp[0].header['HIERARCH ESO CORA CCD RON'],
hthfp[0].header['HIERARCH ESO CORA CCD GAIN'],
S_Marsh,100.,min_extract_col,max_extract_col,npools)
# save as fits file
if (os.access(thfp_fits_ob,os.F_OK)):
os.remove( thfp_fits_ob )
if (os.access(thfp_fits_co,os.F_OK)):
os.remove( thfp_fits_co )
hdu = pyfits.PrimaryHDU( thfp_S_ob )
hdu.writeto( thfp_fits_ob )
hdu = pyfits.PrimaryHDU( thfp_S_co )
hdu.writeto( thfp_fits_co )
else:
print "\t\tFP file", fsim, "all ready extracted, loading..."
# Now calibrate the ThFP spectra with the closest ThAr spectrum
print '\n\tWavelength solution of Fabry-Perot spectra with closest ThAr spectrum:'
for fsim in ThFP_ref:
hthfp = pyfits.open( fsim )
mjd, mjd0 = coralieutils.mjd_fromheader(hthfp)
im = np.argmin(np.absolute(np.array(ThAr_ref_dates) - mjd))
wavsol_dict = pickle.load(open(dirout + ThAr_ref[im].split('/')[-1][:-8]+'wavsolpars.pkl','r'))
thfp_fits_ob = dirout + fsim.split('/')[-1][:-8]+'spec.ob.fits.S'
thfp_fits_co = dirout + fsim.split('/')[-1][:-8]+'spec.co.fits.S'
wavsol_pkl_fp = dirout + fsim.split('/')[-1][:-8]+'wavsolpars.pkl'
fp_fits = dirout + fsim.split('/')[-1][:-8]+'sp.fits'
if ( os.access(wavsol_pkl_fp,os.F_OK) == False ) or (force_thfp_wavcal):# or True:
print '\t\tCalibrating', fsim,'...'
fp_fp = pyfits.getdata(thfp_fits_co)[:,1,:]
thar_fp = pyfits.getdata(thfp_fits_ob)
lines_thar_ob = thar_fp[:,1,:]
iv_thar_ob = thar_fp[:,2,:]
All_Pixel_Centers = np.array([])
All_Wavelengths = np.array([])
All_Orders = np.array([])
All_Centroids = np.array([])
All_Sigmas = np.array([])
All_Intensities = np.array([])
for order in range(n_useful):
order_s = str(order)
if (order < 10):
order_s = '0'+str(order)
thar_order_orig = lines_thar_ob[order,:]
IV = iv_thar_ob[order,:]
wei = np.sqrt( IV )
bkg = GLOBALutils.Lines_mBack(thar_order_orig, IV, thres_rel=3)
thar_order = thar_order_orig - bkg
coeffs_pix2wav, coeffs_pix2sigma, pixel_centers, wavelengths, rms_ms, residuals, centroids, sigmas, intensities \
= GLOBALutils.Initial_Wav_Calibration( order_dir+'order_'+order_s+'o.iwdat', thar_order, order, wei, \
rmsmax=5000000, minlines=10,FixEnds=True,Dump_Argon=dumpargon, \
Dump_AllLines=True, Cheby=use_cheby)
All_Pixel_Centers = np.append( All_Pixel_Centers, pixel_centers )
All_Wavelengths = np.append( All_Wavelengths, wavelengths )
All_Orders = np.append( All_Orders, np.zeros( len(pixel_centers) ) + order )
All_Centroids = np.append( All_Centroids, centroids)
All_Sigmas = np.append( All_Sigmas, sigmas)
All_Intensities = np.append( All_Intensities, intensities )
p1, G_pix, G_ord, G_wav, II, rms_ms, G_res = \
GLOBALutils.Fit_Global_Wav_Solution(All_Pixel_Centers, All_Wavelengths, All_Orders,\
np.ones(All_Intensities.shape), p0_G, Cheby=use_cheby,\
maxrms=100, Inv=Inverse_m, minlines=minlines_glob_ob,\
order0=89,ntotal=n_useful,npix=len(thar_order),nx=ncoef_x,nm=ncoef_m)
p_shift, pix_centers, orders, wavelengths, I, rms_ms, residuals = \
GLOBALutils.Global_Wav_Solution_vel_shift(G_pix, G_wav, G_ord,\
np.ones(G_wav.shape), wavsol_dict['p1'],\
Cheby=True,Inv=True,maxrms=100,minlines=minlines_glob_ob,\
order0=89,ntotal=n_useful,npix=len(thar_order),nx=ncoef_x,nm=ncoef_m)
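# Build the FP spectrum on a wavelength grid: evaluate the comparison-fibre solution
# of the reference ThAr, shifted by the velocity offset just measured, order by order.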
spec = np.zeros((2,fp_fp.shape[0],lines_thar_ob.shape[1]))
equis = np.arange( lines_thar_ob.shape[1] )
for order in range(fp_fp.shape[0]):
m = order + 89 + 22
chebs = GLOBALutils.Calculate_chebs(equis, m, Inverse=Inverse_m,order0=89,ntotal=n_useful,npix=lines_thar_ob.shape[1],nx=ncoef_x,nm=ncoef_m)
WavSol = (1.0 + 1.0e-6*p_shift) * (1.0/m) * GLOBALutils.Joint_Polynomial_Cheby(wavsol_dict['p1_co'],chebs,ncoef_x,ncoef_m)
spec[0,order,:] = WavSol
spec[1,order,:] = fp_fp[order]
if (os.access(fp_fits,os.F_OK)):
os.remove( fp_fits )
hdu = pyfits.PrimaryHDU( spec )
hdu.writeto( fp_fits )
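# Locate the Fabry-Perot peaks in the comparison fibre; they are stored with the
# wavelength solution and later used to track the instrumental drift of science frames.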
fp_lines1 = fabryperot.InitialGuess(thfp_fits_co, lim1=50, lim2=-50)
fp_lines = fabryperot.GetFPLines(thfp_fits_co,fp_lines1,lim1=50,lim2=-50,npools=npools)
pdict = {'p1':p1,'p_shift':p_shift,'mjd':mjd, 'G_pix':G_pix, 'G_ord':G_ord, 'G_wav':G_wav, 'II':II, 'rms_ms':rms_ms,\
'G_res':G_res, 'All_Centroids':All_Centroids, 'All_Orders':All_Orders, 'All_Sigmas':All_Sigmas, 'p1_co':wavsol_dict['p1_co'],'fplines':fp_lines}
pickle.dump( pdict, open( wavsol_pkl_fp, 'w' ) )
else:
print '\t\tFP spectrum', fsim, 'already calibrated, loading...'
### start of science frame reductions ###
new_list = []
new_list_obnames = []
new_list_texp = []
for i in range(len(simThAr_sci)):
fsim = simThAr_sci[i]
obname = obnames[i]
texp = exptimes[i]
if (object2do == 'all'):
new_list.append(fsim)
new_list_obnames.append( obname )
new_list_texp.append( texp )
else:
if (obname == object2do):
new_list.append(fsim)
new_list_obnames.append( obname )
new_list_texp.append( texp )
print '\n\tThe following targets will be processed:'
for nlisti in range(len(new_list)):
print '\t\t'+new_list_obnames[nlisti]
# Does any image have a special requirement for dealing with the moonlight?
if os.access(dirin + 'moon_corr.txt', os.F_OK):
fmoon = open(dirin + 'moon_corr.txt','r')
moon_lns = fmoon.readlines()
spec_moon = []
use_moon = []
for line in moon_lns:
spec_moon.append(line.split()[0])
if line.split()[1] == '0':
use_moon.append(False)
else:
use_moon.append(True)
else:
spec_moon = []
use_moon = []
spec_moon = np.array(spec_moon)
use_moon = np.array(use_moon)
# now extract the images
for nlisti in range(len(new_list)):
fsim = new_list[ nlisti ]
obname = new_list_obnames[ nlisti ]
TEXP = np.around(new_list_texp[ nlisti ])
know_moon = False
if fsim.split('/')[-1] in spec_moon:
I = np.where(fsim.split('/')[-1] == spec_moon)[0]
know_moon = True
here_moon = use_moon[I]
# get header h of image
h = pyfits.open(fsim)
print '\n'
print "\t--> Working on image: ", fsim
# get mjd and mjd0
mjd,mjd0 = coralieutils.mjd_fromheader(h)
# get gain and readnoise of object
ronoise = h[0].header['HIERARCH ESO CORA CCD RON']
gain = h[0].header['HIERARCH ESO CORA CCD GAIN']
# Object name
print "\t\tObject name:",obname
# Open file, trim, overscan subtract and MasterBias subtract
data = h[0].data
data = coralieutils.OverscanTrim(data)
data -= MasterBias
bacfile = dirout + 'BAC_' + fsim.split('/')[-1][:-4]+'fits'
if (os.access(bacfile,os.F_OK))== False:
Centers = np.zeros((len(c_all),data.shape[1]))
for i in range(c_all.shape[0]):
Centers[i,:]=scipy.polyval(c_all[i,:],np.arange(len(Centers[i,:])))
bac = GLOBALutils.get_scat(data,Centers,span=5)
hdbac = pyfits.PrimaryHDU( bac )
hdbac.writeto(bacfile)
else:
bac = pyfits.getdata(bacfile)
data -= bac
ra,dec = h[0].header['RA'],h[0].header['DEC']
ra2,dec2 = GLOBALutils.getcoords(obname,mjd,filen=reffile)
if ra2 !=0 and dec2 != 0:
ra = ra2
dec = dec2
else:
print '\t\tUsing the coordinates found in the image header.'
# Find lambda_bary/lambda_topo using JPLEPHEM
altitude = h[0].header['HIERARCH ESO OBS GEO ALTITUDE']
latitude = h[0].header['HIERARCH ESO OBS GEO LATITU']
longitude = h[0].header['HIERARCH ESO OBS GEO LONGIT']
epoch = h[0].header['HIERARCH ESO OBS EQUICAT']
iers = GLOBALutils.JPLiers( baryc_dir, mjd-999.0, mjd+999.0 )
obsradius, R0 = GLOBALutils.JPLR0( latitude, altitude)
obpos = GLOBALutils.obspos( longitude, obsradius, R0 )
jplephem.set_ephemeris_dir( baryc_dir , ephemeris )
jplephem.set_observer_coordinates( obpos[0], obpos[1], obpos[2] )
res = jplephem.doppler_fraction(ra/15.0, dec, int(mjd), mjd%1, 1, 0.0)
lbary_ltopo = 1.0 + res['frac'][0]
bcvel_baryc = ( lbary_ltopo - 1.0 ) * 2.99792458E5
print "\t\tBarycentric velocity:", bcvel_baryc
res = jplephem.pulse_delay(ra/15.0, dec, int(mjd), mjd%1, 1, 0.0)
mbjd = mjd + res['delay'][0] / (3600.0 * 24.0)
# Moon Phase Calculations
gobs = ephem.Observer()
gobs.name = 'Swiss1.2'
gobs.lat = rad(latitude) # lat/long in decimal degrees
gobs.long = rad(longitude)
DDATE = h[0].header['HIERARCH ESO CORA SHUTTER START DATE']
HHOUR = h[0].header['HIERARCH ESO CORA SHUTTER START HOUR']
Mho = str(int(HHOUR))
if len(Mho)<2:
Mho = '0'+Mho
mins = (HHOUR - int(Mho))*60.
Mmi = str(int(mins))
if len(Mmi)<2:
Mmi = '0'+Mmi
segs = (mins - int(Mmi))*60.
if segs<10:
Mse = '0'+str(segs)[:5]
else:
Mse = str(segs)[:6]
gobs.date = str(DDATE[:4]) + '-' + str(DDATE[4:6]) + '-' + str(DDATE[6:]) + ' ' + Mho + ':' + Mmi +':' +Mse
mephem = ephem.Moon()
mephem.compute(gobs)
Mcoo = jplephem.object_track("Moon", int(mjd), float(mjd%1), 1, 0.0)
Mp = jplephem.barycentric_object_track("Moon", int(mjd), float(mjd%1), 1, 0.0)
Sp = jplephem.barycentric_object_track("Sun", int(mjd), float(mjd%1), 1, 0.0)
res = jplephem.object_doppler("Moon", int(mjd), mjd%1, 1, 0.0)
lunation,moon_state,moonsep,moonvel = GLOBALutils.get_lunar_props(ephem,gobs,Mcoo,Mp,Sp,res,ra,dec)
refvel = bcvel_baryc + moonvel
print '\t\tRadial Velocity of scattered moonlight:',refvel
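# Rank the ThAr and ThAr+FP calibrations by time distance to this exposure; the
# closest one will provide the reference wavelength solution.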
sorted_indices = np.argsort( np.abs( np.array(ThAr_ref_dates) - mjd ) )
sorted_indices_FP = np.argsort( np.abs( np.array(ThFP_ref_dates) - mjd ) )
print '\t\tExtraction:'
# optimally and simply extract spectra
sci_fits_ob = dirout + fsim.split('/')[-1][:-8]+'spec.ob.fits.S'
sci_fits_co = dirout + fsim.split('/')[-1][:-8]+'spec.co.fits.S'
sci_fits_ob_simple = dirout + fsim.split('/')[-1][:-8]+'spec.simple.ob.fits.S'
sci_fits_co_simple = dirout + fsim.split('/')[-1][:-8]+'spec.simple.co.fits.S'
sci_fits_bac = dirout + fsim.split('/')[-1][:-8]+'spec.simple.bac.fits.S'
if ( os.access(sci_fits_ob,os.F_OK) == False ) or \
( os.access(sci_fits_co,os.F_OK) == False ) or \
( os.access(sci_fits_ob_simple,os.F_OK) == False ) or \
( os.access(sci_fits_co_simple,os.F_OK) == False ) or \
( os.access(sci_fits_bac,os.F_OK) == False ) or \
(force_sci_extract):
print "\t\t\tNo previous extraction or extraction forced for science file", fsim, "extracting..."
sci_Ss_ob = GLOBALutils.simple_extraction(data,c_ob,ext_aperture,\
min_extract_col,max_extract_col,npools)
sci_Ss_co = GLOBALutils.simple_extraction(data,c_co,ext_aperture,\
min_extract_col,max_extract_col,npools)
sci_S_ob = GLOBALutils.optimal_extraction(data,P_ob,c_ob,ext_aperture,\
h[0].header['HIERARCH ESO CORA CCD RON'],\
h[0].header['HIERARCH ESO CORA CCD GAIN'],\
S_Marsh,NCosmic_Marsh,\
min_extract_col,max_extract_col,npools)
sci_S_co = GLOBALutils.optimal_extraction(data,P_co,c_co,ext_aperture,\
h[0].header['HIERARCH ESO CORA CCD RON'],\
h[0].header['HIERARCH ESO CORA CCD GAIN'],\
S_Marsh,2.*NCosmic_Marsh,\
min_extract_col,max_extract_col,npools)
sci_bac = GLOBALutils.simple_extraction(bac,c_ob,ext_aperture,\
min_extract_col,max_extract_col,npools)
# save as fits file
if (os.access(sci_fits_ob,os.F_OK)):
os.remove( sci_fits_ob )
if (os.access(sci_fits_co,os.F_OK)):
os.remove( sci_fits_co )
if (os.access(sci_fits_ob_simple,os.F_OK)):
os.remove( sci_fits_ob_simple )
if (os.access(sci_fits_co_simple,os.F_OK)):
os.remove( sci_fits_co_simple )
if (os.access(sci_fits_bac,os.F_OK)):
os.remove( sci_fits_bac )
hdu = pyfits.PrimaryHDU( sci_S_ob )
hdu.writeto( sci_fits_ob )
hdu = pyfits.PrimaryHDU( sci_S_co )
hdu.writeto( sci_fits_co )
hdu = pyfits.PrimaryHDU( sci_Ss_ob )
hdu.writeto( sci_fits_ob_simple )
hdu = pyfits.PrimaryHDU( sci_Ss_co )
hdu.writeto( sci_fits_co_simple )
hdu = pyfits.PrimaryHDU( sci_bac )
hdu.writeto( sci_fits_bac )
else:
print '\t\t\t'+fsim, "has already been extracted, reading in product fits files..."
sci_S_ob = pyfits.getdata( sci_fits_ob )
sci_S_co = pyfits.getdata( sci_fits_co )
sci_Ss_ob = pyfits.getdata( sci_fits_ob_simple )
sci_Ss_co = pyfits.getdata( sci_fits_co_simple )
sci_bac = pyfits.getdata( sci_fits_bac )
fout = 'proc/'+ obname + '_' + \
h[0].header['HIERARCH ESO CORA SHUTTER START DATE'] + '_' +\
'UT' + fsim[-17:-9] + '_' +\
'sp.fits'
#Build spectra
if ( os.access(dirout+fout ,os.F_OK) == False ) or (force_spectral_file_build):
# initialize file that will have the spectra
# n_useful should be nord_ob, but we still have not calibrated that bluest order -- TODO
spec = np.zeros((11, n_useful, data.shape[1]))
hdu = pyfits.PrimaryHDU( spec )
hdu = GLOBALutils.update_header(hdu,'HIERARCH MJD', mjd)
hdu = GLOBALutils.update_header(hdu,'HIERARCH MBJD', mbjd)
hdu = GLOBALutils.update_header(hdu,'HIERARCH SHUTTER START DATE', h[0].header['HIERARCH ESO CORA SHUTTER START DATE'] )
hdu = GLOBALutils.update_header(hdu,'HIERARCH SHUTTER START UT', h[0].header['HIERARCH ESO CORA SHUTTER START HOUR'])
hdu = GLOBALutils.update_header(hdu,'HIERARCH TEXP (S)',h[0].header['HIERARCH ESO OBS TEXP'])
hdu = GLOBALutils.update_header(hdu,'HIERARCH FLUX WEIGHTED MEAN F ',h[0].header['HIERARCH ESO CORA PM FLUX TMMEAN'])
hdu = GLOBALutils.update_header(hdu,'HIERARCH BARYCENTRIC CORRECTION (KM/S)', bcvel_baryc)
hdu = GLOBALutils.update_header(hdu,'HIERARCH (LAMBDA_BARY / LAMBDA_TOPO)', lbary_ltopo)
hdu = GLOBALutils.update_header(hdu,'HIERARCH TARGET NAME', obname)
hdu = GLOBALutils.update_header(hdu,'HIERARCH RA',h[0].header['HIERARCH ESO TEL TARG ALPHA'])
hdu = GLOBALutils.update_header(hdu,'HIERARCH DEC',h[0].header['HIERARCH ESO TEL TARG DELTA'])
hdu = GLOBALutils.update_header(hdu,'HIERARCH RA DEG',h[0].header['RA'])
hdu = GLOBALutils.update_header(hdu,'HIERARCH DEC DEG',h[0].header['DEC'])
hdu = GLOBALutils.update_header(hdu,'HIERARCH RA BARY',ra)
hdu = GLOBALutils.update_header(hdu,'HIERARCH DEC BARY',dec)
hdu = GLOBALutils.update_header(hdu,'HIERARCH EQUINOX',h[0].header['HIERARCH ESO OBS EQUICAT'])
hdu = GLOBALutils.update_header(hdu,'HIERARCH OBS LATITUDE',h[0].header['HIERARCH ESO OBS GEO LATITU'])
hdu = GLOBALutils.update_header(hdu,'HIERARCH OBS LONGITUDE',h[0].header['HIERARCH ESO OBS GEO LONGIT'])
hdu = GLOBALutils.update_header(hdu,'HIERARCH OBS ALTITUDE',h[0].header['HIERARCH ESO OBS GEO ALTITUDE'])
hdu = GLOBALutils.update_header(hdu,'HIERARCH TARG AIRMASS',h[0].header['HIERARCH ESO OBS TARG AIRMASS'])
print '\t\tWavelength calibration:'
print '\t\t\tComparison fibre is '+ h[0].header['HIERARCH ESO TPL TYPE']
if h[0].header['HIERARCH ESO TPL TYPE'] == 'OBTH':
# get ThAr closest in time
indice = sorted_indices[0]
thar_fits_ob = dirout + ThAr_ref[indice].split('/')[-1][:-8]+'spec.ob.fits.S'
thar_fits_co = dirout + ThAr_ref[indice].split('/')[-1][:-8]+'spec.co.fits.S'
pkl_wsol = dirout + ThAr_ref[indice].split('/')[-1][:-8]+'wavsolpars.pkl'
print "\t\t\tUnpickling reference wavelength solution from", pkl_wsol, " ..."
wsol_dict = pickle.load(open(pkl_wsol,'r'))
# Extract thAr lines from comparison orders
lines_thar_co = sci_S_co[:,1,:]
iv_thar_co = sci_S_co[:,2,:]
All_Pixel_Centers_co = np.array([])
All_Wavelengths_co = np.array([])
All_Orders_co = np.array([])
All_Centroids_co = np.array([])
All_Sigmas_co = np.array([])
All_Intensities_co = np.array([])
for order in range(22,n_useful):
order_s = str(order)
if (order < 10):
order_s = '0'+str(order)
thar_order_orig = lines_thar_co[order-22,:]
IV = iv_thar_co[order-22,:]
wei = np.sqrt( IV )
bkg = GLOBALutils.Lines_mBack(thar_order_orig, IV, thres_rel=3)
thar_order = thar_order_orig - bkg
coeffs_pix2wav, coeffs_pix2sigma, pixel_centers, wavelengths, rms_ms, residuals, centroids, sigmas, intensities \
= GLOBALutils.Initial_Wav_Calibration( order_dir+'order_'+order_s+'o.iwdat', thar_order, order, wei, \
rmsmax=5000000, minlines=10,FixEnds=True,Dump_Argon=dumpargon, \
Dump_AllLines=True, Cheby=use_cheby)
All_Pixel_Centers_co = np.append( All_Pixel_Centers_co, pixel_centers )
All_Wavelengths_co = np.append( All_Wavelengths_co, wavelengths )
All_Orders_co = np.append( All_Orders_co, np.zeros( len(pixel_centers) ) + order )
All_Centroids_co = np.append( All_Centroids_co, centroids)
All_Sigmas_co = np.append( All_Sigmas_co, sigmas)
All_Intensities_co = np.append( All_Intensities_co, intensities )
# get a global solution for the lines found
p1_co, G_pix_co, G_ord_co, G_wav_co, II_co, rms_ms_co, G_res_co = \
GLOBALutils.Fit_Global_Wav_Solution(All_Pixel_Centers_co, All_Wavelengths_co, All_Orders_co,\
np.ones(All_Intensities_co.shape), wsol_dict['p1_co'], Cheby=use_cheby,\
maxrms=MRMS, Inv=Inverse_m,minlines=minlines_glob_co,\
order0=89,ntotal=n_useful,npix=len(thar_order),nx=ncoef_x,nm=ncoef_m)
# get shift with respect to reference ThAr
p_shift, pix_centers, orders, wavelengths, I, rms_ms, residuals = \
GLOBALutils.Global_Wav_Solution_vel_shift(G_pix_co, G_wav_co, G_ord_co,\
np.ones(G_wav_co.shape), wsol_dict['p1_co'],\
Cheby=True,Inv=True,maxrms=100,minlines=minlines_glob_co,\
order0=89,ntotal=n_useful,npix=len(thar_order),nx=ncoef_x,nm=ncoef_m)
precision = rms_ms/np.sqrt(len(I))
good_quality = True
if (precision > 10):
good_quality = False
else:
indice = sorted_indices_FP[0]
thfp_fits_co = dirout + ThFP_ref[indice].split('/')[-1][:-8]+'spec.co.fits.S'
pkl_wsol = dirout + ThFP_ref[indice].split('/')[-1][:-8]+'wavsolpars.pkl'
wsol_dict = pickle.load(open(pkl_wsol,'r'))
print "\t\t\tUnpickling reference wavelength solution from", pkl_wsol, " ..."
fp_lines = fabryperot.GetFPLines(sci_fits_co,wsol_dict['fplines'],lim1=50,lim2=-50,npools=npools)
tdrifts = np.array([])
for order in range(22,n_useful):
m = order + 89
ejx1 = fp_lines['order_'+str(int(order-22))]
ejxref = wsol_dict['fplines']['order_'+str(int(order-22))]
chebs1 = GLOBALutils.Calculate_chebs(ejx1, m, Inverse=Inverse_m,order0=89,ntotal=n_useful,npix=sci_S_co.shape[2],nx=ncoef_x,nm=ncoef_m)
WavSol1 = (1.0 + 1.0e-6*wsol_dict['p_shift']) * (1.0/m) * GLOBALutils.Joint_Polynomial_Cheby(wsol_dict['p1_co'],chebs1,ncoef_x,ncoef_m)
chebsref = GLOBALutils.Calculate_chebs(ejxref, m, Inverse=Inverse_m,order0=89,ntotal=n_useful,npix=sci_S_co.shape[2],nx=ncoef_x,nm=ncoef_m)
WavSolref = (1.0 + 1.0e-6*wsol_dict['p_shift']) * (1.0/m) * GLOBALutils.Joint_Polynomial_Cheby(wsol_dict['p1_co'],chebsref,ncoef_x,ncoef_m)
I = np.where((ejx1!=-999) & (ejxref!=-999))[0]
drifts = 299792458.*(WavSolref[I] - WavSol1[I]) / WavSolref[I]
tempw = WavSolref[I]
II = fabryperot.clipp(drifts,n=3)
#print II
#plot(WavSolref[I],drifts,'ro')
#plot(tempw[II],drifts[II],'ko')
tdrifts = np.hstack((tdrifts,drifts[II]))
fp_shift = np.mean(tdrifts)
fp_error = np.sqrt(np.var(tdrifts))
p_shift = 1e6*fp_shift/299792458.
print '\t\t\tFP shift = ',fp_shift,'+-',fp_error/np.sqrt(float(len(tdrifts))),'m/s'
good_quality = True
#show()
"""
lines_thar_co = np.zeros(sci_Ss_co.shape)
lines_thar_co_ref = np.zeros(sci_Ss_co.shape)
for si in range(S_flat_co_n.shape[0]):
JI = np.where(S_flat_co_n[si,1]>0)[0]
lines_thar_co[si,JI] = sci_S_co[si,1,JI] / S_flat_co_n[si,1,JI]
lines_thar_co_ref[si,JI] = pyfits.getdata(thfp_fits_co)[si,1,JI] / S_flat_co_n[si,1,JI]
JI1 = np.where(lines_thar_co[si]<0)[0]
JI2 = np.where(lines_thar_co_ref[si]<0)[0]
lines_thar_co[si,JI1] = 0.
lines_thar_co_ref[si,JI2] = 0.
#lines_thar_co = sci_S_co[:,1,:] / S_flat_co_simple_n
#lines_thar_co_ref = pyfits.getdata(thfp_fits_co)[:,1,:] / S_flat_co_simple_n
rv_fps = []
for order in range(nord_co):
I = np.where(np.isnan(lines_thar_co[order]))[0]
lines_thar_co[order][I]=0.
I = np.where(np.isnan(lines_thar_co_ref[order]))[0]
lines_thar_co_ref[order][I]=0.
try:
tc = GLOBALutils.fp_base(lines_thar_co[order])
tcr = GLOBALutils.fp_base(lines_thar_co_ref[order])
IJ1 = np.where(tc!=0)[0]
IJ2 = np.where(tcr!=0)[0]
tc /= np.median(tc[IJ1])
tcr /= np.median(tcr[IJ2])
rv_fp = GLOBALutils.ccf_fp(tc,tcr,wsol_dict['p1_co'],order+22,order0=89,nx=ncoef_x,nm=ncoef_m,npix=len(tc))
except:
rv_fp = -999
rv_fps.append(rv_fp)
#plot(rv_fps,'ro')
rv_fps = np.array(rv_fps)
I = np.where(rv_fps!=-999)[0]
rv_fps = rv_fps[I]
rv_fps = GLOBALutils.sig_cli2(rv_fps,ns=3.)
#plot(rv_fps,'ro')
#show()
#print np.median(rv_fps),np.sqrt(np.var(rv_fps))/np.sqrt(float(len(rv_fps)))
fp_shift = np.median(rv_fps)
p_sh = wsol_dict['p_shift'] * 299792458. * 1e-6
fp_shift += p_sh
p_shift = 1e6*fp_shift/299792458.
print '\t\t\tFP shift = ',fp_shift[0],'+-',np.sqrt(np.var(rv_fps))/np.sqrt(float(len(rv_fps))),'m/s'
good_quality = True
"""
equis = np.arange( data.shape[1] )
for order in range(n_useful):
m = order + 89
chebs = GLOBALutils.Calculate_chebs(equis, m, Inverse=Inverse_m,order0=89,ntotal=n_useful,npix=data.shape[1],nx=ncoef_x,nm=ncoef_m)
if good_quality:
WavSol = lbary_ltopo * (1.0 + 1.0e-6*p_shift) * (1.0/m) * GLOBALutils.Joint_Polynomial_Cheby(wsol_dict['p1'],chebs,ncoef_x,ncoef_m)
else:
WavSol = lbary_ltopo * (1.0/m) * GLOBALutils.Joint_Polynomial_Cheby(wsol_dict['p1'],chebs,ncoef_x,ncoef_m)
spec[0,order,:] = WavSol
spec[1,order,:] = sci_S_ob[order,1, :]
spec[2,order,:] = sci_S_ob[order,2, :]
# Flat-fielded spectrum
fn = S_flat_ob_n[order,1,:]
L = np.where( fn > 0 )
spec[3,order,:][L] = sci_S_ob[order,1,:][L] / S_flat_ob_n[order,1,:][L]
spec[4,order,:][L] = sci_S_ob[order,2,:][L] * ( S_flat_ob_n[order,1,:][L] ** 2 )
# Continuum normalized spectrum
ron = h[0].header['HIERARCH ESO CORA CCD RON']
gain = h[0].header['HIERARCH ESO CORA CCD GAIN']
wav_temp, norm_spec = continuum.NORM2( spec[0,:,:],spec[3,:,:])
for order in range(n_useful):
L = np.where( spec[1,order,:] != 0 )
spec[5,order,:][L] = norm_spec[order][L]
nJ = np.where(np.isnan(spec[5,order])==True)[0]
nJ2 = np.where(np.isinf(spec[5,order])==True)[0]
spec[5,order,nJ] = 1.0
spec[5,order,nJ2] = 1.0
ratio = spec[3,order,:][L] / norm_spec[order][L]
spec[6,order,:][L] = spec[4,order,:][L] * (ratio ** 2 )
spec[7,order,:][L] = ratio
#spec[8,order,:][L] = ratio * S_flat_ob_n[order,1,:][L] / np.sqrt( ratio * S_flat_ob_n[order,1,:][L] / gain + ext_aperture*2*(ron/gain)**2 + sci_bac[order,:][L] / gain )
spec[8,order,:][L] = ratio * S_flat_ob_n[order,1,:][L] / np.sqrt( ratio * S_flat_ob_n[order,1,:][L] / gain + (ron/gain)**2 )
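# Local dispersion dlambda/dx of this order's solution, normalised to its mean;
# rows 9 and 10 use it to express the normalised spectrum per unit wavelength.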
spl = scipy.interpolate.splrep(np.arange(WavSol.shape[0]), WavSol,k=3)
dlambda_dx = scipy.interpolate.splev(np.arange(WavSol.shape[0]), spl, der=1)
NN = np.average(dlambda_dx)
dlambda_dx /= NN
# clean-up of CRs in continuum-normalized spectrum. Set troublesome pixels to 1
median_cn_spectrum = np.zeros( spec[5,order,:].shape )
median_cn_spectrum[L] = scipy.signal.medfilt( spec[5,order,:][L], 7 )
LK = np.where(spec[8,order] == 0.)[0]
spec[8,order,LK] = 0.000001
LL = np.where(spec[5,order] > 1. + 5./spec[8,order])
LL2 = np.where(spec[5,order] < - 5./spec[8,order])
spec[8,order,LK] = 0.
spec[5,order,:][LL] = 1
spec[5,order,:][LL2] = 1
spec[5,order,:][LK] = 0
spec[6,order,:][LL] = spec[8,order,:][LL] ** 2
spec[6,order,:][LL2] = spec[8,order,:][LL2] ** 2
spec[9,order,:][L] = spec[5,order,:][L] * (dlambda_dx[L] ** 1)
spec[10,order,:][L] = spec[6,order,:][L] / (dlambda_dx[L] ** 2)
if (not JustExtract):
if DoClass:
print '\t\tSpectral Analysis:'
# spectral analysis
# First, query SIMBAD with the object name
query_success = False
sp_type_query = 'None'
#query_success,sp_type_query = GLOBALutils.simbad_query_obname(obname)
# Now, query SIMBAD by coordinates if above not successful
#if (not query_success):
# query_success,sp_type_query = GLOBALutils.simbad_query_coords('12:00:00','00:00:00')
#print "\t\t\tSpectral type returned by SIMBAD query:",sp_type_query
hdu = GLOBALutils.update_header(hdu,'HIERARCH SIMBAD SPTYP', sp_type_query)
pars_file = dirout + fsim.split('/')[-1][:-8]+'_stellar_pars.txt'
if os.access(pars_file,os.F_OK) == False or force_stellar_pars:
print "\t\t\tEstimating atmospheric parameters:"
Rx = np.around(1./np.sqrt(1./40000.**2 - 1./60000**2))
spec2 = spec.copy()
for i in range(spec.shape[1]):
IJ = np.where(spec[5,i]!=0.)[0]
spec2[5,i,IJ] = GLOBALutils.convolve(spec[0,i,IJ],spec[5,i,IJ],Rx)
T_eff, logg, Z, vsini, vel0, ccf = correlation.CCF(spec2,model_path=models_path,npools=npools)
line = "%6d %4.1f %4.1f %8.1f %8.1f\n" % (T_eff,logg, Z, vsini, vel0)
f = open(pars_file,'w')
f.write(line)
f.close()
else:
print "\t\t\tAtmospheric parameters loaded from file:"
T_eff, logg, Z, vsini, vel0 = np.loadtxt(pars_file,unpack=True)
print "\t\t\t\tT_eff=",T_eff,"log(g)=",logg,"Z=",Z,"vsin(i)=",vsini,"vel0",vel0
else:
T_eff, logg, Z, vsini, vel0 = -999,-999,-999,-999,-999
T_eff_epoch = T_eff
logg_epoch = logg
Z_epoch = Z
vsini_epoch = vsini
vel0_epoch = vel0
hdu = GLOBALutils.update_header(hdu,'HIERARCH TEFF', float(T_eff))
hdu = GLOBALutils.update_header(hdu,'HIERARCH LOGG', float(logg))
hdu = GLOBALutils.update_header(hdu,'HIERARCH Z', Z)
hdu = GLOBALutils.update_header(hdu,'HIERARCH VSINI', vsini)
hdu = GLOBALutils.update_header(hdu,'HIERARCH VEL0', vel0)
print "\t\tRadial Velocity analysis:"
# assign mask
sp_type, mask = GLOBALutils.get_mask_reffile(obname,reffile=reffile,base='../data/xc_masks/')
print "\t\t\tWill use",sp_type,"mask for CCF."
# Read in mask
ml, mh, weight = np.loadtxt(mask,unpack=True)
ml_v = GLOBALutils.ToVacuum( ml )
mh_v = GLOBALutils.ToVacuum( mh )
# make mask larger accounting for factor ~2 lower res in CORALIE w/r to HARPS
av_m = 0.5*( ml_v + mh_v )
ml_v -= (av_m - ml_v)
mh_v += (mh_v - av_m)
mask_hw_kms = (GLOBALutils.Constants.c/1e3) * 0.5*(mh_v - ml_v) / av_m
#sigma_fout = stellar_pars_dir + obname + '_' +'sigma.txt'
disp = GLOBALutils.get_disp(obname, reffile=reffile)
if disp == 0:
known_sigma = False
if vsini != -999 and vsini != 0.:
disp = vsini
else:
disp = 3.
else:
known_sigma = True
mask_hw_wide = av_m * disp / (GLOBALutils.Constants.c/1.0e3)
ml_v = av_m - mask_hw_wide
mh_v = av_m + mask_hw_wide
print '\t\t\tComputing the CCF...'
cond = True
while (cond):
# first rough correlation to find the minimum
vels, xc_full, sn, nlines_ccf, W_ccf = \
GLOBALutils.XCor(spec, ml_v, mh_v, weight, 0, lbary_ltopo, vel_width=300,vel_step=3,\
spec_order=9,iv_order=10,sn_order=8,max_vel_rough=300)
xc_av = GLOBALutils.Average_CCF(xc_full, sn, sn_min=3.0, Simple=True, W=W_ccf)
# Normalize the continuum of the CCF robustly with lowess
yy = scipy.signal.medfilt(xc_av,11)
pred = lowess(yy, vels,frac=0.4,it=10,return_sorted=False)
tck1 = scipy.interpolate.splrep(vels,pred,k=1)
xc_av_orig = xc_av.copy()
xc_av /= pred
vel0_xc = vels[ np.argmin( xc_av ) ]
rvels, rxc_av, rpred, rxc_av_orig, rvel0_xc = vels.copy(), \
xc_av.copy(), pred.copy(), xc_av_orig.copy(), vel0_xc
xc_av_rough = xc_av
vels_rough = vels
if disp > 30:
disp = 30.
vel_width = np.maximum( 20.0, 6*disp )
vels, xc_full, sn, nlines_ccf, W_ccf =\
GLOBALutils.XCor(spec, ml_v, mh_v, weight, vel0_xc, lbary_ltopo, vel_width=vel_width,vel_step=0.1,\
spec_order=9,iv_order=10,sn_order=8,max_vel_rough=300)
xc_av = GLOBALutils.Average_CCF(xc_full, sn, sn_min=3.0, Simple=True, W=W_ccf)
pred = scipy.interpolate.splev(vels,tck1)
xc_av /= pred
moonsep_cor = h[0].header['HIERARCH ESO OBS MOON SEP']
if sp_type == 'M5':
moon_sig = 2.5
elif sp_type == 'K5':
moon_sig = 3.3
else:
moon_sig = 4.5
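# Final CCF fit: Gaussian plus low-order continuum; moon_sig sets the width of an
# optional second component at the scattered-moonlight velocity (refvel), which is
# only fitted when the contamination is known to matter.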
p1,XCmodel,p1gau,XCmodelgau,Ls2 = GLOBALutils.XC_Final_Fit( vels, xc_av ,\
sigma_res = 4, horder=8, moonv = refvel, moons = moon_sig, moon = False)
#ldc = CoralieUtils.get_ldc(T_eff, logg, Z, 1.0, ldfile = 'lin_coe_sloan2.dat')
#p1R, ROTmodel = CoralieUtils.XC_Final_Fit_Rot( vels, xc_av, ldc = ldc, vsini = vsini )
moonmatters = False
if (know_moon and here_moon):
moonmatters = True
ismoon = True
confused = False
p1_m,XCmodel_m,p1gau_m,XCmodelgau_m,Ls2_m = GLOBALutils.XC_Final_Fit( vels, xc_av , sigma_res = 4, horder=8, moonv = refvel, moons = moon_sig, moon = True)
moon_flag = 1
else:
confused = False
ismoon = False
p1_m,XCmodel_m,p1gau_m,XCmodelgau_m,Ls2_m = p1,XCmodel,p1gau,XCmodelgau,Ls2
moon_flag = 0
SP = GLOBALutils.calc_bss2(vels,xc_av,p1gau)
#SP = bspan[0]
#print 'Bisector span:', SP
if (not known_sigma):
disp = np.floor(p1gau[2])
if (disp < 3.0):
disp = 3.0
mask_hw_wide = av_m * disp / (GLOBALutils.Constants.c/1.0e3)
ml_v = av_m - mask_hw_wide
mh_v = av_m + mask_hw_wide
known_sigma = True
else:
cond = False
xc_dict = {'vels':vels,'xc_av':xc_av,'XCmodelgau':XCmodelgau,'Ls2':Ls2,'refvel':refvel,\
'rvels':rvels,'rxc_av':rxc_av,'rpred':rpred,'rxc_av_orig':rxc_av_orig,\
'rvel0_xc':rvel0_xc,'xc_full':xc_full, 'p1':p1, 'sn':sn, 'p1gau':p1gau,\
'p1_m':p1_m,'XCmodel_m':XCmodel_m,'p1gau_m':p1gau_m,'Ls2_m':Ls2_m,\
'XCmodelgau_m':XCmodelgau_m}
moon_dict = {'moonmatters':moonmatters,'moon_state':moon_state,'moonsep':moonsep,\
'lunation':lunation,'mephem':mephem,'texp':h[0].header['EXPTIME']}
pkl_xc = dirout + fsim.split('/')[-1][:-8]+obname+'_XC_'+sp_type+'.pkl'
pickle.dump( xc_dict, open( pkl_xc, 'w' ) )
ccf_pdf = dirout + 'proc/' + fsim.split('/')[-1][:-4] + obname + '_XCs_' + sp_type + '.pdf'
if not avoid_plot:
GLOBALutils.plot_CCF(xc_dict,moon_dict,path=ccf_pdf)
SNR_5130 = np.median(spec[8,30,1000:1101] )
airmass = h[0].header['HIERARCH ESO OBS TARG AIRMASS']
seeing = h[0].header['HIERARCH ESO OBS AMBI DIMM SEEING']
if sp_type == 'G2':
if T_eff < 6000:
A = 0.06544
B = 0.00146
D = 0.24416
C = 0.00181
else:
A = 0.09821
B = 0.00014
D = 0.33491
C = 0.00113
elif sp_type == 'K5':
A = 0.05348
B = 0.00147
D = 0.20695
C = 0.00321
else:
A = 0.05348
B = 0.00147
D = 0.20695
C = 0.00321
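# Empirical noise model: RV and bisector uncertainties scale with the S/N near 5130 A
# and the CCF width; the A, B, C, D coefficients depend on the mask's spectral type.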
RVerr = B + ( 1.6 + 0.2 * p1gau[2] ) * A / np.round(SNR_5130)
depth_fact = 1. + p1gau[0]/(p1gau[2]*np.sqrt(2*np.pi))
if depth_fact >= 1.:
RVerr2 = -999.000
else:
if sp_type == 'G2':
depth_fact = (1 - 0.62) / (1 - depth_fact)
else:
depth_fact = (1 - 0.59) / (1 - depth_fact)
RVerr2 = RVerr * depth_fact
if (RVerr2 <= 0.001):
RVerr2 = 0.001
if not good_quality:
RVerr2 = np.sqrt(0.03**2 + RVerr2**2)
BSerr = D / float(np.round(SNR_5130)) + C
RV = np.around(p1gau_m[1],4)
BS = np.around(SP,4)
RVerr2 = np.around(RVerr2,4)
BSerr = np.around(BSerr,4)
print '\t\t\tRV = '+str(RV)+' +- '+str(RVerr2)
print '\t\t\tBS = '+str(BS)+' +- '+str(BSerr)
bjd_out = 2400000.5 + mbjd
T_eff_err = 100
logg_err = 0.5
Z_err = 0.5
vsini_err = 2
XC_min = np.abs(np.around(np.min(XCmodel),2))
SNR_5130 = np.around(SNR_5130)
SNR_5130_R = np.around(SNR_5130*np.sqrt(2.9))
disp_epoch = np.around(p1gau_m[2],1)
hdu = GLOBALutils.update_header(hdu,'RV', RV)
hdu = GLOBALutils.update_header(hdu,'RV_E', RVerr2)
hdu = GLOBALutils.update_header(hdu,'BS', BS)
hdu = GLOBALutils.update_header(hdu,'BS_E', BSerr)
hdu = GLOBALutils.update_header(hdu,'DISP', disp_epoch)
hdu = GLOBALutils.update_header(hdu,'SNR', SNR_5130)
hdu = GLOBALutils.update_header(hdu,'SNR_R', SNR_5130_R)
hdu = GLOBALutils.update_header(hdu,'INST', 'CORALIE')
hdu = GLOBALutils.update_header(hdu,'RESOL', '60000')
hdu = GLOBALutils.update_header(hdu,'PIPELINE', 'CERES')
hdu = GLOBALutils.update_header(hdu,'XC_MIN', XC_min)
hdu = GLOBALutils.update_header(hdu,'BJD_OUT', bjd_out)
line_out = "%-15s %18.8f %9.4f %7.4f %9.3f %5.3f coralie ceres 60000 %6d %5.2f %5.2f %5.1f %4.2f %5.2f %6.1f %4d %s\n"%\
(obname, bjd_out, RV, RVerr2, BS, BSerr, T_eff_epoch, logg_epoch, Z_epoch, vsini_epoch, XC_min, disp_epoch,\
TEXP, SNR_5130_R, ccf_pdf)
f_res.write(line_out)
if (os.access( dirout + fout,os.F_OK)):
os.remove( dirout + fout)
hdu.writeto( dirout + fout )
else:
print "\t\tReading spectral file from", fout
spec = pyfits.getdata( fout )
f_res.close()
| mit | -7,693,740,892,673,060,000 | 44.228788 | 181 | 0.55308 | false | 2.856282 | false | false | false |
xiedidan/luna-data-pre-processing | segment.py | 1 | 1351 | # -*- coding:utf-8 -*-
import sys
sys.path.append("../luna-data-pre-processing")
import os
from glob import glob
from tqdm import tqdm
from multiprocessing import Pool
from NoduleSerializer import NoduleSerializer
import lung_segmentation
# create lung mask
class Segment(object):
# constructor
def __init__(self, dataPath, phase = "deploy"):
self.dataPath = dataPath
self.phase = phase
self.phaseSubPath = self.phase + "/"
#helper
def segmentSingleFile(self, file):
filename = os.path.basename(file)
serializer = NoduleSerializer(self.dataPath, self.phaseSubPath)
image = serializer.readFromNpy("resamples/", filename)
mask = lung_segmentation.segment_HU_scan_elias(image)
serializer.writeToNpy("mask/", filename, mask)
image = image * mask
serializer.writeToNpy("lung/", filename, image)
print("{0}".format(filename))
# self.progressBar.update(1)
# interface
def segmentAllFiles(self):
fileList = glob(os.path.join(self.dataPath, self.phaseSubPath, "resamples/*.npy"))
# self.progressBar = tqdm(total = len(fileList))
pool = Pool()
pool.map(self.segmentSingleFile, fileList)
if __name__ == "__main__":
seg = Segment("d:/project/tianchi/data/", "deploy")
seg.segmentAllFiles()
| gpl-3.0 | 1,400,560,257,404,363,800 | 26.571429 | 90 | 0.655811 | false | 3.661247 | false | false | false |
lgrahl/threema-msgapi-sdk-python | tests/test_cli.py | 1 | 9981 | import subprocess
import pytest
from threema.gateway import ReceptionCapability
from threema.gateway import __version__ as _version
from threema.gateway import feature_level
from threema.gateway.key import Key
class TestCLI:
@pytest.mark.asyncio
async def test_invalid_command(self, cli):
with pytest.raises(subprocess.CalledProcessError):
await cli('meow')
@pytest.mark.asyncio
async def test_get_version(self, cli):
output = await cli('version')
assert 'Version: {}'.format(_version) in output
assert 'Feature Level: {}'.format(feature_level) in output
@pytest.mark.asyncio
async def test_invalid_key(self, cli):
with pytest.raises(subprocess.CalledProcessError) as exc_info:
await cli('encrypt', 'meow', 'meow', input='meow')
assert 'Invalid key format' in exc_info.value.output
with pytest.raises(subprocess.CalledProcessError) as exc_info:
await cli(
'encrypt', pytest.msgapi.public, pytest.msgapi.private, input='meow')
assert 'Invalid key type' in exc_info.value.output
@pytest.mark.asyncio
async def test_encrypt_decrypt(self, cli):
input = '私はガラスを食べられます。それは私を傷つけません。'
output = await cli(
'encrypt', pytest.msgapi.private, pytest.msgapi.public, input=input)
nonce, data = output.splitlines()
output = await cli(
'decrypt', pytest.msgapi.private, pytest.msgapi.public, nonce, input=data)
assert input in output
@pytest.mark.asyncio
async def test_encrypt_decrypt_by_file(self, cli, private_key_file, public_key_file):
input = '私はガラスを食べられます。それは私を傷つけません。'
output = await cli(
'encrypt', private_key_file, public_key_file, input=input)
nonce, data = output.splitlines()
output = await cli(
'decrypt', private_key_file, public_key_file, nonce, input=data)
assert input in output
@pytest.mark.asyncio
async def test_generate(self, cli, tmpdir):
private_key_file = tmpdir.join('tmp_private_key')
public_key_file = tmpdir.join('tmp_public_key')
await cli('generate', str(private_key_file), str(public_key_file))
private_key = Key.decode(private_key_file.read().strip(), Key.Type.private)
public_key = Key.decode(public_key_file.read().strip(), Key.Type.public)
assert private_key
assert public_key
@pytest.mark.asyncio
async def test_hash_no_option(self, cli):
with pytest.raises(subprocess.CalledProcessError):
await cli('hash')
@pytest.mark.asyncio
async def test_hash_valid_email(self, cli):
hash_ = '1ea093239cc5f0e1b6ec81b866265b921f26dc4033025410063309f4d1a8ee2c'
output = await cli('hash', '-e', '[email protected]')
assert hash_ in output
output = await cli('hash', '--email', '[email protected]')
assert hash_ in output
@pytest.mark.asyncio
async def test_hash_valid_phone_number(self, cli):
hash_ = 'ad398f4d7ebe63c6550a486cc6e07f9baa09bd9d8b3d8cb9d9be106d35a7fdbc'
output = await cli('hash', '-p', '41791234567')
assert hash_ in output
output = await cli('hash', '--phone', '41791234567')
assert hash_ in output
@pytest.mark.asyncio
async def test_derive(self, cli):
output = await cli('derive', pytest.msgapi.private)
assert pytest.msgapi.public in output
@pytest.mark.asyncio
async def test_send_simple(self, cli):
id_, secret = pytest.msgapi.id, pytest.msgapi.secret
output = await cli('send_simple', 'ECHOECHO', id_, secret, input='Hello!')
assert output
@pytest.mark.asyncio
async def test_send_e2e(self, cli, server):
output_1 = await cli(
'send_e2e', 'ECHOECHO', pytest.msgapi.id, pytest.msgapi.secret,
pytest.msgapi.private, input='Hello!')
assert output_1
output_2 = await cli(
'send_e2e', 'ECHOECHO', pytest.msgapi.id, pytest.msgapi.secret,
pytest.msgapi.private, '-k', server.echoecho_encoded_key, input='Hello!')
assert output_2
assert output_1 == output_2
@pytest.mark.asyncio
async def test_send_image(self, cli, server):
server.latest_blob_ids = []
output_1 = await cli(
'send_image', 'ECHOECHO', pytest.msgapi.id, pytest.msgapi.secret,
pytest.msgapi.private, server.threema_jpg)
assert output_1
assert len(server.latest_blob_ids) == 1
output_2 = await cli(
'send_image', 'ECHOECHO', pytest.msgapi.id, pytest.msgapi.secret,
pytest.msgapi.private, server.threema_jpg, '-k', server.echoecho_encoded_key)
assert output_2
assert output_1 == output_2
assert len(server.latest_blob_ids) == 2
@pytest.mark.asyncio
async def test_send_video(self, cli, server):
server.latest_blob_ids = []
output_1 = await cli(
'send_video', 'ECHOECHO', pytest.msgapi.id, pytest.msgapi.secret,
pytest.msgapi.private, server.threema_mp4, server.threema_jpg)
assert output_1
assert len(server.latest_blob_ids) == 2
output_2 = await cli(
'send_video', 'ECHOECHO', pytest.msgapi.id, pytest.msgapi.secret,
pytest.msgapi.private, server.threema_mp4, server.threema_jpg,
'-k', server.echoecho_encoded_key)
assert output_2
assert output_1 == output_2
assert len(server.latest_blob_ids) == 4
output = await cli(
'send_video', 'ECHOECHO', pytest.msgapi.id, pytest.msgapi.secret,
pytest.msgapi.private, server.threema_mp4, server.threema_jpg,
'-d', '1337')
assert output
assert len(server.latest_blob_ids) == 6
@pytest.mark.asyncio
async def test_send_file(self, cli, server):
server.latest_blob_ids = []
output_1 = await cli(
'send_file', 'ECHOECHO', pytest.msgapi.id, pytest.msgapi.secret,
pytest.msgapi.private, server.threema_jpg)
assert output_1
assert len(server.latest_blob_ids) == 1
output_2 = await cli(
'send_file', 'ECHOECHO', pytest.msgapi.id, pytest.msgapi.secret,
pytest.msgapi.private, server.threema_jpg, '-k', server.echoecho_encoded_key)
assert output_2
assert output_1 == output_2
assert len(server.latest_blob_ids) == 2
output = await cli(
'send_file', 'ECHOECHO', pytest.msgapi.id, pytest.msgapi.secret,
pytest.msgapi.private, server.threema_jpg, '-t', server.threema_jpg)
assert output
assert len(server.latest_blob_ids) == 4
output = await cli(
'send_file', 'ECHOECHO', pytest.msgapi.id, pytest.msgapi.secret,
pytest.msgapi.private, server.threema_jpg, '-k', server.echoecho_encoded_key,
'-t', server.threema_jpg)
assert output
assert len(server.latest_blob_ids) == 6
@pytest.mark.asyncio
async def test_lookup_no_option(self, cli):
with pytest.raises(subprocess.CalledProcessError):
await cli('lookup', pytest.msgapi.id, pytest.msgapi.secret)
@pytest.mark.asyncio
async def test_lookup_id_by_email(self, cli):
output = await cli(
'lookup', pytest.msgapi.id, pytest.msgapi.secret,
'-e', '[email protected]')
assert 'ECHOECHO' in output
output = await cli(
'lookup', pytest.msgapi.id, pytest.msgapi.secret,
'--email', '[email protected]')
assert 'ECHOECHO' in output
@pytest.mark.asyncio
async def test_lookup_id_by_phone(self, cli):
output = await cli(
'lookup', pytest.msgapi.id, pytest.msgapi.secret, '-p', '44123456789')
assert 'ECHOECHO' in output
output = await cli(
'lookup', pytest.msgapi.id, pytest.msgapi.secret, '--phone', '44123456789')
assert 'ECHOECHO' in output
@pytest.mark.asyncio
async def test_lookup_pk_by_id(self, cli, server):
output = await cli(
'lookup', pytest.msgapi.id, pytest.msgapi.secret, '-i', 'ECHOECHO')
assert server.echoecho_encoded_key in output
output = await cli(
'lookup', pytest.msgapi.id, pytest.msgapi.secret, '--id', 'ECHOECHO')
assert server.echoecho_encoded_key in output
@pytest.mark.asyncio
async def test_capabilities(self, cli):
output = await cli(
'capabilities', pytest.msgapi.id, pytest.msgapi.secret, 'ECHOECHO')
capabilities = {
ReceptionCapability.text,
ReceptionCapability.image,
ReceptionCapability.video,
ReceptionCapability.file
}
assert all((capability.value in output for capability in capabilities))
@pytest.mark.asyncio
async def test_credits(self, cli):
output = await cli('credits', pytest.msgapi.id, pytest.msgapi.secret)
assert '100' in output
output = await cli(
'credits', pytest.msgapi.nocredit_id, pytest.msgapi.secret)
assert '0' in output
@pytest.mark.asyncio
async def test_invalid_id(self, cli):
with pytest.raises(subprocess.CalledProcessError) as exc_info:
await cli(
'credits', pytest.msgapi.noexist_id, pytest.msgapi.secret)
assert 'API identity or secret incorrect' in exc_info.value.output
@pytest.mark.asyncio
async def test_insufficient_credits(self, cli):
with pytest.raises(subprocess.CalledProcessError) as exc_info:
id_, secret = pytest.msgapi.nocredit_id, pytest.msgapi.secret
await cli('send_simple', 'ECHOECHO', id_, secret, input='!')
assert 'Insufficient credits' in exc_info.value.output
| mit | -2,318,817,066,822,805,000 | 40.868644 | 89 | 0.629289 | false | 3.531451 | true | false | false |
litedesk/litedesk-webserver-provision | src/authentication/views.py | 1 | 2430 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
# Copyright 2014, Deutsche Telekom AG - Laboratories (T-Labs)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.contrib import auth
from django.contrib.auth import login
from rest_framework import generics
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.views import APIView
from rest_framework import status
import permissions
import serializers
class SessionView(APIView):
def get_serializer(self, *args, **kw):
serializer_class = {
'POST': serializers.LoginSerializer
}.get(self.request.method, serializers.SessionSerializer)
kw['context'] = {
'request': self.request
}
return serializer_class(*args, **kw)
def get_object(self):
return self.request.session
def get(self, request, *args, **kw):
if not request.user.is_authenticated():
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = self.get_serializer(self.get_object())
return Response(serializer.data)
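# POST creates a session: validate the submitted credentials, log the user in and
# answer 201 with a Location header pointing at the session resource.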
def post(self, request, *args, **kw):
serializer = self.get_serializer(data=request.DATA)
if serializer.is_valid():
login(request, serializer.object)
location_header = {'Location': reverse('session', request=request)}
return Response(status=status.HTTP_201_CREATED, headers=location_header)
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, *args, **kw):
auth.logout(request)
return Response(status=status.HTTP_204_NO_CONTENT)
class SignupView(generics.CreateAPIView):
serializer_class = serializers.SignupSerializer
permission_classes = (permissions.UnauthenticatedUser,)
def post_save(self, user, **kw):
login(self.request, user)
| apache-2.0 | -473,156,678,292,000,500 | 32.75 | 84 | 0.695062 | false | 4.090909 | false | false | false |
wuqize/FluentPython | chapter1/vector.py | 1 | 1735 | #coding=utf-8
from math import hypot
class Vector:
"""自定义二维向量"""
def __init__(self, x=0, y=0):
self.x = x
self.y = y
def __repr__(self):
"""
repr() obtains an object's string representation through this __repr__ special method.
The difference between __repr__ and __str__ is that the latter is called when the
str() function is used, or when the object is printed with print, and the string it
returns should be friendlier to end users.
If an object has no __str__ method and Python needs one anyway,
the interpreter falls back to __repr__.
"""
return "Vector(%r, %r)"%(self.x, self.y)
def __abs__(self):
"""
The basic principle of infix operators is that they do not modify their operands but produce a new value.
"""
return hypot(self.x, self.y)
def __bool__(self):
"""
By default, instances of user-defined classes are considered truthy,
unless the class provides its own implementation of __bool__ or __len__.
bool(x) actually calls x.__bool__() behind the scenes;
if there is no __bool__ method, bool(x) tries x.__len__() instead.
If that returns 0, bool returns False; otherwise it returns True.
"""
# return bool(abs(self))
# more efficient: avoids computing the magnitude
return bool(self.x or self.y)
def __add__(self, other):
x = self.x + other.x
y = self.y + other.y
return Vector(x, y)
def __mul__(self, scalar):
return Vector(self.x * scalar, self.y * scalar)
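# Quick usage sketch (interactive session; not part of the original chapter code):
# >>> v = Vector(3, 4)
# >>> abs(v)
# 5.0
# >>> v + Vector(1, 2)
# Vector(4, 6)
# >>> v * 2
# Vector(6, 8)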
| lgpl-3.0 | 8,450,293,533,702,580,000 | 22.811321 | 55 | 0.466297 | false | 1.916413 | false | false | false |
CaymanUnterborn/burnman | tests/test_solidsolution.py | 3 | 9813 | from __future__ import absolute_import
import unittest
import os
import sys
import warnings
sys.path.insert(1, os.path.abspath('..'))
import numpy as np
import burnman
from burnman.mineral import Mineral
from burnman.processchemistry import dictionarize_formula, formula_mass
from util import BurnManTest
class forsterite (Mineral):
def __init__(self):
formula = 'Mg2.0Si1.0O4.0'
formula = dictionarize_formula(formula)
self.params = {
'name': 'fo',
'formula': formula,
'equation_of_state': 'hp_tmt',
'H_0': -2172590.0,
'S_0': 95.1,
'V_0': 4.366e-05,
'Cp': [233.3, 0.001494, -603800.0, -1869.7],
'a_0': 2.85e-05,
'K_0': 1.285e+11,
'Kprime_0': 3.84,
'Kdprime_0': -3e-11,
'n': sum(formula.values()),
'molar_mass': formula_mass(formula)}
Mineral.__init__(self)
class fayalite (Mineral):
def __init__(self):
formula = 'Fe2.0Si1.0O4.0'
formula = dictionarize_formula(formula)
self.params = {
'name': 'fa',
'formula': formula,
'equation_of_state': 'hp_tmt',
'H_0': -1477720.0,
'S_0': 151.0,
'V_0': 4.631e-05,
'Cp': [201.1, 0.01733, -1960600.0, -900.9],
'a_0': 2.82e-05,
'K_0': 1.256e+11,
'Kprime_0': 4.68,
'Kdprime_0': -3.7e-11,
'n': sum(formula.values()),
'molar_mass': formula_mass(formula)}
Mineral.__init__(self)
# One-mineral solid solution
class forsterite_ss(burnman.SolidSolution):
def __init__(self, molar_fractions=None):
self.name = 'Dummy solid solution'
self.solution_type = 'symmetric'
self.endmembers = [[forsterite(), '[Mg]2SiO4']]
self.energy_interaction = []
burnman.SolidSolution.__init__(self, molar_fractions)
# Two-mineral solid solution
class forsterite_forsterite_ss(burnman.SolidSolution):
def __init__(self, molar_fractions=None):
self.name = 'Fo-Fo solid solution'
self.solution_type = 'symmetric'
self.endmembers = [[forsterite(), '[Mg]2SiO4'], [
forsterite(), '[Mg]2SiO4']]
self.energy_interaction = [[0.]]
burnman.SolidSolution.__init__(self, molar_fractions)
# Ideal solid solution
class olivine_ideal_ss(burnman.SolidSolution):
def __init__(self, molar_fractions=None):
self.name = 'Fo-Fo solid solution'
self.solution_type = 'ideal'
self.endmembers = [[
forsterite(), '[Mg]2SiO4'], [fayalite(), '[Fe]2SiO4']]
burnman.SolidSolution.__init__(self, molar_fractions)
# Olivine solid solution
class olivine_ss(burnman.SolidSolution):
def __init__(self, molar_fractions=None):
self.name = 'Olivine'
self.solution_type = 'symmetric'
self.endmembers = [[
forsterite(), '[Mg]2SiO4'], [fayalite(), '[Fe]2SiO4']]
self.energy_interaction = [[8.4e3]]
burnman.SolidSolution.__init__(self, molar_fractions)
# Orthopyroxene solid solution
class orthopyroxene(burnman.SolidSolution):
def __init__(self, molar_fractions=None):
# Name
self.name = 'orthopyroxene'
self.solution_type = 'symmetric'
self.endmembers = [[forsterite(), '[Mg][Mg]Si2O6'], [
forsterite(), '[Mg1/2Al1/2][Mg1/2Al1/2]AlSiO6']]
self.energy_interaction = [[burnman.constants.gas_constant * 1.0e3]]
burnman.SolidSolution.__init__(self, molar_fractions)
# Three-endmember, two site solid solution
class two_site_ss(burnman.SolidSolution):
def __init__(self, molar_fractions=None):
self.name = 'two_site_ss'
self.solution_type = 'symmetric'
self.endmembers = [[forsterite(), '[Mg]3[Al]2Si3O12'], [
forsterite(), '[Fe]3[Al]2Si3O12'], [forsterite(), '[Mg]3[Mg1/2Si1/2]2Si3O12']]
self.energy_interaction = [[10.0e3, 5.0e3], [-10.0e3]]
burnman.SolidSolution.__init__(self, molar_fractions)
# Three-endmember, two site solid solution
class two_site_ss_subregular(burnman.SolidSolution):
def __init__(self, molar_fractions=None):
# Name
self.name = 'two_site_ss (subregular symmetric)'
self.solution_type = 'subregular'
self.endmembers = [[forsterite(), '[Mg]3[Al]2Si3O12'], [
forsterite(), '[Fe]3[Al]2Si3O12'], [forsterite(), '[Mg]3[Mg1/2Si1/2]2Si3O12']]
# Interaction parameters
self.energy_interaction = [
[[10.e3, 10.e3], [5.e3, 5.e3]], [[-10.e3, -10.e3]]]
burnman.SolidSolution.__init__(self, molar_fractions)
class test_solidsolution(BurnManTest):
def setup_1min_ss(self):
P = 1.e5
T = 1000.
fo = forsterite()
fo.set_state(P, T)
fo_ss = forsterite_ss()
fo_ss.set_composition([1.0])
fo_ss.set_state(P, T)
return fo, fo_ss
def setup_2min_ss(self):
P = 1.e5
T = 1000.
fo = forsterite()
fo.set_state(P, T)
fo_fo_ss = forsterite_forsterite_ss()
fo_fo_ss.set_composition([0.3, 0.7])
fo_fo_ss.set_state(P, T)
return fo, fo_fo_ss
def setup_ol_ss(self):
P = 1.e5
T = 1000.
fo = forsterite()
fo.set_state(P, T)
ol_ss = olivine_ss()
ol_ss.set_composition([1.0, 0.0])
ol_ss.set_state(P, T)
return fo, ol_ss
def test_1_gibbs(self):
fo, fo_ss = self.setup_1min_ss()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
endmember_properties = [fo.gibbs, fo.H, fo.S, fo.V, fo.C_p,
fo.C_v, fo.alpha, fo.K_T, fo.K_S, fo.gr, fo.G]
ss_properties = [fo_ss.gibbs, fo_ss.H, fo_ss.S, fo_ss.V, fo_ss.C_p,
fo_ss.C_v, fo_ss.alpha, fo_ss.K_T, fo_ss.K_S, fo_ss.gr, fo_ss.G]
assert len(w) == 3 # we expect to trigger 3 shear modulus warnings
self.assertArraysAlmostEqual(endmember_properties, ss_properties)
def test_2_gibbs(self):
fo, fo_ss = self.setup_2min_ss()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
endmember_properties = [fo.gibbs, fo.H, fo.S, fo.V, fo.C_p,
fo.C_v, fo.alpha, fo.K_T, fo.K_S, fo.gr, fo.G]
ss_properties = [fo_ss.gibbs, fo_ss.H, fo_ss.S, fo_ss.V, fo_ss.C_p,
fo_ss.C_v, fo_ss.alpha, fo_ss.K_T, fo_ss.K_S, fo_ss.gr, fo_ss.G]
assert len(w) == 4 # we expect to trigger 4 shear modulus warnings
self.assertArraysAlmostEqual(endmember_properties, ss_properties)
def test_ol_gibbs(self):
fo, fo_ss = self.setup_ol_ss()
endmember_properties = [
fo.gibbs, fo.H, fo.S, fo.V, fo.C_p, fo.C_v, fo.alpha, fo.K_T, fo.K_S, fo.gr]
ss_properties = [fo_ss.gibbs, fo_ss.H, fo_ss.S, fo_ss.V,
fo_ss.C_p, fo_ss.C_v, fo_ss.alpha, fo_ss.K_T, fo_ss.K_S, fo_ss.gr]
self.assertArraysAlmostEqual(endmember_properties, ss_properties)
def test_ol_Wh(self):
ol_ss = olivine_ss()
H_excess = ol_ss.solution_model.excess_enthalpy(
1.e5, 1000., [0.5, 0.5]) # Hxs = Exs if Vxs=0
We = ol_ss.solution_model.We[0][1]
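# For a symmetric (regular) binary solution H_xs = W * x1 * x2, so an equimolar
# mixture should give exactly W / 4.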
self.assertArraysAlmostEqual([We / 4.0], [H_excess])
def test_order_disorder(self):
opx = orthopyroxene()
opx.set_composition(np.array([0.0, 1.0]))
opx.set_state(1.e5, 300.)
self.assertArraysAlmostEqual([opx.excess_gibbs], [0.])
def test_site_totals(self):
ss = two_site_ss()
ss.set_composition([0.3, 0.3, 0.4])
ss.set_state(1.e5, 300.)
site_fractions = np.dot(
ss.molar_fractions, ss.solution_model.endmember_occupancies)
i = 0
site_fill = []
ones = [1.] * ss.solution_model.n_sites
for site in ss.solution_model.sites:
site_fill.append(sum(site_fractions[i:i + len(site)]))
i += len(site)
self.assertArraysAlmostEqual(site_fill, ones)
def test_set_method(self):
ss = olivine_ss()
ss.set_method('hp_tmt')
def test_molar_mass(self):
ss = olivine_ss()
ss.set_composition(np.array([0.5, 0.5]))
self.assertArraysAlmostEqual([ss.molar_mass], [0.5 *
forsterite().params['molar_mass'] + 0.5 * fayalite().params['molar_mass']])
def test_subregular(self):
ss0 = two_site_ss()
ss1 = two_site_ss_subregular()
ss0.set_composition([0.3, 0.3, 0.4])
ss0.set_state(1.e5, 300.)
ss1.set_composition([0.3, 0.3, 0.4])
ss1.set_state(1.e5, 300.)
self.assertArraysAlmostEqual(
ss0.excess_partial_gibbs, ss1.excess_partial_gibbs)
def test_activities_ideal(self):
ol = olivine_ideal_ss()
ol.set_composition(np.array([0.5, 0.5]))
ol.set_state(1.e5, 1000.)
self.assertArraysAlmostEqual(ol.activities, [0.25, 0.25])
def test_activity_coefficients_ideal(self):
ol = olivine_ideal_ss()
ol.set_composition(np.array([0.5, 0.5]))
ol.set_state(1.e5, 1000.)
self.assertArraysAlmostEqual(ol.activity_coefficients, [1., 1.])
def test_activity_coefficients_non_ideal(self):
opx = orthopyroxene()
opx.set_composition(np.array([0.0, 1.0]))
opx.set_state(1.e5, 1000.)
self.assertArraysAlmostEqual(
opx.activity_coefficients, [np.exp(1.), 1.])
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | -5,718,570,812,787,262,000 | 32.377551 | 112 | 0.558749 | false | 2.90928 | true | false | false |
blaze/dask | dask/bag/text.py | 1 | 5183 | import io
import os
from functools import partial
from tlz import concat
from ..bytes import open_files, read_bytes
from ..delayed import delayed
from ..utils import parse_bytes, system_encoding
from .core import from_delayed
delayed = delayed(pure=True)
def read_text(
urlpath,
blocksize=None,
compression="infer",
encoding=system_encoding,
errors="strict",
linedelimiter=os.linesep,
collection=True,
storage_options=None,
files_per_partition=None,
include_path=False,
):
"""Read lines from text files
Parameters
----------
urlpath : string or list
Absolute or relative filepath(s). Prefix with a protocol like ``s3://``
to read from alternative filesystems. To read from multiple files you
can pass a globstring or a list of paths, with the caveat that they
must all have the same protocol.
blocksize: None, int, or str
Size (in bytes) to cut up larger files. Streams by default.
Can be ``None`` for streaming, an integer number of bytes, or a string
like "128MiB"
compression: string
Compression format like 'gzip' or 'xz'. Defaults to 'infer'
encoding: string
errors: string
linedelimiter: string
collection: bool, optional
Return dask.bag if True, or list of delayed values if false
storage_options: dict
Extra options that make sense to a particular storage connection, e.g.
host, port, username, password, etc.
files_per_partition: None or int
If set, group input files into partitions of the requested size,
instead of one partition per file. Mutually exclusive with blocksize.
include_path: bool
Whether or not to include the path in the bag.
If true, elements are tuples of (line, path).
Default is False.
Examples
--------
>>> b = read_text('myfiles.1.txt') # doctest: +SKIP
>>> b = read_text('myfiles.*.txt') # doctest: +SKIP
>>> b = read_text('myfiles.*.txt.gz') # doctest: +SKIP
>>> b = read_text('s3://bucket/myfiles.*.txt') # doctest: +SKIP
>>> b = read_text('s3://key:secret@bucket/myfiles.*.txt') # doctest: +SKIP
>>> b = read_text('hdfs://namenode.example.com/myfiles.*.txt') # doctest: +SKIP
Parallelize a large file by providing the number of uncompressed bytes to
load into each partition.
>>> b = read_text('largefile.txt', blocksize='10MB') # doctest: +SKIP
Get file paths of the bag by setting include_path=True
>>> b = read_text('myfiles.*.txt', include_path=True) # doctest: +SKIP
>>> b.take(1) # doctest: +SKIP
(('first line of the first file', '/home/dask/myfiles.0.txt'),)
Returns
-------
dask.bag.Bag or list
dask.bag.Bag if collection is True or list of Delayed lists otherwise.
See Also
--------
from_sequence: Build bag from Python sequence
"""
if blocksize is not None and files_per_partition is not None:
raise ValueError("Only one of blocksize or files_per_partition can be set")
if isinstance(blocksize, str):
blocksize = parse_bytes(blocksize)
files = open_files(
urlpath,
mode="rt",
encoding=encoding,
errors=errors,
compression=compression,
**(storage_options or {})
)
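# Two partitioning modes: with no blocksize each file (or group of files_per_partition
# files) becomes one lazily-read partition; with a blocksize, read_bytes splits the
# files into byte ranges on the line delimiter and each block is decoded separately.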
if blocksize is None:
if files_per_partition is None:
blocks = [
delayed(list)(delayed(partial(file_to_blocks, include_path))(fil))
for fil in files
]
else:
blocks = []
for start in range(0, len(files), files_per_partition):
block_files = files[start : (start + files_per_partition)]
block_lines = delayed(concat)(
delayed(map)(
partial(file_to_blocks, include_path),
block_files,
)
)
blocks.append(block_lines)
else:
o = read_bytes(
urlpath,
delimiter=linedelimiter.encode(),
blocksize=blocksize,
sample=False,
compression=compression,
include_path=include_path,
**(storage_options or {})
)
raw_blocks = o[1]
blocks = [delayed(decode)(b, encoding, errors) for b in concat(raw_blocks)]
if include_path:
paths = list(
concat([[path] * len(raw_blocks[i]) for i, path in enumerate(o[2])])
)
blocks = [
delayed(attach_path)(entry, path) for entry, path in zip(blocks, paths)
]
if not blocks:
raise ValueError("No files found", urlpath)
if collection:
blocks = from_delayed(blocks)
return blocks
def file_to_blocks(include_path, lazy_file):
with lazy_file as f:
for line in f:
yield (line, lazy_file.path) if include_path else line
def attach_path(block, path):
for p in block:
yield (p, path)
def decode(block, encoding, errors):
text = block.decode(encoding, errors)
lines = io.StringIO(text)
return list(lines)
| bsd-3-clause | -3,570,770,384,754,851,300 | 31.39375 | 87 | 0.599074 | false | 4.055556 | true | false | false |
akmetainfo/opencorpora | anaphora/export_groups.py | 2 | 7308 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import sys
sys.path.append('/corpus/python')
from Annotation import AnnotationEditor
CONFIG_PATH = "/corpus/config.ini"
STR_NONE = 'NONE'
STR_ALL = 'ALL'
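# Sketch of the export format produced by the functions below (values are
# hypothetical): each group becomes one tab-separated line of book_id,
# group_id, comma-separated token ids, head id (or NONE/ALL), and group type,
# e.g. "17\t4211\t305,306,307\t306\t3". Complex groups follow a separator
# line reading "COMPLEX".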
def choose_annotators(dbh, only_moderated):
moderators = {}
if only_moderated:
dbh.execute("""
SELECT book_id, old_syntax_moder_id
FROM books
WHERE syntax_on > 0
""")
res = dbh.fetchall()
for row in res:
moderators[row['book_id']] = row['old_syntax_moder_id']
dbh.execute("""
SELECT user_id, book_id
FROM anaphora_syntax_annotators
WHERE status = 2
ORDER BY book_id, user_id
""")
annotators = {}
for row in dbh.fetchall():
if row['book_id'] not in annotators:
if row['book_id'] in moderators:
annotators[row['book_id']] = moderators[row['book_id']]
elif not only_moderated:
annotators[row['book_id']] = row['user_id']
return annotators
def export_simple_groups(dbh, annotators):
groups = get_simple_groups(dbh, annotators, include_dummy=True)
for gid, group in sorted(groups.items()):
head_str = group['head']
if group['marks'] == 'bad':
continue
elif group['marks'] == 'no head':
head_str = STR_NONE
elif group['marks'] == 'all':
head_str = STR_ALL
print("{4}\t{0}\t{1}\t{2}\t{3}".format(
gid, ','.join(map(str, group['tokens'])), head_str, group['type'], group['book_id'])
)
def get_simple_groups(dbh, annotators, include_dummy=False):
groups = {}
q = """
SELECT group_id, group_type, user_id, head_id, book_id, token_id, marks
FROM anaphora_syntax_groups g
JOIN anaphora_syntax_groups_simple gs
USING (group_id)
LEFT JOIN tokens tf
ON (gs.token_id = tf.tf_id)
JOIN sentences USING (sent_id)
JOIN paragraphs USING (par_id)
"""
if not include_dummy:
q += " WHERE group_type != 16 "
q += " ORDER BY group_id, token_id"
dbh.execute(q)
for row in dbh.fetchall():
if row['book_id'] not in annotators or annotators[row['book_id']] != row['user_id']:
continue
if row['group_id'] in groups:
groups[row['group_id']]['tokens'].append(row['token_id'])
else:
groups[row['group_id']] = {
'head': row['head_id'],
'type': row['group_type'],
'tokens': [row['token_id']],
'marks': row['marks'],
'book_id': row['book_id'] # we expect they are all the same
}
return groups
def export_complex_groups(dbh, annotators):
print("COMPLEX")
groups = get_complex_groups(dbh, annotators)
for gid, group in sorted(groups.items()):
head_str = group['head']
if group['marks'] == 'bad':
continue
elif group['marks'] == 'no head':
head_str = STR_NONE
elif group['marks'] == 'all':
head_str = STR_ALL
print("{4}\t{0}\t{1}\t{2}\t{3}".format(
gid, ','.join(map(str, sorted(group['tokens']))), head_str, group['type'], group['book_id']
))
def get_complex_groups(dbh, annotators):
simple = get_simple_groups(dbh, annotators, include_dummy=True)
groups = {}
dbh.execute("""
SELECT parent_gid, child_gid, group_type, head_id, user_id, marks
FROM anaphora_syntax_groups_complex gc
LEFT JOIN anaphora_syntax_groups g ON (gc.parent_gid = g.group_id)
ORDER BY parent_gid, child_gid
""")
for row in dbh.fetchall():
if row['parent_gid'] not in groups:
groups[row['parent_gid']] = {
'head': row['head_id'],
'type': row['group_type'],
'children': [row['child_gid']],
'user_id' : row['user_id'],
'tokens': set(),
'book_id': 0,
'marks': row['marks']
}
else:
groups[row['parent_gid']]['children'].append(row['child_gid'])
# remove groups by other annotators
gids = groups.keys()
for gid in gids:
if not check_subgroups(gid, simple, groups):
del groups[gid]
# add list of tokens and book id
for gid in groups:
update_token_list(groups[gid], simple, groups)
assign_book_id(groups[gid], simple, groups)
# add head token id
for gid in groups:
groups[gid]['head'] = get_head_token_id(groups[gid]['head'], simple, groups)
return groups
def check_subgroups(gid, simple_groups, complex_groups):
if gid in complex_groups:
for child_id in complex_groups[gid]['children']:
if not check_subgroups(child_id, simple_groups, complex_groups):
return False
return True
elif gid in simple_groups:
return True
else:
return False
def assign_book_id(group, simple_groups, complex_groups):
if group['book_id']:
return
for child_gid in group['children']:
if child_gid in simple_groups:
group['book_id'] = simple_groups[child_gid]['book_id']
return
elif child_gid in complex_groups:
assign_book_id(complex_groups[child_gid], simple_groups, complex_groups)
group['book_id'] = complex_groups[child_gid]['book_id']
else:
raise KeyError("group #{0} not found".format(child_gid))
def update_token_list(group, simple_groups, complex_groups):
if len(group['tokens']) > 0:
return
for child_gid in group['children']:
if child_gid in simple_groups:
group['tokens'].update(simple_groups[child_gid]['tokens'])
elif child_gid in complex_groups:
update_token_list(complex_groups[child_gid], simple_groups, complex_groups)
group['tokens'].update(complex_groups[child_gid]['tokens'])
else:
raise KeyError("group #{0} not found".format(child_gid))
def get_head_token_id(old_id, simple_groups, complex_groups):
if old_id == 0:
return 0
elif old_id in complex_groups:
return get_head_token_id(complex_groups[old_id]['head'], simple_groups, complex_groups)
elif old_id in simple_groups:
return simple_groups[old_id]['head']
else:
return 0 # sometimes head groups get deleted
def do_export(dbh, gtype, only_moderated):
annotators = choose_annotators(dbh, only_moderated)
if gtype != 'complex':
export_simple_groups(dbh, annotators)
if gtype != 'simple':
export_complex_groups(dbh, annotators)
def main():
editor = AnnotationEditor(CONFIG_PATH)
only_moderated = False
if len(sys.argv) < 2 or sys.argv[1] not in ['simple', 'complex', 'both']:
sys.stderr.write("""Usage: {0} {{simple|complex|both}} [mod]\n\tmod: export only moderators' groups, otherwise first user's annotation for each text\n""".format(sys.argv[0]))
sys.exit(1)
if len(sys.argv) > 2 and sys.argv[2] == 'mod':
only_moderated = True
do_export(editor.db_cursor, sys.argv[1], only_moderated)
if __name__ == "__main__":
main()
| gpl-2.0 | 262,133,076,401,282,620 | 35.358209 | 182 | 0.568008 | false | 3.591155 | false | false | false |
hfutsuchao/Python2.6 | premierClick/premierCityCount.py | 1 | 1505 | #coding:utf-8
import GJDB
db = GJDB.GJDB()
db.rp()
db.selectDB('house_report')
#dts = []
dts = ['2013_01','2013_02','2013_03','2014_01','2014_02','2014_03']
result1 = open('result1.txt','w')
result2 = open('result2.txt','w')
for dt in dts:
print dt
sql1 = 'SELECT AccountCityId, FROM_UNIXTIME(ReportDate), SUM(ClickCount) ,SUM(HouseCount) FROM house_account_generalstat_report_' + dt + ' WHERE AccountCityId IN (0,100,401,400,800,801,1300,1400,1900,900,901,903,902,1000,1001,600,601,300,2800,500,402,2600,1800,1700,2500,2505,1200,1600,1501,1506,1502,1500,1507,1503,1513,200,2900,2200,2000,1100,2300,2100) AND CountType IN (1,3) GROUP BY AccountCityId, ReportDate;'
sql2 = 'SELECT AccountCityId, FROM_UNIXTIME(ReportDate), HouseType, SUM(ClickCount) ,SUM(HouseCount) FROM house_account_generalstat_report_' + dt + ' WHERE AccountCityId IN (0,100,401,400,800,801,1300,1400,1900,900,901,903,902,1000,1001,600,601,300,2800,500,402,2600,1800,1700,2500,2505,1200,1600,1501,1506,1502,1500,1507,1503,1513,200,2900,2200,2000,1100,2300,2100) AND CountType IN (1,3) AND housetype IN (1,3,5) GROUP BY AccountCityId,ReportDate,HouseType;'
data1 = db.selectData(sql1)
data2 = db.selectData(sql2)
for row in data1:
result1.write(str(row[0]) + '\t' + str(row[1]) + '\t' + str(row[2]) + '\t' + str(row[3]) + '\n')
for row in data2:
result2.write(str(row[0]) + '\t' + str(row[1]) + '\t' + str(row[2]) + '\t' + str(row[3]) + '\t' + str(row[4]) + '\n') | gpl-2.0 | 7,618,124,902,688,000,000 | 66.5 | 465 | 0.665116 | false | 2.396497 | false | false | false |
CristianCantoro/wikipedia-tags-in-osm | wikipedia_coords_downloader.py | 2 | 9374 | #! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016 Simone F. <[email protected]>
#
# This file is part of wikipedia-tags-in-osm.
# wikipedia-tags-in-osm is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# wikipedia-tags-in-osm is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with wikipedia-tags-in-osm. If not, see <http://www.gnu.org/licenses/>.
"""Download coordinates of Wikipedia articles from MediaWiki API
https://www.mediawiki.org/wiki/Extension:GeoData#prop.3Dcoordinates
"""
import os
import urllib
import urllib2
import json
class CoordsDownloader:
def __init__(self, user_agent, coords_file, answer_file, wikipedia_lang,
titles):
self.user_agent = user_agent
self.coords_file = coords_file
self.wikipedia_lang = wikipedia_lang
self.titles = sorted(titles)
self.titles_coords, titles_coords_num = self.read_previous_coords()
titles_to_check = [
t for t in self.titles if t not in self.titles_coords]
# with open("titlestodownload", "w") as f:
# f.write("\n".join([t.encode("utf-8") for t in titles_to_check]))
print "Titles:", len(self.titles)
print "checked in the past: {0}, with coordinates {1}".format(
len(self.titles_coords), titles_coords_num)
if len(titles_to_check) == 0:
print ("The coordinates of all the articles have already been "
"downloaded.")
return
print "to be checked:", len(titles_to_check)
# Query Wikpedia for coordinates
self.query_wikipedia(titles_to_check, answer_file)
# Save updated titles' coordinates
self.save_titles_coords()
def read_previous_coords(self):
"""Read the titles whose coordinates were downloaded in the past."""
titles_coords = {}
titles_coords_num = 0
if os.path.isfile(self.coords_file):
lines = [line.rstrip('\n').split("\t")
for line in open(self.coords_file)]
for line in lines:
title = line[0].decode("utf-8").replace(" ", "_")
if len(line) == 1:
line.append("")
line.append("")
lat = line[1]
lon = line[2]
titles_coords[title] = []
if (lat, lon) != ("", ""):
titles_coords_num += 1
titles_coords[title] = [lat, lon]
return titles_coords, titles_coords_num
def query_wikipedia(self, titles_to_check, answer_file):
"""Query Wikipedia API for coordinates."""
# Create titles_strings with 50 titles each to query Wikipedia API
titles_strings = []
for fifty_titles in [titles_to_check[i:i + 50] for i in range(
0, len(titles_to_check), 50)]:
titles_string = "|".join(fifty_titles)
titles_strings.append(titles_string)
print "{0} queries of 50 titles each will be necessay".format(
len(titles_strings))
# Download
print "\n- Download coordinates from Wikipedia"
for i, titles_string in enumerate(titles_strings):
continue_string = ""
cocontinue_string = ""
print "\nrequest: {0}/{1}".format(i + 1, len(titles_strings))
# Debugging
# answer = raw_input("\n Download 50 titles' coordinates "
# "from Wikipedia?\n [y/N]")
answer = "y"
if answer.lower() != "y":
print " Download stopped."
break
while True:
wikipedia_answer = self.download_coordinates(answer_file,
titles_string,
continue_string,
cocontinue_string)
if not wikipedia_answer:
break
# Parsing
continue_string, cocontinue_string = self.parse_answer(
answer_file)
if (continue_string, cocontinue_string) == ("", ""):
break
else:
print "continue", continue_string, cocontinue_string
if not wikipedia_answer:
break
def download_coordinates(self, answer_file, titles_string, continue_string,
cocontinue_string):
"""Query Wikipedia API for articles' coordinates
"""
titles = urllib.quote_plus(
titles_string.replace("_", " ").encode("utf-8"))
url = ('http://{0}.wikipedia.org/w/api.php?action=query'
'&format=json'
'&titles={1}'
'&prop=coordinates'
'&coprimary=primary'
'&maxlag=5'
'&continue='.format(self.wikipedia_lang, titles))
if continue_string != "":
url += '{0}&cocontinue={1}'.format(
urllib.quote_plus(continue_string),
urllib.quote_plus(cocontinue_string))
request = urllib2.Request(url, None, {'User-Agent': self.user_agent})
try:
wikipedia_answer = urllib2.urlopen(request)
except:
print ("\n* a problem occurred during download:\n"
"{0}, {1}, {2}\ncontinue...".format(
titles_string.encode("utf-8"),
continue_string.encode("utf-8"),
cocontinue_string.encode("utf-8")))
return False
else:
with open(answer_file, "w") as f:
f.write(wikipedia_answer.read())
return True
def parse_answer(self, answer_file):
"""Read coordinates from Wikipedia API answer."""
with open(answer_file, "r") as f:
data = json.load(f)
for page in data["query"]["pages"].values():
title = page["title"].replace(" ", "_")
if title not in self.titles_coords:
self.titles_coords[title] = ["", ""]
if "coordinates" in page:
for coords in page["coordinates"]:
self.titles_coords[title] = [coords["lat"], coords["lon"]]
print "{0}/{1} {2} {3}".format(len(self.titles_coords),
len(self.titles),
title.encode("utf-8"),
self.titles_coords[title])
if "continue" in data:
return (data["continue"]["continue"],
data["continue"]["cocontinue"])
else:
return ("", "")
def save_titles_coords(self):
"""Save the updated list of articles with coordinates."""
with open(self.coords_file, "w") as f:
for i, (title, coordinates) in enumerate(
self.titles_coords.iteritems()):
if len(coordinates) == 2:
lat, lon = coordinates
else:
lat, lon = "", ""
f.write("{0}\t{1}\t{2}".format(title.encode("utf-8"),
lat,
lon))
if i < len(self.titles_coords) - 1:
f.write("\n")
if __name__ == "__main__":
user_agent = "Some coordinates download test"
coords_file = "articles_coords_test.csv"
titles = ["Archivio Storico Capitolino",
"Biblioteca Universitaria Alessandrina",
"Biblioteca Vallicelliana",
"Biblioteca apostolica vaticana",
"Biblioteca centrale della Facoltà di Architettura",
"Biblioteca del Ministero degli Affari Esteri",
"Biblioteca dell'Accademia Nazionale dei Lincei e Corsiniana",
"Biblioteca dell'Istituto dell'Enciclopedia Italiana",
"Biblioteca di papa Agapito I",
"Biblioteca di storia moderna e contemporanea",
"Biblioteca e museo teatrale del Burcardo",
"Biblioteca comunale Augusto Tersenghi",
"Biblioteca Civica Centrale",
"Biblioteca Nazionale del Club Alpino Italiano",
"Biblioteca Reale",
"Biblioteca capitolare (Vercelli)",
"Biblioteca civica Italo Calvino",
"Biblioteca civica Luigi Carluccio",
"Biblioteca internazionale di cinema e fotografia Mario Gromo",
"Biblioteca della Libera Università di Bolzano"]
CoordsDownloader(user_agent,
coords_file,
"answer.json",
"it",
[t.decode("utf-8") for t in titles])
print "\nDone."
| gpl-3.0 | -9,216,762,979,593,107,000 | 41.026906 | 81 | 0.525928 | false | 4.187668 | false | false | false |
andycavatorta/oratio | Roles/avl-input-1/main.py | 1 | 5600 | """
inputs:
4 cap sensors on I2C
3 rotary encoders on SPI
output topics:
pitch_key_event - integer from 0 to 47
voice_key_1_position - float from 0.0 to 1.0
voice_key_2_position - float from 0.0 to 1.0
voice_key_3_position - float from 0.0 to 1.0
"""
import Adafruit_MPR121.MPR121 as MPR121
import importlib
import json
import os
import Queue
import random
import settings
import sys
import threading
import time
from thirtybirds_2_0.Network.manager import init as network_init
from thirtybirds_2_0.Network.email_simple import init as email_init
from thirtybirds_2_0.Adaptors.Sensors import AMT203
BASE_PATH = os.path.dirname(os.path.realpath(__file__))
UPPER_PATH = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0]
DEVICES_PATH = "%s/Hosts/" % (BASE_PATH )
THIRTYBIRDS_PATH = "%s/thirtybirds" % (UPPER_PATH )
sys.path.append(BASE_PATH)
sys.path.append(UPPER_PATH)
class Main(threading.Thread):
def __init__(self, hostname):
threading.Thread.__init__(self)
self.hostname = hostname
self.queue = Queue.Queue()
def add_to_queue(self, topic, msg):
self.queue.put([topic, msg])
def run(self):
while True:
topic_msg = self.queue.get(True)
network.send(topic_msg[0], topic_msg[1])
class MPR121Array(threading.Thread):
def __init__(self, i2c_address):
threading.Thread.__init__(self)
position_raw = 0
self.i2c_address = i2c_address
self.capsensors = []
self.last_touched = [0,0,0,0]
for sensor_id in range(4):
self.capsensors.append(MPR121.MPR121())
if not self.capsensors[sensor_id].begin(self.i2c_address[sensor_id]):
print('Error initializing MPR121 @{}'.format(self.i2c_address[sensor_id]))
print repr(self.capsensors[sensor_id])
print "class CapSensorArray instantiated with values", self.i2c_address
def run(self):
print "class CapSensorArray thread started"
for sensor_id in range(4):
self.last_touched[sensor_id] = self.capsensors[sensor_id].touched()
global_position = 1
while True:
for sensor_id in range(4):
current_touched = self.capsensors[sensor_id].touched()
for i in range(12):
pin_bit = 1 << i
if current_touched & pin_bit and not self.last_touched[sensor_id] & pin_bit:
print('{0} touched!'.format(i))
global_position = i + (12 * sensor_id)
if not current_touched & pin_bit and self.last_touched[sensor_id] & pin_bit:
print('{0} released!'.format(i))
self.last_touched[sensor_id] = current_touched
if global_position > 1:
time.sleep(0.01)
main.add_to_queue("pitch_key_event", global_position)
class Key(threading.Thread):
def __init__(self, name, bus, deviceId):
threading.Thread.__init__(self)
self.name = name
self.bus = bus
self.deviceId = deviceId
print "creating amt203 object"
self.encoder = AMT203.AMT203(bus, deviceId)
print "setting zero ", self.bus, self.deviceId
self.encoder.set_zero()
print "after zero ", self.bus, self.deviceId
print "class Key instantiated with values", name, bus, deviceId
self.encoder_min = 0.0
self.encoder_max = 120.0
self.last_pos = 0.0
def run(self):
print "class Key thread started"
while True:
pos = self.encoder.get_position()
if self.last_pos != pos:
mapped_pos = self.map_key(self.name, pos)
main.add_to_queue(self.name, mapped_pos)
self.last_pos = pos
time.sleep(0.01)
def map_key(self, name, value):
value = value if value <= 1000 else 0
value = value if value <= self.encoder_max else self.encoder_max
value = value if value >= self.encoder_min else self.encoder_min
mapped_value = 0.8*(((value - self.encoder_min))/(self.encoder_max - self.encoder_min))
return mapped_value
def network_status_handler(msg):
print "network_status_handler", msg
def network_message_handler(msg):
print "network_message_handler", msg
topic = msg[0]
#host, sensor, data = yaml.safe_load(msg[1])
if topic == "__heartbeat__":
print "heartbeat received", msg
network = None # makin' it global
main = None
def init(HOSTNAME):
global network
global main
network = network_init(
hostname=HOSTNAME,
role="client",
discovery_multicastGroup=settings.discovery_multicastGroup,
discovery_multicastPort=settings.discovery_multicastPort,
discovery_responsePort=settings.discovery_responsePort,
pubsub_pubPort=settings.pubsub_pubPort,
message_callback=network_message_handler,
status_callback=network_status_handler
)
network.subscribe_to_topic("system") # subscribe to all system messages
#network.subscribe_to_topic("sensor_data")
main = Main(HOSTNAME)
main.daemon = True
main.start()
#mpr121array = MPR121Array([0x5a, 0x5b, 0x5c, 0x5d])
#mpr121array.start()
key_0 = Key("voice_key_2_position",0,0)
key_1 = Key("voice_key_1_position",0,1)
#key_2 = Key("voice_key_3_position",1,1)
key_0.daemon = True
key_0.start()
time.sleep(5)
key_1.daemon = True
key_1.start()
time.sleep(5)
#key_2.start()
| mit | 574,097,868,440,619,140 | 33.567901 | 96 | 0.615714 | false | 3.478261 | false | false | false |
zstanecic/NVDARemote | addon/globalPlugins/remoteClient/local_machine.py | 1 | 2755 | import os
import wx
from . import input
import api
import nvwave
import tones
import speech
import ctypes
import braille
import inputCore
import versionInfo
import logging
logger = logging.getLogger('local_machine')
def setSpeechCancelledToFalse():
"""
This function updates the state of speech so that it is aware that future
speech should not be cancelled. In the long term this is a fragile solution
as NVDA does not support modifying the internal state of speech.
"""
if versionInfo.version_year >= 2021:
# workaround as beenCanceled is readonly as of NVDA#12395
speech.speech._speechState.beenCanceled = False
else:
speech.beenCanceled = False
class LocalMachine:
def __init__(self):
self.is_muted = False
self.receiving_braille=False
def play_wave(self, fileName):
"""Instructed by remote machine to play a wave file."""
if self.is_muted:
return
if os.path.exists(fileName):
# ignore async / asynchronous from kwargs:
# playWaveFile should play asynchronously from NVDA remote.
nvwave.playWaveFile(fileName=fileName, asynchronous=True)
def beep(self, hz, length, left, right, **kwargs):
if self.is_muted:
return
tones.beep(hz, length, left, right)
def cancel_speech(self, **kwargs):
if self.is_muted:
return
wx.CallAfter(speech._manager.cancel)
def speak(
self,
sequence,
priority=speech.priorities.Spri.NORMAL,
**kwargs
):
if self.is_muted:
return
setSpeechCancelledToFalse()
wx.CallAfter(speech._manager.speak, sequence, priority)
def display(self, cells, **kwargs):
if self.receiving_braille and braille.handler.displaySize > 0 and len(cells) <= braille.handler.displaySize:
# We use braille.handler._writeCells since this respects thread safe displays and automatically falls back to noBraille if desired
cells = cells + [0] * (braille.handler.displaySize - len(cells))
wx.CallAfter(braille.handler._writeCells, cells)
def braille_input(self, **kwargs):
try:
inputCore.manager.executeGesture(input.BrailleInputGesture(**kwargs))
except inputCore.NoInputGestureAction:
pass
def set_braille_display_size(self, sizes, **kwargs):
sizes.append(braille.handler.display.numCells)
try:
size=min(i for i in sizes if i>0)
except ValueError:
size = braille.handler.display.numCells
braille.handler.displaySize = size
braille.handler.enabled = bool(size)
def send_key(self, vk_code=None, extended=None, pressed=None, **kwargs):
wx.CallAfter(input.send_key, vk_code, None, extended, pressed)
def set_clipboard_text(self, text, **kwargs):
api.copyToClip(text=text)
def send_SAS(self, **kwargs):
ctypes.windll.sas.SendSAS(0)
| gpl-2.0 | 4,050,571,339,939,729,000 | 27.308511 | 133 | 0.716152 | false | 3.233568 | false | false | false |
HiTechIronMan/openfda | openfda/parallel/outputs.py | 1 | 3765 | import cPickle
import multiprocessing
import os
import logging
import subprocess
import types
import leveldb
import simplejson as json
logger = logging.getLogger('mapreduce')
class MROutput(object):
suffix = 'db'
class Writer(object):
def __init__(self, filename, **kw):
self.filename = filename
def put(self, key, value):
assert False, "Don't use this class directly: use an output like LevelDBOutput"
def flush(self):
pass
def __init__(self, **kw):
self.writer_args = kw
def create_writer(self, prefix, shard_idx, num_shards):
assert prefix, 'No output prefix specified for output'
assert shard_idx < num_shards, 'Invalid shard index (%d > %d)' % (shard_idx, num_shards)
os.system('mkdir -p "%s"' % prefix)
return self.__class__.Writer(
prefix + '/shard-%05d-of-%05d.%s' % (shard_idx, num_shards, self.suffix),
**self.writer_args)
def recommended_shards(self):
return multiprocessing.cpu_count()
def finalize(self, tmp_dir, final_dir):
logger.info('Moving results from %s -> %s', tmp_dir, final_dir)
subprocess.check_output('mv "%s" "%s"' % (tmp_dir, final_dir), shell=True)
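# Typical use, sketched with a made-up output path (the concrete writer
# classes are defined below): an output object creates one writer per shard,
# the mapper calls put() for each record, flush() when the shard is finished,
# and finalize() moves the completed output into place.
#   out = LevelDBOutput()
#   writer = out.create_writer('/tmp/job-output', 0, out.recommended_shards())
#   writer.put('some-key', {'value': 1})
#   writer.flush()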
class LevelDBOutput(MROutput):
class Writer(MROutput.Writer):
def __init__(self, filename):
self.db = leveldb.LevelDB(filename)
self._last_key = None
def put(self, key, value):
assert isinstance(key, str)
assert key != self._last_key, (
'Duplicate keys (%s) passed to LevelDBOutput.'
'This output does not support multiple keys!' % key
)
      self.db.Put(key, cPickle.dumps(value, -1))
      self._last_key = key
class JSONOutput(MROutput):
suffix = 'json'
class Writer(MROutput.Writer):
def __init__(self, filename, **kw):
self.db = {}
self.json_args = kw
self.filename = filename
def put(self, key, value):
assert isinstance(key, str)
self.db[key] = value
def flush(self):
with open(self.filename, 'w') as out_file:
json.dump(self.db, out_file, **self.json_args)
def create_writer(self, prefix, shard_idx, num_shards):
assert num_shards == 1, 'JSONOutput only works with a single output shard!'
return MROutput.create_writer(self, prefix, shard_idx, num_shards)
def recommended_shards(self):
return 1
def finalize(self, tmp_dir, final_dir):
'''
Move the output JSON file to the final location.
There should only be one file -- this will fail if the user specified multiple shards!
'''
import glob
files = glob.glob('%s/*.json' % tmp_dir)
assert len(files) == 1, 'JSONOutput expected one temporary file, got: %s' % files
logger.info('Moving temporary file: %s to final destination: %s', files[0], final_dir)
subprocess.check_output('mv "%s" "%s"' % (files[0], final_dir), shell=True)
class JSONLineOutput(MROutput):
'''
Writes values as JSON, with one value per line.
The result is a single file.
'''
suffix = 'jsonline'
def finalize(self, tmp_dir, final_dir):
os.system('ls "%s"' % tmp_dir)
subprocess.check_output('cat %s/*.jsonline > "%s"' % (tmp_dir, final_dir), shell=True)
class Writer(MROutput.Writer):
def __init__(self, filename):
MROutput.Writer.__init__(self, filename)
self.output_file = open(filename, 'w')
def put(self, key, value):
self.output_file.write(json.dumps(value))
self.output_file.write('\n')
def flush(self):
logger.info('Flushing: %s', self.filename)
self.output_file.close()
class NullOutput(MROutput):
'''
Ignores all outputs and produces no output files.
'''
def finalize(self, tmp_dir, final_dir):
os.system('rm -rf "%s"' % tmp_dir)
class Writer(MROutput.Writer):
def put(self, key, value):
pass
| cc0-1.0 | 6,950,219,569,277,280,000 | 27.522727 | 92 | 0.643559 | false | 3.410326 | false | false | false |
peterayeni/rapidsms | rapidsms/backends/vumi/tests.py | 1 | 9379 | import json
from mock import patch
from django.test import TestCase
from django.core.urlresolvers import reverse
from rapidsms.backends.vumi.outgoing import VumiBackend
from rapidsms.backends.vumi.forms import VumiForm
from rapidsms.tests.harness import RapidTest, CreateDataMixin
class VumiFormTest(TestCase):
def setUp(self):
self.valid_data = {
"transport_name": "transport",
"in_reply_to": None,
"group": None,
"from_addr": "127.0.0.1:38634",
"message_type": "user_message",
"helper_metadata": {},
"to_addr": "0.0.0.0:8005",
"content": "ping",
"message_version": "20110921",
"transport_type": "telnet",
"timestamp": "2012-07-06 14:08:20.845715",
"transport_metadata": {},
"session_event": "resume",
"message_id": "56047985ceec40da908ca064f2fd59d3"
}
def test_valid_form(self):
"""Form should be valid if GET keys match configuration."""
form = VumiForm(self.valid_data, backend_name='vumi-backend')
self.assertTrue(form.is_valid())
def test_invalid_form(self):
"""Form is invalid if POST keys don't match configuration."""
data = {'invalid-phone': '1112223333', 'invalid-message': 'hi there'}
form = VumiForm(data, backend_name='vumi-backend')
self.assertFalse(form.is_valid())
def test_get_incoming_data(self):
"""get_incoming_data should return matching text and connection."""
form = VumiForm(self.valid_data, backend_name='vumi-backend')
form.is_valid()
incoming_data = form.get_incoming_data()
self.assertEqual(self.valid_data['content'], incoming_data['text'])
self.assertEqual(self.valid_data['from_addr'],
incoming_data['connection'].identity)
self.assertEqual('vumi-backend',
incoming_data['connection'].backend.name)
class VumiViewTest(RapidTest):
urls = 'rapidsms.backends.vumi.urls'
disable_phases = True
def setUp(self):
self.valid_data = {
"transport_name": "transport",
"in_reply_to": None,
"group": None,
"from_addr": "127.0.0.1:38634",
"message_type": "user_message",
"helper_metadata": {},
"to_addr": "0.0.0.0:8005",
"content": "ping",
"message_version": "20110921",
"transport_type": "telnet",
"timestamp": "2012-07-06 14:08:20.845715",
"transport_metadata": {},
"session_event": "resume",
"message_id": "56047985ceec40da908ca064f2fd59d3"
}
def test_valid_response_post(self):
"""HTTP 200 should return if data is valid."""
response = self.client.post(reverse('vumi-backend'),
json.dumps(self.valid_data),
content_type='text/json')
self.assertEqual(response.status_code, 200)
def test_invalid_response(self):
"""HTTP 400 should return if data is invalid."""
data = {'invalid-phone': '1112223333', 'message': 'hi there'}
response = self.client.post(reverse('vumi-backend'), json.dumps(data),
content_type='text/json')
self.assertEqual(response.status_code, 400)
def test_invalid_json(self):
"""HTTP 400 should return if JSON is invalid."""
data = "{bad json, , lala}"
response = self.client.post(reverse('vumi-backend'), data,
content_type='text/json')
self.assertEqual(response.status_code, 400)
def test_valid_post_message(self):
"""Valid POSTs should pass message object to router."""
self.client.post(reverse('vumi-backend'), json.dumps(self.valid_data),
content_type='text/json')
message = self.inbound[0]
self.assertEqual(self.valid_data['content'], message.text)
self.assertEqual(self.valid_data['from_addr'],
message.connection.identity)
self.assertEqual('vumi-backend',
message.connection.backend.name)
def test_blank_message_is_valid(self):
"""Blank messages should be considered valid."""
empty = self.valid_data.copy()
empty.update({'content': ''})
null = self.valid_data.copy()
null.update({'content': None})
no_content = self.valid_data.copy()
del no_content['content']
for blank_msg in [empty, null, no_content]:
self.client.post(reverse('vumi-backend'), json.dumps(blank_msg),
content_type='text/json')
message = self.inbound[0]
self.assertEqual('', message.text)
class VumiSendTest(CreateDataMixin, TestCase):
def test_required_fields(self):
"""Vumi backend requires Gateway URL and credentials."""
self.assertRaises(TypeError, VumiBackend, None, "vumi")
def test_outgoing_keys(self):
"""Vumi requires JSON to include to_addr and content."""
message = self.create_outgoing_message()
config = {"sendsms_url": "http://example.com"}
backend = VumiBackend(None, "vumi", **config)
kwargs = backend.prepare_request(message.id, message.text,
[message.connections[0].identity], {})
self.assertEqual(kwargs['url'], config['sendsms_url'])
data = json.loads(kwargs['data'])
self.assertEqual(data['to_addr'], [message.connections[0].identity])
self.assertEqual(data['content'], message.text)
def test_response_external_id(self):
"""Make sure external_id context is sent to Vumi."""
message = self.create_outgoing_message()
config = {"sendsms_url": "http://example.com"}
backend = VumiBackend(None, "vumi", **config)
kwargs = backend.prepare_request(message.id, message.text,
[message.connections[0].identity],
{'external_id': 'ASDF1234'})
data = json.loads(kwargs['data'])
self.assertEqual("ASDF1234", data['in_reply_to'])
def test_bulk_response_external_id(self):
"""Only single messages should include in_response_to."""
conn1 = self.create_connection()
conn2 = self.create_connection()
config = {"sendsms_url": "http://example.com"}
backend = VumiBackend(None, "vumi", **config)
kwargs = backend.prepare_request("1234", "foo",
[conn1.identity, conn2.identity],
{'external_id': 'ASDF1234'})
data = json.loads(kwargs['data'])
self.assertTrue('in_reply_to' not in data)
def test_message_id_in_metadata(self):
"""Make sure our uuid is sent to Vumi."""
message = self.create_outgoing_message()
config = {"sendsms_url": "http://example.com"}
backend = VumiBackend(None, "vumi", **config)
kwargs = backend.prepare_request(message.id, message.text,
[message.connections[0].identity], {})
data = json.loads(kwargs['data'])
self.assertIn(message.id, data.get('metadata', {}).values())
def test_from_addr_and_endpoint_in_payload(self):
"""Make sure that we include from_addr or endpoint if provided, but only those keys"""
message = self.create_outgoing_message()
config = {"sendsms_url": "http://example.com"}
backend = VumiBackend(None, "vumi", **config)
context = {'from_addr': '5551212',
'endpoint': '12345',
'other': 'not included'}
kwargs = backend.prepare_request(message.id, message.text,
[message.connections[0].identity], context)
data = json.loads(kwargs['data'])
self.assertEqual(context['from_addr'], data['from_addr'])
self.assertEqual(context['endpoint'], data['endpoint'])
self.assertNotIn('other', data)
def test_send(self):
"""Test successful send."""
message = self.create_outgoing_message()
config = {"sendsms_url": "http://example.com"}
backend = VumiBackend(None, "vumi", **config)
kwargs = backend.prepare_request(message.id, message.text,
[message.connections[0].identity], {})
with patch('rapidsms.backends.vumi.outgoing.requests.post') as mock_post:
backend.send(message.id, message.text,
[message.connections[0].identity], {})
mock_post.assert_called_once_with(**kwargs)
def test_auth(self):
"""Vumi backend shold use basic authentication if given user/pass."""
message = self.create_outgoing_message()
config = {"sendsms_url": "http://example.com",
"sendsms_user": "username",
"sendsms_pass": "password"}
backend = VumiBackend(None, "vumi", **config)
kwargs = backend.prepare_request(message.id, message.text,
[message.connections[0].identity], {})
self.assertTrue('auth' in kwargs)
| bsd-3-clause | -7,346,952,051,807,796,000 | 43.240566 | 94 | 0.572449 | false | 3.954047 | true | false | false |
jfining/mincss | run.py | 1 | 2337 | #!/usr/bin/env python
from __future__ import print_function
import os
import sys
import time
# make sure it's running the mincss here and not anything installed
sys.path.insert(0, os.path.dirname(__file__))
from mincss.processor import Processor
def run(args):
options = {'debug': args.verbose}
if args.phantomjs_path:
options['phantomjs'] = args.phantomjs_path
elif args.phantomjs:
options['phantomjs'] = True
p = Processor(**options)
t0 = time.time()
p.process(args.url)
t1 = time.time()
print("TOTAL TIME ", t1 - t0)
for inline in p.inlines:
print("ON", inline.url)
print("AT line", inline.line)
print("BEFORE ".ljust(79, '-'))
print(inline.before)
print("AFTER ".ljust(79, '-'))
print(inline.after)
print()
output_dir = args.outputdir
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
for link in p.links:
print("FOR", link.href)
#print("BEFORE ".ljust(79, '-'))
#print(link.before)
#print("AFTER ".ljust(79, '-'))
#print(link.after)
orig_name = link.href.split('/')[-1]
with open(os.path.join(output_dir, orig_name), 'w') as f:
            f.write(link.after.encode('utf-8'))
before_name = 'before_' + link.href.split('/')[-1]
with open(os.path.join(output_dir, before_name), 'w') as f:
f.write(link.before.encode('utf-8'))
print("Files written to", output_dir)
print()
print(
'(from %d to %d saves %d)' %
(len(link.before), len(link.after),
len(link.before) - len(link.after))
)
return 0
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
add = parser.add_argument
add("url", type=str,
help="URL to process")
add("--outputdir", action="store",
default="./output",
help="directory where to put output (default ./output)")
add("-v", "--verbose", action="store_true",
help="increase output verbosity")
add("--phantomjs", action="store_true",
help="Use PhantomJS to download the source")
add("--phantomjs-path", action="store",
default="",
help="Where is the phantomjs executable")
args = parser.parse_args()
sys.exit(run(args))
| bsd-3-clause | 4,892,649,772,427,431,000 | 29.350649 | 67 | 0.575524 | false | 3.567939 | false | false | false |
abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/bugs/browser/bugcomment.py | 1 | 12906 | # Copyright 2006-2012 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Bug comment browser view classes."""
__metaclass__ = type
__all__ = [
'BugComment',
'BugCommentBoxExpandedReplyView',
'BugCommentBoxView',
'BugCommentBreadcrumb',
'BugCommentView',
'BugCommentXHTMLRepresentation',
'build_comments_from_chunks',
'group_comments_with_activity',
]
from datetime import timedelta
from itertools import (
chain,
groupby,
)
from operator import itemgetter
from lazr.delegates import delegates
from lazr.restful.interfaces import IWebServiceClientRequest
from zope.component import (
adapts,
getMultiAdapter,
getUtility,
)
from zope.interface import (
implements,
Interface,
)
from zope.security.proxy import removeSecurityProxy
from lp.bugs.interfaces.bugattachment import BugAttachmentType
from lp.bugs.interfaces.bugmessage import IBugComment
from lp.services.comments.browser.comment import download_body
from lp.services.comments.browser.messagecomment import MessageComment
from lp.services.config import config
from lp.services.librarian.browser import ProxiedLibraryFileAlias
from lp.services.messages.interfaces.message import IMessage
from lp.services.propertycache import (
cachedproperty,
get_property_cache,
)
from lp.services.webapp import (
canonical_url,
LaunchpadView,
)
from lp.services.webapp.breadcrumb import Breadcrumb
from lp.services.webapp.interfaces import ILaunchBag
COMMENT_ACTIVITY_GROUPING_WINDOW = timedelta(minutes=5)
def build_comments_from_chunks(
bugtask, truncate=False, slice_info=None, show_spam_controls=False,
user=None, hide_first=False):
"""Build BugComments from MessageChunks.
:param truncate: Perform truncation of large messages.
:param slice_info: If not None, an iterable of slices to retrieve.
"""
chunks = bugtask.bug.getMessagesForView(slice_info=slice_info)
# This would be better as part of indexed_messages eager loading.
comments = {}
for bugmessage, message, chunk in chunks:
cache = get_property_cache(message)
if getattr(cache, 'chunks', None) is None:
cache.chunks = []
cache.chunks.append(removeSecurityProxy(chunk))
bug_comment = comments.get(message.id)
if bug_comment is None:
if bugmessage.index == 0 and hide_first:
display = 'hide'
elif truncate:
display = 'truncate'
else:
display = 'full'
bug_comment = BugComment(
bugmessage.index, message, bugtask,
show_spam_controls=show_spam_controls, user=user,
display=display)
comments[message.id] = bug_comment
# This code path is currently only used from a BugTask view which
# has already loaded all the bug watches. If we start lazy loading
# those, or not needing them we will need to batch lookup watches
# here.
if bugmessage.bugwatchID is not None:
bug_comment.bugwatch = bugmessage.bugwatch
bug_comment.synchronized = (
bugmessage.remote_comment_id is not None)
return comments
def group_comments_with_activity(comments, activities):
"""Group comments and activity together for human consumption.
Generates a stream of comment instances (with the activity grouped within)
or `list`s of grouped activities.
:param comments: An iterable of `BugComment` instances, which should be
sorted by index already.
:param activities: An iterable of `BugActivity` instances.
"""
window = COMMENT_ACTIVITY_GROUPING_WINDOW
comment_kind = "comment"
if comments:
max_index = comments[-1].index + 1
else:
max_index = 0
comments = (
(comment.datecreated, comment.index,
comment.owner, comment_kind, comment)
for comment in comments)
activity_kind = "activity"
activity = (
(activity.datechanged, max_index,
activity.person, activity_kind, activity)
for activity in activities)
    # When an action and a comment happen at the same time, the action comes
    # second; when two events are tied, the comment index is used to
    # disambiguate.
events = sorted(chain(comments, activity), key=itemgetter(0, 1, 2))
def gen_event_windows(events):
"""Generate event windows.
Yields `(window_index, kind, event)` tuples, where `window_index` is
an integer, and is incremented each time the windowing conditions are
triggered.
:param events: An iterable of `(date, ignored, actor, kind, event)`
tuples in order.
"""
window_comment, window_actor = None, None
window_index, window_end = 0, None
for date, _, actor, kind, event in events:
window_ended = (
# A window may contain only one comment.
(window_comment is not None and kind is comment_kind) or
# All events must have happened within a given timeframe.
(window_end is None or date >= window_end) or
# All events within the window must belong to the same actor.
(window_actor is None or actor != window_actor))
if window_ended:
window_comment, window_actor = None, actor
window_index, window_end = window_index + 1, date + window
if kind is comment_kind:
window_comment = event
yield window_index, kind, event
event_windows = gen_event_windows(events)
event_windows_grouper = groupby(event_windows, itemgetter(0))
for window_index, window_group in event_windows_grouper:
window_group = [
(kind, event) for (index, kind, event) in window_group]
for kind, event in window_group:
if kind is comment_kind:
window_comment = event
window_comment.activity.extend(
event for (kind, event) in window_group
if kind is activity_kind)
yield window_comment
# There's only one comment per window.
break
else:
yield [event for (kind, event) in window_group]
class BugComment(MessageComment):
"""Data structure that holds all data pertaining to a bug comment.
It keeps track of which index it has in the bug comment list and
also provides functionality to truncate the comment.
Note that although this class is called BugComment it really takes
as an argument a bugtask. The reason for this is to allow
canonical_url()s of BugComments to take you to the correct
(task-specific) location.
"""
implements(IBugComment)
delegates(IMessage, '_message')
def __init__(
self, index, message, bugtask, activity=None,
show_spam_controls=False, user=None, display='full'):
if display == 'truncate':
comment_limit = config.malone.max_comment_size
else:
comment_limit = None
super(BugComment, self).__init__(comment_limit)
self.index = index
self.bugtask = bugtask
self.bugwatch = None
self._message = message
self.display_title = False
self.patches = []
if activity is None:
activity = []
self.activity = activity
self.synchronized = False
# We use a feature flag to control users deleting their own comments.
user_owns_comment = user is not None and user == self.owner
self.show_spam_controls = show_spam_controls or user_owns_comment
self.hide_text = (display == 'hide')
@cachedproperty
def bugattachments(self):
return [attachment for attachment in self._message.bugattachments if
attachment.type != BugAttachmentType.PATCH]
@property
def show_for_admin(self):
"""Show hidden comments for Launchpad admins.
This is used in templates to add a class to hidden
comments to enable display for admins, so the admin
can see the comment even after it is hidden. Since comments
aren't published unless the user is registry or admin, this
can just check if the comment is visible.
"""
return not self.visible
@cachedproperty
def text_for_display(self):
if self.hide_text:
return ''
else:
return super(BugComment, self).text_for_display
def isIdenticalTo(self, other):
"""Compare this BugComment to another and return True if they are
identical.
"""
if self.owner != other.owner:
return False
if self.text_for_display != other.text_for_display:
return False
if self.title != other.title:
return False
if (self.bugattachments or self.patches or other.bugattachments or
other.patches):
# We shouldn't collapse comments which have attachments;
# there's really no possible identity in that case.
return False
return True
def isEmpty(self):
"""Return True if text_for_display is empty."""
return (len(self.text_for_display) == 0 and
len(self.bugattachments) == 0 and len(self.patches) == 0)
@property
def add_comment_url(self):
return canonical_url(self.bugtask, view_name='+addcomment')
@property
def download_url(self):
return canonical_url(self, view_name='+download')
@property
def show_footer(self):
"""Return True if the footer should be shown for this comment."""
return bool(
len(self.activity) > 0 or
self.bugwatch or
self.show_spam_controls)
class BugCommentView(LaunchpadView):
"""View for a single bug comment."""
def __init__(self, context, request):
# We use the current bug task as the context in order to get the
# menu and portlets working.
bugtask = getUtility(ILaunchBag).bugtask
LaunchpadView.__init__(self, bugtask, request)
self.comment = context
def __call__(self):
"""View redirects to +download if comment is too long to render."""
if self.comment.too_long_to_render:
return self.request.response.redirect(self.comment.download_url)
return super(BugCommentView, self).__call__()
def download(self):
return download_body(self.comment, self.request)
@property
def show_spam_controls(self):
return self.comment.show_spam_controls
def page_title(self):
return 'Comment %d for bug %d' % (
self.comment.index, self.context.bug.id)
@property
def page_description(self):
return self.comment.text_contents
@property
def privacy_notice_classes(self):
if not self.context.bug.private:
return 'hidden'
else:
return ''
class BugCommentBoxViewMixin:
"""A class which provides proxied Librarian URLs for bug attachments."""
@property
def show_spam_controls(self):
if hasattr(self.context, 'show_spam_controls'):
return self.context.show_spam_controls
elif (hasattr(self, 'comment') and
hasattr(self.comment, 'show_spam_controls')):
return self.comment.show_spam_controls
else:
return False
def proxiedUrlOfLibraryFileAlias(self, attachment):
"""Return the proxied URL for the Librarian file of the attachment."""
return ProxiedLibraryFileAlias(
attachment.libraryfile, attachment).http_url
class BugCommentBoxView(LaunchpadView, BugCommentBoxViewMixin):
"""Render a comment box with reply field collapsed."""
expand_reply_box = False
class BugCommentBoxExpandedReplyView(LaunchpadView, BugCommentBoxViewMixin):
"""Render a comment box with reply field expanded."""
expand_reply_box = True
class BugCommentXHTMLRepresentation:
adapts(IBugComment, IWebServiceClientRequest)
implements(Interface)
def __init__(self, comment, request):
self.comment = comment
self.request = request
def __call__(self):
"""Render `BugComment` as XHTML using the webservice."""
comment_view = getMultiAdapter(
(self.comment, self.request), name="+box")
return comment_view()
class BugCommentBreadcrumb(Breadcrumb):
"""Breadcrumb for an `IBugComment`."""
def __init__(self, context):
super(BugCommentBreadcrumb, self).__init__(context)
@property
def text(self):
return "Comment #%d" % self.context.index
| agpl-3.0 | -239,312,751,276,880,060 | 33.233422 | 78 | 0.641872 | false | 4.325067 | false | false | false |
phlax/pootle | pootle/apps/pootle_score/migrations/0002_set_user_scores.py | 6 | 3384 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-02 07:04
from __future__ import unicode_literals
from django.db import migrations
from django.db.models import functions, Case, IntegerField, Sum, When
class TranslationActionCodes(object):
NEW = 0 # 'TA' unit translated
EDITED = 1 # 'TE' unit edited after someone else
EDITED_OWN = 2 # 'TX' unit edited after themselves
DELETED = 3 # 'TD' translation deleted by admin
REVIEWED = 4 # 'R' translation reviewed
MARKED_FUZZY = 5 # 'TF' translation’s fuzzy flag is set by admin
EDIT_PENALTY = 6 # 'XE' translation penalty [when translation deleted]
REVIEW_PENALTY = 7 # 'XR' translation penalty [when review canceled]
SUGG_ADDED = 8 # 'S' suggestion added
# 'SA' suggestion accepted (counted towards the suggestion author)
SUGG_ACCEPTED = 9
# 'SR' suggestion rejected (counted towards the suggestion author)
SUGG_REJECTED = 10
# 'RA' suggestion accepted (counted towards the reviewer)
SUGG_REVIEWED_ACCEPTED = 11
# 'RR' suggestion rejected (counted towards the reviewer)
SUGG_REVIEWED_REJECTED = 12
def set_user_scores(apps, schema_editor):
from accounts.models import User
UserTPScore = apps.get_model("pootle_score.UserTPScore")
scorelogs = apps.get_model("pootle_statistics.ScoreLog").objects.exclude(
user__username__in=User.objects.META_USERS)
scorelogs = scorelogs.annotate(
day=functions.TruncDay("creation_time")).values(
"day", "user", "submission__translation_project").annotate(
score=Sum("score_delta"),
suggested=Sum(
Case(
When(
action_code=TranslationActionCodes.SUGG_ADDED,
then='wordcount'),
default=0,
output_field=IntegerField())),
translated=Sum(
Case(
When(
translated_wordcount__isnull=False,
then='translated_wordcount'),
default=0,
output_field=IntegerField())),
reviewed=Sum(
Case(
When(
action_code__in=[
TranslationActionCodes.SUGG_REVIEWED_ACCEPTED,
TranslationActionCodes.REVIEWED,
TranslationActionCodes.EDITED],
translated_wordcount__isnull=True,
then='wordcount'),
default=0,
output_field=IntegerField())))
UserTPScore.objects.bulk_create(
UserTPScore(
date=score["day"],
user_id=score["user"],
tp_id=score["submission__translation_project"],
score=score["score"],
reviewed=score["reviewed"],
suggested=score["suggested"],
translated=score["translated"])
for score in scorelogs.iterator())
class Migration(migrations.Migration):
dependencies = [
('pootle_score', '0001_initial'),
('pootle_statistics', '0005_index_ordering')
]
operations = [
migrations.RunPython(set_user_scores),
]
| gpl-3.0 | -4,285,922,003,002,522,000 | 38.788235 | 78 | 0.55825 | false | 4.380829 | false | false | false |
arbenson/tensor-sc | scripts/dir3cycles.py | 2 | 1281 | import sys
import numpy as np
import snap
# Enumerate all of the directed 3 cycles in a graph.
symmetrize = True
def EnumerateDir3Cycles(G, name):
    ''' Enumerate all directed 3-cycles in the graph.
    Writes each cycle found to the file `name` as a line of three node ids
    (plus, when `symmetrize` is True, a second line with the first two ids
    swapped) and prints the total number of lines written. '''
total = 0
with open(name, 'w') as output:
for u_ in G.Nodes():
u = u_.GetId()
for v in u_.GetOutEdges():
for w in u_.GetInEdges():
if (u == v or v == w or w == u):
continue
if not G.IsEdge(v, w):
continue
# w -> u -> v -> w
output.write('%d %d %d\n' % (w, v, u))
total += 1
if symmetrize:
output.write('%d %d %d\n' % (v, w, u))
total += 1
print 'number of dir. 3 cycles (no reciprocated edges): ', total
def main():
name = sys.argv[1]
out_name = name.split('.txt')[0] + '-d3c.txt'
G = snap.LoadEdgeList(snap.PNGraph, name, 0, 1)
EnumerateDir3Cycles(G, out_name)
if __name__ == '__main__':
main()
| bsd-2-clause | -7,635,492,846,843,102,000 | 29.5 | 72 | 0.460578 | false | 3.767647 | false | false | false |
prrvchr/USBTerminal | USB/InitGui.py | 1 | 3558 | # -*- coding: utf-8 -*-
#***************************************************************************
#* *
#* Copyright (c) 2015 Pierre Vacher <[email protected]> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
""" Gui workbench initialization """
from __future__ import unicode_literals
class UsbWorkbench(Workbench):
"USB workbench object"
Icon = b"""
/* XPM */
static const char * const usbwb_xpm[]={
"16 16 3 1",
". c None",
"# c #FFFFFF",
"$ c #000000",
"................",
".......$$#......",
"......$$$$#.#...",
"....#..$$#.$$#..",
"...$$#.$$#$$$$#.",
"..$$$$#$$#.$$#..",
"...$$#.$$#.$$#..",
"...$$#.$$#.$$#..",
"...$$#.$$#$$#...",
"...$$#.$$$##....",
"....$$#$$#......",
"......$$$#......",
".......$$##.....",
".....$$$$$$#....",
".....$$$$$$#....",
"................"};
"""
MenuText = "USB"
ToolTip = "Python USB workbench"
def Initialize(self):
from PySide import QtCore
from Gui import Script
from App import DocumentObserver, UsbPool, UsbCommand, TinyG2
Script.initIcons()
commands = [b"Usb_Pool", b"Usb_Refresh", b"Usb_Open", b"Usb_Start", b"Usb_Pause"]
# Add commands to menu and toolbar
self.appendToolbar("Commands for Usb", commands)
self.appendMenu([b"USB"], commands)
App.addDocumentObserver(DocumentObserver.DocumentObserver())
Log('Loading USB workbench... done\n')
def GetClassName(self):
return "Gui::PythonWorkbench"
def Activated(self):
from Gui import PySerialPanel, UsbPoolPanel, TinyG2Panel
Gui.Control.addTaskWatcher([PySerialPanel.TaskWatcher(),
UsbPoolPanel.TaskWatcher(),
TinyG2Panel.TaskWatcher()])
Log("USB workbench activated\n")
def Deactivated(self):
Gui.Control.clearTaskWatcher()
Log("USB workbench deactivated\n")
Gui.addWorkbench(UsbWorkbench())
| gpl-2.0 | 1,791,434,675,139,694,800 | 41.357143 | 89 | 0.426082 | false | 4.481108 | false | false | false |