| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated |
|---|---|---|---|---|---|---|---|---|---|---|
| stringlengths 5 to 92 | stringlengths 4 to 232 | stringclasses 19 values | stringlengths 4 to 7 | stringlengths 721 to 1.04M | stringclasses 15 values | int64 -9,223,277,421,539,062,000 to 9,223,102,107B | float64 6.51 to 99.9 | int64 15 to 997 | float64 0.25 to 0.97 | bool 1 class |
idle-code/ContextShell | tests/unit/fakes.py | 1 | 1444 | from collections import OrderedDict
from typing import Dict

from contextshell.action import Action, ActionArgsPack, ActionExecutor
from contextshell.path import NodePath


class FakeTree:
    def __init__(self):
        self.node_map: Dict[NodePath, FakeTree] = dict()

    def get(self, path):
        return self.node_map[path]

    def contains(self, path):
        return path in self.node_map


class FakeAction(Action):
    def __init__(self, name='action'):
        super().__init__(NodePath(name))
        self.called = False
        self.received_tree = None
        self.received_target = None
        self.received_action = None
        self.received_arguments = None
        self.return_value = None

    def invoke(self, target: NodePath, action: NodePath, arguments: ActionArgsPack):
        self.called = True
        self.received_target = target
        self.received_action = action
        self.received_arguments = arguments
        return self.return_value


class FakeActionExecutor(ActionExecutor):
    def __init__(self):
        self.execute_target = None
        self.execute_action = None
        self.execute_args = None
        self.execute_return = None

    def execute(self, target: NodePath, action: NodePath, args: ActionArgsPack = None):
        self.execute_target = target
        self.execute_action = action
        self.execute_args = args if args else OrderedDict()
        return self.execute_return
| mit | -7,868,696,964,391,235,000 | 29.083333 | 87 | 0.657895 | false |
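The fakes above are plain recording stubs with no assertions of their own. Below is a minimal, hypothetical sketch of how a unit test might drive them; the test names, the import paths (inferred from the row's file path), and the dotted-path string handed to NodePath are illustrative assumptions, not part of the file.

```python
# Hypothetical usage sketch for the fakes above (not part of the dataset row).
from contextshell.path import NodePath
from tests.unit.fakes import FakeAction, FakeActionExecutor


def test_fake_action_records_invocation():
    action = FakeAction(name='get')
    action.return_value = 42
    target = NodePath('.foo.bar')  # assumed dotted-path constructor argument

    result = action.invoke(target, NodePath('get'), {})

    assert action.called
    assert action.received_target is target
    assert action.received_arguments == {}
    assert result == 42


def test_fake_executor_defaults_args_to_an_empty_pack():
    executor = FakeActionExecutor()
    executor.execute_return = 'ok'

    assert executor.execute(NodePath('.foo'), NodePath('get')) == 'ok'
    assert len(executor.execute_args) == 0  # None was replaced by OrderedDict()
```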
marnnie/Cable-buenaventura | plugin.video.genesis/resources/lib/indexers/movies.py | 1 | 52852 | # -*- coding: utf-8 -*-
'''
Genesis Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os,sys,re,json,urllib,urlparse,base64,datetime
try: action = dict(urlparse.parse_qsl(sys.argv[2].replace('?','')))['action']
except: action = None
from resources.lib.libraries import trakt
from resources.lib.libraries import control
from resources.lib.libraries import client
from resources.lib.libraries import cache
from resources.lib.libraries import metacache
from resources.lib.libraries import favourites
from resources.lib.libraries import workers
from resources.lib.libraries import views
class movies:
def __init__(self):
self.list = []
self.tmdb_link = 'http://api.themoviedb.org'
self.trakt_link = 'http://api-v2launch.trakt.tv'
self.imdb_link = 'http://www.imdb.com'
self.tmdb_key = base64.urlsafe_b64decode('NTc5ODNlMzFmYjQzNWRmNGRmNzdhZmI4NTQ3NDBlYTk=')
self.datetime = (datetime.datetime.utcnow() - datetime.timedelta(hours = 5))
self.systime = (self.datetime).strftime('%Y%m%d%H%M%S%f')
self.today_date = (self.datetime).strftime('%Y-%m-%d')
self.month_date = (self.datetime - datetime.timedelta(days = 30)).strftime('%Y-%m-%d')
self.month2_date = (self.datetime - datetime.timedelta(days = 60)).strftime('%Y-%m-%d')
self.year_date = (self.datetime - datetime.timedelta(days = 365)).strftime('%Y-%m-%d')
self.trakt_user = control.setting('trakt_user')
self.imdb_user = control.setting('imdb_user').replace('ur', '')
self.info_lang = control.setting('infoLang') or 'en'
self.tmdb_info_link = 'http://api.themoviedb.org/3/movie/%s?api_key=%s&language=%s&append_to_response=credits,releases' % ('%s', self.tmdb_key, self.info_lang)
self.imdb_by_query = 'http://www.omdbapi.com/?t=%s&y=%s'
self.tmdb_image = 'http://image.tmdb.org/t/p/original'
self.tmdb_poster = 'http://image.tmdb.org/t/p/w500'
self.persons_link = 'http://api.themoviedb.org/3/search/person?api_key=%s&query=%s&include_adult=false&page=1' % (self.tmdb_key, '%s')
self.personlist_link = 'http://api.themoviedb.org/3/person/popular?api_key=%s&page=%s' % (self.tmdb_key, '%s')
self.genres_link = 'http://api.themoviedb.org/3/genre/movie/list?api_key=%s&language=%s' % (self.tmdb_key, self.info_lang)
self.certifications_link = 'http://api.themoviedb.org/3/certification/movie/list?api_key=%s' % self.tmdb_key
self.search_link = 'http://api.themoviedb.org/3/search/movie?api_key=%s&query=%s'
self.popular_link = 'http://api.themoviedb.org/3/movie/popular?api_key=%s&page=1'
self.views_link = 'http://api.themoviedb.org/3/movie/top_rated?api_key=%s&page=1'
self.featured_link = 'http://api.themoviedb.org/3/discover/movie?api_key=%s&primary_release_date.gte=%s&primary_release_date.lte=%s&page=1' % ('%s', self.year_date, self.month2_date)
self.person_link = 'http://api.themoviedb.org/3/discover/movie?api_key=%s&with_people=%s&primary_release_date.lte=%s&sort_by=primary_release_date.desc&page=1' % ('%s', '%s', self.today_date)
self.genre_link = 'http://api.themoviedb.org/3/discover/movie?api_key=%s&with_genres=%s&primary_release_date.gte=%s&primary_release_date.lte=%s&page=1' % ('%s', '%s', self.year_date, self.today_date)
self.certification_link = 'http://api.themoviedb.org/3/discover/movie?api_key=%s&certification=%s&certification_country=US&primary_release_date.lte=%s&page=1' % ('%s', '%s', self.today_date)
self.year_link = 'http://api.themoviedb.org/3/discover/movie?api_key=%s&year=%s&primary_release_date.lte=%s&page=1' % ('%s', '%s', self.today_date)
self.theaters_link = 'http://api.themoviedb.org/3/movie/now_playing?api_key=%s&page=1'
self.boxoffice_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&sort=boxoffice_gross_us,desc&count=20&start=1'
self.oscars_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&groups=oscar_best_picture_winners&sort=year,desc&count=20&start=1'
self.trending_link = 'http://api-v2launch.trakt.tv/movies/trending?limit=20&page=1'
self.scn_link = 'http://predb.me'
self.scn_page = 'http://predb.me/?search=720p+%s+tag:-foreign&cats=movies-hd&page=%s'
self.added_link = 'http://predb.me?start=1'
self.traktlists_link = 'http://api-v2launch.trakt.tv/users/%s/lists' % self.trakt_user
self.traktlist_link = 'http://api-v2launch.trakt.tv/users/%s/lists/%s/items' % (self.trakt_user, '%s')
self.traktcollection_link = 'http://api-v2launch.trakt.tv/users/%s/collection/movies' % self.trakt_user
self.traktwatchlist_link = 'http://api-v2launch.trakt.tv/users/%s/watchlist/movies' % self.trakt_user
self.traktfeatured_link = 'http://api-v2launch.trakt.tv/recommendations/movies?limit=20'
self.traktratings_link = 'https://api-v2launch.trakt.tv/users/%s/ratings/movies' % self.trakt_user
self.imdblists_link = 'http://www.imdb.com/user/ur%s/lists?tab=all&sort=modified:desc&filter=titles' % self.imdb_user
self.imdblist_link = 'http://www.imdb.com/list/%s/?view=detail&sort=title:asc&title_type=feature,short,tv_movie,tv_special,video,documentary,game&start=1'
self.imdbwatchlist_link = 'http://www.imdb.com/user/ur%s/watchlist' % self.imdb_user
def get(self, url, idx=True):
try:
try: url = getattr(self, url + '_link')
except: pass
try: u = urlparse.urlparse(url).netloc.lower()
except: pass
if u in self.tmdb_link:
self.list = cache.get(self.tmdb_list, 24, url)
self.worker()
elif u in self.trakt_link and '/users/' in url:
self.list = cache.get(self.trakt_list, 0, url)
self.list = sorted(self.list, key=lambda k: k['title'])
if idx == True: self.worker()
elif u in self.trakt_link:
self.list = cache.get(self.trakt_list, 24, url)
if idx == True: self.worker()
elif u in self.imdb_link and ('/user/' in url or '/list/' in url):
self.list = cache.get(self.imdb_list, 0, url, idx)
if idx == True: self.worker()
elif u in self.imdb_link:
self.list = cache.get(self.imdb_list, 24, url)
if idx == True: self.worker()
elif u in self.scn_link:
self.list = cache.get(self.scn_list, 24, url)
if idx == True: self.worker()
if idx == True: self.movieDirectory(self.list)
return self.list
except:
pass
def widget(self):
setting = control.setting('movie_widget')
if setting == '2':
self.get(self.featured_link)
elif setting == '3':
self.get(self.trending_link)
else:
self.get(self.added_link)
def favourites(self):
try:
items = favourites.getFavourites('movies')
self.list = [i[1] for i in items]
for i in self.list:
if not 'name' in i: i['name'] = '%s (%s)' % (i['title'], i['year'])
try: i['title'] = i['title'].encode('utf-8')
except: pass
try: i['name'] = i['name'].encode('utf-8')
except: pass
if not 'duration' in i: i['duration'] = '0'
if not 'imdb' in i: i['imdb'] = '0'
if not 'tmdb' in i: i['tmdb'] = '0'
if not 'tvdb' in i: i['tvdb'] = '0'
if not 'tvrage' in i: i['tvrage'] = '0'
if not 'poster' in i: i['poster'] = '0'
if not 'banner' in i: i['banner'] = '0'
if not 'fanart' in i: i['fanart'] = '0'
self.worker()
self.list = sorted(self.list, key=lambda k: k['title'])
self.movieDirectory(self.list)
except:
return
def search(self, query=None):
try:
if query == None:
t = control.lang(30201).encode('utf-8')
k = control.keyboard('', t) ; k.doModal()
self.query = k.getText() if k.isConfirmed() else None
else:
self.query = query
if (self.query == None or self.query == ''): return
url = self.search_link % ('%s', urllib.quote_plus(self.query))
self.list = cache.get(self.tmdb_list, 0, url)
self.worker()
self.movieDirectory(self.list)
return self.list
except:
return
def person(self, query=None):
try:
if query == None:
t = control.lang(30201).encode('utf-8')
k = control.keyboard('', t) ; k.doModal()
self.query = k.getText() if k.isConfirmed() else None
else:
self.query = query
if (self.query == None or self.query == ''): return
url = self.persons_link % urllib.quote_plus(self.query)
self.list = cache.get(self.tmdb_person_list, 0, url)
for i in range(0, len(self.list)): self.list[i].update({'action': 'movies'})
self.addDirectory(self.list)
return self.list
except:
return
def genres(self):
try:
url = self.genres_link
url = re.sub('language=(fi|hr|no)', '', url)
self.list = cache.get(self.tmdb_genre_list, 24, url)
for i in range(0, len(self.list)): self.list[i].update({'image': 'movieGenres.jpg', 'action': 'movies'})
self.addDirectory(self.list)
return self.list
except:
return
def certifications(self):
try:
url = self.certifications_link
self.list = cache.get(self.tmdb_certification_list, 24, url)
for i in range(0, len(self.list)): self.list[i].update({'image': 'movieCertificates.jpg', 'action': 'movies'})
self.addDirectory(self.list)
return self.list
except:
return
def years(self):
year = (self.datetime.strftime('%Y'))
for i in range(int(year)-0, int(year)-50, -1): self.list.append({'name': str(i), 'url': self.year_link % ('%s', str(i)), 'image': 'movieYears.jpg', 'action': 'movies'})
self.addDirectory(self.list)
return self.list
def persons(self):
personlists = []
for i in range(1, 5):
try:
self.list = []
personlists += cache.get(self.tmdb_person_list, 24, self.personlist_link % str(i))
except:
pass
self.list = personlists
for i in range(0, len(self.list)): self.list[i].update({'action': 'movies'})
self.addDirectory(self.list)
return self.list
def userlists(self):
try:
userlists = []
if trakt.getTraktCredentials() == False: raise Exception()
userlists += cache.get(self.trakt_user_list, 0, self.traktlists_link)
except:
pass
try:
self.list = []
if self.imdb_user == '': raise Exception()
userlists += cache.get(self.imdb_user_list, 0, self.imdblists_link)
except:
pass
self.list = userlists
for i in range(0, len(self.list)): self.list[i].update({'image': 'movieUserlists.jpg', 'action': 'movies'})
self.addDirectory(self.list)
return self.list
def tmdb_list(self, url):
try:
result = client.request(url % self.tmdb_key)
result = json.loads(result)
items = result['results']
except:
return
try:
next = str(result['page'])
total = str(result['total_pages'])
if next == total: raise Exception()
if not 'page=' in url: raise Exception()
next = '%s&page=%s' % (url.split('&page=', 1)[0], str(int(next)+1))
next = next.encode('utf-8')
except:
next = ''
for item in items:
try:
title = item['title']
title = client.replaceHTMLCodes(title)
title = title.encode('utf-8')
year = item['release_date']
year = re.compile('(\d{4})').findall(year)[-1]
year = year.encode('utf-8')
name = '%s (%s)' % (title, year)
try: name = name.encode('utf-8')
except: pass
tmdb = item['id']
tmdb = re.sub('[^0-9]', '', str(tmdb))
tmdb = tmdb.encode('utf-8')
poster = item['poster_path']
if poster == '' or poster == None: raise Exception()
else: poster = '%s%s' % (self.tmdb_poster, poster)
poster = poster.encode('utf-8')
fanart = item['backdrop_path']
if fanart == '' or fanart == None: fanart = '0'
if not fanart == '0': fanart = '%s%s' % (self.tmdb_image, fanart)
fanart = fanart.encode('utf-8')
premiered = item['release_date']
try: premiered = re.compile('(\d{4}-\d{2}-\d{2})').findall(premiered)[0]
except: premiered = '0'
premiered = premiered.encode('utf-8')
rating = str(item['vote_average'])
if rating == '' or rating == None: rating = '0'
rating = rating.encode('utf-8')
votes = str(item['vote_count'])
try: votes = str(format(int(votes),',d'))
except: pass
if votes == '' or votes == None: votes = '0'
votes = votes.encode('utf-8')
plot = item['overview']
if plot == '' or plot == None: plot = '0'
plot = client.replaceHTMLCodes(plot)
plot = plot.encode('utf-8')
tagline = re.compile('[.!?][\s]{1,2}(?=[A-Z])').split(plot)[0]
try: tagline = tagline.encode('utf-8')
except: pass
self.list.append({'title': title, 'originaltitle': title, 'year': year, 'premiered': premiered, 'studio': '0', 'genre': '0', 'duration': '0', 'rating': rating, 'votes': votes, 'mpaa': '0', 'director': '0', 'writer': '0', 'cast': '0', 'plot': plot, 'tagline': tagline, 'name': name, 'code': '0', 'imdb': '0', 'tmdb': tmdb, 'tvdb': '0', 'tvrage': '0', 'poster': poster, 'banner': '0', 'fanart': fanart, 'next': next})
except:
pass
return self.list
def tmdb_person_list(self, url):
try:
result = client.request(url)
result = json.loads(result)
items = result['results']
except:
return
for item in items:
try:
name = item['name']
name = name.encode('utf-8')
url = self.person_link % ('%s', item['id'])
url = url.encode('utf-8')
image = '%s%s' % (self.tmdb_image, item['profile_path'])
image = image.encode('utf-8')
self.list.append({'name': name, 'url': url, 'image': image})
except:
pass
return self.list
def tmdb_genre_list(self, url):
try:
result = client.request(url)
result = json.loads(result)
items = result['genres']
except:
return
for item in items:
try:
name = item['name']
name = name.encode('utf-8')
url = self.genre_link % ('%s', item['id'])
url = url.encode('utf-8')
self.list.append({'name': name, 'url': url})
except:
pass
return self.list
def tmdb_certification_list(self, url):
try:
result = client.request(url)
result = json.loads(result)
items = result['certifications']['US']
except:
return
for item in items:
try:
name = item['certification']
name = name.encode('utf-8')
url = self.certification_link % ('%s', item['certification'])
url = url.encode('utf-8')
self.list.append({'name': name, 'url': url})
except:
pass
return self.list
def trakt_list(self, url):
try:
q = dict(urlparse.parse_qsl(urlparse.urlsplit(url).query))
q.update({'extended': 'full,images'})
q = (urllib.urlencode(q)).replace('%2C', ',')
u = url.replace('?' + urlparse.urlparse(url).query, '') + '?' + q
result = trakt.getTrakt(u)
result = json.loads(result)
items = []
for i in result:
try: items.append(i['movie'])
except: pass
if len(items) == 0:
items = result
except:
return
try:
q = dict(urlparse.parse_qsl(urlparse.urlsplit(url).query))
p = str(int(q['page']) + 1)
if p == '5': raise Exception()
q.update({'page': p})
q = (urllib.urlencode(q)).replace('%2C', ',')
next = url.replace('?' + urlparse.urlparse(url).query, '') + '?' + q
next = next.encode('utf-8')
except:
next = ''
for item in items:
try:
title = item['title']
title = client.replaceHTMLCodes(title)
title = title.encode('utf-8')
year = item['year']
year = re.sub('[^0-9]', '', str(year))
year = year.encode('utf-8')
if int(year) > int((self.datetime).strftime('%Y')): raise Exception()
name = '%s (%s)' % (title, year)
try: name = name.encode('utf-8')
except: pass
tmdb = item['ids']['tmdb']
if tmdb == None or tmdb == '': tmdb = '0'
tmdb = re.sub('[^0-9]', '', str(tmdb))
tmdb = tmdb.encode('utf-8')
imdb = item['ids']['imdb']
if imdb == None or imdb == '': raise Exception()
imdb = 'tt' + re.sub('[^0-9]', '', str(imdb))
imdb = imdb.encode('utf-8')
poster = '0'
try: poster = item['images']['poster']['medium']
except: pass
if poster == None or not '/posters/' in poster: poster = '0'
poster = poster.rsplit('?', 1)[0]
poster = poster.encode('utf-8')
banner = poster
try: banner = item['images']['banner']['full']
except: pass
if banner == None or not '/banners/' in banner: banner = '0'
banner = banner.rsplit('?', 1)[0]
banner = banner.encode('utf-8')
fanart = '0'
try: fanart = item['images']['fanart']['full']
except: pass
if fanart == None or not '/fanarts/' in fanart: fanart = '0'
fanart = fanart.rsplit('?', 1)[0]
fanart = fanart.encode('utf-8')
premiered = item['released']
try: premiered = re.compile('(\d{4}-\d{2}-\d{2})').findall(premiered)[0]
except: premiered = '0'
premiered = premiered.encode('utf-8')
genre = item['genres']
genre = [i.title() for i in genre]
if genre == []: genre = '0'
genre = ' / '.join(genre)
genre = genre.encode('utf-8')
try: duration = str(item['runtime'])
except: duration = '0'
if duration == None: duration = '0'
duration = duration.encode('utf-8')
try: rating = str(item['rating'])
except: rating = '0'
if rating == None or rating == '0.0': rating = '0'
rating = rating.encode('utf-8')
try: votes = str(item['votes'])
except: votes = '0'
try: votes = str(format(int(votes),',d'))
except: pass
if votes == None: votes = '0'
votes = votes.encode('utf-8')
mpaa = item['certification']
if mpaa == None: mpaa = '0'
mpaa = mpaa.encode('utf-8')
plot = item['overview']
if plot == None: plot = '0'
plot = client.replaceHTMLCodes(plot)
plot = plot.encode('utf-8')
try: tagline = item['tagline']
except: tagline = None
if tagline == None and not plot == '0': tagline = re.compile('[.!?][\s]{1,2}(?=[A-Z])').split(plot)[0]
elif tagline == None: tagline = '0'
tagline = client.replaceHTMLCodes(tagline)
try: tagline = tagline.encode('utf-8')
except: pass
self.list.append({'title': title, 'originaltitle': title, 'year': year, 'premiered': premiered, 'studio': '0', 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': '0', 'writer': '0', 'cast': '0', 'plot': plot, 'tagline': tagline, 'name': name, 'code': imdb, 'imdb': imdb, 'tmdb': tmdb, 'tvdb': '0', 'tvrage': '0', 'poster': poster, 'banner': banner, 'fanart': fanart, 'next': next})
except:
pass
return self.list
def trakt_user_list(self, url):
try:
result = trakt.getTrakt(url)
items = json.loads(result)
except:
pass
for item in items:
try:
name = item['name']
name = client.replaceHTMLCodes(name)
name = name.encode('utf-8')
url = self.traktlist_link % item['ids']['slug']
url = url.encode('utf-8')
self.list.append({'name': name, 'url': url, 'context': url})
except:
pass
return self.list
def imdb_list(self, url, idx=True):
try:
if url == self.imdbwatchlist_link:
def imdb_watchlist_id(url):
return re.compile('/export[?]list_id=(ls\d*)').findall(client.request(url))[0]
url = cache.get(imdb_watchlist_id, 8640, url)
url = self.imdblist_link % url
result = str(client.request(url))
try:
if idx == True: raise Exception()
pages = client.parseDOM(result, 'div', attrs = {'class': 'desc'})[0]
pages = re.compile('Page \d+? of (\d*)').findall(pages)[0]
for i in range(1, int(pages)):
u = url.replace('&start=1', '&start=%s' % str(i*100+1))
result += str(client.request(u))
except:
pass
result = result.replace('\n','')
result = result.decode('iso-8859-1').encode('utf-8')
items = client.parseDOM(result, 'tr', attrs = {'class': '.+?'})
items += client.parseDOM(result, 'div', attrs = {'class': 'list_item.+?'})
except:
return
try:
next = client.parseDOM(result, 'span', attrs = {'class': 'pagination'})
next += client.parseDOM(result, 'div', attrs = {'class': 'pagination'})
name = client.parseDOM(next[-1], 'a')[-1]
if 'laquo' in name: raise Exception()
next = client.parseDOM(next, 'a', ret='href')[-1]
next = url.replace(urlparse.urlparse(url).query, urlparse.urlparse(next).query)
next = client.replaceHTMLCodes(next)
next = next.encode('utf-8')
except:
next = ''
for item in items:
try:
try: title = client.parseDOM(item, 'a')[1]
except: pass
try: title = client.parseDOM(item, 'a', attrs = {'onclick': '.+?'})[-1]
except: pass
title = client.replaceHTMLCodes(title)
title = title.encode('utf-8')
year = client.parseDOM(item, 'span', attrs = {'class': 'year_type'})[0]
year = re.compile('(\d{4})').findall(year)[-1]
year = year.encode('utf-8')
if int(year) > int((self.datetime).strftime('%Y')): raise Exception()
name = '%s (%s)' % (title, year)
try: name = name.encode('utf-8')
except: pass
imdb = client.parseDOM(item, 'a', ret='href')[0]
imdb = 'tt' + re.sub('[^0-9]', '', imdb.rsplit('tt', 1)[-1])
imdb = imdb.encode('utf-8')
poster = '0'
try: poster = client.parseDOM(item, 'img', ret='src')[0]
except: pass
try: poster = client.parseDOM(item, 'img', ret='loadlate')[0]
except: pass
if not ('_SX' in poster or '_SY' in poster): poster = '0'
poster = re.sub('_SX\d*|_SY\d*|_CR\d+?,\d+?,\d+?,\d*','_SX500', poster)
poster = client.replaceHTMLCodes(poster)
poster = poster.encode('utf-8')
genre = client.parseDOM(item, 'span', attrs = {'class': 'genre'})
genre = client.parseDOM(genre, 'a')
genre = ' / '.join(genre)
if genre == '': genre = '0'
genre = client.replaceHTMLCodes(genre)
genre = genre.encode('utf-8')
try: duration = re.compile('(\d+?) mins').findall(item)[-1]
except: duration = '0'
duration = client.replaceHTMLCodes(duration)
duration = duration.encode('utf-8')
try: rating = client.parseDOM(item, 'span', attrs = {'class': 'rating-rating'})[0]
except: rating = '0'
try: rating = client.parseDOM(rating, 'span', attrs = {'class': 'value'})[0]
except: rating = '0'
if rating == '' or rating == '-': rating = '0'
rating = client.replaceHTMLCodes(rating)
rating = rating.encode('utf-8')
try: votes = client.parseDOM(item, 'div', ret='title', attrs = {'class': 'rating rating-list'})[0]
except: votes = '0'
try: votes = re.compile('[(](.+?) votes[)]').findall(votes)[0]
except: votes = '0'
if votes == '': votes = '0'
votes = client.replaceHTMLCodes(votes)
votes = votes.encode('utf-8')
try: mpaa = client.parseDOM(item, 'span', attrs = {'class': 'certificate'})[0]
except: mpaa = '0'
try: mpaa = client.parseDOM(mpaa, 'span', ret='title')[0]
except: mpaa = '0'
if mpaa == '' or mpaa == 'NOT_RATED': mpaa = '0'
mpaa = mpaa.replace('_', '-')
mpaa = client.replaceHTMLCodes(mpaa)
mpaa = mpaa.encode('utf-8')
director = client.parseDOM(item, 'span', attrs = {'class': 'credit'})
director += client.parseDOM(item, 'div', attrs = {'class': 'secondary'})
try: director = [i for i in director if 'Director:' in i or 'Dir:' in i][0]
except: director = '0'
director = director.split('With:', 1)[0].strip()
director = client.parseDOM(director, 'a')
director = ' / '.join(director)
if director == '': director = '0'
director = client.replaceHTMLCodes(director)
director = director.encode('utf-8')
cast = client.parseDOM(item, 'span', attrs = {'class': 'credit'})
cast += client.parseDOM(item, 'div', attrs = {'class': 'secondary'})
try: cast = [i for i in cast if 'With:' in i or 'Stars:' in i][0]
except: cast = '0'
cast = cast.split('With:', 1)[-1].strip()
cast = client.replaceHTMLCodes(cast)
cast = cast.encode('utf-8')
cast = client.parseDOM(cast, 'a')
if cast == []: cast = '0'
plot = '0'
try: plot = client.parseDOM(item, 'span', attrs = {'class': 'outline'})[0]
except: pass
try: plot = client.parseDOM(item, 'div', attrs = {'class': 'item_description'})[0]
except: pass
plot = plot.rsplit('<span>', 1)[0].strip()
if plot == '': plot = '0'
plot = client.replaceHTMLCodes(plot)
plot = plot.encode('utf-8')
tagline = re.compile('[.!?][\s]{1,2}(?=[A-Z])').split(plot)[0]
try: tagline = tagline.encode('utf-8')
except: pass
self.list.append({'title': title, 'originaltitle': title, 'year': year, 'premiered': '0', 'studio': '0', 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': '0', 'cast': cast, 'plot': plot, 'tagline': tagline, 'name': name, 'code': imdb, 'imdb': imdb, 'tmdb': '0', 'tvdb': '0', 'tvrage': '0', 'poster': poster, 'banner': '0', 'fanart': '0', 'next': next})
except:
pass
return self.list
def imdb_user_list(self, url):
try:
result = client.request(url)
result = result.decode('iso-8859-1').encode('utf-8')
items = client.parseDOM(result, 'div', attrs = {'class': 'list_name'})
except:
pass
for item in items:
try:
name = client.parseDOM(item, 'a')[0]
name = client.replaceHTMLCodes(name)
name = name.encode('utf-8')
url = client.parseDOM(item, 'a', ret='href')[0]
url = url.split('/list/', 1)[-1].replace('/', '')
url = self.imdblist_link % url
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
self.list.append({'name': name, 'url': url, 'context': url})
except:
pass
return self.list
def scn_list(self, url):
def predb_items():
try:
years = [(self.datetime).strftime('%Y'), (self.datetime - datetime.timedelta(days = 365)).strftime('%Y')]
months = (self.datetime - datetime.timedelta(days = 180)).strftime('%Y%m%d')
result = ''
for i in years:
result += client.request(self.scn_page % (str(i), '1'))
result += client.request(self.scn_page % (str(i), '2'))
items = client.parseDOM(result, 'div', attrs = {'class': 'post'})
items = [(client.parseDOM(i, 'a', attrs = {'class': 'p-title'}), re.compile('(\d{4}-\d{2}-\d{2})').findall(i)) for i in items]
items = [(i[0][0], i[1][0]) for i in items if len(i[0]) > 0 and len(i[1]) > 0]
items = [(re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|3D)(\.|\)|\]|\s)(.+)', '', i[0]), re.compile('[\.|\(|\[|\s](\d{4})[\.|\)|\]|\s]').findall(i[0]), re.sub('[^0-9]', '', i[1])) for i in items]
items = [(i[0], i[1][-1], i[2]) for i in items if len(i[1]) > 0]
items = [i for i in items if int(months) <= int(i[2])]
items = sorted(items,key=lambda x: x[2])[::-1]
items = [(re.sub('(\.|\(|\[|LIMITED|UNCUT)', ' ', i[0]).strip(), i[1]) for i in items]
items = [x for y,x in enumerate(items) if x not in items[:y]]
items = items[:150]
return items
except:
return
def predb_list(i):
try:
url = self.imdb_by_query % (urllib.quote_plus(i[0]), i[1])
item = client.request(url, timeout='10')
item = json.loads(item)
title = item['Title']
title = client.replaceHTMLCodes(title)
title = title.encode('utf-8')
year = item['Year']
year = re.sub('[^0-9]', '', str(year))
year = year.encode('utf-8')
name = '%s (%s)' % (title, year)
try: name = name.encode('utf-8')
except: pass
imdb = item['imdbID']
if imdb == None or imdb == '' or imdb == 'N/A': raise Exception()
imdb = 'tt' + re.sub('[^0-9]', '', str(imdb))
imdb = imdb.encode('utf-8')
poster = item['Poster']
if poster == None or poster == '' or poster == 'N/A': poster = '0'
if not ('_SX' in poster or '_SY' in poster): poster = '0'
poster = re.sub('_SX\d*|_SY\d*|_CR\d+?,\d+?,\d+?,\d*','_SX500', poster)
poster = poster.encode('utf-8')
genre = item['Genre']
if genre == None or genre == '' or genre == 'N/A': genre = '0'
genre = genre.replace(', ', ' / ')
genre = genre.encode('utf-8')
duration = item['Runtime']
if duration == None or duration == '' or duration == 'N/A': duration = '0'
duration = re.sub('[^0-9]', '', str(duration))
duration = duration.encode('utf-8')
rating = item['imdbRating']
if rating == None or rating == '' or rating == 'N/A' or rating == '0.0': rating = '0'
rating = rating.encode('utf-8')
votes = item['imdbVotes']
try: votes = str(format(int(votes),',d'))
except: pass
if votes == None or votes == '' or votes == 'N/A': votes = '0'
votes = votes.encode('utf-8')
mpaa = item['Rated']
if mpaa == None or mpaa == '' or mpaa == 'N/A': mpaa = '0'
mpaa = mpaa.encode('utf-8')
director = item['Director']
if director == None or director == '' or director == 'N/A': director = '0'
director = director.replace(', ', ' / ')
director = re.sub(r'\(.*?\)', '', director)
director = ' '.join(director.split())
director = director.encode('utf-8')
writer = item['Writer']
if writer == None or writer == '' or writer == 'N/A': writer = '0'
writer = writer.replace(', ', ' / ')
writer = re.sub(r'\(.*?\)', '', writer)
writer = ' '.join(writer.split())
writer = writer.encode('utf-8')
cast = item['Actors']
if cast == None or cast == '' or cast == 'N/A': cast = '0'
cast = [x.strip() for x in cast.split(',') if not x == '']
try: cast = [(x.encode('utf-8'), '') for x in cast]
except: cast = []
if cast == []: cast = '0'
plot = item['Plot']
if plot == None or plot == '' or plot == 'N/A': plot = '0'
plot = client.replaceHTMLCodes(plot)
plot = plot.encode('utf-8')
tagline = re.compile('[.!?][\s]{1,2}(?=[A-Z])').split(plot)[0]
try: tagline = tagline.encode('utf-8')
except: pass
self.list.append({'title': title, 'originaltitle': title, 'year': year, 'premiered': '0', 'studio': '0', 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': writer, 'cast': cast, 'plot': plot, 'tagline': tagline, 'name': name, 'code': imdb, 'imdb': imdb, 'tmdb': '0', 'tvdb': '0', 'tvrage': '0', 'poster': poster, 'banner': '0', 'fanart': '0'})
except:
pass
try:
items = cache.get(predb_items, 24)
start = re.compile('start=(\d*)').findall(url)[-1]
start = int(start)
if len(items) > (start + 30): next = self.scn_link + '?start=%s' % (start + 30)
else: next = ''
except:
return
threads = []
for i in range(start - 1, start + 29):
try: threads.append(workers.Thread(predb_list, items[i]))
except: pass
[i.start() for i in threads]
[i.join() for i in threads]
for i in range(0, len(self.list)): self.list[i].update({'next': next})
return self.list
def worker(self):
self.meta = []
total = len(self.list)
for i in range(0, total): self.list[i].update({'metacache': False})
self.list = metacache.fetch(self.list, self.info_lang)
for r in range(0, total, 25):
threads = []
for i in range(r, r+25):
if i <= total: threads.append(workers.Thread(self.super_info, i))
[i.start() for i in threads]
[i.join() for i in threads]
self.list = [i for i in self.list if not i['imdb'] == '0']
if len(self.meta) > 0: metacache.insert(self.meta)
def super_info(self, i):
try:
if self.list[i]['metacache'] == True: raise Exception()
try: imdb = self.list[i]['imdb']
except: imdb = '0'
try: tmdb = self.list[i]['tmdb']
except: tmdb = '0'
if not tmdb == '0': url = self.tmdb_info_link % tmdb
elif not imdb == '0': url = self.tmdb_info_link % imdb
else: raise Exception()
item = client.request(url, timeout='10')
item = json.loads(item)
tmdb = item['id']
if tmdb == '' or tmdb == None: tmdb = '0'
tmdb = re.sub('[^0-9]', '', str(tmdb))
tmdb = tmdb.encode('utf-8')
if not tmdb == '0': self.list[i].update({'tmdb': tmdb})
imdb = item['imdb_id']
if imdb == '' or imdb == None: imdb = '0'
if not imdb == '0': imdb = 'tt' + re.sub('[^0-9]', '', str(imdb))
imdb = imdb.encode('utf-8')
if not imdb == '0': self.list[i].update({'imdb': imdb, 'code': imdb})
poster = item['poster_path']
if poster == '' or poster == None: poster = '0'
if not poster == '0': poster = '%s%s' % (self.tmdb_poster, poster)
poster = poster.encode('utf-8')
if not poster == '0' and (self.list[i]['poster'] == '0' or 'imdb.com' in self.list[i]['poster']): self.list[i].update({'poster': poster})
fanart = item['backdrop_path']
if fanart == '' or fanart == None: fanart = '0'
if not fanart == '0': fanart = '%s%s' % (self.tmdb_image, fanart)
fanart = fanart.encode('utf-8')
if not fanart == '0' and self.list[i]['fanart'] == '0': self.list[i].update({'fanart': fanart})
premiered = item['release_date']
try: premiered = re.compile('(\d{4}-\d{2}-\d{2})').findall(premiered)[0]
except: premiered = '0'
if premiered == '' or premiered == None: premiered = '0'
premiered = premiered.encode('utf-8')
if not premiered == '0': self.list[i].update({'premiered': premiered})
studio = item['production_companies']
try: studio = [x['name'] for x in studio][0]
except: studio = '0'
if studio == '' or studio == None: studio = '0'
studio = studio.encode('utf-8')
if not studio == '0': self.list[i].update({'studio': studio})
genre = item['genres']
try: genre = [x['name'] for x in genre]
except: genre = '0'
if genre == '' or genre == None or genre == []: genre = '0'
genre = ' / '.join(genre)
genre = genre.encode('utf-8')
if not genre == '0': self.list[i].update({'genre': genre})
try: duration = str(item['runtime'])
except: duration = '0'
if duration == '' or duration == None: duration = '0'
duration = duration.encode('utf-8')
if not duration == '0': self.list[i].update({'duration': duration})
rating = str(item['vote_average'])
if rating == '' or rating == None: rating = '0'
rating = rating.encode('utf-8')
if not rating == '0': self.list[i].update({'rating': rating})
votes = str(item['vote_count'])
try: votes = str(format(int(votes),',d'))
except: pass
if votes == '' or votes == None: votes = '0'
votes = votes.encode('utf-8')
if not votes == '0': self.list[i].update({'votes': votes})
mpaa = item['releases']['countries']
try: mpaa = [x for x in mpaa if not x['certification'] == '']
except: mpaa = '0'
try: mpaa = ([x for x in mpaa if x['iso_3166_1'].encode('utf-8') == 'US'] + [x for x in mpaa if not x['iso_3166_1'].encode('utf-8') == 'US'])[0]['certification']
except: mpaa = '0'
mpaa = mpaa.encode('utf-8')
if not mpaa == '0': self.list[i].update({'mpaa': mpaa})
director = item['credits']['crew']
try: director = [x['name'] for x in director if x['job'].encode('utf-8') == 'Director']
except: director = '0'
if director == '' or director == None or director == []: director = '0'
director = ' / '.join(director)
director = director.encode('utf-8')
if not director == '0': self.list[i].update({'director': director})
writer = item['credits']['crew']
try: writer = [x['name'] for x in writer if x['job'].encode('utf-8') in ['Writer', 'Screenplay']]
except: writer = '0'
try: writer = [x for n,x in enumerate(writer) if x not in writer[:n]]
except: writer = '0'
if writer == '' or writer == None or writer == []: writer = '0'
writer = ' / '.join(writer)
writer = writer.encode('utf-8')
if not writer == '0': self.list[i].update({'writer': writer})
cast = item['credits']['cast']
try: cast = [(x['name'].encode('utf-8'), x['character'].encode('utf-8')) for x in cast]
except: cast = []
if len(cast) > 0: self.list[i].update({'cast': cast})
plot = item['overview']
if plot == '' or plot == None: plot = '0'
plot = plot.encode('utf-8')
if not plot == '0': self.list[i].update({'plot': plot})
tagline = item['tagline']
if (tagline == '' or tagline == None) and not plot == '0': tagline = re.compile('[.!?][\s]{1,2}(?=[A-Z])').split(plot)[0]
elif tagline == '' or tagline == None: tagline = '0'
try: tagline = tagline.encode('utf-8')
except: pass
if not tagline == '0': self.list[i].update({'tagline': tagline})
self.meta.append({'imdb': imdb, 'tmdb': tmdb, 'tvdb': '0', 'tvrage': '0', 'lang': self.info_lang, 'item': {'code': imdb, 'imdb': imdb, 'tmdb': tmdb, 'poster': poster, 'fanart': fanart, 'premiered': premiered, 'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': writer, 'cast': cast, 'plot': plot, 'tagline': tagline}})
except:
pass
def movieDirectory(self, items):
if items == None or len(items) == 0: return
isFolder = True if control.setting('autoplay') == 'false' and control.setting('host_select') == '1' else False
isFolder = False if control.window.getProperty('PseudoTVRunning') == 'True' else isFolder
playbackMenu = control.lang(30204).encode('utf-8') if control.setting('autoplay') == 'true' else control.lang(30203).encode('utf-8')
traktMode = False if trakt.getTraktCredentials() == False else True
cacheToDisc = False if not action == 'movieSearch' else True
addonPoster, addonBanner = control.addonPoster(), control.addonBanner()
addonFanart, settingFanart = control.addonFanart(), control.setting('fanart')
sysaddon = sys.argv[0]
try:
favitems = favourites.getFavourites('movies')
favitems = [i[0] for i in favitems]
except:
pass
try:
if traktMode == True: raise Exception()
from metahandler import metahandlers
metaget = metahandlers.MetaData(preparezip=False)
except:
pass
try:
if traktMode == False: raise Exception()
indicators = trakt.syncMovies(timeout=720)
indicators = json.loads(indicators)
except:
pass
for i in items:
try:
label = i['name']
sysname = urllib.quote_plus(label)
systitle = urllib.quote_plus(i['title'])
imdb, tmdb, year = i['imdb'], i['tmdb'], i['year']
poster, banner, fanart = i['poster'], i['banner'], i['fanart']
if poster == '0': poster = addonPoster
if banner == '0' and poster == '0': banner = addonBanner
elif banner == '0': banner = poster
meta = dict((k,v) for k, v in i.iteritems() if not v == '0')
meta.update({'trailer': '%s?action=trailer&name=%s' % (sysaddon, sysname)})
if i['duration'] == '0': meta.update({'duration': '120'})
try: meta.update({'duration': str(int(meta['duration']) * 60)})
except: pass
sysmeta = urllib.quote_plus(json.dumps(meta))
url = '%s?action=play&name=%s&title=%s&year=%s&imdb=%s&tmdb=%s&t=%s' % (sysaddon, sysname, systitle, year, imdb, tmdb, self.systime)
sysurl = urllib.quote_plus(url)
if isFolder == True:
url = '%s?action=sources&name=%s&title=%s&year=%s&imdb=%s&tmdb=%s&meta=%s' % (sysaddon, sysname, systitle, year, imdb, tmdb, sysmeta)
try:
if traktMode == True: raise Exception()
playcount = metaget._get_watched('movie', imdb, '', '')
if playcount == 7: meta.update({'playcount': 1, 'overlay': 7})
else: meta.update({'playcount': 0, 'overlay': 6})
except:
pass
try:
if traktMode == False: raise Exception()
playcount = [i for i in indicators if str(i['movie']['ids']['imdb']) == imdb][0]
meta.update({'playcount': 1, 'overlay': 7})
except:
pass
cm = []
cm.append((playbackMenu, 'RunPlugin(%s?action=alterSources&url=%s&meta=%s)' % (sysaddon, sysurl, sysmeta)))
cm.append((control.lang(30205).encode('utf-8'), 'Action(Info)'))
if not action == 'movieSearch':
cm.append((control.lang(30206).encode('utf-8'), 'RunPlugin(%s?action=moviePlaycount&title=%s&year=%s&imdb=%s&query=7)' % (sysaddon, systitle, year, imdb)))
cm.append((control.lang(30207).encode('utf-8'), 'RunPlugin(%s?action=moviePlaycount&title=%s&year=%s&imdb=%s&query=6)' % (sysaddon, systitle, year, imdb)))
if traktMode == True:
cm.append((control.lang(30208).encode('utf-8'), 'RunPlugin(%s?action=traktManager&name=%s&imdb=%s&content=movie)' % (sysaddon, sysname, imdb)))
if action == 'movieFavourites':
cm.append((control.lang(30210).encode('utf-8'), 'RunPlugin(%s?action=deleteFavourite&meta=%s&content=movies)' % (sysaddon, sysmeta)))
elif action == 'movieSearch':
cm.append((control.lang(30209).encode('utf-8'), 'RunPlugin(%s?action=addFavourite&meta=%s&query=0&content=movies)' % (sysaddon, sysmeta)))
else:
if not imdb in favitems: cm.append((control.lang(30209).encode('utf-8'), 'RunPlugin(%s?action=addFavourite&meta=%s&content=movies)' % (sysaddon, sysmeta)))
else: cm.append((control.lang(30210).encode('utf-8'), 'RunPlugin(%s?action=deleteFavourite&meta=%s&content=movies)' % (sysaddon, sysmeta)))
cm.append((control.lang(30211).encode('utf-8'), 'RunPlugin(%s?action=movieToLibrary&name=%s&title=%s&year=%s&imdb=%s&tmdb=%s)' % (sysaddon, sysname, systitle, year, imdb, tmdb)))
cm.append((control.lang(30212).encode('utf-8'), 'RunPlugin(%s?action=addView&content=movies)' % sysaddon))
item = control.item(label=label, iconImage=poster, thumbnailImage=poster)
try: item.setArt({'poster': poster, 'banner': banner})
except: pass
if settingFanart == 'true' and not fanart == '0':
item.setProperty('Fanart_Image', fanart)
elif not addonFanart == None:
item.setProperty('Fanart_Image', addonFanart)
item.setInfo(type='Video', infoLabels = meta)
item.setProperty('Video', 'true')
item.setProperty('IsPlayable', 'true')
item.addContextMenuItems(cm, replaceItems=True)
control.addItem(handle=int(sys.argv[1]), url=url, listitem=item, isFolder=isFolder)
except:
pass
try:
url = items[0]['next']
if url == '': raise Exception()
url = '%s?action=movies&url=%s' % (sysaddon, urllib.quote_plus(url))
addonNext = control.addonNext()
item = control.item(label=control.lang(30213).encode('utf-8'), iconImage=addonNext, thumbnailImage=addonNext)
item.addContextMenuItems([], replaceItems=False)
if not addonFanart == None: item.setProperty('Fanart_Image', addonFanart)
control.addItem(handle=int(sys.argv[1]), url=url, listitem=item, isFolder=True)
except:
pass
control.content(int(sys.argv[1]), 'movies')
control.directory(int(sys.argv[1]), cacheToDisc=cacheToDisc)
views.setView('movies', {'skin.confluence': 500})
def addDirectory(self, items):
if items == None or len(items) == 0: return
sysaddon = sys.argv[0]
addonFanart = control.addonFanart()
addonThumb = control.addonThumb()
artPath = control.artPath()
for i in items:
try:
try: name = control.lang(i['name']).encode('utf-8')
except: name = i['name']
if i['image'].startswith('http://'): thumb = i['image']
elif not artPath == None: thumb = os.path.join(artPath, i['image'])
else: thumb = addonThumb
url = '%s?action=%s' % (sysaddon, i['action'])
try: url += '&url=%s' % urllib.quote_plus(i['url'])
except: pass
cm = []
try: cm.append((control.lang(30211).encode('utf-8'), 'RunPlugin(%s?action=moviesToLibrary&url=%s)' % (sysaddon, urllib.quote_plus(i['context']))))
except: pass
item = control.item(label=name, iconImage=thumb, thumbnailImage=thumb)
item.addContextMenuItems(cm, replaceItems=False)
if not addonFanart == None: item.setProperty('Fanart_Image', addonFanart)
control.addItem(handle=int(sys.argv[1]), url=url, listitem=item, isFolder=True)
except:
pass
control.directory(int(sys.argv[1]), cacheToDisc=True)
| gpl-2.0 | 3,366,632,997,002,474,500 | 42.039088 | 444 | 0.505109 | false |
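As an aside on the indexer above: tmdb_list derives its "next page" link purely from the page/total_pages fields of the TMDb response and the page parameter already present in the request URL. A standalone sketch of that rule follows; the helper name and the example values are made up for illustration.

```python
# Standalone illustration of the pagination rule in movies.tmdb_list above.
def next_page_url(url, page, total_pages):
    # no further link once the last page is reached or the URL is unpaged
    if page == total_pages or 'page=' not in url:
        return ''
    return '%s&page=%s' % (url.split('&page=', 1)[0], page + 1)


print(next_page_url('http://api.themoviedb.org/3/movie/popular?api_key=KEY&page=1', 1, 40))
# -> http://api.themoviedb.org/3/movie/popular?api_key=KEY&page=2
```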
NeuralProsthesisLab/unlock | unlock/util/signal.py | 1 | 4991 | # Copyright (c) James Percent, Byron Galbraith and Unlock contributors.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Unlock nor the names of its contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import socket
import numpy as np
import random
__author__ = 'jpercent'
class RMSSignalGenerator(object):
    '''
    Generates simulated device samples. Each invocation of the generate method
    returns a table of samples. The generate method determines sample values by
    consulting an unlock.state.SequenceState. The state returns a tuple of
    True/False values, one for each channel. A state channel value that is True
    results in a sample value, for the corresponding channel, that is above the
    threshold; a False value results in a value above the min, but below the
    threshold.

    channels: number of channels
    minmax: list of tuples denoting the min and max values of a channel
    thresholds: list of channel thresholds
    state: an unlock.state.SequenceState; provides a means to dynamically
        configure which channels of a given set of samples are above/below
        the threshold values
    samples: default number of samples per request
    '''
    def __init__(self, channels, minmax, thresholds, state, samples, seed=31337):
        assert channels == len(thresholds) and channels == len(minmax)
        self.channels = channels
        self.min = 0
        self.max = 1
        self.minmax = minmax
        self.thresholds = thresholds
        self.samples = samples
        self.state = state
        self.state.start()
        self.generate_sample = self.simple_sample_gen
        self.random = random.Random()
        self.random.seed(seed)

    def generate_samples(self, samples=None):
        if samples == None:
            samples = self.samples
        ret = np.zeros((samples, self.channels))
        for sample in range(samples):
            ret[sample] = self.generate_sample(self.state.state())
            self.state.step()
        return ret

    def simple_sample_gen(self, state_value):
        assert self.channels == len(state_value)
        sample = np.zeros(self.channels)
        for i in range(self.channels):
            if state_value[i] == True:
                sample[i] = self.random.randint(self.thresholds[i], self.minmax[i][self.max])
            elif state_value[i] == False:
                sample[i] = self.random.randint(self.minmax[i][self.min], self.thresholds[i]-1)
            else:
                raise Exception('invalid state')
        return sample
if __name__ == '__main__':
    # example
    from unlock.state import SequenceState

    channels = 4
    minmax = [(0, 10), (-10, 10), (9, 100), (0, 7)]
    thresholds = [8, 5, 80, 5]
    samples = 12
    seq = [(False, False, False, False), (True, False, False, False), (True, True, False, False),
           (False, False, False, True), (False, True, False, False), (True, False, True, False),
           (False, False, True, False), (False, False, False, True),
           (True, False, False, True), (False, True, False, True), (True, True, True, False),
           (True, True, True, True)]
    state = SequenceState(seq)
    print(state.sequence)

    gen = RMSSignalGenerator(channels, minmax, thresholds, state, samples)
    sample_values = gen.generate_samples()
    for i in range(len(seq)):
        print("Sequence value = ", seq[i])
        print("Normalized Sample = ", sample_values[i] - np.array(thresholds))
        print('-' * 80)
| bsd-3-clause | 2,712,801,871,920,440,300 | 46.542857 | 101 | 0.660589 | false |
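A quick property check of the behaviour the RMSSignalGenerator docstring describes (True flags yield values at or above the channel threshold, False flags strictly below it). This is an illustrative sketch, not part of the module: the generator's import path is assumed from the row's file path, and SequenceState is imported the same way the module's own __main__ block does.

```python
# Illustrative sanity check of the generator's threshold behaviour.
from unlock.state import SequenceState
from unlock.util.signal import RMSSignalGenerator  # path assumed from the row above

thresholds = [8, 5, 80, 5]
seq = [(True, False, True, False), (False, True, False, True)]
gen = RMSSignalGenerator(4, [(0, 10), (-10, 10), (9, 100), (0, 7)],
                         thresholds, SequenceState(seq), samples=len(seq))

for flags, row in zip(seq, gen.generate_samples()):
    for flag, value, threshold in zip(flags, row, thresholds):
        assert (value >= threshold) == flag
```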
ingadhoc/odoo-infrastructure | infrastructure/models/server_docker_image.py | 1 | 1758 | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import models, fields, api
from .server import custom_sudo as sudo
import logging
_logger = logging.getLogger(__name__)
class server_docker_image(models.Model):
    """"""
    _name = 'infrastructure.server_docker_image'
    _description = 'Server Docker Image'
    _rec_name = 'docker_image_id'

    docker_image_id = fields.Many2one(
        'infrastructure.docker_image',
        'Docker Image',
        required=True,
    )
    server_id = fields.Many2one(
        'infrastructure.server',
        'Server',
        required=True,
        ondelete='cascade',
    )

    _sql_constraints = [
        ('image_uniq', 'unique(docker_image_id, server_id)',
            'Docker Image Must be Unique per server'),
    ]
    @api.multi
    def pull_image(self, context=None, detached=False):
        """We had to add the context parameter because the view passes it
        without masking it into self."""
        self.server_id.get_env()
        image = self.docker_image_id
        image_name = image.pull_name
        # if any tag, pull the first one
        if image.tag_ids:
            image_name = '%s:%s' % (image_name, image.tag_ids[0].name)
        _logger.info("Pulling Image %s" % image_name)
        if detached:
            sudo('dtach -n `mktemp -u /tmp/dtach.XXXX` docker pull %s' %
                 image_name)
        else:
            sudo('docker pull %s' % image_name)

    @api.multi
    def pull_image_detached(self):
        self.pull_image(detached=True)
| agpl-3.0 | 1,706,531,932,765,589,000 | 30.963636 | 78 | 0.5438 | false |
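For reference, the two command strings pull_image hands to sudo() can be reproduced outside of any Odoo/Fabric context; the helper and the image names below are made up for illustration.

```python
# Sketch of the shell commands built by pull_image above (hypothetical helper).
def docker_pull_command(pull_name, tag=None, detached=False):
    image_name = '%s:%s' % (pull_name, tag) if tag else pull_name
    cmd = 'docker pull %s' % image_name
    if detached:
        # dtach -n detaches the pull so it survives the remote shell session
        cmd = 'dtach -n `mktemp -u /tmp/dtach.XXXX` ' + cmd
    return cmd


print(docker_pull_command('library/postgres', '9.6'))
# -> docker pull library/postgres:9.6
print(docker_pull_command('library/odoo', detached=True))
# -> dtach -n `mktemp -u /tmp/dtach.XXXX` docker pull library/odoo
```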
csarn/qthexedit | besthex.py | 1 | 11828 | # PyQt hex editor widget
# Copyright (C) 2015 Christoph Sarnowski
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# standard modules
import time
import mmap
import re
import os
import collections
from binascii import *
from math import *
# gui lib
import sip
sip.setapi('QDate', 2)
sip.setapi('QDateTime', 2)
sip.setapi('QString', 2)
sip.setapi('QTextStream', 2)
sip.setapi('QTime', 2)
sip.setapi('QUrl', 2)
sip.setapi('QVariant', 2)
from PySide.QtGui import *
from PySide.QtCore import *
#from PyQt4.Qsci import *
# own submodules
from hexwidget import *
from ipythonwidget import *
from cursor import *
from docks import *
from mmapslice import *
class Delegate(QItemDelegate):
def __init__(self):
super(Delegate, self).__init__()
self.validator = QIntValidator()
def setModelData(self, editor, model, index):
print editor, model, index
editor = QLineEdit(editor)
editor.setValidator(self.validator)
super(Delegate, self).setModelData(editor, model, index)
class SearchDialog(QWidget):
def __init__(self, hexwidget=None, parent=None):
super(SearchDialog, self).__init__(parent)
self.hexwidget = hexwidget
self.lyt = QGridLayout()
self.setLayout(self.lyt)
self.searchline = QLineEdit()
self.pb_search = QPushButton("Search")
self.lyt.addWidget(self.searchline, 0, 0)
self.lyt.addWidget(self.pb_search, 0, 1)
self.pb_search.clicked.connect(self.do_search)
def do_search(self):
phrase = self.searchline.text()
index = self.hexwidget.data.find(phrase, self.hexwidget.cursor.address)
print index
if index >= 0:
self.hexwidget.goto(index)
self.close()
class HexEditor(QMainWindow):
def __init__(self):
super(HexEditor, self).__init__()
self.setWindowTitle("Best Hex Editor")
self.hexwidgets = []
self.central = QMainWindow()
self.central.setWindowFlags(Qt.Widget)
self.central.setDockOptions(self.central.dockOptions()|QMainWindow.AllowNestedDocks)
self.tabs = []
self.open_file("besthex.py")
self.setCentralWidget(self.central)
self.font = QFont("Courier", 10)
self.indicator = QLabel("Overwrite")
self.statusBar().showMessage("yay")
self.statusBar().addPermanentWidget(self.indicator)
self.createDocks()
self.createActions()
self.createMenus()
self.set_example_data()
self.drawIcon()
def drawIcon(self):
self.pixmap = QPixmap(64,64)
painter = QPainter(self.pixmap)
painter.fillRect(0,0,64,64,Qt.green)
painter.setPen(QColor(192,0,192))
painter.setFont(QFont("Courier", 64))
painter.drawText(6,57,"H")
self.icon = QIcon(self.pixmap)
self.setWindowIcon(self.icon)
def createDocks(self):
self.setDockOptions(self.dockOptions() | QMainWindow.AllowNestedDocks)
allowed_positions = Qt.AllDockWidgetAreas
# make struct editor widget
self.structeditor = QTextEdit()
# qscintilla compatibility
self.structeditor.text = self.structeditor.toPlainText
self.structeditor.setText = self.structeditor.setPlainText
self.structeditor.setFont(self.font)
self.dock1 = QDockWidget()
self.dock1.setWindowTitle("Struct Editor")
self.dock1.setWidget(self.structeditor)
self.dock1.setAllowedAreas(allowed_positions)
self.addDockWidget(Qt.RightDockWidgetArea, self.dock1)
# make struct explorer widget
self.structexplorer = s = QTreeWidget()
s.setColumnCount(3)
self.d = Delegate()
self.dock2 = QDockWidget()
self.dock2.setWindowTitle("Struct Explorer")
self.dock2.setWidget(self.structexplorer)
self.dock2.setAllowedAreas(allowed_positions)
self.addDockWidget(Qt.RightDockWidgetArea, self.dock2)
self.hexwidgets[0].cursor.changed.connect(self.eval)
self.structeditor.setMinimumWidth(300)
self.structexplorer.setMinimumWidth(300)
self.ipython = IPythonWidget(run='''
import matplotlib
%matplotlib inline
from pylab import *
from PySide.QtCore import *
from PySide.QtGui import *
from construct import *
from binascii import *
data = main.hexwidgets[0].data
a = np.ndarray.__new__(np.ndarray,
shape=(len(data),),
dtype=np.uint8,
buffer=data,
offset=0,
strides=(1,),
order='C')
def histogram():
hist(a, bins=256, range=(0,256))
''',main=self)
self.ipython.setMinimumWidth(300)
self.dock3 = QDockWidget()
self.dock3.setWindowTitle("IPython")
self.dock3.setWidget(self.ipython)
self.dock3.setAllowedAreas(allowed_positions)
self.addDockWidget(Qt.LeftDockWidgetArea, self.dock3)
self.dock1.setObjectName("structedit")
self.dock2.setObjectName("structexp")
self.dock3.setObjectName("ipython")
def open_file(self, filename=None):
if filename is None:
filename = QFileDialog.getOpenFileName(self, "Open File...")[0]
#print self.filename
if filename:
w = HexWidget(filename=filename)
self.hexwidgets.append(w)
self.tabs.append(QDockWidget())
self.tabs[-1].setWindowTitle(w.filename)
self.tabs[-1].setWidget(w)
self.tabs[-1].setAllowedAreas(Qt.AllDockWidgetAreas)
self.central.addDockWidget(Qt.RightDockWidgetArea, self.tabs[-1])
def save_file_as(self):
self.filename = QFileDialog.getSaveFileName(self, "Save File as...")[0]
if self.filename:
self.statusBar().showMessage("Saving...")
open(self.filename, 'wb').write(self.hexwidget.data)
self.statusBar().showMessage("done.")
def createActions(self):
self.act_open = QAction("&Open", self)
self.act_open.setShortcuts(QKeySequence.Open)
self.act_open.setStatusTip("Open file")
self.act_open.triggered.connect(self.open_file)
self.act_saveas = QAction("&Save as...", self)
self.act_saveas.setShortcuts(QKeySequence.SaveAs)
self.act_saveas.setStatusTip("Save file as...")
self.act_saveas.triggered.connect(self.save_file_as)
self.act_quit = QAction("&Quit", self)
self.act_quit.setShortcuts(QKeySequence.Quit)
self.act_quit.setStatusTip("Quit Best Hex Editor")
self.act_quit.triggered.connect(self.close)
self.act_search = QAction("&Search", self)
self.act_search.setShortcuts(QKeySequence.Find)
self.act_search.setStatusTip("Search current buffer for a string")
self.act_search.triggered.connect(self.search)
self.ta_sed = self.dock1.toggleViewAction()
self.ta_sed.setShortcut(QKeySequence("Alt+S"))
self.ta_sexp = self.dock2.toggleViewAction()
self.ta_sexp.setShortcut(QKeySequence("Alt+X"))
self.ta_ipy = self.dock3.toggleViewAction()
self.ta_ipy.setShortcut(QKeySequence("Alt+P"))
def createMenus(self):
self.filemenu = self.menuBar().addMenu("&File")
self.filemenu.addAction(self.act_open)
self.filemenu.addAction(self.act_saveas)
self.filemenu.addAction(self.act_quit)
self.filemenu.addAction(self.act_search)
self.viewmenu = self.menuBar().addMenu("&View")
self.viewmenu.addAction(self.ta_sed)
self.viewmenu.addAction(self.ta_sexp)
self.viewmenu.addAction(self.ta_ipy)
def toggle_structedit(self):
if self.structeditor.isVisible():
self.structeditor.setVisible(False)
else:
self.structeditor.setVisible(True)
def search(self):
self.dia = SearchDialog(hexwidget = self.hexwidgets[0])
self.dia.show()
self.dia.raise_()
self.dia.activateWindow()
def foo(self, x):
try:
y = ("\n" + self.structeditor.text()).index("\n" + x)
except:
print x
raise
return y
def eval(self):
try:
self.structexplorer.clear()
self.items = []
ns = {}
exec(compile("from construct import *\n" + self.structeditor.text(), '<none>', 'exec'), ns)
results = []
import construct
keys = sorted([x for x, v in ns.iteritems() if isinstance(v, construct.Construct) and x not in dir(construct) and (not x.startswith('_'))],
key=self.foo)
for name in keys:
cons = ns[name]
try:
parsed = cons.parse(self.hexwidgets[0].data[self.hexwidgets[0].cursor.address:])
except:
parsed = "<parse error>"
if isinstance(parsed, construct.lib.container.Container):
self.items.append(QTreeWidgetItem(self.structexplorer,
[cons.name,
'Container',
"none"]))
parent = self.items[-1]
parent.setExpanded(True)
for k, v in parsed.iteritems():
it = QTreeWidgetItem(parent, [k, str(v), 'none'])
it.setFlags(it.flags() | Qt.ItemIsEditable)
self.items.append(it)
else:
it = QTreeWidgetItem(self.structexplorer,
[cons.name,
str(parsed),
"none"])
self.items.append(it)
for i in range(3):
self.structexplorer.resizeColumnToContents(i)
# self.hexwidget.viewport().update()
except Exception as e:
print e
def closeEvent(self, event):
settings = QSettings("csarn", "best hex editor")
settings.setValue("geometry", self.saveGeometry())
settings.setValue("windowState", self.saveState())
QMainWindow.closeEvent(self, event)
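    # The geometry/state saved above are typically restored at startup,
    # e.g. (illustrative sketch, assuming this is not already done in
    # __init__ elsewhere in this file):
    #
    #   settings = QSettings("csarn", "best hex editor")
    #   self.restoreGeometry(settings.value("geometry"))
    #   self.restoreState(settings.value("windowState"))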
def set_example_data(self):
self.hexwidgets[0].highlights.append(Selection(10,20))
self.structeditor.setText("""foo = Union("default data types",
ULInt8("uint8"),
ULInt16("uint16"),
ULInt32("uint32"),
ULInt64("uint64"),
SLInt8("sint8"),
SLInt16("sint16"),
SLInt32("sint32"),
SLInt64("sint64"),
LFloat32("float"),
LFloat64("double"),
)
bar = Union("data types (big endian)",
UBInt8("uint8"),
UBInt16("uint16"),
UBInt32("uint32"),
UBInt64("uint64"),
SBInt8("sint8"),
SBInt16("sint16"),
SBInt32("sint32"),
SBInt64("sint64"),
BFloat32("float"),
BFloat64("double"),
)
""")
self.eval()
if __name__ == '__main__':
app = QApplication([])
h = HexEditor()
h.show()
app.exec_()
| gpl-2.0 | 5,709,250,314,761,372,000 | 31.584022 | 151 | 0.605428 | false |
ex/js | lib/closure-library/closure/bin/build/source.py | 1 | 3566 | # Copyright 2009 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Scans a source JS file for its provided and required namespaces.
Simple class to scan a JavaScript file and express its dependencies.
"""
__author__ = '[email protected]'
import re
_BASE_REGEX_STRING = '^\s*goog\.%s\(\s*[\'"](.+)[\'"]\s*\)'
_MODULE_REGEX = re.compile(_BASE_REGEX_STRING % 'module')
_PROVIDE_REGEX = re.compile(_BASE_REGEX_STRING % 'provide')
_REQUIRES_REGEX = re.compile(_BASE_REGEX_STRING % 'require')
class Source(object):
"""Scans a JavaScript source for its provided and required namespaces."""
# Matches a "/* ... */" comment.
# Note: We can't definitively distinguish a "/*" in a string literal without a
# state machine tokenizer. We'll assume that a line starting with whitespace
# and "/*" is a comment.
_COMMENT_REGEX = re.compile(
r"""
^\s* # Start of a new line and whitespace
/\* # Opening "/*"
.*? # Non greedy match of any characters (including newlines)
\*/ # Closing "*/""",
re.MULTILINE | re.DOTALL | re.VERBOSE)
def __init__(self, source):
"""Initialize a source.
Args:
source: str, The JavaScript source.
"""
self.provides = set()
self.requires = set()
self.is_goog_module = False
self._source = source
self._ScanSource()
def GetSource(self):
"""Get the source as a string."""
return self._source
@classmethod
def _StripComments(cls, source):
return cls._COMMENT_REGEX.sub('', source)
@classmethod
def _HasProvideGoogFlag(cls, source):
"""Determines whether the @provideGoog flag is in a comment."""
for comment_content in cls._COMMENT_REGEX.findall(source):
if '@provideGoog' in comment_content:
return True
return False
def _ScanSource(self):
"""Fill in provides and requires by scanning the source."""
stripped_source = self._StripComments(self.GetSource())
source_lines = stripped_source.splitlines()
for line in source_lines:
match = _PROVIDE_REGEX.match(line)
if match:
self.provides.add(match.group(1))
match = _MODULE_REGEX.match(line)
if match:
self.provides.add(match.group(1))
self.is_goog_module = True
match = _REQUIRES_REGEX.match(line)
if match:
self.requires.add(match.group(1))
# Closure's base file implicitly provides 'goog'.
# This is indicated with the @provideGoog flag.
if self._HasProvideGoogFlag(self.GetSource()):
if len(self.provides) or len(self.requires):
raise Exception(
'Base file should not provide or require namespaces.')
self.provides.add('goog')
def GetFileContents(path):
"""Get a file's contents as a string.
Args:
path: str, Path to file.
Returns:
str, Contents of file.
Raises:
IOError: An error occurred opening or reading the file.
"""
fileobj = open(path, encoding='utf-8')
try:
return fileobj.read()
finally:
fileobj.close()
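# Illustrative usage sketch (not part of the original module; the file path
# is a placeholder assumption):
#
#   js_source = Source(GetFileContents('closure/goog/array/array.js'))
#   print(js_source.provides)   # namespaces the file goog.provide()s
#   print(js_source.requires)   # namespaces the file goog.require()s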
| mit | 1,210,208,933,829,568,300 | 27.758065 | 80 | 0.661806 | false |
vanant/googleads-dfa-reporting-samples | python/v2.0/create_subaccount.py | 1 | 2495 | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example creates a subaccount in a given DFA account.
To get the account ID, run get_all_userprofiles.py. To get the available
permissions, run get_user_role_permissions.py.
Tags: subaccounts.insert
"""
__author__ = ('[email protected] (Jonathon Imperiosi)')
import argparse
import sys
from apiclient import sample_tools
from oauth2client import client
# Declare command-line flags.
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument(
'profile_id', type=int,
help='The ID of the profile to add a subaccount for')
argparser.add_argument(
'account_id', type=int,
help='The ID of the account to create a subaccount for')
argparser.add_argument(
'permission_id', type=int,
help='The ID of the permission to apply to this subaccount')
def main(argv):
# Authenticate and construct service.
service, flags = sample_tools.init(
argv, 'dfareporting', 'v2.0', __doc__, __file__, parents=[argparser],
scope=['https://www.googleapis.com/auth/dfareporting',
'https://www.googleapis.com/auth/dfatrafficking'])
profile_id = flags.profile_id
account_id = flags.account_id
permission_id = flags.permission_id
try:
# Construct the basic subaccount structure.
subaccount = {
'name': 'Test Subaccount',
'accountId': account_id,
'availablePermissionIds': [permission_id]
}
request = service.subaccounts().insert(
profileId=profile_id, body=subaccount)
# Execute request and print response.
response = request.execute()
print ('Created subaccount with ID %s and name "%s".'
% (response['id'], response['name']))
except client.AccessTokenRefreshError:
print ('The credentials have been revoked or expired, please re-run the '
'application to re-authorize')
if __name__ == '__main__':
main(sys.argv)
| apache-2.0 | 7,794,931,736,434,190,000 | 30.1875 | 77 | 0.703407 | false |
ForsvaretsForskningsinstitutt/Paper-NLLS-speedup | case_generator/nlls.py | 1 | 1232 | import numpy as np
import random
class NonLinearLeastSquares(object):
samples = []
grid_size = None
alpha = 2.0
def __init__(self, grid_size):
self.grid_size = np.array(grid_size)
def setSamples(self,samples):
self.samples = samples
def pkl(self,k,l):
return self.samples[k].strength- self.samples[l].strength
def qxy(self,position):
error = 0.0
for k in xrange(len(self.samples)):
for l in xrange(k+1,len(self.samples)):
p = self.pkl(k,l) - 5.0 * self.alpha * np.log10(np.linalg.norm(position-self.samples[l].position)**2/np.linalg.norm(position-self.samples[k].position)**2)
error += p**2
return error
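    # Illustrative sketch only: the original class defines no minimizer,
    # although the commented-out demo below calls minimizeQxy(). Assumed
    # approach: brute-force grid search with the given step size, returning
    # the position whose qxy() error is smallest.
    def minimizeQxy(self, step):
        best_pos = None
        best_err = None
        for x in np.arange(0.0, self.grid_size[0], step):
            for y in np.arange(0.0, self.grid_size[1], step):
                pos = np.array([x, y])
                err = self.qxy(pos)
                if best_err is None or err < best_err:
                    best_pos = pos
                    best_err = err
        return best_pos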
#if __name__=="__main__":
#from environment import Environment
#random.seed(64)
#env = Environment([200,200],100,2.0)
#samples = []
#for x in xrange(10):
#pos = np.array([random.randint(0,1000),random.randint(0,1000)])
#s = Sample(pos, env.measuredPower(pos))
#samples.append(s)
#grid_size = np.array([1000,1000])
#nlls = NonLinearLeastSquares(grid_size)
#nlls.setSamples(samples)
#print nlls.minimizeQxy(10.0)
| gpl-3.0 | 7,997,464,228,841,553,000 | 27 | 170 | 0.590097 | false |
DemocracyLab/CivicTechExchange | civictechprojects/models.py | 1 | 45731 | from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.db import models
from django.utils import timezone
from django.contrib.gis.db.models import PointField
from enum import Enum
from democracylab.models import Contributor
from common.models.tags import Tag
from taggit.managers import TaggableManager
from taggit.models import TaggedItemBase
from civictechprojects.caching.cache import ProjectCache, GroupCache, EventCache, ProjectSearchTagsCache
from common.helpers.form_helpers import is_json_field_empty, is_creator_or_staff
from common.helpers.dictionaries import merge_dicts, keys_subset
from common.helpers.collections import flatten, count_occurrences
# Without the following classes, the following error occurs:
#
# ValueError: You can't have two TaggableManagers with the same
# through model.
#
# By default, the `through` field is the same across both TaggableManagers
# because when the parameter is omitted, identical defaults are provided.
# See: https://django-taggit.readthedocs.io/en/latest/api.html#TaggableManager
class TaggedIssueAreas(TaggedItemBase):
content_object = models.ForeignKey('Project', on_delete=models.CASCADE)
class TaggedStage(TaggedItemBase):
content_object = models.ForeignKey('Project', on_delete=models.CASCADE)
class TaggedTechnologies(TaggedItemBase):
content_object = models.ForeignKey('Project', on_delete=models.CASCADE)
class TaggedOrganization(TaggedItemBase):
content_object = models.ForeignKey('Project', on_delete=models.CASCADE)
class TaggedOrganizationType(TaggedItemBase):
content_object = models.ForeignKey('Project', on_delete=models.CASCADE)
class ArchiveManager(models.Manager):
def get_queryset(self):
return super(ArchiveManager, self).get_queryset().filter(deleted=True)
class DefaultManager(models.Manager):
def get_queryset(self):
return super(DefaultManager, self).get_queryset().filter(deleted=False)
# This base class adds delete functionality to models using a flag, and filters deleted items out of the default result set
class Archived(models.Model):
class Meta:
abstract = True
objects = DefaultManager()
archives = ArchiveManager()
deleted = models.BooleanField(default=False)
def delete(self):
self.deleted = True
self.save()
class Project(Archived):
project_creator = models.ForeignKey(Contributor, related_name='created_projects', on_delete=models.CASCADE)
project_description = models.CharField(max_length=4000, blank=True)
project_description_solution = models.CharField(max_length=4000, blank=True)
project_description_actions = models.CharField(max_length=4000, blank=True)
project_short_description = models.CharField(max_length=140, blank=True)
project_issue_area = TaggableManager(blank=True, through=TaggedIssueAreas)
project_issue_area.remote_field.related_name = 'issue_projects'
project_stage = TaggableManager(blank=True, through=TaggedStage)
    project_stage.remote_field.related_name = 'stage_projects'
project_technologies = TaggableManager(blank=True, through=TaggedTechnologies)
project_technologies.remote_field.related_name = 'technology_projects'
project_organization = TaggableManager(blank=True, through=TaggedOrganization)
project_organization.remote_field.related_name = 'org_projects'
project_organization_type = TaggableManager(blank=True, through=TaggedOrganizationType)
project_organization_type.remote_field.related_name = 'org_type_projects'
project_location = models.CharField(max_length=200, blank=True)
project_location_coords = PointField(null=True, blank=True, srid=4326, default='POINT EMPTY')
project_country = models.CharField(max_length=100, blank=True)
project_state = models.CharField(max_length=100, blank=True)
project_city = models.CharField(max_length=100, blank=True)
project_name = models.CharField(max_length=200)
project_url = models.CharField(max_length=2083, blank=True)
project_date_created = models.DateTimeField(null=True)
project_date_modified = models.DateTimeField(auto_now_add=True, null=True)
is_searchable = models.BooleanField(default=False)
is_created = models.BooleanField(default=True)
_full_text_capacity = 200000
full_text = models.CharField(max_length=_full_text_capacity, blank=True)
def __str__(self):
return str(self.id) + ':' + str(self.project_name)
def delete(self):
        self.is_searchable = False
self.update_timestamp()
super().delete()
def all_owners(self):
owners = [self.project_creator]
project_volunteers = VolunteerRelation.objects.filter(project=self.id)
project_co_owners = filter(lambda pv: pv.is_co_owner, project_volunteers)
return owners + list(map(lambda pv: pv.volunteer, project_co_owners))
def hydrate_to_json(self):
return ProjectCache.get(self) or ProjectCache.refresh(self, self._hydrate_to_json())
def _hydrate_to_json(self):
files = ProjectFile.objects.filter(file_project=self.id)
thumbnail_files = list(files.filter(file_category=FileCategory.THUMBNAIL.value))
other_files = list(files.filter(file_category=FileCategory.ETC.value))
links = ProjectLink.objects.filter(link_project=self.id)
positions = ProjectPosition.objects.filter(position_project=self.id).order_by('order_number')
volunteers = VolunteerRelation.objects.filter(project=self.id)
group_relationships = ProjectRelationship.objects.filter(relationship_project=self).exclude(relationship_group=None)
commits = ProjectCommit.objects.filter(commit_project=self.id).order_by('-commit_date')[:20]
project = {
'project_id': self.id,
'project_name': self.project_name,
'project_creator': self.project_creator.id,
'project_claimed': not self.project_creator.is_admin_contributor(),
'project_approved': self.is_searchable,
'project_description': self.project_description,
'project_description_solution': self.project_description_solution,
'project_description_actions': self.project_description_actions,
'project_short_description': self.project_short_description,
'project_url': self.project_url,
'project_location': self.project_location,
'project_country': self.project_country,
'project_state': self.project_state,
'project_city': self.project_city,
'project_organization': Tag.hydrate_to_json(self.id, list(self.project_organization.all().values())),
'project_organization_type': Tag.hydrate_to_json(self.id, list(self.project_organization_type.all().values())),
'project_issue_area': Tag.hydrate_to_json(self.id, list(self.project_issue_area.all().values())),
'project_stage': Tag.hydrate_to_json(self.id, list(self.project_stage.all().values())),
'project_technologies': Tag.hydrate_to_json(self.id, list(self.project_technologies.all().values())),
'project_positions': list(map(lambda position: position.to_json(), positions)),
'project_files': list(map(lambda file: file.to_json(), other_files)),
'project_links': list(map(lambda link: link.to_json(), links)),
'project_commits': list(map(lambda commit: commit.to_json(), commits)),
'project_groups': list(map(lambda gr: gr.hydrate_to_list_json(), group_relationships)),
'project_events': list(map(lambda er: er.hydrate_to_tile_json(), self.get_project_events())),
'project_owners': [self.project_creator.hydrate_to_tile_json()],
'project_volunteers': list(map(lambda volunteer: volunteer.to_json(), volunteers)),
'project_date_modified': self.project_date_modified.__str__()
}
if self.project_location_coords is not None and not self.project_location_coords.empty:
project['project_latitude'] = self.project_location_coords.x
project['project_longitude'] = self.project_location_coords.y
if len(thumbnail_files) > 0:
project['project_thumbnail'] = thumbnail_files[0].to_json()
return project
def hydrate_to_tile_json(self):
keys = [
'project_id', 'project_name', 'project_creator', 'project_url', 'project_location', 'project_country',
'project_state', 'project_city', 'project_issue_area', 'project_stage', 'project_positions',
'project_date_modified', 'project_thumbnail', 'project_description'
]
json_base = self.hydrate_to_json()
json_result = keys_subset(json_base, keys)
project_short_description = json_base['project_short_description']
if len(project_short_description) > 0:
json_result['project_description'] = project_short_description
return json_result
def hydrate_to_list_json(self):
project = {
'project_id': self.id,
'project_name': self.project_name,
'project_creator': self.project_creator.id,
'isApproved': self.is_searchable,
'isCreated': self.is_created
}
return project
def get_project_events(self):
slugs = list(map(lambda tag: tag['slug'], self.project_organization.all().values()))
return Event.objects.filter(event_legacy_organization__name__in=slugs, is_private=False, is_searchable=True)
def get_project_groups(self):
project_relationships = ProjectRelationship.objects.filter(relationship_project=self.id)
groups_ids = list(map(lambda pr: pr.relationship_group.id, project_relationships))
return Group.objects.filter(id__in=groups_ids)
def update_timestamp(self, time=None):
self.project_date_modified = time or timezone.now()
self.save()
def recache(self, recache_linked=False):
hydrated_project = self._hydrate_to_json()
ProjectCache.refresh(self, hydrated_project)
self.generate_full_text()
if recache_linked:
self.update_linked_items()
def update_linked_items(self):
# Recache events, but only if project is searchable
if self.is_searchable:
owned_events = self.get_project_events()
for event in owned_events:
event.recache()
def generate_full_text(self):
base_json = self.hydrate_to_json()
# Don't cache external entities because they take up space and aren't useful in project search
omit_fields = ['project_volunteers', 'project_owners', 'project_events', 'project_groups', 'project_commits']
# Don't cache files because they contain noise without adequate signal
omit_fields += ['project_thumbnail', 'project_files']
# Don't cache boolean fields
omit_fields += ['project_claimed', 'project_approved']
# Don't cache numeric fields
omit_fields += ['project_id', 'project_creator', 'project_latitude', 'project_longitude']
# Don't cache date fields
omit_fields += ['project_date_modified']
for field in omit_fields:
base_json.pop(field, None)
full_text = str(base_json)
if len(full_text) >= Project._full_text_capacity:
full_text = full_text[:Project._full_text_capacity - 1]
print('Project Full Text Field Overflow Alert: ' + self.__str__())
self.full_text = full_text
self.save()
class Group(Archived):
group_creator = models.ForeignKey(Contributor, related_name='group_creator', on_delete=models.CASCADE)
group_date_created = models.DateTimeField(null=True)
group_date_modified = models.DateTimeField(auto_now_add=True, null=True)
group_description = models.CharField(max_length=4000, blank=True)
group_url = models.CharField(max_length=2083, blank=True)
group_location = models.CharField(max_length=200, blank=True)
group_location_coords = PointField(null=True, blank=True, srid=4326, default='POINT EMPTY')
group_country = models.CharField(max_length=100, blank=True)
group_state = models.CharField(max_length=100, blank=True)
group_city = models.CharField(max_length=100, blank=True)
group_name = models.CharField(max_length=200)
group_short_description = models.CharField(max_length=140, blank=True)
is_searchable = models.BooleanField(default=False)
is_created = models.BooleanField(default=True)
def __str__(self):
return str(self.id) + ':' + str(self.group_name)
def delete(self):
self.is_searchable = False
super().delete()
def update_timestamp(self):
self.group_date_modified = timezone.now()
self.save()
def hydrate_to_json(self):
return GroupCache.get(self) or GroupCache.refresh(self, self._hydrate_to_json())
def _hydrate_to_json(self):
files = ProjectFile.objects.filter(file_group=self.id)
thumbnail_files = list(files.filter(file_category=FileCategory.THUMBNAIL.value))
other_files = list(files.filter(file_category=FileCategory.ETC.value))
links = ProjectLink.objects.filter(link_group=self.id)
projects = self.get_group_project_relationships(approved_only=True)
group = {
'group_creator': self.group_creator.id,
'group_date_modified': self.group_date_modified.__str__(),
'group_description': self.group_description,
'group_files': list(map(lambda file: file.to_json(), other_files)),
'group_id': self.id,
'group_links': list(map(lambda link: link.to_json(), links)),
'group_url': self.group_url,
'group_name': self.group_name,
'group_location': self.group_location,
'group_country': self.group_country,
'group_state': self.group_state,
'group_city': self.group_city,
'group_owners': [self.group_creator.hydrate_to_tile_json()],
'group_short_description': self.group_short_description,
'group_project_count': projects.count()
}
if len(projects) > 0:
group['group_issue_areas'] = self.get_project_issue_areas(with_counts=True, project_relationships=projects)
if len(thumbnail_files) > 0:
group['group_thumbnail'] = thumbnail_files[0].to_json()
return group
def hydrate_to_tile_json(self):
keys = [
'group_date_modified', 'group_id', 'group_name', 'group_location', 'group_country', 'group_state',
'group_city', 'group_short_description', 'group_project_count', 'group_issue_areas', 'group_thumbnail'
]
return keys_subset(self.hydrate_to_json(), keys)
def hydrate_to_list_json(self):
files = ProjectFile.objects.filter(file_group=self.id)
thumbnail_files = list(files.filter(file_category=FileCategory.THUMBNAIL.value))
group = {
'group_date_modified': self.group_date_modified.__str__(),
'group_id': self.id,
'group_name': self.group_name,
'group_creator': self.group_creator.id,
'isApproved': self.is_searchable,
'isCreated': self.is_created
}
if len(thumbnail_files) > 0:
group['group_thumbnail'] = thumbnail_files[0].to_json()
return group
def get_project_issue_areas(self, with_counts, project_relationships=None):
if project_relationships is None:
project_relationships = ProjectRelationship.objects.filter(relationship_group=self.id)
all_issue_areas = flatten(list(map(lambda p: p.relationship_project.project_issue_area.all().values(), project_relationships)))
all_issue_area_names = list(map(lambda issue_tag: issue_tag['name'], all_issue_areas))
if with_counts:
issue_area_counts = count_occurrences(all_issue_area_names)
return issue_area_counts
else:
return list(set(all_issue_area_names))
def get_group_project_relationships(self, approved_only=True):
project_relationships = ProjectRelationship.objects.filter(relationship_group=self.id)
if approved_only:
project_relationships = project_relationships.filter(is_approved=True, relationship_project__is_searchable=True)
return project_relationships
def get_group_projects(self, approved_only=True):
project_ids = list(map(lambda pr: pr.relationship_project.id, self.get_group_project_relationships(approved_only=approved_only)))
return Project.objects.filter(id__in=project_ids)
def recache(self):
hydrated_group = self._hydrate_to_json()
GroupCache.refresh(self, hydrated_group)
ProjectSearchTagsCache.refresh(event=None, group=self)
def update_linked_items(self):
# Recache linked projects
project_relationships = ProjectRelationship.objects.filter(relationship_group=self)
for project_relationship in project_relationships:
project_relationship.relationship_project.recache(recache_linked=False)
class TaggedEventOrganization(TaggedItemBase):
content_object = models.ForeignKey('Event', on_delete=models.CASCADE)
class Event(Archived):
event_agenda = models.CharField(max_length=4000, blank=True)
event_creator = models.ForeignKey(Contributor, related_name='event_creator', on_delete=models.CASCADE)
event_date_created = models.DateTimeField(null=True)
event_date_end = models.DateTimeField()
event_date_modified = models.DateTimeField(auto_now_add=True, null=True)
event_date_start = models.DateTimeField()
event_description = models.CharField(max_length=4000, blank=True)
event_organizers_text = models.CharField(max_length=200, blank=True)
event_location = models.CharField(max_length=200, blank=True)
event_name = models.CharField(max_length=200)
event_rsvp_url = models.CharField(max_length=2083, blank=True)
event_live_id = models.CharField(max_length=50, blank=True)
event_short_description = models.CharField(max_length=140, blank=True)
event_legacy_organization = TaggableManager(blank=True, through=TaggedEventOrganization)
event_legacy_organization.remote_field.related_name = 'org_events'
event_slug = models.CharField(max_length=100, blank=True)
is_private = models.BooleanField(default=False)
is_searchable = models.BooleanField(default=False)
is_created = models.BooleanField(default=True)
show_headers = models.BooleanField(default=False)
def __str__(self):
return str(self.id) + ':' + str(self.event_name)
def delete(self):
        self.is_searchable = False
super().delete()
def update_timestamp(self):
self.event_date_modified = timezone.now()
self.save()
def hydrate_to_json(self):
return EventCache.get(self) or EventCache.refresh(self, self._hydrate_to_json())
def _hydrate_to_json(self):
files = ProjectFile.objects.filter(file_event=self.id)
thumbnail_files = list(files.filter(file_category=FileCategory.THUMBNAIL.value))
other_files = list(files.filter(file_category=FileCategory.ETC.value))
event = {
'event_agenda': self.event_agenda,
'event_creator': self.event_creator.id,
'event_date_end': self.event_date_end.__str__(),
'event_date_modified': self.event_date_modified.__str__(),
'event_date_start': self.event_date_start.__str__(),
'event_description': self.event_description,
'event_files': list(map(lambda file: file.to_json(), other_files)),
'event_id': self.id,
'event_location': self.event_location,
'event_rsvp_url': self.event_rsvp_url,
'event_live_id': self.event_live_id,
'event_name': self.event_name,
'event_organizers_text': self.event_organizers_text,
'event_owners': [self.event_creator.hydrate_to_tile_json()],
'event_short_description': self.event_short_description,
'event_legacy_organization': Tag.hydrate_to_json(self.id, list(self.event_legacy_organization.all().values())),
'event_slug': self.event_slug,
'is_private': self.is_private,
'show_headers': self.show_headers
}
if len(thumbnail_files) > 0:
event['event_thumbnail'] = thumbnail_files[0].to_json()
return event
def hydrate_to_tile_json(self):
keys = [
'event_date_end', 'event_date_start', 'event_id', 'event_slug', 'event_location', 'event_name',
'event_organizers_text', 'event_short_description', 'event_thumbnail'
]
return keys_subset(self.hydrate_to_json(), keys)
def hydrate_to_list_json(self):
event = self.hydrate_to_tile_json()
event['event_creator'] = self.event_creator.id
event['is_searchable'] = self.is_searchable
event['is_created'] = self.is_created
return event
@staticmethod
def get_by_id_or_slug(slug):
event = None
if slug is not None:
_slug = slug.strip().lower()
if _slug.isnumeric():
event = Event.objects.get(id=_slug)
elif len(_slug) > 0:
event = Event.objects.filter(event_slug=_slug).first() or NameRecord.get_event(_slug)
return event
def get_issue_areas(self):
project_relationships = ProjectRelationship.objects.filter(relationship_event=self.id)
project_ids = list(map(lambda relationship: relationship.relationship_project.id, project_relationships))
project_list = Project.objects.filter(id__in=project_ids)
return [Tag.hydrate_to_json(project.id, list(project.project_issue_area.all().values())) for project in project_list]
def get_linked_projects(self):
# Get projects by legacy organization
projects = None
legacy_org_slugs = self.event_legacy_organization.slugs()
if legacy_org_slugs and len(legacy_org_slugs) > 0:
projects = Project.objects.filter(project_organization__name__in=legacy_org_slugs)
return projects
def update_linked_items(self):
# Recache linked projects
projects = self.get_linked_projects()
if projects:
for project in projects:
project.recache(recache_linked=False)
def recache(self):
hydrated_event = self._hydrate_to_json()
EventCache.refresh(self, hydrated_event)
ProjectSearchTagsCache.refresh(event=self)
class NameRecord(models.Model):
event = models.ForeignKey(Event, related_name='old_slugs', blank=True, null=True, on_delete=models.CASCADE)
name = models.CharField(max_length=100, blank=True)
@staticmethod
def get_event(name):
record = NameRecord.objects.filter(name=name).first()
return record and record.event
@staticmethod
def delete_record(name):
record = NameRecord.objects.filter(name=name).first()
if record:
record.delete()
return True
else:
return False
class ProjectRelationship(models.Model):
relationship_project = models.ForeignKey(Project, related_name='relationships', blank=True, null=True, on_delete=models.CASCADE)
relationship_group = models.ForeignKey(Group, related_name='relationships', blank=True, null=True, on_delete=models.CASCADE)
relationship_event = models.ForeignKey(Event, related_name='relationships', blank=True, null=True, on_delete=models.CASCADE)
introduction_text = models.CharField(max_length=10000, blank=True)
project_initiated = models.BooleanField(default=False)
is_approved = models.BooleanField(default=False)
def __str__(self):
if self.relationship_group is not None:
project_counterpart = ('Group', self.relationship_group)
        elif self.relationship_event is not None:
            project_counterpart = ('Event', self.relationship_event)
        else:
            # Guard against relationships that have neither a group nor an event
            project_counterpart = ('None', None)
        return "{proj} - ({type}) {counterpart}".format(
proj=self.relationship_project.__str__(),
type=project_counterpart[0],
counterpart=project_counterpart[1].__str__())
@staticmethod
def create(owner, project, approved=False, introduction_text=""):
relationship = ProjectRelationship()
relationship.project_initiated = False
relationship.relationship_project = project
relationship.introduction_text = introduction_text
if type(owner) is Group:
relationship.relationship_group = owner
relationship.is_approved = approved
else:
relationship.relationship_event = owner
relationship.is_approved = True
return relationship
def is_group_relationship(self):
return self.relationship_group is not None
def hydrate_to_list_json(self):
list_json = {
'project_relationship_id': self.id,
'relationship_is_approved': self.is_approved
}
if self.is_group_relationship():
list_json = merge_dicts(list_json, self.relationship_group.hydrate_to_list_json())
return list_json
def hydrate_to_project_tile_json(self):
list_json = {
'project_relationship_id': self.id,
'relationship_is_approved': self.is_approved
}
list_json = merge_dicts(list_json, self.relationship_project.hydrate_to_tile_json())
return list_json
class ProjectCommit(models.Model):
commit_project = models.ForeignKey(Project, related_name='commits', blank=True, null=True, on_delete=models.CASCADE)
user_name = models.CharField(max_length=200)
user_link = models.CharField(max_length=2083)
user_avatar_link = models.CharField(max_length=2083)
commit_date = models.DateTimeField()
commit_sha = models.CharField(max_length=40)
commit_title = models.CharField(max_length=2000)
branch_name = models.CharField(max_length=200)
repo_name = models.CharField(max_length=200)
def __str__(self):
return "({repo}) {sha}: {title}".format(repo=self.repo_name, sha=self.commit_sha[:6], title=self.commit_title)
@staticmethod
def create(project, repo_name, branch_name, github_json):
commit_sha = github_json['sha']
existing_commit = ProjectCommit.objects.filter(commit_sha=commit_sha, commit_project=project.id)
if existing_commit.count() == 0:
project_commit = ProjectCommit()
project_commit.commit_project = project
project_commit.repo_name = repo_name
project_commit.branch_name = branch_name
project_commit.commit_sha = commit_sha
commit_section = github_json['commit']
project_commit.commit_title = commit_section['message'][:2000]
project_commit.commit_date = commit_section['author']['date']
author_section = github_json['author']
if author_section:
project_commit.user_name = author_section['login']
project_commit.user_link = author_section['html_url']
project_commit.user_avatar_link = author_section['avatar_url']
project_commit.save()
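    # Shape of the GitHub API commit JSON that create() expects
    # (illustrative; the field values are placeholders):
    #
    #   {
    #     'sha': 'abc123...',
    #     'commit': {'message': 'Fix bug', 'author': {'date': '2020-01-01T00:00:00Z'}},
    #     'author': {'login': 'octocat', 'html_url': '...', 'avatar_url': '...'}
    #   }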
def to_json(self):
return {
'user_name': self.user_name,
'user_link': self.user_link,
'user_avatar_link': self.user_avatar_link,
'commit_date': self.commit_date,
'commit_sha': self.commit_sha,
'commit_title': self.commit_title,
'branch_name': self.branch_name,
'repo_name': self.repo_name
}
class ProjectLink(models.Model):
link_project = models.ForeignKey(Project, related_name='links', blank=True, null=True, on_delete=models.CASCADE)
link_group = models.ForeignKey(Group, related_name='links', blank=True, null=True, on_delete=models.CASCADE)
link_event = models.ForeignKey(Event, related_name='links', blank=True, null=True, on_delete=models.CASCADE)
link_user = models.ForeignKey(Contributor, related_name='links', blank=True, null=True, on_delete=models.CASCADE)
link_name = models.CharField(max_length=200, blank=True)
link_url = models.CharField(max_length=2083)
link_visibility = models.CharField(max_length=50)
@staticmethod
def create(owner, url, name, visibility):
# TODO: Validate input
link = ProjectLink()
link.link_url = url
link.link_name = name
link.link_visibility = visibility
if type(owner) is Project:
link.link_project = owner
elif type(owner) is Group:
link.link_group = owner
else:
link.link_user = owner
return link
@staticmethod
def merge_changes(owner, links):
updated_links = list(filter(lambda link: 'id' in link, links))
ProjectLink.remove_links_not_in_list(owner, updated_links)
for link_json in links:
link = ProjectLink.from_json(owner, link_json)
if not link.id:
ProjectLink.create(owner,
link.link_url,
link.link_name,
link.link_visibility).save()
else:
existing_link = ProjectLink.objects.get(id=link.id)
existing_link.link_name = link.link_name
existing_link.link_url = link.link_url
existing_link.link_visibility = link.link_visibility
existing_link.save()
@staticmethod
def remove_links_not_in_list(owner, links):
if type(owner) is Project:
existing_links = ProjectLink.objects.filter(link_project=owner.id)
elif type(owner) is Group:
existing_links = ProjectLink.objects.filter(link_group=owner.id)
else:
existing_links = ProjectLink.objects.filter(link_user=owner.id)
existing_link_ids = set(map(lambda link: link.id, existing_links))
updated_link_ids = set(map(lambda link: link['id'], links))
deleted_link_ids = list(existing_link_ids - updated_link_ids)
for link_id in deleted_link_ids:
ProjectLink.objects.get(id=link_id).delete()
@staticmethod
def from_json(owner, thumbnail_json):
link = ProjectLink.create(owner=owner,
url=thumbnail_json['linkUrl'],
name=thumbnail_json['linkName'],
visibility=thumbnail_json['visibility']
)
if 'id' in thumbnail_json:
link.id = thumbnail_json['id']
return link
def to_json(self):
return {
'id': self.id,
'linkName': self.link_name,
'linkUrl': self.link_url,
'visibility': self.link_visibility
}
class TaggedPositionRole(TaggedItemBase):
content_object = models.ForeignKey('ProjectPosition', on_delete=models.CASCADE)
class ProjectPosition(models.Model):
position_project = models.ForeignKey(Project, related_name='positions', on_delete=models.CASCADE)
position_role = TaggableManager(blank=False, through=TaggedPositionRole)
position_description = models.CharField(max_length=3000, blank=True)
description_url = models.CharField(max_length=2083, default='')
order_number = models.PositiveIntegerField(default=0)
is_hidden = models.BooleanField(default=False)
def to_json(self):
return {
'id': self.id,
'description': self.position_description,
'descriptionUrl': self.description_url,
'roleTag': Tag.hydrate_to_json(self.id, self.position_role.all().values())[0],
'orderNumber': self.order_number,
'isHidden': self.is_hidden
}
@staticmethod
def create_from_json(project, position_json):
position = ProjectPosition()
position.position_project = project
position.position_description = position_json['description']
position.description_url = position_json['descriptionUrl']
position.order_number = position_json['orderNumber']
position.is_hidden = position_json['isHidden']
position.save()
position.position_role.add(position_json['roleTag']['tag_name'])
return position
@staticmethod
def update_from_json(position, position_json):
position.position_description = position_json['description']
position.description_url = position_json['descriptionUrl']
position.order_number = position_json['orderNumber']
position.is_hidden = position_json['isHidden']
new_role = position_json['roleTag']['tag_name']
Tag.merge_tags_field(position.position_role, new_role)
position.save()
@staticmethod
def delete_position(position):
position.position_role.clear()
position.delete()
@staticmethod
def merge_changes(project, positions):
"""
Merge project position changes
:param project: Project with position changes
:param positions: Position changes
:return: True if there were position changes
"""
added_positions = list(filter(lambda position: 'id' not in position, positions))
updated_positions = list(filter(lambda position: 'id' in position, positions))
updated_positions_ids = set(map(lambda position: position['id'], updated_positions))
existing_positions = ProjectPosition.objects.filter(position_project=project.id)
existing_positions_ids = set(map(lambda position: position.id, existing_positions))
existing_projects_by_id = {position.id: position for position in existing_positions}
deleted_position_ids = list(existing_positions_ids - updated_positions_ids)
for added_position in added_positions:
ProjectPosition.create_from_json(project, added_position)
for updated_position_json in updated_positions:
ProjectPosition.update_from_json(existing_projects_by_id[updated_position_json['id']], updated_position_json)
for deleted_position_id in deleted_position_ids:
ProjectPosition.delete_position(existing_projects_by_id[deleted_position_id])
return len(added_positions) > 0 or len(updated_positions) > 0 or len(deleted_position_ids) > 0
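    # Shape of the positions list that merge_changes() consumes
    # (illustrative; values and role tag names are placeholders):
    #
    #   [
    #     {'description': 'Help with UX', 'descriptionUrl': '', 'orderNumber': 0,
    #      'isHidden': False, 'roleTag': {'tag_name': 'ux-designer'}},            # new position
    #     {'id': 42, 'description': 'API work', 'descriptionUrl': '', 'orderNumber': 1,
    #      'isHidden': False, 'roleTag': {'tag_name': 'backend-developer'}},      # updated position
    #   ]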
class ProjectFile(models.Model):
# TODO: Add ForeignKey pointing to Contributor, see https://stackoverflow.com/a/20935513/6326903
file_project = models.ForeignKey(Project, related_name='files', blank=True, null=True, on_delete=models.CASCADE)
file_user = models.ForeignKey(Contributor, related_name='files', blank=True, null=True, on_delete=models.CASCADE)
file_group = models.ForeignKey(Group, related_name='files', blank=True, null=True, on_delete=models.CASCADE)
file_event = models.ForeignKey(Event, related_name='files', blank=True, null=True, on_delete=models.CASCADE)
file_visibility = models.CharField(max_length=50)
file_name = models.CharField(max_length=300)
file_key = models.CharField(max_length=400)
file_url = models.CharField(max_length=2083)
file_type = models.CharField(max_length=50)
file_category = models.CharField(max_length=50)
def __str__(self):
owner = self.get_owner() or ''
return f'[{owner}]:{self.file_name}.{self.file_type}({self.file_category})'
@staticmethod
def create(owner, file_url, file_name, file_key, file_type, file_category, file_visibility):
# TODO: Validate input
file = ProjectFile()
file.file_url = file_url
file.file_name = file_name
file.file_key = file_key
file.file_type = file_type
file.file_category = file_category
file.file_visibility = file_visibility
if type(owner) is Project:
file.file_project = owner
elif type(owner) is Group:
file.file_group = owner
elif type(owner) is Event:
file.file_event = owner
else:
file.file_user = owner
return file
@staticmethod
def merge_changes(owner, files):
# Add new files
added_files = filter(lambda file: 'id' not in file, files)
if type(owner) is Project:
old_files = list(ProjectFile.objects.filter(file_project=owner.id, file_category=FileCategory.ETC.value)
.values())
elif type(owner) is Group:
old_files = list(ProjectFile.objects.filter(file_group=owner.id, file_category=FileCategory.ETC.value)
.values())
elif type(owner) is Event:
old_files = list(ProjectFile.objects.filter(file_event=owner.id, file_category=FileCategory.ETC.value)
.values())
else:
old_files = list(ProjectFile.objects.filter(file_user=owner.id, file_category=FileCategory.ETC.value)
.values())
for file in added_files:
ProjectFile.from_json(owner=owner, file_category=FileCategory.ETC, file_json=file).save()
# Remove files that were deleted
old_file_ids = set(map(lambda file: file['id'], old_files))
updated_files = filter(lambda file: 'id' in file, files)
updated_file_ids = set(map(lambda file: file['id'], updated_files))
removed_file_ids = list(old_file_ids - updated_file_ids)
for file_id in removed_file_ids:
ProjectFile.objects.get(id=file_id).delete()
@staticmethod
def replace_single_file(owner, file_category, file_json, new_file_category=None):
"""
        :param owner: Owner model instance of the file
:param file_category: File type
:param file_json: File metadata
:param new_file_category: New file type
:return: True if the file was changed
"""
new_file_category = new_file_category or file_category
if type(owner) is Project:
existing_file = ProjectFile.objects.filter(file_project=owner.id, file_category=file_category.value).first()
elif type(owner) is Group:
existing_file = ProjectFile.objects.filter(file_group=owner.id, file_category=file_category.value).first()
elif type(owner) is Event:
existing_file = ProjectFile.objects.filter(file_event=owner.id, file_category=file_category.value).first()
else:
existing_file = ProjectFile.objects.filter(file_user=owner.id, file_category=file_category.value).first()
is_empty_field = is_json_field_empty(file_json)
file_changed = False
if is_empty_field and existing_file:
# Remove existing file
existing_file.delete()
file_changed = True
elif not is_empty_field:
if not existing_file:
# Add new file
thumbnail = ProjectFile.from_json(owner, new_file_category, file_json)
thumbnail.save()
file_changed = True
elif file_json['key'] != existing_file.file_key:
# Replace existing file
thumbnail = ProjectFile.from_json(owner, new_file_category, file_json)
thumbnail.save()
existing_file.delete()
file_changed = True
return file_changed
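    # The file_json passed to replace_single_file()/from_json() mirrors
    # to_json() (illustrative sketch; the values are placeholders):
    #
    #   {'key': 's3-object-key', 'fileName': 'logo.png',
    #    'publicUrl': 'https://example.org/logo.png', 'visibility': 'PUBLIC'}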
def get_owner(self):
return self.file_project or self.file_group or self.file_event or self.file_user
@staticmethod
def from_json(owner, file_category, file_json):
file_name_parts = file_json['fileName'].split('.')
file_name = "".join(file_name_parts[:-1])
file_type = file_name_parts[-1]
return ProjectFile.create(owner=owner,
file_url=file_json['publicUrl'],
file_name=file_name,
file_key=file_json['key'],
file_type=file_type,
file_category=file_category.value,
file_visibility=file_json['visibility'])
def to_json(self):
return {
'key': self.file_key,
'fileName': self.file_name + '.' + self.file_type,
'fileCategory': self.file_category,
'publicUrl': self.file_url,
'visibility': self.file_visibility
}
class FileCategory(Enum):
THUMBNAIL = 'THUMBNAIL'
THUMBNAIL_ERROR = 'THUMBNAIL_ERROR'
RESUME = 'RESUME'
ETC = 'ETC'
class UserAlert(models.Model):
email = models.EmailField()
filters = models.CharField(max_length=2083)
country = models.CharField(max_length=2)
postal_code = models.CharField(max_length=20)
def __str__(self):
return str(self.email)
@staticmethod
def create_or_update(email, filters, country, postal_code):
alert = UserAlert.objects.filter(email=email).first()
if alert is None:
alert = UserAlert()
alert.email = email
alert.filters = filters
alert.country = country
alert.postal_code = postal_code
alert.save()
class TaggedVolunteerRole(TaggedItemBase):
content_object = models.ForeignKey('VolunteerRelation', on_delete=models.CASCADE)
class VolunteerRelation(Archived):
project = models.ForeignKey(Project, related_name='volunteer_relations', on_delete=models.CASCADE)
volunteer = models.ForeignKey(Contributor, related_name='volunteer_relations', on_delete=models.CASCADE)
role = TaggableManager(blank=True, through=TaggedVolunteerRole)
role.remote_field.related_name = "+"
application_text = models.CharField(max_length=10000, blank=True)
is_approved = models.BooleanField(default=False)
is_co_owner = models.BooleanField(default=False)
is_team_leader = models.BooleanField(default=False)
projected_end_date = models.DateTimeField(auto_now=False, null=True, blank=True)
application_date = models.DateTimeField(auto_now=False, null=False, default=timezone.now)
approved_date = models.DateTimeField(auto_now=False, null=True, blank=True)
last_reminder_date = models.DateTimeField(auto_now=False, null=True, blank=True)
reminder_count = models.IntegerField(default=0)
re_enrolled_last_date = models.DateTimeField(auto_now=False, null=True, blank=True)
re_enroll_last_reminder_date = models.DateTimeField(auto_now=False, null=True, blank=True)
re_enroll_reminder_count = models.IntegerField(default=0)
def __str__(self):
return 'Project: ' + str(self.project.project_name) + ', User: ' + str(self.volunteer.email)
def to_json(self):
volunteer = self.volunteer
volunteer_json = {
'application_id': self.id,
'user': volunteer.hydrate_to_tile_json(),
'application_text': self.application_text,
'application_date': self.application_date.__str__(),
'platform_date_joined': volunteer.date_joined.__str__(),
'roleTag': Tag.hydrate_to_json(volunteer.id, self.role.all().values())[0],
'isApproved': self.is_approved,
'isCoOwner': self.is_co_owner,
'isTeamLeader': self.is_team_leader,
'isUpForRenewal': self.is_up_for_renewal(),
'projectedEndDate': self.projected_end_date.__str__()
}
return volunteer_json
def hydrate_project_volunteer_info(self):
volunteer_json = self.to_json()
project_json = self.project.hydrate_to_list_json()
return merge_dicts(project_json, volunteer_json)
def is_up_for_renewal(self, now=None):
now = now or timezone.now()
return (self.projected_end_date - now) < settings.VOLUNTEER_REMINDER_OVERALL_PERIOD
@staticmethod
def create(project, volunteer, projected_end_date, role, application_text):
relation = VolunteerRelation()
relation.project = project
relation.volunteer = volunteer
relation.projected_end_date = projected_end_date
relation.application_text = application_text
relation.is_co_owner = False
relation.save()
relation.role.add(role)
return relation
@staticmethod
def get_by_user(user):
return VolunteerRelation.objects.filter(volunteer=user.id)
@staticmethod
def get_by_project(project, active=True):
return VolunteerRelation.objects.filter(project_id=project.id, is_approved=active, deleted=not active)
class TaggedCategory(TaggedItemBase):
content_object = models.ForeignKey('Testimonial', on_delete=models.CASCADE)
class Testimonial(models.Model):
name = models.CharField(max_length=100)
avatar_url = models.CharField(max_length=2083, blank=True)
title = models.CharField(max_length=100, blank=True)
text = models.CharField(max_length=2000)
source = models.CharField(max_length=2000, blank=True)
categories = TaggableManager(blank=True, through=TaggedCategory)
categories.remote_field.related_name = 'category_testimonials'
priority = models.IntegerField(default=0)
active = models.BooleanField(default=True)
def __str__(self):
return self.name
def to_json(self):
return {
'name': self.name,
'avatar_url': self.avatar_url,
'title': self.title,
'text': self.text,
'source': self.source
        }
| mit | 7,606,037,183,108,077,000 | 43.057803 | 137 | 0.654436 | false |
ChawalitK/odoo | addons/website_quote/controllers/main.py | 1 | 13530 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp import SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.web.http import request
import werkzeug
import datetime
import time
from openerp.tools.translate import _
from openerp.addons.website_mail.controllers.main import _message_post_helper
class sale_quote(http.Controller):
@http.route([
"/quote/<int:order_id>",
"/quote/<int:order_id>/<token>"
], type='http', auth="public", website=True)
def view(self, order_id, pdf=None, token=None, message=False, **post):
# use SUPERUSER_ID allow to access/view order for public user
# only if he knows the private token
now = time.strftime('%Y-%m-%d')
if token:
order = request.env['sale.order'].sudo().search([('id', '=', order_id), ('access_token', '=', token)])
# Log only once a day
if order and request.session.get('view_quote', False) != now:
request.session['view_quote'] = now
body = _('Quotation viewed by customer')
_message_post_helper(res_model='sale.order', res_id=order.id, message=body, token=token, token_field="access_token", message_type='notification')
else:
order = request.env['sale.order'].search([('id', '=', order_id)])
if not order:
return request.website.render('website.404')
dummy, action = request.env['ir.model.data'].get_object_reference('sale', 'action_quotations')
days = 0
if order.validity_date:
days = (datetime.datetime.strptime(order.validity_date, '%Y-%m-%d') - datetime.datetime.now()).days + 1
if pdf:
report_obj = request.registry['report']
pdf = report_obj.get_pdf(request.cr, SUPERUSER_ID, [order_id], 'website_quote.report_quote', data=None, context=dict(request.context, set_viewport_size=True))
pdfhttpheaders = [('Content-Type', 'application/pdf'), ('Content-Length', len(pdf))]
return request.make_response(pdf, headers=pdfhttpheaders)
user = request.registry['res.users'].browse(request.cr, SUPERUSER_ID, request.uid, context=request.context)
tx_id = request.session.get('quote_%s_transaction_id' % order.id)
if not tx_id:
tx_id = request.registry['payment.transaction'].search(request.cr, SUPERUSER_ID, [('reference', '=', order.name)], context=request.context)
tx = request.registry['payment.transaction'].browse(request.cr, SUPERUSER_ID, tx_id, context=request.context) if tx_id else False
values = {
'quotation': order,
'message': message and int(message) or False,
'option': bool(filter(lambda x: not x.line_id, order.options)),
'order_valid': (not order.validity_date) or (now <= order.validity_date),
'days_valid': days,
'action': action,
'breadcrumb': user.partner_id == order.partner_id,
'tx_id': tx_id,
'tx_state': tx.state if tx else False,
'tx_post_msg': tx.acquirer_id.post_msg if tx else False,
'need_payment': order.invoice_status == 'to invoice' and (not tx or tx.state in ['draft', 'cancel', 'error']),
'token': token,
}
if order.require_payment or values['need_payment']:
payment_obj = request.registry.get('payment.acquirer')
acquirer_ids = payment_obj.search(request.cr, SUPERUSER_ID, [('website_published', '=', True), ('company_id', '=', order.company_id.id)], context=request.context)
values['acquirers'] = list(payment_obj.browse(request.cr, token and SUPERUSER_ID or request.uid, acquirer_ids, context=request.context))
render_ctx = dict(request.context, submit_class='btn btn-primary', submit_txt=_('Pay & Confirm'))
for acquirer in values['acquirers']:
acquirer.button = payment_obj.render(
request.cr, SUPERUSER_ID, acquirer.id,
'/',
order.amount_total,
order.pricelist_id.currency_id.id,
values={
'return_url': '/quote/%s/%s' % (order_id, token) if token else '/quote/%s' % order_id,
'type': 'form',
'alias_usage': _('If we store your payment information on our server, subscription payments will be made automatically.'),
'partner_id': order.partner_id.id,
},
context=render_ctx)
return request.website.render('website_quote.so_quotation', values)
@http.route(['/quote/accept'], type='json', auth="public", website=True)
def accept(self, order_id, token=None, signer=None, sign=None, **post):
order_obj = request.registry.get('sale.order')
order = order_obj.browse(request.cr, SUPERUSER_ID, order_id)
if token != order.access_token:
return request.website.render('website.404')
if order.require_payment:
return request.website.render('website.404')
if order.state != 'sent':
return False
attachments=sign and [('signature.png', sign.decode('base64'))] or []
order_obj.action_confirm(request.cr, SUPERUSER_ID, [order_id], context=request.context)
message = _('Order signed by %s') % (signer,)
_message_post_helper(message=message, res_id=order_id, res_model='sale.order', attachments=attachments, **({'token': token, 'token_field': 'access_token'} if token else {}))
return True
@http.route(['/quote/<int:order_id>/<token>/decline'], type='http', auth="public", methods=['POST'], website=True)
def decline(self, order_id, token, **post):
order_obj = request.registry.get('sale.order')
order = order_obj.browse(request.cr, SUPERUSER_ID, order_id)
if token != order.access_token:
return request.website.render('website.404')
if order.state != 'sent':
return werkzeug.utils.redirect("/quote/%s/%s?message=4" % (order_id, token))
request.registry.get('sale.order').action_cancel(request.cr, SUPERUSER_ID, [order_id])
message = post.get('decline_message')
if message:
_message_post_helper(message=message, res_id=order_id, res_model='sale.order', **{'token': token, 'token_field': 'access_token'} if token else {})
return werkzeug.utils.redirect("/quote/%s/%s?message=2" % (order_id, token))
@http.route(['/quote/update_line'], type='json', auth="public", website=True)
def update(self, line_id, remove=False, unlink=False, order_id=None, token=None, **post):
order = request.registry.get('sale.order').browse(request.cr, SUPERUSER_ID, int(order_id))
if token != order.access_token:
return request.website.render('website.404')
if order.state not in ('draft','sent'):
return False
        line_id = int(line_id)
        if unlink:
            request.registry.get('sale.order.line').unlink(request.cr, SUPERUSER_ID, [line_id], context=request.context)
            return False
        number = (remove and -1 or 1)
order_line_obj = request.registry.get('sale.order.line')
order_line_val = order_line_obj.read(request.cr, SUPERUSER_ID, [line_id], [], context=request.context)[0]
quantity = order_line_val['product_uom_qty'] + number
order_line_obj.write(request.cr, SUPERUSER_ID, [line_id], {'product_uom_qty': (quantity)}, context=request.context)
return [str(quantity), str(order.amount_total)]
@http.route(["/quote/template/<model('sale.quote.template'):quote>"], type='http', auth="user", website=True)
def template_view(self, quote, **post):
values = { 'template': quote }
return request.website.render('website_quote.so_template', values)
@http.route(["/quote/add_line/<int:option_id>/<int:order_id>/<token>"], type='http', auth="public", website=True)
def add(self, option_id, order_id, token, **post):
vals = {}
order = request.registry.get('sale.order').browse(request.cr, SUPERUSER_ID, order_id)
if token != order.access_token:
return request.website.render('website.404')
if order.state not in ['draft', 'sent']:
return request.website.render('website.http_error', {'status_code': 'Forbidden', 'status_message': _('You cannot add options to a confirmed order.')})
option_obj = request.registry.get('sale.order.option')
option = option_obj.browse(request.cr, SUPERUSER_ID, option_id)
vals = {
'price_unit': option.price_unit,
'website_description': option.website_description,
'name': option.name,
'order_id': order.id,
'product_id': option.product_id.id,
'layout_category_id': option.layout_category_id.id,
'product_uom_qty': option.quantity,
'product_uom': option.uom_id.id,
'discount': option.discount,
}
line = request.registry.get('sale.order.line').create(request.cr, SUPERUSER_ID, vals, context=request.context)
request.registry.get('sale.order.line')._compute_tax_id(request.cr, SUPERUSER_ID, [line], context=request.context)
option_obj.write(request.cr, SUPERUSER_ID, [option.id], {'line_id': line}, context=request.context)
return werkzeug.utils.redirect("/quote/%s/%s#pricing" % (order.id, token))
# note dbo: website_sale code
@http.route(['/quote/<int:order_id>/transaction/<int:acquirer_id>'], type='json', auth="public", website=True)
def payment_transaction(self, acquirer_id, order_id):
return self.payment_transaction_token(acquirer_id, order_id, None)
@http.route(['/quote/<int:order_id>/transaction/<int:acquirer_id>/<token>'], type='json', auth="public", website=True)
def payment_transaction_token(self, acquirer_id, order_id, token):
""" Json method that creates a payment.transaction, used to create a
transaction when the user clicks on 'pay now' button. After having
created the transaction, the event continues and the user is redirected
to the acquirer website.
:param int acquirer_id: id of a payment.acquirer record. If not set the
user is redirected to the checkout page
"""
cr, uid, context = request.cr, request.uid, request.context
payment_obj = request.registry.get('payment.acquirer')
transaction_obj = request.registry.get('payment.transaction')
order = request.registry.get('sale.order').browse(cr, SUPERUSER_ID, order_id, context=context)
if not order or not order.order_line or acquirer_id is None:
return request.redirect("/quote/" + str(order_id))
# find an already existing transaction
tx_id = transaction_obj.search(cr, SUPERUSER_ID, [('reference', '=', order.name)], context=context)
tx = transaction_obj.browse(cr, SUPERUSER_ID, tx_id, context=context)
if tx:
if tx.sale_order_id.id != order.id or tx.state in ['error', 'cancel'] or tx.acquirer_id.id != acquirer_id:
tx = False
tx_id = False
elif tx.state == 'draft':
tx.write({
'amount': order.amount_total,
})
if not tx:
tx_id = transaction_obj.create(cr, SUPERUSER_ID, {
'acquirer_id': acquirer_id,
'type': order._get_payment_type(),
'amount': order.amount_total,
'currency_id': order.pricelist_id.currency_id.id,
'partner_id': order.partner_id.id,
'reference': transaction_obj.get_next_reference(cr, uid, order.name, context=context),
'sale_order_id': order.id,
'callback_eval': "self.env['sale.order']._confirm_online_quote(self.sale_order_id.id, self)"
}, context=context)
request.session['quote_%s_transaction_id' % order.id] = tx_id
tx = transaction_obj.browse(cr, SUPERUSER_ID, tx_id, context=context)
# update quotation
request.registry['sale.order'].write(
cr, SUPERUSER_ID, [order.id], {
'payment_acquirer_id': acquirer_id,
'payment_tx_id': tx_id
}, context=context)
# confirm the quotation
if tx.acquirer_id.auto_confirm == 'at_pay_now':
request.registry['sale.order'].action_confirm(cr, SUPERUSER_ID, [order.id], context=dict(request.context, send_email=True))
return payment_obj.render(
request.cr, SUPERUSER_ID, tx.acquirer_id.id,
tx.reference,
order.amount_total,
order.pricelist_id.currency_id.id,
values={
'return_url': '/quote/%s/%s' % (order_id, token) if token else '/quote/%s' % order_id,
'type': order._get_payment_type(),
'alias_usage': _('If we store your payment information on our server, subscription payments will be made automatically.'),
'partner_id': order.partner_shipping_id.id or order.partner_invoice_id.id,
'billing_partner_id': order.partner_invoice_id.id,
},
context=dict(context, submit_class='btn btn-primary', submit_txt=_('Pay & Confirm')))
| gpl-3.0 | -222,172,791,168,102,180 | 56.088608 | 181 | 0.607317 | false |
PDX-Flamingo/codonpdx-python | codonpdx/calc.py | 1 | 5043 | #!/usr/bin/env python
from __future__ import division
from collections import defaultdict
from db import dbManager
# compare a virus to organisms in a sequence database
# db: the database manager used to get the data from
# virus_name: the accession and version number of the virus
# virus_db: the location of the input virus's information (probably 'input')
# seq_db: the name of the sequence database table
# codon_table_name: the name of the codon table
def comparison(db, virus_name, virus_db, seq_db, codon_table_name):
virus = db.getOrganism(virus_name, virus_db)
codon_table = db.getCodonTable(codon_table_name)
scores = defaultdict(int)
shuffle_scores = defaultdict(int)
virus_ratio = ratio(virus, codon_table)
virus_shuffle_ratio = ratio_shuffle(virus, codon_table)
for organism in db.getOrganisms(seq_db, None):
organism_ratio = ratio(organism, codon_table)
# calculate the score for the virus and this organism
for k in virus_ratio:
scores[organism['id']] += abs(virus_ratio[k] - organism_ratio[k])
for k in virus_shuffle_ratio:
shuffle_scores[organism['id']] += \
abs(virus_shuffle_ratio[k] - organism_ratio[k])
return [scores, shuffle_scores]
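# Illustrative sketch (not part of the original module): the comparison score
# is the sum of absolute differences between per-codon ratios, so two
# organisms with identical codon usage score 0 and larger scores mean more
# divergent usage. The ratio values below are made up.
def _score_example():
    virus_ratio = {'AAA': 0.75, 'AAG': 0.25}
    organism_ratio = {'AAA': 0.50, 'AAG': 0.50}
    return sum(abs(virus_ratio[k] - organism_ratio[k]) for k in virus_ratio)  # 0.5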
# same as above but takes a list of accession ids to use from the table
def comparison_list(db, virus_name, virus_db, ids, seq_db, codon_table_name):
virus = db.getOrganism(virus_name, virus_db)
codon_table = db.getCodonTable(codon_table_name)
scores = defaultdict(int)
shuffle_scores = defaultdict(int)
virus_ratio = ratio(virus, codon_table)
virus_shuffle_ratio = ratio_shuffle(virus, codon_table)
    # This is the only portion that differs from comparison(): fetch a subset
    # of organisms instead of everything. Consider passing None as the id list
    # to fetch everything, so a whole separate method would not be needed.
for organism in db.getOrganisms(seq_db, ids):
organism_ratio = ratio(organism, codon_table)
# calculate the score for the virus and this organism
for k in virus_ratio:
scores[organism['id']] += abs(virus_ratio[k] - organism_ratio[k])
for k in virus_shuffle_ratio:
shuffle_scores[organism['id']] += \
abs(virus_shuffle_ratio[k] - organism_ratio[k])
return [scores, shuffle_scores]
# calculate the ratios for a given organism using a certain codon table
# organism: the organism; needs to be a dict that maps codon triplets to counts
# codon_table: the codon table acquired from a dbManager
def ratio(organism, codon_table):
ratios = {}
for acid, codons in codon_table:
acid_total = 0
# calculate the total number of codons for the acid
for codon in codons.split(" "):
acid_total += int(organism[codon.lower()])
# calculate the number of each individual codon
for codon in codons.split(" "):
# normal sequence codons
codon_total = int(organism[codon.lower()])
if codon_total != 0:
ratio_calc = codon_total / acid_total
else:
ratio_calc = 0
# ratio for this codon
ratios[codon] = ratio_calc
return ratios
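# Illustrative sketch (not part of the original module): ratio() normalizes
# each codon count by the total count for its amino acid. The single-acid
# codon table and the counts below are made up.
def _ratio_example():
    toy_table = [('Lys', 'AAA AAG')]        # (amino acid, space-separated codons)
    toy_organism = {'aaa': 30, 'aag': 10}   # lowercase codon -> count
    return ratio(toy_organism, toy_table)   # {'AAA': 0.75, 'AAG': 0.25}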
# as ratio(), but using the organism's shuffle_* count fields
def ratio_shuffle(organism, codon_table):
ratios = {}
for acid, codons in codon_table:
acid_total = 0
# calculate the total number of codons for the acid
for codon in codons.split(" "):
acid_total += int(organism["shuffle_" + codon.lower()])
# calculate the number of each individual codon
for codon in codons.split(" "):
# normal sequence codons
codon_total = int(organism["shuffle_" + codon.lower()])
if codon_total != 0:
ratio_calc = codon_total / acid_total
else:
ratio_calc = 0
# ratio for this codon
ratios[codon] = ratio_calc
return ratios
def calc(args):
with dbManager('config/db.cfg') as db:
# do custom list comparison if we have an id list
if hasattr(args, 'ids') and args.ids:
scores_calc = comparison_list(db, args.job, 'input',
args.ids, args.dbname, 'standard')
else:
scores_calc = comparison(db, args.job, 'input',
args.dbname, 'standard')
# output if requested
if args.output:
print "Scores for " + args.virus + " versus " + args.dbname
for k in sorted(scores_calc[0], key=scores_calc[0].get):
print scores_calc[0][k], k
print "Shuffle scores for " + args.virus + " versus " + args.dbname
for k in sorted(scores_calc[1], key=scores_calc[1].get):
print scores_calc[1][k], k
# otherwise put in the results table
else:
db.storeResults(args.job, scores_calc[0], scores_calc[1])
| apache-2.0 | -8,722,274,283,386,708,000 | 42.102564 | 79 | 0.622447 | false |
AltarBeastiful/rateItSeven | tests/senscritique/test_sc_list.py | 1 | 1608 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# === This file is part of RateItSeven ===
#
# Copyright 2015, Rémi Benoit <[email protected]>
# Copyright 2015, Paolo de Vathaire <[email protected]>
#
# RateItSeven is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RateItSeven is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RateItSeven. If not, see <http://www.gnu.org/licenses/>.
#
import unittest
from rateItSeven.senscritique.domain.sc_list import ScList, ListType
class TestScList(unittest.TestCase):
def test_compute_list_id(self):
sclist = ScList(type=ListType.MOVIE, name="A name", path="liste/a_name/1624343")
self.assertEqual("1624343", sclist.compute_list_id())
def test_compute_list_id_slash_start(self):
sclist = ScList(type=ListType.MOVIE, name="A name", path="/liste/a_name/1624343")
self.assertEqual("1624343", sclist.compute_list_id())
def test_should_construct_page_url(self):
sclist = ScList(type=ListType.MOVIE, name="A name", path="/liste/a_name/1622651")
self.assertEqual("/sc2/liste/1622651/page-1.ajax", sclist.page_url(index=1))
| gpl-3.0 | 8,781,280,135,167,318,000 | 39.175 | 89 | 0.710641 | false |
WesleyPeng/uiXautomation | src/main/python/taf/foundation/plugins/web/selenium/controls/combobox.py | 1 | 3859 | # Copyright (c) 2017-2018 {Flair Inc.} WESLEY PENG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from taf.foundation.api.ui.controls import ComboBox as IComboBox
from taf.foundation.plugins.web.selenium.controls.listitem import ListItem
from taf.foundation.plugins.web.selenium.support.elementfinder import \
ElementFinder
from taf.foundation.plugins.web.selenium.support.locator import Locator
from taf.foundation.plugins.web.selenium.webelement import WebElement
class ComboBox(WebElement, IComboBox):
def __init__(self, *elements, **conditions):
conditions.setdefault('tag', 'select')
_options_kwarg = 'option'
_multi_selection_kwarg = 'multiple'
self._options_tag = conditions.pop(
_options_kwarg
) if _options_kwarg in conditions else _options_kwarg
self._multi_selection_attr = conditions.pop(
_multi_selection_kwarg
) if _multi_selection_kwarg in conditions else _multi_selection_kwarg
WebElement.__init__(
self, *elements, **conditions
)
def set(self, value):
if isinstance(value, (list, tuple)):
if not self.can_select_multiple:
raise RuntimeError(
'Multi-selection is not supported'
)
else:
value = [value]
for val in value:
if str(val).isdigit():
list(self.options)[int(val)].select()
continue
else:
for opt in self.options:
if (
val == opt.current.get_attribute('value')
) or (val == opt.object.text):
opt.select()
break
else:
raise ValueError(
'Could not locate element with value: {}'.format(
val
)
)
@property
def value(self):
if self.exists():
return ';'.join(
opt.object.text
for opt in self.options
if opt.object.is_selected()
)
return r''
@property
def options(self):
if not self._children:
if self.exists():
self._children = [
ListItem(element=element, parent=self)
for element in ElementFinder(
self.object
).find_elements(
Locator.XPATH,
'.//{}'.format(self._options_tag)
) if element # and element.text
]
return (child for child in self._children)
@property
def is_read_only(self):
assert self.exists(), 'N/A - invisible element'
return not self.object.is_enabled()
@property
def can_select_multiple(self):
return self._get_attribute(
self._multi_selection_attr
)
@property
def is_selection_required(self):
return self._get_attribute('required')
def _get_attribute(self, name):
assert not self.is_read_only, \
'N/A - disabled element'
attr_value = self.object.get_attribute(
name
)
return attr_value and attr_value != 'false'
| apache-2.0 | -4,045,562,744,497,723,400 | 30.892562 | 77 | 0.558176 | false |
blckshrk/Weboob | setup.py | 1 | 6697 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2013 Christophe Benz, Laurent Bachelier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from setuptools import find_packages, setup
import glob
import os
import subprocess
import sys
def find_executable(name, names):
envname = '%s_EXECUTABLE' % name.upper()
if os.getenv(envname):
return os.getenv(envname)
paths = os.getenv('PATH', os.defpath).split(os.pathsep)
exts = os.getenv('PATHEXT', os.pathsep).split(os.pathsep)
for name in names:
for path in paths:
for ext in exts:
fpath = os.path.join(path, name) + ext
if os.path.exists(fpath) and os.access(fpath, os.X_OK):
return fpath
print >>sys.stderr, 'Could not find executable: %s' % name
def build_qt():
print >>sys.stderr, 'Building Qt applications...'
make = find_executable('make', ('gmake', 'make'))
pyuic4 = find_executable('pyuic4', ('python2-pyuic4', 'pyuic4-python2.7', 'pyuic4-python2.6', 'pyuic4'))
if not pyuic4 or not make:
print >>sys.stderr, 'Install missing component(s) (see above) or disable Qt applications (with --no-qt).'
sys.exit(1)
subprocess.check_call(
[make,
'-f', 'build.mk',
'-s', '-j2',
'all',
'PYUIC=%s%s' % (pyuic4, ' WIN32=1' if sys.platform == 'win32' else '')])
def install_weboob():
scripts = set(os.listdir('scripts'))
packages = set(find_packages(exclude=['modules']))
hildon_scripts = set(('masstransit',))
qt_scripts = set(('qboobmsg', 'qhavedate', 'qvideoob', 'weboob-config-qt', 'qwebcontentedit', 'qflatboob', 'qcineoob', 'qcookboob', 'qhandjoob'))
if not options.hildon:
scripts = scripts - hildon_scripts
if options.qt:
build_qt()
else:
scripts = scripts - qt_scripts
hildon_packages = set((
'weboob.applications.masstransit',
))
qt_packages = set((
'weboob.applications.qboobmsg',
'weboob.applications.qboobmsg.ui',
'weboob.applications.qcineoob',
'weboob.applications.qcineoob.ui',
'weboob.applications.qcookboob',
'weboob.applications.qcookboob.ui',
'weboob.applications.qhandjoob',
'weboob.applications.qhandjoob.ui',
'weboob.applications.qhavedate',
'weboob.applications.qhavedate.ui',
'weboob.applications.qvideoob',
'weboob.applications.qvideoob.ui',
'weboob.applications.qweboobcfg',
'weboob.applications.qweboobcfg.ui',
'weboob.applications.qwebcontentedit',
        'weboob.applications.qwebcontentedit.ui',
'weboob.applications.qflatboob',
'weboob.applications.qflatboob.ui'
))
if not options.hildon:
packages = packages - hildon_packages
if not options.qt:
packages = packages - qt_packages
data_files = [
('share/man/man1', glob.glob('man/*')),
]
if options.xdg:
data_files.extend([
('share/applications', glob.glob('desktop/*')),
('share/icons/hicolor/64x64/apps', glob.glob('icons/*')),
])
# Do not put PyQt, it does not work properly.
requirements = [
'lxml',
'feedparser',
'mechanize',
'gdata',
'python-dateutil',
'PyYAML',
]
try:
import Image
except ImportError:
requirements.append('Pillow')
else:
if 'PILcompat' not in Image.__file__:
requirements.append('PIL')
else:
requirements.append('Pillow')
if sys.version_info[0] > 2:
print >>sys.stderr, 'Python 3 is not supported.'
sys.exit(1)
if sys.version_info[1] < 6: # older than 2.6
print >>sys.stderr, 'Python older than 2.6 is not supported.'
sys.exit(1)
if not options.deps:
requirements = []
setup(
name='weboob',
version='0.h',
description='Weboob, Web Outside Of Browsers',
long_description=open('README').read(),
author='Romain Bignon',
author_email='[email protected]',
maintainer='Romain Bignon',
maintainer_email='[email protected]',
url='http://weboob.org/',
license='GNU AGPL 3',
classifiers=[
'Environment :: Console',
'Environment :: X11 Applications :: Qt',
'License :: OSI Approved :: GNU Affero General Public License v3',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python',
'Topic :: Communications :: Email',
'Topic :: Internet :: WWW/HTTP',
],
packages=packages,
scripts=[os.path.join('scripts', script) for script in scripts],
data_files=data_files,
install_requires=requirements,
)
class Options(object):
hildon = False
qt = False
xdg = True
deps = True
options = Options()
args = list(sys.argv)
if '--hildon' in args and '--no-hildon' in args:
print >>sys.stderr, '--hildon and --no-hildon options are incompatible'
sys.exit(1)
if '--qt' in args and '--no-qt' in args:
print >>sys.stderr, '--qt and --no-qt options are incompatible'
sys.exit(1)
if '--xdg' in args and '--no-xdg' in args:
print >>sys.stderr, '--xdg and --no-xdg options are incompatible'
sys.exit(1)
if '--hildon' in args or os.environ.get('HILDON') == 'true':
options.hildon = True
if '--hildon' in args:
args.remove('--hildon')
elif '--no-hildon' in args:
options.hildon = False
args.remove('--no-hildon')
if '--qt' in args:
options.qt = True
args.remove('--qt')
elif '--no-qt' in args:
options.qt = False
args.remove('--no-qt')
if '--xdg' in args:
options.xdg = True
args.remove('--xdg')
elif '--no-xdg' in args:
options.xdg = False
args.remove('--no-xdg')
if '--nodeps' in args:
options.deps = False
args.remove('--nodeps')
sys.argv = args
install_weboob()
| agpl-3.0 | -4,294,198,527,596,077,000 | 29.720183 | 149 | 0.60669 | false |
cornell-brg/pymtl | pymtl/tools/integration/systemc_tests/multiple_models/RegAndIncr_test.py | 1 | 1988 | #=======================================================================
# RegAndIncr_test.py
#=======================================================================
import random
import pytest
from pymtl import *
from RegAndIncrSC import RegAndIncrSC
simple_test_vectors = [
( 4, 5),
( 6, 7),
( 2, 3),
(15, 16),
( 8, 9),
( 0, 1),
(10, 11),
]
#-----------------------------------------------------------------------
# test_simple
#-----------------------------------------------------------------------
def test_simple():
# instantiate the model and elaborate it
model = RegAndIncrSC()
model.elaborate()
# create the simulator
sim = SimulationTool( model )
sim.reset() # remember to reset!
# verify the model
print
for input_vector, expected_out in simple_test_vectors:
sim.print_line_trace()
model.in_.value = input_vector
sim.cycle()
assert model.out == expected_out
sim.print_line_trace()
model.destroy()
#-----------------------------------------------------------------------
# gen_test_vectors
#-----------------------------------------------------------------------
def gen_test_vectors( nbits, size=10 ):
vectors = []
for i in range( size ):
input_value = Bits( nbits, random.randrange( 2**nbits ) )
vectors.append( (input_value, input_value + 1) )
return vectors
#-----------------------------------------------------------------------
# test_random
#-----------------------------------------------------------------------
def test_random():
# elaborate model
model = RegAndIncrSC()
model.elaborate()
# create the simulator
sim = SimulationTool( model )
sim.reset() # remember to reset!
# verify the model
print
for input_vector, expected_out in gen_test_vectors( 32 ):
sim.print_line_trace()
model.in_.value = input_vector
sim.cycle()
assert model.out == expected_out
sim.print_line_trace()
model.destroy()
| bsd-3-clause | -3,226,548,373,729,259,500 | 19.708333 | 72 | 0.445171 | false |
edublancas/sklearn-evaluation | tests/nb/conftest.py | 1 | 1692 | import jupytext
import nbformat
import papermill as pm
import pytest
def save_and_execute_notebook(nb_str, path):
nb = jupytext.reads(nb_str, fmt='py:light')
nb.metadata['kernelspec'] = {
'name': 'python3',
'language': 'python',
'display_name': 'Python 3'
}
nbformat.write(nb, path)
pm.execute_notebook(str(path), str(path))
return str(path)
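# Hedged usage sketch (not one of the original fixtures): any py:light string
# can be turned into an executed notebook on disk with the helper above. The
# cell content and file name here are arbitrary.
def _demo_notebook(path='nb_demo.ipynb'):
    content = """
# + tags=["answer"]
answer = 21 * 2
print(answer)
"""
    return save_and_execute_notebook(content, path)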
@pytest.fixture
def nb_literals():
content = """
# + tags=["int"]
int_ = 1
print(int_)
# + tags=["list"]
list_ = [1, 2, 3]
print(list_)
# + tags=["dict"]
dict_ = {'x': 1, 'y': 2}
dict_
"""
save_and_execute_notebook(content, 'nb_literals.ipynb')
@pytest.fixture
def nb_other_literals():
content = """
# + tags=["int"]
int_ = 2
print(int_)
# + tags=["list"]
list_ = [2, 3, 4]
print(list_)
# + tags=["dict"]
dict_ = {'x': 2, 'y': 3}
dict_
"""
save_and_execute_notebook(content, 'nb_other_literals.ipynb')
@pytest.fixture
def nb_plot():
content = """
import matplotlib.pyplot as plt
# + tags=["a"]
plt.plot([1, 2, 3], [1, 2, 3])
# + tags=["b"]
42
"""
save_and_execute_notebook(content, 'nb_plot.ipynb')
@pytest.fixture
def nb_table():
content = """
import pandas as pd
# + tags=["a"]
pd.DataFrame({'a': [1,2 ,3]})
# + tags=["b"]
42
"""
save_and_execute_notebook(content, 'nb_table.ipynb')
@pytest.fixture
def nb_no_output():
content = """
import pandas as pd
# + tags=["a"]
x = 1
"""
save_and_execute_notebook(content, 'nb_no_output.ipynb')
@pytest.fixture
def nb_invalid_output():
content = """
import numpy as np
# + tags=["numpy_array"]
np.array([1, 2, 3])
"""
return save_and_execute_notebook(content, 'nb_invalid_output.ipynb')
| mit | 4,126,742,188,571,277,000 | 15.427184 | 72 | 0.594563 | false |
kelle/astropy | astropy/coordinates/builtin_frames/fk5.py | 1 | 3033 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, unicode_literals, division,
print_function)
from ..representation import SphericalRepresentation
from ..baseframe import (BaseCoordinateFrame, frame_transform_graph,
RepresentationMapping)
from ..frame_attributes import TimeFrameAttribute
from ..transformations import DynamicMatrixTransform
from .. import earth_orientation as earth
from .utils import EQUINOX_J2000
class FK5(BaseCoordinateFrame):
"""
A coordinate or frame in the FK5 system.
Note that this is a barycentric version of FK5 - that is, the origin for
this frame is the Solar System Barycenter, *not* the Earth geocenter.
Parameters
----------
representation : `BaseRepresentation` or None
A representation object or None to have no data (or use the other keywords)
ra : `Angle`, optional, must be keyword
The RA for this object (``dec`` must also be given and ``representation``
must be None).
dec : `Angle`, optional, must be keyword
The Declination for this object (``ra`` must also be given and
``representation`` must be None).
distance : `~astropy.units.Quantity`, optional, must be keyword
The Distance for this object along the line-of-sight.
(``representation`` must be None).
equinox : `~astropy.time.Time`, optional, must be keyword
The equinox of this frame.
copy : bool, optional
If `True` (default), make copies of the input coordinate arrays.
Can only be passed in as a keyword argument.
"""
frame_specific_representation_info = {
'spherical': [RepresentationMapping('lon', 'ra'),
RepresentationMapping('lat', 'dec')]
}
frame_specific_representation_info['unitspherical'] = \
frame_specific_representation_info['spherical']
default_representation = SphericalRepresentation
equinox = TimeFrameAttribute(default=EQUINOX_J2000)
@staticmethod
def _precession_matrix(oldequinox, newequinox):
"""
Compute and return the precession matrix for FK5 based on Capitaine et
al. 2003/IAU2006. Used inside some of the transformation functions.
Parameters
----------
oldequinox : `~astropy.time.Time`
The equinox to precess from.
newequinox : `~astropy.time.Time`
The equinox to precess to.
Returns
-------
newcoord : array
The precession matrix to transform to the new equinox
"""
return earth.precession_matrix_Capitaine(oldequinox, newequinox)
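def _example_fk5_usage():
    # Hedged usage sketch, not part of astropy itself: construct an FK5
    # coordinate at an explicit (non-J2000) equinox, as described in the class
    # docstring above. The numbers are arbitrary.
    from astropy import units as u
    return FK5(ra=10.684 * u.deg, dec=41.269 * u.deg, equinox='J1975')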
# This is the "self-transform". Defined at module level because the decorator
# needs a reference to the FK5 class
@frame_transform_graph.transform(DynamicMatrixTransform, FK5, FK5)
def fk5_to_fk5(fk5coord1, fk5frame2):
return fk5coord1._precession_matrix(fk5coord1.equinox, fk5frame2.equinox)
| bsd-3-clause | 4,531,921,251,039,461,400 | 38.38961 | 83 | 0.672931 | false |
bgutter/sylvia | sylvia/TestSylvia.py | 1 | 4200 | #
# TestSylvia.py
#
# Unit tests for the Sylvia class
#
import unittest
from SylviaApiWrapper import Sylvia
class TestSylvia( unittest.TestCase ):
"""
Unit testing for the Sylvia class.
"""
def verifyPronunciation( self, pronunciation ):
"""
Complain if this isn't a valid pronunciation.
"""
self.assertIsInstance( pronunciation, list )
self.assertTrue( len( pronunciation ) > 0 )
for phoneme in pronunciation:
self.assertIsInstance( phoneme, basestring )
def setUp( self ):
"""
Create the Sylvia object.
"""
self.sylvia = Sylvia()
def test_getPronunciationKnown( self ):
"""
Test Sylvia.getPronunciation() for words in the dictionary.
"""
known_words = {
"cats": [ [ "K", "AE", "T", "S" ] ],
"dogs": [ [ "D", "AA", "G", "Z", ], [ "D", "AO", "G", "Z", ] ],
"rabbits": [ [ "R", "AE", "B", "AH", "T", "S" ] ],
"she's": [ [ "SH", "IY", "Z" ] ],
"supercalifragilisticexpialidocious": [ [ "S", "UW", "P", "ER", "K", "AE", "L", "AH", "F", "R", "AE", "JH", "AH", "L", "IH", "S", "T", "IH", "K", "EH", "K", "S", "P", "IY", "AE", "L", "AH", "D", "OW", "SH", "AH", "S" ] ],
}
for word, expectedValues in known_words.items():
#
# Test simple case.
#
simplePronunciation = self.sylvia.getPronunciation( word )
self.verifyPronunciation( simplePronunciation )
self.assertIn( simplePronunciation, expectedValues )
#
# Test findAll case
#
retd = self.sylvia.getPronunciation( word, findAll=True )
self.assertIsInstance( retd, tuple )
self.assertTrue( len( retd ) == 2 )
lookups, pronounced = retd
self.verifyPronunciation( pronounced )
self.assertIsInstance( lookups, list )
self.assertEqual( len( lookups ), len( expectedValues ) )
for p in lookups:
self.verifyPronunciation( p )
self.assertIn( p, expectedValues )
def test_getPronunciationUnknown( self ):
"""
Test Sylvia.getPronunciation() for words not in the dictionary.
"""
unknown_words = [ "rafloy", "she'sd", "fihlbart" ]
for word in unknown_words:
#
# Test simple case
#
simplePronunciation = self.sylvia.getPronunciation( word )
self.verifyPronunciation( simplePronunciation )
#
# Test findAll case
#
retd = self.sylvia.getPronunciation( word, findAll=True )
self.assertIsInstance( retd, tuple )
self.assertTrue( len( retd ) == 2 )
lookups, pronounced = retd
self.verifyPronunciation( pronounced )
self.assertIsInstance( lookups, list )
self.assertEqual( len( lookups ), 0 )
for p in lookups:
self.verifyPronunciation( p )
def test_getPhoneticRegex_word( self ):
"""
        Test Sylvia.generatePhoneticRegex() with words as input
"""
words = [ "cat", "Saturday", "she'sd" ]
for word in words:
for pattern in self.sylvia.phoneticPatterns:
regex = self.sylvia.generatePhoneticRegex( word, pattern )
self.assertIsInstance( regex, list )
# TODO check contents once we have linting
def test_getPhoneticRegex_pronunciation( self ):
"""
        Test Sylvia.generatePhoneticRegex() with pronunciations as input
"""
words = [ [ "K", "AE", "T" ], [ "SH", "EH", "S", "D" ] ]
for word in words:
for pattern in self.sylvia.phoneticPatterns:
regex = self.sylvia.generatePhoneticRegex( word, pattern )
self.assertIsInstance( regex, basestring )
# TODO check contents once we have linting
if __name__ == '__main__':
unittest.main()
| mit | 6,785,018,480,052,477,000 | 35.521739 | 233 | 0.514762 | false |
francisco-betancourt/ctrlpyme4 | models/settings.py | 1 | 4402 | # settings list
from datetime import timedelta
from constants import FLOW_BASIC, FLOW_MULTIROLE
COMPANY_NAME = 'CtrlPyME'
COMPANY_SLOGAN = 'Press Ctrl + PyME to begin.'
COMPANY_LOGO_URL = URL('static', 'images/ctrlPyME_logo.png')
# the workflow determines how the employees will interact with the application
COMPANY_WORKFLOW = FLOW_BASIC
CASH_OUT_INTERVAL = timedelta(days=1)
EXTRA_FIELD_1_NAME = None
EXTRA_FIELD_2_NAME = None
EXTRA_FIELD_3_NAME = None
# if set true, only those users who have been created by the admin will have access to the online store.
USE_CLIENTS_WHITELIST = True
TICKET_FOOTER = T('This will be a ticket footer... soon')
# PAPER metrics in centimeters
PAPER_WIDTH = 21.59
PAPER_HEIGHT = 27.94
PAPER_MARGIN_TOP = 1
PAPER_MARGIN_RIGHT = 1
PAPER_MARGIN_BOTTOM = 1
PAPER_MARGIN_LEFT = 1
# labels, metrics in centimeters
LABEL_SPACE_X = .5
LABEL_SPACE_Y = .5
LABEL_COLS = 3
LABEL_ROWS = 8
LABEL_SHOW_ITEM_NAME = True
LABEL_SHOW_PRICE = True
PRIMARY_COLOR = '#FFFFFF'
PRIMARY_COLOR_TEXT = '#333333'
ACCENT_COLOR = '#3170F3'
ACCENT_COLOR_TEXT = '#FFFFFF'
BASE_COLOR = '#F3F3F3'
BASE_COLOR_TEXT = '#444'
USE_MATERIAL_ICONS = True
ENABLE_STRIPE = False
TOP_CATEGORIES_STRING = ''
# Use it to show credit notes in the sale ticket; useful for people who want to keep the original ticket and print the sale ticket with credit note data
MERGE_CREDIT_NOTES_IN_SALE = False
main_settings = db(db.settings.id_store == None).select().first()
if main_settings:
if main_settings.company_name:
COMPANY_NAME = main_settings.company_name
if main_settings.company_slogan:
COMPANY_SLOGAN = main_settings.company_slogan
if main_settings.company_logo:
COMPANY_LOGO_URL = URL('static', 'uploads/'+main_settings.company_logo)
# if main_settings.workflow:
# COMPANY_WORKFLOW = main_settings.workflow
if main_settings.extra_field_1:
EXTRA_FIELD_1_NAME = main_settings.extra_field_1
if main_settings.extra_field_2:
EXTRA_FIELD_2_NAME = main_settings.extra_field_2
if main_settings.extra_field_3:
EXTRA_FIELD_3_NAME = main_settings.extra_field_3
if main_settings.clients_whitelist:
USE_CLIENTS_WHITELIST = main_settings.clients_whitelist
if main_settings.ticket_footer:
TICKET_FOOTER = main_settings.ticket_footer
if main_settings.primary_color:
PRIMARY_COLOR = main_settings.primary_color
if main_settings.primary_color_text:
PRIMARY_COLOR_TEXT = main_settings.primary_color_text
if main_settings.accent_color:
ACCENT_COLOR = main_settings.accent_color
if main_settings.accent_color_text:
ACCENT_COLOR_TEXT = main_settings.accent_color_text
if main_settings.base_color:
BASE_COLOR = main_settings.base_color
if main_settings.base_color_text:
BASE_COLOR_TEXT = main_settings.base_color_text
if main_settings.top_categories_string:
TOP_CATEGORIES_STRING = main_settings.top_categories_string
if main_settings.cash_out_interval_days:
CASH_OUT_INTERVAL = timedelta(days=main_settings.cash_out_interval_days)
if main_settings.merge_credit_notes_in_sale:
MERGE_CREDIT_NOTES_IN_SALE = True
LABEL_WIDTH = (PAPER_WIDTH - (PAPER_MARGIN_LEFT + PAPER_MARGIN_RIGHT + LABEL_SPACE_X * (LABEL_COLS - 1))) / LABEL_COLS
LABEL_HEIGHT = (PAPER_HEIGHT - (PAPER_MARGIN_TOP + PAPER_MARGIN_BOTTOM + LABEL_SPACE_Y * (LABEL_ROWS - 1))) / LABEL_ROWS
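# Worked example (illustrative) with the default metrics above:
#   LABEL_WIDTH  = (21.59 - (1 + 1 + 0.5 * (3 - 1))) / 3 ~= 6.20 cm
#   LABEL_HEIGHT = (27.94 - (1 + 1 + 0.5 * (8 - 1))) / 8 ~= 2.81 cm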
# disable registration if the store only allows whitelisted clients
if USE_CLIENTS_WHITELIST:
auth.settings.actions_disabled = ['register']
BASE_BRANDED_EMAIL = """
<table>
<tbody>
<tr> <td><img src="%s" alt=""></td> <td colspan=2><h2>%s</h2></td> </tr>
{content}
<tr><td colspan=3>LEGAL thing</td></tr>
</tbody>
</table>
""" % (URL('static', 'uploads/' + COMPANY_LOGO_URL, host=True), COMPANY_NAME)
ORDER_EMAIL_CONTENT = '''
<tr> <td colspan=3><h3>Order {code}</h3></td> </tr>
<tr> <td colspan=3><h3>Thank you {user_name}</h3></td> </tr>
<tr><td colspan=3>{order_concept} </td></tr>
<tr></tr>
<tr><td colspan=3>'''+ T('Details') +'''</td></tr>
{items}
<tr> <td></td> <td>'''+ T('Total') +'''</td> <td>$ {total}</td> </tr>
<tr><td colspan=3>'''+ T('You can check your orders in') +'<a href="'+ URL('user', 'client_profile') +'"></td></tr>'
| gpl-3.0 | -7,520,343,691,024,349,000 | 31.367647 | 161 | 0.677192 | false |
Forritarar-FS/Kastali | pythonHus/room22.py | 1 | 27175 | import os
from . import room
def do():
got_thing = False
x = 30
while x < 40:
        path = r"C:\Python" + str(x) + r"\Lib\site-packages\easygui.py" #credit Hlynur
        path2 = r"C:\Python" + str(x) + r"\Lib\easygui.py"
cool = os.path.exists(path)
cool2 = os.path.exists(path2)
print(path)
if cool == False and cool2 == False:
print (x)
else:
got_thing = True
x += 1
if got_thing == False:
easy = input("Are you sure you have Easygui installed? Y/N").lower()
if easy == "y" or easy == "yes":
pass
else:
print("You should install it before entering the room")
leaveWOeg()
def leaveWOeg():
directionQ = input("Where do you want to go? (west, north, South, east)").lower()
if directionQ == "w" or directionQ == "west":
fyrirUtan.go("w") #credit Hlynur
elif directionQ == "north" or directionQ == "n":
fyrirUtan.go("n")
elif directionQ == "south" or directionQ == "s":
fyrirUtan.go("s")
elif directionQ == "east" or directionQ == "e":
fyrirUtan.go("e")
else:
leaveWOeg()
do()
import easygui as eg
import os
import time
from . import Basement
from random import randint
Monster_Alive = True
Alive = True
Damage = 0
Enemy_damage = 0
dead = False
Darkness = 0
steps = 0
DownChoice = ["Pick up knife","Open door", "Go towards the hole","Go back"]
fyrirUtan = room.grunnur(22)
fyrirUtan.info = "This looks fancy. I guess."
def dodo():
eg.msgbox("Warning this room is incomplete! Check back soon for updates and fixes!")
if dead == False:
print("its true")
elif dead == True:
        eg.msgbox("You feel darkness. You feel as if you should not enter")
print("Its false")
else:
pass
enterQ = eg.ynbox("Do you want to enter?")
if enterQ == 1:
eg.msgbox("You continue on to the room.", ok_button="Continue", title=fyrirUtan.info)
eg.msgbox("You open a large door.", ok_button="Continue", title=fyrirUtan.info)
eg.msgbox("(Door opening sounds)", ok_button="Continue", title=fyrirUtan.info)
mainRoom()
else:
eg.msgbox("Fine Then", ok_button="Continue", title=fyrirUtan.info)
leave()
def mainRoom():
updownmiddle = ["Down","Front","Up"]
way = eg.buttonbox("You see 3 doors. One leading UP, one leading DOWN to the basement and one to the FRONT", choices=updownmiddle, title=fyrirUtan.info)
if way == "Down":
Down()
if way == "Front":
Middle()
if way == "Up":
Up()
def set_dead():
    # record that the player died so dodo() can warn on the next visit
    global dead
    dead = True
def leave():
direction = ["West", "North", "South", "East"]
directionQ = eg.buttonbox("Where do you want to go?", choices=direction)
if directionQ == "West":
fyrirUtan.go("w")
elif directionQ == "North":
fyrirUtan.go("n")
elif directionQ == "South":
fyrirUtan.go("s")
else:
fyrirUtan.go("e")
def Down():
def downchoiceFunc():
if "torch" in fyrirUtan.items:
knifedoorhole = eg.buttonbox("You see a Knife, a door and a hole in the ground.", choices=DownChoice, title=fyrirUtan.info)
if knifedoorhole == "Pick up knife":
eg.msgbox("You put the knife into your pocket", title=fyrirUtan.info)
fyrirUtan.items.append("knife")
del DownChoice[0]
downchoiceFunc()
elif knifedoorhole == "Open door":
Basement.BDoor(22)
elif knifedoorhole == "Go towards the hole":
eg.msgbox("You walk to the hole and look down it.", title=fyrirUtan.info)
time.sleep(0.5)
eg.msgbox("You see light at the bottom. Looks safe enough to jump.", title=fyrirUtan.info)
if eg.ynbox("Jump in the hole?", title=fyrirUtan.info) == 1:
Basement.fall()
else:
eg.msgbox("You decide not to jump", title=fyrirUtan.info)
downchoiceFunc()
elif knifedoorhole == "Go back":
            eg.msgbox("You went back up.", title=fyrirUtan.info)
mainRoom()
else:
eg.msgbox("It sure is dar...", title=fyrirUtan.info)
eg.msgbox("SHIT!", title=fyrirUtan.info)
eg.msgbox("You fell into a hole.", title=fyrirUtan.info)
time.sleep(0.75)
Basement.fall()
eg.msgbox("You walk down the stairs and open the door.", title=fyrirUtan.info)
eg.msgbox("You walk into a dark room. There is a lit torch on the wall.", title=fyrirUtan.info)
print (fyrirUtan.items)
if "torch" not in fyrirUtan.items:
if eg.ynbox("Do you want to take the torch?", title=fyrirUtan.info) == 1:
eg.msgbox("You take it up", title=fyrirUtan.info)
fyrirUtan.items.append("torch")
if eg.ynbox("Do you want to go deeper?", title=fyrirUtan.info) == 1:
eg.msgbox("You go in deeper", title=fyrirUtan.info)
downchoiceFunc()
else:
eg.msgbox("You go back up.", title=fyrirUtan.info)
mainRoom()
else:
eg.msgbox("It sure is dark in here.", title=fyrirUtan.info)
def Middle():
def Attack():
global Monster_Alive
global Alive
global Damage
global Enemy_damage
while Damage < 5 and Enemy_damage < 7:
def counter_Attack():
global Damage
global Enemy_damage
counter_attack_Direction = randint(1,3)
CA_Direction = ("Attack Left!", "Attack Middle!", "Attack Right!")
if counter_attack_Direction == 1:
vulnerable = "Left"
elif counter_attack_Direction == 2:
vulnerable = "Middle"
else:
vulnerable = "Right"
start_time = time.time()
Counter_Attack = eg.buttonbox("The monster is vulnerable to attack from the "+ vulnerable +"!", choices=CA_Direction, title=fyrirUtan.info)
total_time =time.time() - start_time
print (total_time)
print (vulnerable)
print (Counter_Attack)
if Counter_Attack == "Attack Left!" and vulnerable == "Left":
Enemy_Hit()
elif Counter_Attack == "Attack Middle!" and vulnerable == "Middle":
Enemy_Hit()
elif Counter_Attack == "Attack Right!" and vulnerable == "Right":
Enemy_Hit()
else:
eg.msgbox("You missed and the monster regained his strength!", title=fyrirUtan.info)
def Enemy_Hit():
global Enemy_damage
if total_time < AttackTime:
eg.msgbox("You Attacked and hit the monster!", title=fyrirUtan.info)
Enemy_damage += 1
status("enemy")
else:
eg.msgbox("You took too long so the monster regained his strength!", title=fyrirUtan.info)
def status(damage):
global Damage
global Enemy_damage
print (Damage)
                    if damage == "enemy":
                        if Enemy_damage == 1:
                            eg.msgbox("The monster barely flinches", title=fyrirUtan.info)
                        elif Enemy_damage == 3:
                            eg.msgbox("The monster shows signs of pain", title=fyrirUtan.info)
                        elif Enemy_damage == 5:
                            eg.msgbox("The monster is severely bruised and is showing signs of major pain", title=fyrirUtan.info)
                        elif Enemy_damage == 7:
                            eg.msgbox("The monster fell down on its knees, looks up, then falls on his side.", title=fyrirUtan.info)
                            eg.msgbox("Breathing a few final breaths it finally gave up and exhaled slowly.", title=fyrirUtan.info)
                        else:
                            pass
                    else:
                        print("Ran")
                        if Damage == 1:
                            eg.msgbox("My face got scratched up!", title=fyrirUtan.info)
                        elif Damage == 2:
                            eg.msgbox("I can feel the warmth of blood running along my clothes", title=fyrirUtan.info)
                        elif Damage == 4:
                            eg.msgbox("I can feel myself fading in and out. I can hear my blood dripping on the ground...", title=fyrirUtan.info)
                        elif Damage == 5:
                            eg.msgbox("The Pain is leaving. My Feelings are leaving...", title=fyrirUtan.info)
                            eg.msgbox("The light is dark.", title=fyrirUtan.info)
                            eg.msgbox("I am leaving.", title=fyrirUtan.info)
                            #finish here sometime
leftright = ("Move Left", "Move Right")
AttackType = randint(1,2);
if AttackType == 1:
AttackType = "Left"
else:
AttackType = "Right"
AttackTime = 2
start_time = time.time()
dodge = eg.buttonbox("It looks like its about to attack from the "+AttackType.upper(), choices=leftright, title=fyrirUtan.info)
total_time = time.time() - start_time
print(Enemy_damage)
print(Damage)
if total_time < AttackTime:
if dodge == "Move Left" and AttackType == "Left":
eg.msgbox("The monster hit you!", title=fyrirUtan.info)
Damage+=1
status("")
elif dodge == "Move Right" and AttackType == "Right":
eg.msgbox("The monster hit you!", title=fyrirUtan.info)
Damage+=1
status("")
else:
eg.msgbox("You dodged", title=fyrirUtan.info)
chance = randint(1,3)
if chance == 1:
Counter_Attack = counter_Attack()
else:
eg.msgbox("You took too long so the monster hit you!", title=fyrirUtan.info)
Damage+=1
status("")
if Enemy_damage < 7:
pass
else:
eg.msgbox("You notice a key on a string around the monsters neck.", title=fyrirUtan.info)
eg.msgbox("You picked up the key", title=fyrirUtan.info)
eg.msgbox("You leave the room leaving the monster laying on the ground.", title=fyrirUtan.info)
fyrirUtan.items.append("Key22")
mainRoom()
    if Enemy_damage == 7 or Damage == 5:
        eg.msgbox("The monster lies on the floor. Dead.", title=fyrirUtan.info)
        mainRoom()
else:
        eg.msgbox("You enter an empty room.", title=fyrirUtan.info)
eg.msgbox("You hear something.", title=fyrirUtan.info)
eg.msgbox("It is a monster!", title=fyrirUtan.info)
if "torch" in fyrirUtan.items:
eg.msgbox("You drop the torch you were holding.", title=fyrirUtan.info)
else:
pass
eg.msgbox("It attacks!", title=fyrirUtan.info)
Attack()
def Up():
def main():
eg.msgbox("You go up the stairs.", title=fyrirUtan.info)
fyrirUtan.items.append("Key22")
fyrirUtan.items.append("torch")
eg.msgbox("The door is locked. ", title=fyrirUtan.info)
print(fyrirUtan.items)
if "Key22" in fyrirUtan.items:
eg.msgbox("You try the key.", title=fyrirUtan.info)
eg.msgbox("it fits perfectly and the door opens.", title=fyrirUtan.info)
else:
eg.msgbox("I wonder if the key is somewhere in the room.", title=fyrirUtan.info)
eg.msgbox("you go back", title=fyrirUtan.info)
mainRoom()
if "torch" in fyrirUtan.items:
            eg.msgbox("You walk in. There is a carpet leading down the middle of the room. Your torch lights up the room excluding the other side.", title=fyrirUtan.info)
else:
eg.msgbox("You walk in. There is a carpet leading down the middle of the room. Everything is dimly lit up with candles on either side.", title=fyrirUtan.info)
eg.msgbox("Everything is quiet.", title=fyrirUtan.info)
eg.msgbox("...", title=fyrirUtan.info)
eg.msgbox("You walk along the carpet", title=fyrirUtan.info)
eg.msgbox("You see a chair and a dark figure sitting in it.", title=fyrirUtan.info)
eg.msgbox("It does not move.", title=fyrirUtan.info)
if eg.ynbox("Do you want to go closer?", title=fyrirUtan.info):
eg.msgbox("You decide to approach", title=fyrirUtan.info)
eg.msgbox("AS you get closer the door slams shut and the figure starts moving.", title=fyrirUtan.info)
else:
eg.msgbox("You decide not to approach and turn around.", title=fyrirUtan.info)
eg.msgbox("As you turn around you see the door slam shut and the figure starts moving.", title=fyrirUtan.info)
one = ["...","I","Who are you!"]
time.sleep(0.5)
one1 = eg.buttonbox("Who goes there!", choices=one)
if one1 == "...":
oneone = ["Fear","Stupidity","Braveness","..."]
            oneone1 = eg.buttonbox("Do you not speak out of Fear, Stupidity or Braveness?", choices=oneone, title=fyrirUtan.info)
            if oneone1 == "Fear" or oneone1 == "...":
                if oneone1 == "...":
eg.msgbox("'Why do you Fear me?'", title=fyrirUtan.info)
if "torch" in fyrirUtan.items:
end_torch()
else:
end1()
elif oneone1 == "Braveness":
                eg.msgbox("Brave huh? More like stupidity, entering my room!", title=fyrirUtan.info)
                eg.msgbox("I'll give you a chance to leave, despite your stupidity", title=fyrirUtan.info)
                Quick_leave = ["Leave!","Stay"]
                Quick_leaveQ = eg.buttonbox("I'll give you 5 seconds to leave!", choices=Quick_leave, title=fyrirUtan.info)
elif oneone1 == "Stupidity":
eg.msgbox("'A stupid person does not know he is stupid.'", title=fyrirUtan.info)
eg.msgbox("'Answer me this.'")
riddle = eg.enterbox("You cannot see me, hear me, or touch me. I lie behind the stars and alter what is real, I am what you really fear. Close your eyes and I come near. What am I?", title=fyrirUtan.info)
if riddle.lower() == "dark" or riddle.lower() == "darkness" or riddle.lower() == "you":
eg.msgbox("'Correct'")
eg.msgbox("'I will answer one question that you have for me.'", title=fyrirUtan.info)
                else:
                    eg.msgbox("Guess I was wrong.", title=fyrirUtan.info)
                    end1()
        elif one1 == "I":
            name = ["None of your business","First off, who are you?", "My name is..."]
            nameQ = eg.buttonbox("Who are you?", choices=name)
            if nameQ == "None of your business":
                eg.msgbox("Is it not? You walk into my room and disturb me!")
                name2 = ["I said none of your business","First tell me who are you?","My name is..."]
                nameQ2 = eg.buttonbox("'Now tell me. Who are you'", choices=name2, title=fyrirUtan.info)
                if nameQ2 == "I said none of your business":
                    end1()
        elif one1 == "Who are you!":
            okno = ["Alright","No"]
            oknoQ = eg.buttonbox("First tell me who you are", choices=okno, title=fyrirUtan.info)
            if oknoQ == "Alright":
                Name = eg.enterbox("Now tell me, What is your name", title=fyrirUtan.info)
            else:
                eg.msgbox("Then you will not know my name", title=fyrirUtan.info)
                eg.msgbox("Yet you will know my power!", title=fyrirUtan.info)
                end1()
        eg.msgbox("I am Erebus. God of darkness.", title=fyrirUtan.info)
def end1():
if "torch" in fyrirUtan.items:
end_torch()
else:
eg.msgbox("The figure fades into the darkness.", title=fyrirUtan.info)
eg.msgbox("The room quickly becomes pitch dark.", title=fyrirUtan.info)
eg.msgbox("You can feel the darkness surround you, pour into you, You can feel your mind getting dark.", title=fyrirUtan.info)
eg.msgbox("You see darkness, You feel darkness", title=fyrirUtan.info)
eg.msgbox("You see nothing, You feel nothing", title=fyrirUtan.info)
def end2():
eg.msgbox("You can feel the darkness surround you, pour into you, You can feel your mind getting dark.", title=fyrirUtan.info)
eg.msgbox("You see darkness, You feel darkness", title=fyrirUtan.info)
eg.msgbox("You see nothing, You feel nothing", title=fyrirUtan.info)
        set_dead()
def end_torch():
global Darkness
global steps
endtorch = ["No!", "Why?", "Yea"]
eg.msgbox("The figure fades into the darkness.", title=fyrirUtan.info)
eg.msgbox("The room, quickly becomes pitch dark excluding around your torch.", title=fyrirUtan.info)
endtorchQ = eg.buttonbox("'Why dont you remove that torch?'", choices=endtorch)
if endtorchQ == "No!":
eg.msgbox("'I guess i can wait then.'", title=fyrirUtan.info)
elif endtorchQ == "Why?":
kill = ["Alright...(give up)", "Never!"]
killQ = eg.buttonbox("So i can kill you quicker!", choices=kill, title=fyrirUtan.info)
if killQ == "Alright...(give up)":
eg.msgbox("You place the torch down. As soon as you do", title=fyrirUtan.info)
end2()
else:
eg.msgbox("So be it.")
                end1()
elif endtorchQ == "Yea":
eg.msgbox("You put the torch on the ground.")
eg.msgbox("As soon as you put it down you feel the darkness getting closer!", title=fyrirUtan.info)
Start_time = time.time()
pickup = ["Give up!", "Cant move.","Pick up torch!", "The pain...", "..."]
            pickupQ = eg.buttonbox("You can feel your soul, vision and thoughts getting dark! The pain is too much, I have got to get the darkness away! FAST!", choices=pickup)
total_time = time.time() - Start_time
if pickupQ == "Pick up torch!":
if total_time > 1.5:
if total_time > 2:
if total_time > 2.5:
if total_time > 3.5:
eg.msgbox("You cannot find the strength or mind to pick up the torch.", title=fyrirUtan.info)
end2()
else:
eg.msgbox("You can feel the darkness removing some of your strength and mind as you raise the torch back up", title=fyrirUtan.info)
Darkness += 3
else:
eg.msgbox("You could feel the darkness in your mind, body and soul weakening them.", title=fyrirUtan.info)
Darkness += 2
else:
eg.msgbox("You could feel the darknes inside you for a second.", title=fyrirUtan.info)
Darkness += 1
else:
eg.msgbox("You were able to pick up the torch before it was able to touch you.", title=fyrirUtan.info)
else:
eg.msgbox("The darkness is too much, You dont have the power or mind to pick up the torch!", title=fyrirUtan.info)
whatnow = ["Stay still","Try to get out", "Move further in"]
whatnowQ = eg.buttonbox("What now?", choices=whatnow)
if whatnowQ == "Stay still":
eg.msgbox("You stand still, While doing so the darkness attacks you!, You cant afford to stand still", title=fyrirUtan.info)
Darkness += 1
whatnow2 = ["Try to get out","Move further in"]
whatnow2Q = eg.buttonbox("You choose to...",choices=whatnow2)
if whatnow2Q == "Try to get out":
eg.msgbox("You move back to the door, Try to open it but it is locked. While doing that the darkness attacks you!", title=fyrirUtan.info)
Darkness += 1
elif whatnowQ == "Try to get out":
eg.msgbox("You move back to the door, Try to open it but it is locked. While doing that the darkness attacks you!", title=fyrirUtan.info)
Darkness += 1
eg.msgbox("You choose to move to the chair", title=fyrirUtan.info)
            End_Attack()
elif whatnowQ == "Move further in":
eg.msgbox("You move towards the chair", ok_button="Continue")
End_Attack()
def End_Attack():
global Monster_Alive
global Alive
global Damage
global Enemy_damage
global Darkness
global steps
while Darkness < 5 and steps < 15:
def End_status():
global Darkness
if Darkness == 1:
                    eg.msgbox("The darkness exposes me.", title=fyrirUtan.info)
                elif Darkness == 2:
                    eg.msgbox("I can feel my mind getting dark, My memories getting blanked and replaced with darkness", title=fyrirUtan.info)
elif Darkness == 4:
eg.msgbox("I can feel my power getting smaller as the darkness Darkens my Life.", title=fyrirUtan.info)
elif Darkness ==5:
eg.msgbox("My life, Is dark. My heart, Is dark,My Mind, is dark.", title=fyrirUtan.info)
eg.msgbox("The light is dark.", title=fyrirUtan.info)
eg.msgbox("My life is no longer mine, Its possessed by darkness.", title=fyrirUtan.info)
leftright = ("Move torch Left","Move torch Front", "Move torch Right")
AttackType = randint(1,3);
if AttackType == 1:
AttackType = "LEFT"
elif AttackType == 2:
AttackType = "MIDDLE"
else:
AttackType = "RIGHT"
AttackTime = 2
start_time = time.time()
dodge = eg.buttonbox("The darkness gather to the "+AttackType, choices=leftright, title=fyrirUtan.info)
total_time = time.time() - start_time
if total_time < AttackTime:
if dodge == "Move torch Left" and AttackType == "LEFT":
eg.msgbox("You light up the darkness before its able to attack!", title=fyrirUtan.info)
steps += 1
elif dodge == "Move torch Front" and AttackType == "MIDDLE":
eg.msgbox("You light up the darkness before its able to attack!", title=fyrirUtan.info)
steps += 1
elif dodge == "Move torch Right" and AttackType == "RIGHT":
eg.msgbox("You light up the darkness before its able to attack!", title=fyrirUtan.info)
steps += 1
else:
eg.msgbox("", title=fyrirUtan.info)
eg.msgbox("The darkness enters your body!", title=fyrirUtan.info)
Darkness += 1
End_status()
else:
eg.msgbox("The darkness enters you before you are able to light it up!" , title=fyrirUtan.info)
Darkness+=1
End_status()
if steps == 15:
eg.msgbox("You make it up to the Chair and light it up", title=fyrirUtan.info)
eg.msgbox("You see a man, A mummy like man sitting in the chair.", title=fyrirUtan.info)
eg.msgbox("He looks up to you.", title=fyrirUtan.info)
question = ("Why","I will","I know(give up)")
        questionQ = eg.buttonbox("'You will never escape'", choices=question)
        if questionQ == "Why":
eg.msgbox("Because Darkness will always fall. Eventually", title=fyrirUtan.info)
elif questionQ == "I will":
eg.msgbox("And how are you going to do that?", title=fyrirUtan.info)
else:
eg.msgbox("'Yes...'", title=fyrirUtan.info)
eg.msgbox("While waiting the torch dies down. And as soon as it does.", title=fyrirUtan.info)
end2()
ending = ["hand","torch"]
for x in fyrirUtan.items:
if x == "knife":
ending.append("knife")
elif x == ("revolver"):
ending.append("revolver")
elif x == "stick":
ending.append("StickOfSlightConvenience")
elif x == "gun":
ending.append("gun")
        elif x == "torch":
ending.append("torch")
else:
print("You broke this code")
endingQ = eg.buttonbox("End him with...", choices=ending)
if endingQ == "knife":
eg.msgbox("You take out your knife, Move it up to the Mans neck.")
eg.msgbox("Cutting into the neck, He breathes in.")
eg.msgbox("Then he exhales as you continue your cut, The air escapes through the cut you made.")
eg.msgbox("Spewing small amount of blood onto your arm.")
        eg.msgbox("You then proceed to puncture the man's chest, directly into the heart. He flinches a little bit")
eg.msgbox("As soon as he stops moving the Room lights up and all the darkness dissapates")
eg.msgbox("The door opens, You leave the room.")
        leave()
elif endingQ == "revolver" or endingQ == "gun":
eg.msgbox("You take out your gun, Point it at the mans head.")
eg.msgbox("You Squeeze the trigger slowly")
time.sleep(1)
eg.msgbox("The gun goes off hitting the man in the head.")
eg.msgbox("The Room lights up and all the darkness dissapates")
eg.msgbox("The door opens, You leave the room.")
leave()
elif endingQ == "StickOfSlightConvenience":
eg.msgbox("You gently tap his head with the stick.")
eg.msgbox("He explodes into candy.")
eg.msgbox("You eat it all and then leave.")
leave()
elif endingQ == "hand":
eg.msgbox("You put your hands around the mans head, And in a quick motion you break his neck. He flinches a little.")
eg.msgbox("You hear a little crack, As soon as he stops moving the Room lights up and all the darkness dissapates ")
eg.msgbox("The door opens, You leave the room.")
leave()
else:
pass
main()
dodo() | unlicense | 2,538,950,909,533,070,300 | 48.231884 | 220 | 0.547305 | false |
malept/pyoath-toolkit | oath_toolkit/wtforms.py | 1 | 5728 | # -*- coding: utf-8 -*-
#
# Copyright 2013, 2014 Mark Lee
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
`WTForms`_-related code for one-time password fields.
.. _WTForms: https://pypi.python.org/pypi/WTForms
"""
from __future__ import absolute_import
from . import OATH
from ._compat import to_bytes
from .exc import OATHError
from abc import ABCMeta, abstractmethod
import time
from wtforms import ValidationError
class OTPValidator(object):
"""
WTForms abstract base field validator for a OTP field.
:param int digits: The expected number of digits in the OTP.
:param int window: The number of OTPs before and after the start OTP
to test.
:param bool verbose_errors: Whether to raise verbose validation errors.
:param callable get_secret: If specified, a callable which returns the
OATH secret used to validate the OTP.
"""
__metaclass__ = ABCMeta
def __init__(self, digits, window, verbose_errors=False, get_secret=None):
self.digits = digits
self.window = window
self.verbose = verbose_errors
self.oath = OATH()
self.get_secret = get_secret
def get_oath_secret(self, form, field):
"""
Retrieve the OATH secret from a given form/field.
Either uses the callback passed in when creating the validator, or the
``oath_secret`` attribute of the ``user`` attribute of the ``form``.
:rtype: bytes
"""
if self.get_secret:
secret = self.get_secret(form, field)
else:
secret = form.user.oath_secret
return to_bytes(secret)
@abstractmethod
def otp_validate(self, form, field):
"""
This should call the appropriate OTP validation method.
:return: :data:`True` on success
:raises: :class:`OATHError` on failure
"""
raise NotImplementedError
def _error_msg(self, field, msg):
if self.verbose:
return field.gettext(msg)
else:
# generic error
return field.gettext(u'OTP is invalid.')
def __call__(self, form, field):
if not field.data:
raise ValidationError(field.gettext(u'Field is required.'))
elif len(field.data) != self.digits:
msg = self._error_msg(field, u'OTP must be {digits} digits.')
raise ValidationError(msg.format(digits=self.digits))
try:
self.otp_validate(form, field)
except OATHError as e:
msg = self._error_msg(field, u'Error validating OTP: {err}')
raise ValidationError(msg.format(err=str(e)))
class HOTPValidator(OTPValidator):
"""
Validator for HOTP-based passwords.
:param int digits: The expected number of digits in the OTP.
:param int window: The number of OTPs after the start offset OTP
to test.
:param int start_moving_factor: Unsigned, can be :func:`long`, in theory.
The start counter in the OTP stream.
:param bool verbose_errors: Whether to raise verbose validation errors.
:param callable get_secret: If specified, a callable which returns the
OATH secret used to validate the OTP.
"""
def __init__(self, digits, window, start_moving_factor,
verbose_errors=False, get_secret=None):
super(HOTPValidator, self).__init__(digits, window, verbose_errors,
get_secret)
self.start_moving_factor = start_moving_factor
def otp_validate(self, form, field):
self.oath.hotp_validate(self.get_oath_secret(form, field),
self.start_moving_factor, self.window,
to_bytes(field.data))
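def _example_hotp_form():
    # Hedged usage sketch, not part of the original module: one way to attach
    # HOTPValidator to a WTForms field. The form, field name and secret below
    # are made up for illustration.
    from wtforms import Form, StringField

    def _secret(form, field):
        return b'12345678901234567890'  # hypothetical 20-byte OATH secret

    class LoginForm(Form):
        otp = StringField(u'One-time password',
                          [HOTPValidator(digits=6, window=2,
                                         start_moving_factor=0,
                                         get_secret=_secret)])
    return LoginForm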
class TOTPValidator(OTPValidator):
"""
Validator for TOTP-based passwords.
:param int digits: The expected number of digits in the OTP.
:param int window: The number of OTPs before and after the start OTP
to test.
:param bool verbose_errors: Whether to raise verbose validation errors.
:param callable get_secret: If specified, a callable which returns the
OATH secret used to validate the OTP.
:param int start_time: The UNIX timestamp of when to start counting
time steps (usually should be ``0``).
:param int time_step_size: Unsigned, the time step system parameter. If
set to a negative value, defaults to ``30``.
"""
def __init__(self, digits, window, verbose_errors=False, get_secret=None,
start_time=0, time_step_size=30):
super(TOTPValidator, self).__init__(digits, window, verbose_errors,
get_secret)
self.start_time = int(start_time)
self.time_step_size = time_step_size
def otp_validate(self, form, field):
self.oath.totp_validate(self.get_oath_secret(form, field), time.time(),
self.time_step_size, self.start_time,
self.window, to_bytes(field.data))
| apache-2.0 | 8,989,596,587,930,916,000 | 36.437908 | 79 | 0.6147 | false |
tgodzik/projects | mouse_hand_steering/utils/gesture.py | 1 | 1684 | import math, cv2
def distance(point1, point2):
"""
Euclidean distance.
"""
point1 = point1[0]
point2 = point2[0]
return math.sqrt(math.pow(point1[0] - point2[0], 2) + math.pow(point1[1] - point2[1], 2))
class Gesture:
"""
Represent the current state of the hand.
"""
def __init__(self):
self.hull_p = None
self.hull_i = None
self.biggest = None
self.bounding = None
self.defects = None
self.contours = None
def check_convexity(self):
"""
Remove the convexity that is not important.
"""
tolerance = (self.bounding[0] - self.bounding[2]) / 5
new_defects = []
if self.defects is not None:
for i in self.defects:
j = i[0]
start = self.contours[self.biggest][j[1]]
end = self.contours[self.biggest][j[0]]
far = self.contours[self.biggest][j[2]]
if distance(start, far) > tolerance and distance(end, far) > tolerance:
new_defects.append(i)
self.defects = new_defects
def is_hand(self):
"""
Checks if it is a hand.
"""
h = abs(self.bounding[0] - self.bounding[2])
w = abs(self.bounding[1] - self.bounding[3])
hand = True
if h is 0 or w is 0:
hand = False
elif h / w > 4 or w / h > 4:
hand = False
return hand
def get_center(self):
"""
Get the center of the hand.
"""
p = cv2.moments(self.contours[self.biggest], binaryImage=True)
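        # centroid of the largest contour from its spatial moments:
        # (m10 / m00, m01 / m00)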
return p["m10"] / p["m00"], p["m01"] / p["m00"] | apache-2.0 | 1,007,076,117,418,026,600 | 26.177419 | 93 | 0.511283 | false |
djangorris/ProFac_Rating | ProFac_Rating.py | 1 | 3204 | import glob
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.patches as mpatches
from matplotlib import style
import numpy as np
import os
import re
'''
The SERFF .xlsm files should be downloaded into a directory named "networks"
inside the current working directory. Each file should be named
Carrier_Name.xlsm, using underscores in place of spaces.
'''
# empty dict to fill with Carrier Name and Rating later
plot_dict = {}
# loop files in /networks
for file in glob.glob('networks/*.xlsm'):
ext_removed = file.replace('.xlsm','')
carrier_var = ext_removed.replace('networks/','')
carrier_name = carrier_var.replace('_',' ')
# Parse all providers
providers = pd.read_excel(file,
sheetname='IndividualProviders1',
header=1,
parse_cols = [0,12],
)
# Parse all facilities and pharmacies
facilities = pd.read_excel(file,
sheetname='Facilities&Pharmacies1',
header=1,
parse_cols = [0,7],
)
providers.columns = ['NPI','County']
facilities.columns = ['NPI','County']
# Count unique providers
unique_providers = providers.NPI.nunique()
# Count unique facilities and pharmacies
unique_facilities = facilities.NPI.nunique()
if (file == 'networks/Cigna.xlsm') or (file == 'networks/Anthem_Sm_Group.xlsm'):
providers2 = pd.read_excel(file,
sheetname='IndividualProviders2',
header=1,
parse_cols = [0,12],
)
facilities2 = pd.read_excel(file,
sheetname='Facilities&Pharmacies2',
header=1,
parse_cols = [0,7],
)
providers2.columns = ['NPI','County']
facilities2.columns = ['NPI','County']
# Count unique providers
unique_providers2 = providers2.NPI.nunique()
# Count unique facilities and pharmacies
unique_facilities2 = facilities2.NPI.nunique()
# Sum unique1 and unique2
unique_providers = unique_providers + unique_providers2
unique_facilities = unique_facilities + unique_facilities2
# printing html for blog post
print('<ul><li>' + carrier_name + ' has ' + str(unique_providers) + ' unique providers in Colorado</li>')
# printing html for blog post
print('<li>' + carrier_name + ' has ' + str(unique_facilities) + ' unique facilities in Colorado</li>')
# Sum unique providers and unique facilities/pharmacies to get overall "ProFac Rating"
ProFac_Rating = unique_providers + unique_facilities
# printing html for blog post
print('<li>' + carrier_name + ' has ' + str(ProFac_Rating) +
' total unique providers + facilities in Colorado</li></ul>')
## Update dict ##
plot_dict[carrier_name] = [ProFac_Rating]
## Make Dataframe ##
df = pd.DataFrame(plot_dict).T
print('Colorado Totals By Carrier')
print(df)
# PLOT #
style.use('fivethirtyeight')
col = ['darkblue','darkblue','r','g','c','royalblue','m','m','goldenrod','darkslategray']
df.plot(kind='bar', color=col, legend=None)
plt.ylabel('Unique Providers and\nFacilities/Pharmacies')
plt.title('Colorado 2017 Network Size Measured In Unique\n"IndividualProviders" and "Facilities&Pharmacies" Based on SERFF Data')
plt.grid(True)
plt.show() | gpl-3.0 | 5,971,490,916,984,594,000 | 38.085366 | 129 | 0.679151 | false |
qedsoftware/commcare-hq | corehq/apps/sms/models.py | 1 | 85808 | #!/usr/bin/env python
import base64
import jsonfield
import uuid
from dimagi.ext.couchdbkit import *
from datetime import datetime, timedelta
from django.db import models, transaction
from django.http import Http404
from collections import namedtuple
from corehq.apps.app_manager.dbaccessors import get_app
from corehq.apps.app_manager.models import Form
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.util.mixin import UUIDGeneratorMixin
from corehq.apps.users.models import CouchUser
from corehq.apps.sms.mixin import (CommCareMobileContactMixin,
InvalidFormatException,
apply_leniency, BadSMSConfigException)
from corehq.apps.sms import util as smsutil
from corehq.apps.sms.messages import (MSG_MOBILE_WORKER_INVITATION_START,
MSG_MOBILE_WORKER_ANDROID_INVITATION, MSG_MOBILE_WORKER_JAVA_INVITATION,
MSG_REGISTRATION_INSTALL_COMMCARE, get_message)
from corehq.const import GOOGLE_PLAY_STORE_COMMCARE_URL
from corehq.util.quickcache import quickcache
from corehq.util.view_utils import absolute_reverse
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.load_balance import load_balance
from django.utils.translation import ugettext_noop, ugettext_lazy
INCOMING = "I"
OUTGOING = "O"
CALLBACK_PENDING = "PENDING"
CALLBACK_RECEIVED = "RECEIVED"
CALLBACK_MISSED = "MISSED"
FORWARD_ALL = "ALL"
FORWARD_BY_KEYWORD = "KEYWORD"
FORWARDING_CHOICES = [FORWARD_ALL, FORWARD_BY_KEYWORD]
WORKFLOW_CALLBACK = "CALLBACK"
WORKFLOW_REMINDER = "REMINDER"
WORKFLOW_KEYWORD = "KEYWORD"
WORKFLOW_FORWARD = "FORWARD"
WORKFLOW_BROADCAST = "BROADCAST"
WORKFLOW_PERFORMANCE = "PERFORMANCE"
WORKFLOW_DEFAULT = 'default'
DIRECTION_CHOICES = (
(INCOMING, "Incoming"),
(OUTGOING, "Outgoing"))
class Log(models.Model):
class Meta:
abstract = True
app_label = "sms"
domain = models.CharField(max_length=126, null=True, db_index=True)
date = models.DateTimeField(null=True, db_index=True)
couch_recipient_doc_type = models.CharField(max_length=126, null=True, db_index=True)
couch_recipient = models.CharField(max_length=126, null=True, db_index=True)
phone_number = models.CharField(max_length=126, null=True, db_index=True)
direction = models.CharField(max_length=1, null=True)
error = models.NullBooleanField(default=False)
system_error_message = models.TextField(null=True)
system_phone_number = models.CharField(max_length=126, null=True)
backend_api = models.CharField(max_length=126, null=True)
backend_id = models.CharField(max_length=126, null=True)
billed = models.NullBooleanField(default=False)
# Describes what kind of workflow this log was a part of
workflow = models.CharField(max_length=126, null=True)
# If this log is related to a survey, this points to the couch_id
# of an instance of SQLXFormsSession that this log is tied to
xforms_session_couch_id = models.CharField(max_length=126, null=True, db_index=True)
# If this log is related to a reminder, this points to the _id of a
# CaseReminder instance that it is tied to
reminder_id = models.CharField(max_length=126, null=True)
location_id = models.CharField(max_length=126, null=True)
# The MessagingSubEvent that this log is tied to
messaging_subevent = models.ForeignKey('sms.MessagingSubEvent', null=True, on_delete=models.PROTECT)
def set_system_error(self, message=None):
self.error = True
self.system_error_message = message
self.save()
@classmethod
def by_domain(cls, domain, start_date=None, end_date=None):
qs = cls.objects.filter(domain=domain)
if start_date:
qs = qs.filter(date__gte=start_date)
if end_date:
qs = qs.filter(date__lte=end_date)
return qs
@classmethod
def by_recipient(cls, contact_doc_type, contact_id):
return cls.objects.filter(
couch_recipient_doc_type=contact_doc_type,
couch_recipient=contact_id,
)
@classmethod
def get_last_log_for_recipient(cls, contact_doc_type, contact_id, direction=None):
qs = cls.by_recipient(contact_doc_type, contact_id)
if direction:
qs = qs.filter(direction=direction)
qs = qs.order_by('-date')[:1]
if qs:
return qs[0]
return None
@classmethod
def count_by_domain(cls, domain, direction=None):
qs = cls.objects.filter(domain=domain)
if direction:
qs = qs.filter(direction=direction)
return qs.count()
@property
def recipient(self):
if self.couch_recipient_doc_type == 'CommCareCase':
return CaseAccessors(self.domain).get_case(self.couch_recipient)
else:
return CouchUser.get_by_user_id(self.couch_recipient)
@classmethod
def inbound_entry_exists(cls, contact_doc_type, contact_id, from_timestamp, to_timestamp=None):
qs = cls.by_recipient(
contact_doc_type,
contact_id
).filter(
direction=INCOMING,
date__gte=from_timestamp
)
if to_timestamp:
qs = qs.filter(
date__lte=to_timestamp
)
return len(qs[:1]) > 0
class SMSBase(UUIDGeneratorMixin, Log):
ERROR_TOO_MANY_UNSUCCESSFUL_ATTEMPTS = 'TOO_MANY_UNSUCCESSFUL_ATTEMPTS'
ERROR_MESSAGE_IS_STALE = 'MESSAGE_IS_STALE'
ERROR_INVALID_DIRECTION = 'INVALID_DIRECTION'
ERROR_PHONE_NUMBER_OPTED_OUT = 'PHONE_NUMBER_OPTED_OUT'
ERROR_INVALID_DESTINATION_NUMBER = 'INVALID_DESTINATION_NUMBER'
ERROR_MESSAGE_TOO_LONG = 'MESSAGE_TOO_LONG'
ERROR_CONTACT_IS_INACTIVE = 'CONTACT_IS_INACTIVE'
ERROR_MESSAGES = {
ERROR_TOO_MANY_UNSUCCESSFUL_ATTEMPTS:
ugettext_noop('Gateway error.'),
ERROR_MESSAGE_IS_STALE:
ugettext_noop('Message is stale and will not be processed.'),
ERROR_INVALID_DIRECTION:
ugettext_noop('Unknown message direction.'),
ERROR_PHONE_NUMBER_OPTED_OUT:
ugettext_noop('Phone number has opted out of receiving SMS.'),
ERROR_INVALID_DESTINATION_NUMBER:
ugettext_noop("The gateway can't reach the destination number."),
ERROR_MESSAGE_TOO_LONG:
ugettext_noop("The gateway could not process the message because it was too long."),
ERROR_CONTACT_IS_INACTIVE:
ugettext_noop("The recipient has been deactivated."),
}
UUIDS_TO_GENERATE = ['couch_id']
couch_id = models.CharField(max_length=126, null=True, db_index=True)
text = models.TextField(null=True)
# In cases where decoding must occur, this is the raw text received
# from the gateway
raw_text = models.TextField(null=True)
datetime_to_process = models.DateTimeField(null=True, db_index=True)
processed = models.NullBooleanField(default=True, db_index=True)
num_processing_attempts = models.IntegerField(default=0, null=True)
queued_timestamp = models.DateTimeField(null=True)
processed_timestamp = models.DateTimeField(null=True)
# When an SMS is received on a domain-owned backend, we set this to
# the domain name. This can be used by the framework to handle domain-specific
# processing of unregistered contacts.
domain_scope = models.CharField(max_length=126, null=True)
# Set to True to send the message regardless of whether the destination
# phone number has opted-out. Should only be used to send opt-out
# replies or other info-related queries while opted-out.
ignore_opt_out = models.NullBooleanField(default=False)
# This is the unique message id that the gateway uses to track this
# message, if applicable.
backend_message_id = models.CharField(max_length=126, null=True)
# For outgoing sms only: if this sms was sent from a chat window,
# the _id of the CouchUser who sent this sms; otherwise None
chat_user_id = models.CharField(max_length=126, null=True)
# True if this was an inbound message that was an
# invalid response to a survey question
invalid_survey_response = models.NullBooleanField(default=False)
""" Custom properties. For the initial migration, it makes it easier
to put these here. Eventually they should be moved to a separate table. """
fri_message_bank_lookup_completed = models.NullBooleanField(default=False)
fri_message_bank_message_id = models.CharField(max_length=126, null=True)
fri_id = models.CharField(max_length=126, null=True)
fri_risk_profile = models.CharField(max_length=1, null=True)
class Meta:
abstract = True
app_label = 'sms'
@property
def outbound_backend(self):
if self.backend_id:
return SQLMobileBackend.load(self.backend_id, is_couch_id=True)
return SQLMobileBackend.load_default_by_phone_and_domain(
SQLMobileBackend.SMS,
smsutil.clean_phone_number(self.phone_number),
domain=self.domain
)
class SMS(SMSBase):
def to_json(self):
from corehq.apps.sms.serializers import SMSSerializer
data = SMSSerializer(self).data
return data
def publish_change(self):
from corehq.apps.sms.tasks import publish_sms_change
publish_sms_change.delay(self)
class QueuedSMS(SMSBase):
class Meta:
db_table = 'sms_queued'
@classmethod
def get_queued_sms(cls):
return cls.objects.filter(
datetime_to_process__lte=datetime.utcnow(),
)
class SQLLastReadMessage(UUIDGeneratorMixin, models.Model):
class Meta:
db_table = 'sms_lastreadmessage'
app_label = 'sms'
index_together = [
['domain', 'read_by', 'contact_id'],
['domain', 'contact_id'],
]
UUIDS_TO_GENERATE = ['couch_id']
couch_id = models.CharField(max_length=126, null=True, db_index=True)
domain = models.CharField(max_length=126, null=True)
# _id of CouchUser who read it
read_by = models.CharField(max_length=126, null=True)
# _id of the CouchUser or CommCareCase who the message was sent to
# or from
contact_id = models.CharField(max_length=126, null=True)
# couch_id of the SMS
message_id = models.CharField(max_length=126, null=True)
# date of the SMS entry, stored here redundantly to prevent a lookup
message_timestamp = models.DateTimeField(null=True)
@classmethod
def by_anyone(cls, domain, contact_id):
"""
Returns the SQLLastReadMessage representing the last chat message
that was read by anyone in the given domain for the given contact_id.
"""
result = cls.objects.filter(
domain=domain,
contact_id=contact_id
).order_by('-message_timestamp')
result = result[:1]
if len(result) > 0:
return result[0]
return None
@classmethod
def by_user(cls, domain, user_id, contact_id):
"""
Returns the SQLLastReadMessage representing the last chat message
that was read in the given domain by the given user_id for the given
contact_id.
"""
try:
# It's not expected that this can raise MultipleObjectsReturned
# since we lock out creation of these records with a CriticalSection.
# So if that happens, let the exception raise.
return cls.objects.get(
domain=domain,
read_by=user_id,
contact_id=contact_id
)
except cls.DoesNotExist:
return None
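# Illustrative sketch: find the newest chat message a particular user has read
# for a contact, falling back to the newest message read by anyone. The domain
# and ids are hypothetical placeholders.
def _example_last_read_message(user_id, contact_id):
    lrm = (SQLLastReadMessage.by_user('example-domain', user_id, contact_id)
           or SQLLastReadMessage.by_anyone('example-domain', contact_id))
    return lrm.message_timestamp if lrm else None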
class ExpectedCallback(UUIDGeneratorMixin, models.Model):
class Meta:
app_label = 'sms'
index_together = [
['domain', 'date'],
]
STATUS_CHOICES = (
(CALLBACK_PENDING, ugettext_lazy("Pending")),
(CALLBACK_RECEIVED, ugettext_lazy("Received")),
(CALLBACK_MISSED, ugettext_lazy("Missed")),
)
UUIDS_TO_GENERATE = ['couch_id']
couch_id = models.CharField(max_length=126, null=True, db_index=True)
domain = models.CharField(max_length=126, null=True, db_index=True)
date = models.DateTimeField(null=True)
couch_recipient_doc_type = models.CharField(max_length=126, null=True)
couch_recipient = models.CharField(max_length=126, null=True, db_index=True)
status = models.CharField(max_length=126, null=True)
@classmethod
def by_domain(cls, domain, start_date=None, end_date=None):
qs = cls.objects.filter(domain=domain)
if start_date:
qs = qs.filter(date__gte=start_date)
if end_date:
qs = qs.filter(date__lte=end_date)
return qs
@classmethod
def by_domain_recipient_date(cls, domain, recipient_id, date):
try:
return cls.objects.get(
domain=domain,
couch_recipient=recipient_id,
date=date
)
except cls.DoesNotExist:
return None
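# Illustrative sketch: record that a callback is expected from a contact and
# resolve it once the contact calls back (or the window closes). The values
# shown are hypothetical placeholders.
def _example_expected_callback(recipient_id, expected_date):
    callback = ExpectedCallback.objects.create(
        domain='example-domain',
        date=expected_date,
        couch_recipient_doc_type='CommCareCase',
        couch_recipient=recipient_id,
        status=CALLBACK_PENDING,
    )
    # ...later, when the callback arrives or the window closes:
    callback.status = CALLBACK_RECEIVED  # or CALLBACK_MISSED
    callback.save()
    return callback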
class ForwardingRule(Document):
domain = StringProperty()
forward_type = StringProperty(choices=FORWARDING_CHOICES)
keyword = StringProperty()
backend_id = StringProperty() # id of MobileBackend which will be used to do the forwarding
def retire(self):
self.doc_type += "-Deleted"
self.save()
class PhoneBlacklist(models.Model):
"""
Each entry represents a single phone number and whether we can send SMS
to that number or make calls to that number.
"""
# This is the domain that the phone number belonged to the last time an opt in
# or opt out operation happened. Can be null if the phone number didn't belong
# to any domain.
domain = models.CharField(max_length=126, null=True, db_index=True)
phone_number = models.CharField(max_length=30, unique=True, null=False, db_index=True)
# True if it's ok to send SMS to this phone number, False if not
send_sms = models.BooleanField(null=False, default=True)
# True if it's ok to call this phone number, False if not
# This is not yet implemented but will be in the future.
send_ivr = models.BooleanField(null=False, default=True)
# True to allow this phone number to opt back in, False if not
can_opt_in = models.BooleanField(null=False, default=True)
last_sms_opt_in_timestamp = models.DateTimeField(null=True)
last_sms_opt_out_timestamp = models.DateTimeField(null=True)
class Meta:
app_label = 'sms'
@classmethod
def get_by_phone_number(cls, phone_number):
phone_number = smsutil.strip_plus(phone_number)
return cls.objects.get(phone_number=phone_number)
@classmethod
def get_by_phone_number_or_none(cls, phone_number):
try:
return cls.get_by_phone_number(phone_number)
except cls.DoesNotExist:
return None
@classmethod
def get_or_create(cls, phone_number):
"""
phone_number - should be a string of digits
"""
phone_number = smsutil.strip_plus(phone_number)
if not phone_number:
return (None, False)
return cls.objects.get_or_create(phone_number=phone_number)
@classmethod
def can_receive_sms(cls, phone_number):
try:
phone_obj = cls.get_by_phone_number(phone_number)
return phone_obj.send_sms
except cls.DoesNotExist:
# This means the phone number has not opted-out
return True
@classmethod
def opt_in_sms(cls, phone_number, domain=None):
"""
Opts a phone number in to receive SMS.
Returns True if the number was actually opted-in, False if not.
"""
try:
phone_obj = cls.get_by_phone_number(phone_number)
if phone_obj.can_opt_in:
phone_obj.domain = domain
phone_obj.send_sms = True
phone_obj.last_sms_opt_in_timestamp = datetime.utcnow()
phone_obj.save()
return True
except cls.DoesNotExist:
pass
return False
@classmethod
def opt_out_sms(cls, phone_number, domain=None):
"""
Opts a phone number out from receiving SMS.
Returns True if the number was actually opted-out, False if not.
"""
phone_obj = cls.get_or_create(phone_number)[0]
if phone_obj:
phone_obj.domain = domain
phone_obj.send_sms = False
phone_obj.last_sms_opt_out_timestamp = datetime.utcnow()
phone_obj.save()
return True
return False
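# Illustrative sketch of the opt-out bookkeeping: check whether a number can
# be texted, honor a STOP request, and re-enable on START. The number and
# domain are hypothetical placeholders.
def _example_phone_blacklist(number='16175551234', domain='example-domain'):
    if PhoneBlacklist.can_receive_sms(number):
        pass  # ok to send SMS to this number
    # Contact texted STOP:
    PhoneBlacklist.opt_out_sms(number, domain=domain)
    # Contact texted START (only re-enabled if can_opt_in is True):
    return PhoneBlacklist.opt_in_sms(number, domain=domain)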
class PhoneNumber(models.Model):
couch_id = models.CharField(max_length=126, db_index=True, null=True)
domain = models.CharField(max_length=126, db_index=True, null=True)
owner_doc_type = models.CharField(max_length=126, null=True)
owner_id = models.CharField(max_length=126, db_index=True, null=True)
phone_number = models.CharField(max_length=126, db_index=True, null=True)
# Points to the name of a SQLMobileBackend (can be domain-level
# or system-level) which represents the backend that will be used
# when sending SMS to this number. Can be None to use domain/system
# defaults.
backend_id = models.CharField(max_length=126, null=True)
# Points to the name of a SQLMobileBackend (can be domain-level
# or system-level) which represents the backend that will be used
# when making calls to this number. Can be None to use domain/system
# defaults.
ivr_backend_id = models.CharField(max_length=126, null=True)
verified = models.NullBooleanField(default=False)
contact_last_modified = models.DateTimeField(null=True)
def __init__(self, *args, **kwargs):
super(PhoneNumber, self).__init__(*args, **kwargs)
self._old_phone_number = self.phone_number
self._old_owner_id = self.owner_id
def __repr__(self):
return '{phone} in {domain} (owned by {owner})'.format(
phone=self.phone_number, domain=self.domain,
owner=self.owner_id
)
@property
def backend(self):
from corehq.apps.sms.util import clean_phone_number
backend_id = self.backend_id.strip() if isinstance(self.backend_id, basestring) else None
if backend_id:
return SQLMobileBackend.load_by_name(
SQLMobileBackend.SMS,
self.domain,
backend_id
)
else:
return SQLMobileBackend.load_default_by_phone_and_domain(
SQLMobileBackend.SMS,
clean_phone_number(self.phone_number),
domain=self.domain
)
@property
def owner(self):
if self.owner_doc_type == 'CommCareCase':
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
return CaseAccessors(self.domain).get_case(self.owner_id)
elif self.owner_doc_type == 'CommCareUser':
from corehq.apps.users.models import CommCareUser
return CommCareUser.get(self.owner_id)
elif self.owner_doc_type == 'WebUser':
from corehq.apps.users.models import WebUser
return WebUser.get(self.owner_id)
else:
return None
def retire(self):
self.delete()
@classmethod
def by_extensive_search(cls, phone_number):
# Try to look up the verified number entry directly
v = cls.by_phone(phone_number)
# If not found, try to see if any number in the database is a substring
# of the number given to us. This can happen if the telco prepends some
# international digits, such as 011...
if v is None:
v = cls.by_phone(phone_number[1:])
if v is None:
v = cls.by_phone(phone_number[2:])
if v is None:
v = cls.by_phone(phone_number[3:])
# If still not found, try to match only the last digits of numbers in
# the database. This can happen if the telco removes the country code
# in the caller id.
if v is None:
v = cls.by_suffix(phone_number)
return v
@classmethod
def by_couch_id(cls, couch_id):
try:
return cls.objects.get(couch_id=couch_id)
except cls.DoesNotExist:
return None
@classmethod
def by_phone(cls, phone_number, include_pending=False):
result = cls._by_phone(apply_leniency(phone_number))
return cls._filter_pending(result, include_pending)
@classmethod
def by_suffix(cls, phone_number, include_pending=False):
"""
        Used to look up a PhoneNumber by suffix, for numbers where the country
        code digits have been stripped from the caller id.
"""
result = cls._by_suffix(apply_leniency(phone_number))
return cls._filter_pending(result, include_pending)
@classmethod
@quickcache(['phone_number'], timeout=60 * 60)
def _by_phone(cls, phone_number):
try:
return cls.objects.get(phone_number=phone_number)
except cls.DoesNotExist:
return None
@classmethod
def _by_suffix(cls, phone_number):
# Decided not to cache this method since in order to clear the cache
# we'd have to clear using all suffixes of a number (which would involve
# up to ~10 cache clear calls on each save). Since this method is used so
# infrequently, it's better to not cache vs. clear so many keys on each
# save. Once all of our IVR gateways provide reliable caller id info,
# we can also remove this method.
try:
return cls.objects.get(phone_number__endswith=phone_number)
except cls.DoesNotExist:
return None
except cls.MultipleObjectsReturned:
return None
@classmethod
def _filter_pending(cls, v, include_pending):
if v:
if include_pending:
return v
elif v.verified:
return v
return None
@classmethod
def by_domain(cls, domain, ids_only=False):
qs = cls.objects.filter(domain=domain)
if ids_only:
return qs.values_list('couch_id', flat=True)
else:
return qs
@classmethod
def count_by_domain(cls, domain):
return cls.by_domain(domain).count()
@classmethod
@quickcache(['owner_id'], timeout=60 * 60)
def by_owner_id(cls, owner_id):
"""
Returns all phone numbers belonging to the given contact.
"""
return cls.objects.filter(owner_id=owner_id)
@classmethod
def _clear_quickcaches(cls, owner_id, phone_number, old_owner_id=None, old_phone_number=None):
cls.by_owner_id.clear(cls, owner_id)
if old_owner_id and old_owner_id != owner_id:
cls.by_owner_id.clear(cls, old_owner_id)
cls._by_phone.clear(cls, phone_number)
if old_phone_number and old_phone_number != phone_number:
cls._by_phone.clear(cls, old_phone_number)
def _clear_caches(self):
self._clear_quickcaches(
self.owner_id,
self.phone_number,
old_owner_id=self._old_owner_id,
old_phone_number=self._old_phone_number
)
def save(self, *args, **kwargs):
self._clear_caches()
self._old_phone_number = self.phone_number
self._old_owner_id = self.owner_id
return super(PhoneNumber, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
self._clear_caches()
return super(PhoneNumber, self).delete(*args, **kwargs)
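# Illustrative lookup sketch: resolve a two-way entry for an inbound caller id
# (including the fallbacks for gateways that add or strip digits) and list all
# numbers owned by a contact. The arguments are hypothetical placeholders.
def _example_phone_number_lookup(caller_id, owner_id):
    entry = PhoneNumber.by_extensive_search(caller_id)
    owner = entry.owner if entry else None  # case, CommCareUser or WebUser
    return owner, PhoneNumber.by_owner_id(owner_id)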
class MessagingStatusMixin(object):
def refresh(self):
return self.__class__.objects.get(pk=self.pk)
def error(self, error_code, additional_error_text=None):
self.status = MessagingEvent.STATUS_ERROR
self.error_code = error_code
self.additional_error_text = additional_error_text
self.save()
def completed(self):
obj = self.refresh()
if obj.status != MessagingEvent.STATUS_ERROR:
obj.status = MessagingEvent.STATUS_COMPLETED
obj.save()
return obj
class MessagingEvent(models.Model, MessagingStatusMixin):
"""
Used to track the status of high-level events in the messaging
framework. Examples of such high-level events include the firing
of a reminder instance, the invoking of a keyword, or the sending
of a broadcast.
"""
STATUS_IN_PROGRESS = 'PRG'
STATUS_COMPLETED = 'CMP'
STATUS_NOT_COMPLETED = 'NOT'
STATUS_ERROR = 'ERR'
STATUS_CHOICES = (
(STATUS_IN_PROGRESS, ugettext_noop('In Progress')),
(STATUS_COMPLETED, ugettext_noop('Completed')),
(STATUS_NOT_COMPLETED, ugettext_noop('Not Completed')),
(STATUS_ERROR, ugettext_noop('Error')),
)
SOURCE_BROADCAST = 'BRD'
SOURCE_KEYWORD = 'KWD'
SOURCE_REMINDER = 'RMD'
SOURCE_UNRECOGNIZED = 'UNR'
SOURCE_FORWARDED = 'FWD'
SOURCE_OTHER = 'OTH'
SOURCE_CHOICES = (
(SOURCE_BROADCAST, ugettext_noop('Broadcast')),
(SOURCE_KEYWORD, ugettext_noop('Keyword')),
(SOURCE_REMINDER, ugettext_noop('Reminder')),
(SOURCE_UNRECOGNIZED, ugettext_noop('Unrecognized')),
(SOURCE_FORWARDED, ugettext_noop('Forwarded Message')),
(SOURCE_OTHER, ugettext_noop('Other')),
)
CONTENT_NONE = 'NOP'
CONTENT_SMS = 'SMS'
CONTENT_SMS_CALLBACK = 'CBK'
CONTENT_SMS_SURVEY = 'SVY'
CONTENT_IVR_SURVEY = 'IVR'
CONTENT_PHONE_VERIFICATION = 'VER'
CONTENT_ADHOC_SMS = 'ADH'
CONTENT_API_SMS = 'API'
CONTENT_CHAT_SMS = 'CHT'
CONTENT_EMAIL = 'EML'
CONTENT_CHOICES = (
(CONTENT_NONE, ugettext_noop('None')),
(CONTENT_SMS, ugettext_noop('SMS Message')),
(CONTENT_SMS_CALLBACK, ugettext_noop('SMS Expecting Callback')),
(CONTENT_SMS_SURVEY, ugettext_noop('SMS Survey')),
(CONTENT_IVR_SURVEY, ugettext_noop('IVR Survey')),
(CONTENT_PHONE_VERIFICATION, ugettext_noop('Phone Verification')),
(CONTENT_ADHOC_SMS, ugettext_noop('Manually Sent Message')),
(CONTENT_API_SMS, ugettext_noop('Message Sent Via API')),
(CONTENT_CHAT_SMS, ugettext_noop('Message Sent Via Chat')),
(CONTENT_EMAIL, ugettext_noop('Email')),
)
RECIPIENT_CASE = 'CAS'
RECIPIENT_MOBILE_WORKER = 'MOB'
RECIPIENT_WEB_USER = 'WEB'
RECIPIENT_USER_GROUP = 'UGP'
RECIPIENT_CASE_GROUP = 'CGP'
RECIPIENT_VARIOUS = 'MUL'
RECIPIENT_LOCATION = 'LOC'
RECIPIENT_LOCATION_PLUS_DESCENDANTS = 'LC+'
RECIPIENT_VARIOUS_LOCATIONS = 'VLC'
RECIPIENT_VARIOUS_LOCATIONS_PLUS_DESCENDANTS = 'VL+'
RECIPIENT_UNKNOWN = 'UNK'
RECIPIENT_CHOICES = (
(RECIPIENT_CASE, ugettext_noop('Case')),
(RECIPIENT_MOBILE_WORKER, ugettext_noop('Mobile Worker')),
(RECIPIENT_WEB_USER, ugettext_noop('Web User')),
(RECIPIENT_USER_GROUP, ugettext_noop('User Group')),
(RECIPIENT_CASE_GROUP, ugettext_noop('Case Group')),
(RECIPIENT_VARIOUS, ugettext_noop('Multiple Recipients')),
(RECIPIENT_LOCATION, ugettext_noop('Location')),
(RECIPIENT_LOCATION_PLUS_DESCENDANTS,
ugettext_noop('Location (including child locations)')),
(RECIPIENT_VARIOUS_LOCATIONS, ugettext_noop('Multiple Locations')),
(RECIPIENT_VARIOUS_LOCATIONS_PLUS_DESCENDANTS,
ugettext_noop('Multiple Locations (including child locations)')),
(RECIPIENT_UNKNOWN, ugettext_noop('Unknown Contact')),
)
ERROR_NO_RECIPIENT = 'NO_RECIPIENT'
ERROR_CANNOT_RENDER_MESSAGE = 'CANNOT_RENDER_MESSAGE'
ERROR_UNSUPPORTED_COUNTRY = 'UNSUPPORTED_COUNTRY'
ERROR_NO_PHONE_NUMBER = 'NO_PHONE_NUMBER'
ERROR_NO_TWO_WAY_PHONE_NUMBER = 'NO_TWO_WAY_PHONE_NUMBER'
ERROR_INVALID_CUSTOM_CONTENT_HANDLER = 'INVALID_CUSTOM_CONTENT_HANDLER'
ERROR_CANNOT_LOAD_CUSTOM_CONTENT_HANDLER = 'CANNOT_LOAD_CUSTOM_CONTENT_HANDLER'
ERROR_CANNOT_FIND_FORM = 'CANNOT_FIND_FORM'
ERROR_FORM_HAS_NO_QUESTIONS = 'FORM_HAS_NO_QUESTIONS'
ERROR_CASE_EXTERNAL_ID_NOT_FOUND = 'CASE_EXTERNAL_ID_NOT_FOUND'
ERROR_MULTIPLE_CASES_WITH_EXTERNAL_ID_FOUND = 'MULTIPLE_CASES_WITH_EXTERNAL_ID_FOUND'
ERROR_NO_CASE_GIVEN = 'NO_CASE_GIVEN'
ERROR_NO_EXTERNAL_ID_GIVEN = 'NO_EXTERNAL_ID_GIVEN'
ERROR_COULD_NOT_PROCESS_STRUCTURED_SMS = 'COULD_NOT_PROCESS_STRUCTURED_SMS'
ERROR_SUBEVENT_ERROR = 'SUBEVENT_ERROR'
ERROR_TOUCHFORMS_ERROR = 'TOUCHFORMS_ERROR'
ERROR_INTERNAL_SERVER_ERROR = 'INTERNAL_SERVER_ERROR'
ERROR_GATEWAY_ERROR = 'GATEWAY_ERROR'
ERROR_NO_SUITABLE_GATEWAY = 'NO_SUITABLE_GATEWAY'
ERROR_GATEWAY_NOT_FOUND = 'GATEWAY_NOT_FOUND'
ERROR_NO_EMAIL_ADDRESS = 'NO_EMAIL_ADDRESS'
ERROR_TRIAL_EMAIL_LIMIT_REACHED = 'TRIAL_EMAIL_LIMIT_REACHED'
ERROR_MESSAGES = {
ERROR_NO_RECIPIENT:
ugettext_noop('No recipient'),
ERROR_CANNOT_RENDER_MESSAGE:
ugettext_noop('Error rendering message; please check syntax.'),
ERROR_UNSUPPORTED_COUNTRY:
ugettext_noop('Gateway does not support the destination country.'),
ERROR_NO_PHONE_NUMBER:
ugettext_noop('Contact has no phone number.'),
ERROR_NO_TWO_WAY_PHONE_NUMBER:
ugettext_noop('Contact has no two-way phone number.'),
ERROR_INVALID_CUSTOM_CONTENT_HANDLER:
ugettext_noop('Invalid custom content handler.'),
ERROR_CANNOT_LOAD_CUSTOM_CONTENT_HANDLER:
ugettext_noop('Cannot load custom content handler.'),
ERROR_CANNOT_FIND_FORM:
ugettext_noop('Cannot find form.'),
ERROR_FORM_HAS_NO_QUESTIONS:
ugettext_noop('No questions were available in the form. Please '
'check that the form has questions and that display conditions '
'are not preventing questions from being asked.'),
ERROR_CASE_EXTERNAL_ID_NOT_FOUND:
ugettext_noop('The case with the given external ID was not found.'),
ERROR_MULTIPLE_CASES_WITH_EXTERNAL_ID_FOUND:
ugettext_noop('Multiple cases were found with the given external ID.'),
ERROR_NO_CASE_GIVEN:
ugettext_noop('The form requires a case but no case was provided.'),
ERROR_NO_EXTERNAL_ID_GIVEN:
ugettext_noop('No external ID given; please include case external ID after keyword.'),
ERROR_COULD_NOT_PROCESS_STRUCTURED_SMS:
ugettext_noop('Error processing structured SMS.'),
ERROR_SUBEVENT_ERROR:
ugettext_noop('View details for more information.'),
ERROR_TOUCHFORMS_ERROR:
ugettext_noop('An error occurred in the formplayer service.'),
ERROR_INTERNAL_SERVER_ERROR:
ugettext_noop('Internal Server Error'),
ERROR_GATEWAY_ERROR:
ugettext_noop('Gateway error.'),
ERROR_NO_SUITABLE_GATEWAY:
ugettext_noop('No suitable gateway could be found.'),
ERROR_GATEWAY_NOT_FOUND:
ugettext_noop('Gateway could not be found.'),
ERROR_NO_EMAIL_ADDRESS:
ugettext_noop('Recipient has no email address.'),
ERROR_TRIAL_EMAIL_LIMIT_REACHED:
ugettext_noop("Cannot send any more reminder emails. The limit for "
"sending reminder emails on a Trial plan has been reached."),
}
domain = models.CharField(max_length=126, null=False, db_index=True)
date = models.DateTimeField(null=False, db_index=True)
source = models.CharField(max_length=3, choices=SOURCE_CHOICES, null=False)
source_id = models.CharField(max_length=126, null=True)
content_type = models.CharField(max_length=3, choices=CONTENT_CHOICES, null=False)
# Only used when content_type is CONTENT_SMS_SURVEY or CONTENT_IVR_SURVEY
# This is redundantly stored here (as well as the subevent) so that it
# doesn't have to be looked up for reporting.
form_unique_id = models.CharField(max_length=126, null=True)
form_name = models.TextField(null=True)
# If any of the MessagingSubEvent status's are STATUS_ERROR, this is STATUS_ERROR
status = models.CharField(max_length=3, choices=STATUS_CHOICES, null=False)
error_code = models.CharField(max_length=126, null=True)
additional_error_text = models.TextField(null=True)
recipient_type = models.CharField(max_length=3, choices=RECIPIENT_CHOICES, null=True, db_index=True)
recipient_id = models.CharField(max_length=126, null=True, db_index=True)
class Meta:
app_label = 'sms'
@classmethod
def get_recipient_type_from_doc_type(cls, recipient_doc_type):
return {
'CommCareUser': cls.RECIPIENT_MOBILE_WORKER,
'WebUser': cls.RECIPIENT_WEB_USER,
'CommCareCase': cls.RECIPIENT_CASE,
'Group': cls.RECIPIENT_USER_GROUP,
'CommCareCaseGroup': cls.RECIPIENT_CASE_GROUP,
}.get(recipient_doc_type, cls.RECIPIENT_UNKNOWN)
@classmethod
def get_recipient_type(cls, recipient):
return cls.get_recipient_type_from_doc_type(recipient.doc_type)
@classmethod
def _get_recipient_doc_type(cls, recipient_type):
return {
MessagingEvent.RECIPIENT_MOBILE_WORKER: 'CommCareUser',
MessagingEvent.RECIPIENT_WEB_USER: 'WebUser',
MessagingEvent.RECIPIENT_CASE: 'CommCareCase',
MessagingEvent.RECIPIENT_USER_GROUP: 'Group',
MessagingEvent.RECIPIENT_CASE_GROUP: 'CommCareCaseGroup',
MessagingEvent.RECIPIENT_LOCATION: 'SQLLocation',
MessagingEvent.RECIPIENT_LOCATION_PLUS_DESCENDANTS: 'SQLLocation',
}.get(recipient_type, None)
def get_recipient_doc_type(self):
return MessagingEvent._get_recipient_doc_type(self.recipient_type)
def create_subevent(self, reminder_definition, reminder, recipient):
recipient_type = MessagingEvent.get_recipient_type(recipient)
content_type, form_unique_id, form_name = self.get_content_info_from_reminder(
reminder_definition, reminder, parent=self)
obj = MessagingSubEvent.objects.create(
parent=self,
date=datetime.utcnow(),
recipient_type=recipient_type,
recipient_id=recipient.get_id if recipient_type else None,
content_type=content_type,
form_unique_id=form_unique_id,
form_name=form_name,
case_id=reminder.case_id,
status=MessagingEvent.STATUS_IN_PROGRESS,
)
return obj
def create_ivr_subevent(self, recipient, form_unique_id, case_id=None):
recipient_type = MessagingEvent.get_recipient_type(recipient)
obj = MessagingSubEvent.objects.create(
parent=self,
date=datetime.utcnow(),
recipient_type=recipient_type,
recipient_id=recipient.get_id if recipient_type else None,
content_type=MessagingEvent.CONTENT_IVR_SURVEY,
form_unique_id=form_unique_id,
form_name=MessagingEvent.get_form_name_or_none(form_unique_id),
case_id=case_id,
status=MessagingEvent.STATUS_IN_PROGRESS,
)
return obj
@classmethod
def create_event_for_adhoc_sms(cls, domain, recipient=None,
content_type=CONTENT_ADHOC_SMS, source=SOURCE_OTHER):
if recipient:
recipient_type = cls.get_recipient_type(recipient)
recipient_id = recipient.get_id
else:
recipient_type = cls.RECIPIENT_UNKNOWN
recipient_id = None
obj = cls.objects.create(
domain=domain,
date=datetime.utcnow(),
source=source,
content_type=content_type,
status=cls.STATUS_IN_PROGRESS,
recipient_type=recipient_type,
recipient_id=recipient_id,
)
return obj
def create_structured_sms_subevent(self, case_id):
obj = MessagingSubEvent.objects.create(
parent=self,
date=datetime.utcnow(),
recipient_type=self.recipient_type,
recipient_id=self.recipient_id,
content_type=MessagingEvent.CONTENT_SMS_SURVEY,
form_unique_id=self.form_unique_id,
form_name=self.form_name,
case_id=case_id,
status=MessagingEvent.STATUS_IN_PROGRESS,
)
return obj
def create_subevent_for_single_sms(self, recipient_doc_type=None,
recipient_id=None, case=None, completed=False):
obj = MessagingSubEvent.objects.create(
parent=self,
date=datetime.utcnow(),
recipient_type=MessagingEvent.get_recipient_type_from_doc_type(recipient_doc_type),
recipient_id=recipient_id,
content_type=MessagingEvent.CONTENT_SMS,
case_id=case.case_id if case else None,
status=(MessagingEvent.STATUS_COMPLETED
if completed
else MessagingEvent.STATUS_IN_PROGRESS),
)
return obj
@property
def subevents(self):
return self.messagingsubevent_set.all()
@classmethod
def get_source_from_reminder(cls, reminder_definition):
from corehq.apps.reminders.models import (REMINDER_TYPE_ONE_TIME,
REMINDER_TYPE_DEFAULT)
default = (cls.SOURCE_OTHER, None)
return {
REMINDER_TYPE_ONE_TIME:
(cls.SOURCE_BROADCAST, reminder_definition.get_id),
REMINDER_TYPE_DEFAULT:
(cls.SOURCE_REMINDER, reminder_definition.get_id),
}.get(reminder_definition.reminder_type, default)
@classmethod
def get_form_name_or_none(cls, form_unique_id):
try:
form = Form.get_form(form_unique_id)
return form.full_path_name
        except Exception:
return None
@classmethod
def get_content_info_from_reminder(cls, reminder_definition, reminder, parent=None):
from corehq.apps.reminders.models import (METHOD_SMS, METHOD_SMS_CALLBACK,
METHOD_SMS_SURVEY, METHOD_IVR_SURVEY, METHOD_EMAIL)
content_type = {
METHOD_SMS: cls.CONTENT_SMS,
METHOD_SMS_CALLBACK: cls.CONTENT_SMS_CALLBACK,
METHOD_SMS_SURVEY: cls.CONTENT_SMS_SURVEY,
METHOD_IVR_SURVEY: cls.CONTENT_IVR_SURVEY,
METHOD_EMAIL: cls.CONTENT_EMAIL,
}.get(reminder_definition.method, cls.CONTENT_SMS)
form_unique_id = reminder.current_event.form_unique_id
if parent and parent.form_unique_id == form_unique_id:
form_name = parent.form_name
else:
form_name = (cls.get_form_name_or_none(form_unique_id)
if form_unique_id else None)
return (content_type, form_unique_id, form_name)
@classmethod
def get_content_info_from_keyword(cls, keyword):
from corehq.apps.reminders.models import (METHOD_SMS, METHOD_SMS_SURVEY,
METHOD_STRUCTURED_SMS, RECIPIENT_SENDER)
content_type = cls.CONTENT_NONE
form_unique_id = None
form_name = None
for action in keyword.actions:
if action.recipient == RECIPIENT_SENDER:
if action.action in (METHOD_SMS_SURVEY, METHOD_STRUCTURED_SMS):
content_type = cls.CONTENT_SMS_SURVEY
form_unique_id = action.form_unique_id
form_name = cls.get_form_name_or_none(action.form_unique_id)
elif action.action == METHOD_SMS:
content_type = cls.CONTENT_SMS
return (content_type, form_unique_id, form_name)
@classmethod
def create_from_reminder(cls, reminder_definition, reminder, recipient=None):
if reminder_definition.messaging_event_id:
return cls.objects.get(pk=reminder_definition.messaging_event_id)
source, source_id = cls.get_source_from_reminder(reminder_definition)
content_type, form_unique_id, form_name = cls.get_content_info_from_reminder(
reminder_definition, reminder)
if recipient and reminder_definition.recipient_is_list_of_locations(recipient):
if len(recipient) == 1:
recipient_type = (cls.RECIPIENT_LOCATION_PLUS_DESCENDANTS
if reminder_definition.include_child_locations
else cls.RECIPIENT_LOCATION)
recipient_id = recipient[0].location_id
elif len(recipient) > 1:
recipient_type = (cls.RECIPIENT_VARIOUS_LOCATIONS_PLUS_DESCENDANTS
if reminder_definition.include_child_locations
else cls.RECIPIENT_VARIOUS_LOCATIONS)
recipient_id = None
else:
# len(recipient) should never be 0 when we invoke this method,
# but catching this situation here just in case
recipient_type = cls.RECIPIENT_UNKNOWN
recipient_id = None
elif isinstance(recipient, list):
recipient_type = cls.RECIPIENT_VARIOUS
recipient_id = None
elif recipient is None:
recipient_type = cls.RECIPIENT_UNKNOWN
recipient_id = None
else:
recipient_type = cls.get_recipient_type(recipient)
recipient_id = recipient.get_id if recipient_type else None
return cls.objects.create(
domain=reminder_definition.domain,
date=datetime.utcnow(),
source=source,
source_id=source_id,
content_type=content_type,
form_unique_id=form_unique_id,
form_name=form_name,
status=cls.STATUS_IN_PROGRESS,
recipient_type=recipient_type,
recipient_id=recipient_id
)
@classmethod
def create_from_keyword(cls, keyword, contact):
"""
keyword - the keyword object
contact - the person who initiated the keyword
"""
content_type, form_unique_id, form_name = cls.get_content_info_from_keyword(
keyword)
recipient_type = cls.get_recipient_type(contact)
return cls.objects.create(
domain=keyword.domain,
date=datetime.utcnow(),
source=cls.SOURCE_KEYWORD,
source_id=keyword.get_id,
content_type=content_type,
form_unique_id=form_unique_id,
form_name=form_name,
status=cls.STATUS_IN_PROGRESS,
recipient_type=recipient_type,
recipient_id=contact.get_id if recipient_type else None
)
@classmethod
def create_verification_event(cls, domain, contact):
recipient_type = cls.get_recipient_type(contact)
return cls.objects.create(
domain=domain,
date=datetime.utcnow(),
source=cls.SOURCE_OTHER,
content_type=cls.CONTENT_PHONE_VERIFICATION,
status=cls.STATUS_IN_PROGRESS,
recipient_type=recipient_type,
recipient_id=contact.get_id if recipient_type else None
)
@classmethod
def get_current_verification_event(cls, domain, contact_id, phone_number):
"""
Returns the latest phone verification event that is in progress
for the given contact and phone number, or None if one does not exist.
"""
qs = cls.objects.filter(
domain=domain,
recipient_id=contact_id,
messagingsubevent__sms__phone_number=smsutil.clean_phone_number(phone_number),
content_type=cls.CONTENT_PHONE_VERIFICATION,
status=cls.STATUS_IN_PROGRESS
)
return qs.order_by('-date')[0] if qs.count() > 0 else None
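# Illustrative lifecycle sketch: create a parent MessagingEvent for a manually
# sent message, attach a per-recipient subevent, and mark both completed. The
# domain is a hypothetical placeholder and ``recipient`` is assumed to be a
# CouchUser or case.
def _example_messaging_event(recipient):
    event = MessagingEvent.create_event_for_adhoc_sms(
        'example-domain', recipient=recipient)
    subevent = event.create_subevent_for_single_sms(
        recipient_doc_type=recipient.doc_type,
        recipient_id=recipient.get_id,
    )
    # ...send the SMS, pointing its messaging_subevent at ``subevent``...
    subevent.completed()
    return event.completed()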
class MessagingSubEvent(models.Model, MessagingStatusMixin):
"""
Used to track the status of a MessagingEvent for each of its recipients.
"""
RECIPIENT_CHOICES = (
(MessagingEvent.RECIPIENT_CASE, ugettext_noop('Case')),
(MessagingEvent.RECIPIENT_MOBILE_WORKER, ugettext_noop('Mobile Worker')),
(MessagingEvent.RECIPIENT_WEB_USER, ugettext_noop('Web User')),
)
parent = models.ForeignKey('MessagingEvent', on_delete=models.CASCADE)
date = models.DateTimeField(null=False, db_index=True)
recipient_type = models.CharField(max_length=3, choices=RECIPIENT_CHOICES, null=False)
recipient_id = models.CharField(max_length=126, null=True)
content_type = models.CharField(max_length=3, choices=MessagingEvent.CONTENT_CHOICES, null=False)
# Only used when content_type is CONTENT_SMS_SURVEY or CONTENT_IVR_SURVEY
form_unique_id = models.CharField(max_length=126, null=True)
form_name = models.TextField(null=True)
xforms_session = models.ForeignKey('smsforms.SQLXFormsSession', null=True, on_delete=models.PROTECT)
# If this was a reminder that spawned off of a case, this is the case's id
case_id = models.CharField(max_length=126, null=True)
status = models.CharField(max_length=3, choices=MessagingEvent.STATUS_CHOICES, null=False)
error_code = models.CharField(max_length=126, null=True)
additional_error_text = models.TextField(null=True)
class Meta:
app_label = 'sms'
def save(self, *args, **kwargs):
super(MessagingSubEvent, self).save(*args, **kwargs)
parent = self.parent
# If this event is in an errored state, also set the parent
# event to an errored state.
if self.status == MessagingEvent.STATUS_ERROR:
parent.status = MessagingEvent.STATUS_ERROR
parent.save()
# If the parent event had various recipients, mark it as such,
# unless the source was a keyword in which case the recipient
# listed should always be the keyword initiator.
if (parent.source != MessagingEvent.SOURCE_KEYWORD and
(parent.recipient_id != self.recipient_id or self.recipient_id is None) and
parent.recipient_type not in (
MessagingEvent.RECIPIENT_USER_GROUP,
MessagingEvent.RECIPIENT_CASE_GROUP,
MessagingEvent.RECIPIENT_VARIOUS,
MessagingEvent.RECIPIENT_LOCATION,
MessagingEvent.RECIPIENT_LOCATION_PLUS_DESCENDANTS,
MessagingEvent.RECIPIENT_VARIOUS_LOCATIONS,
MessagingEvent.RECIPIENT_VARIOUS_LOCATIONS_PLUS_DESCENDANTS,
) and len(parent.subevents) > 1):
parent.recipient_type = MessagingEvent.RECIPIENT_VARIOUS
parent.recipient_id = None
parent.save()
def get_recipient_doc_type(self):
return MessagingEvent._get_recipient_doc_type(self.recipient_type)
class SelfRegistrationInvitation(models.Model):
PHONE_TYPE_ANDROID = 'android'
PHONE_TYPE_OTHER = 'other'
PHONE_TYPE_CHOICES = (
(PHONE_TYPE_ANDROID, ugettext_lazy('Android')),
(PHONE_TYPE_OTHER, ugettext_lazy('Other')),
)
STATUS_PENDING = 'pending'
STATUS_REGISTERED = 'registered'
STATUS_EXPIRED = 'expired'
domain = models.CharField(max_length=126, null=False, db_index=True)
phone_number = models.CharField(max_length=30, null=False, db_index=True)
token = models.CharField(max_length=126, null=False, unique=True, db_index=True)
app_id = models.CharField(max_length=126, null=True)
expiration_date = models.DateField(null=False)
created_date = models.DateTimeField(null=False)
phone_type = models.CharField(max_length=20, null=True, choices=PHONE_TYPE_CHOICES)
registered_date = models.DateTimeField(null=True)
# True if we are assuming that the recipient has an Android phone
android_only = models.BooleanField(default=False)
# True to make email address a required field on the self-registration page
require_email = models.BooleanField(default=False)
# custom user data that will be set to the CommCareUser's user_data property
# when it is created
custom_user_data = jsonfield.JSONField(default=dict)
class Meta:
app_label = 'sms'
@property
@memoized
def odk_url(self):
if not self.app_id:
return None
try:
return self.get_app_odk_url(self.domain, self.app_id)
except Http404:
return None
@property
def already_registered(self):
return self.registered_date is not None
@property
def expired(self):
"""
The invitation is valid until 11:59pm UTC on the expiration date.
"""
return datetime.utcnow().date() > self.expiration_date
@property
def status(self):
if self.already_registered:
return self.STATUS_REGISTERED
elif self.expired:
return self.STATUS_EXPIRED
else:
return self.STATUS_PENDING
def completed(self):
self.registered_date = datetime.utcnow()
self.save()
def send_step1_sms(self, custom_message=None):
from corehq.apps.sms.api import send_sms
if self.android_only:
self.send_step2_android_sms(custom_message)
return
send_sms(
self.domain,
None,
self.phone_number,
custom_message or get_message(MSG_MOBILE_WORKER_INVITATION_START, domain=self.domain)
)
def send_step2_java_sms(self):
from corehq.apps.sms.api import send_sms
send_sms(
self.domain,
None,
self.phone_number,
get_message(MSG_MOBILE_WORKER_JAVA_INVITATION, context=(self.domain,), domain=self.domain)
)
def get_user_registration_url(self):
from corehq.apps.users.views.mobile.users import CommCareUserSelfRegistrationView
return absolute_reverse(
CommCareUserSelfRegistrationView.urlname,
args=[self.domain, self.token]
)
@classmethod
def get_app_info_url(cls, domain, app_id):
from corehq.apps.sms.views import InvitationAppInfoView
return absolute_reverse(
InvitationAppInfoView.urlname,
args=[domain, app_id]
)
@classmethod
def get_sms_install_link(cls, domain, app_id):
"""
If CommCare detects this SMS on the phone during start up,
it gives the user the option to install the given app.
"""
app_info_url = cls.get_app_info_url(domain, app_id)
return '[commcare app - do not delete] %s' % base64.b64encode(app_info_url)
def send_step2_android_sms(self, custom_message=None):
from corehq.apps.sms.api import send_sms
registration_url = self.get_user_registration_url()
if custom_message:
message = custom_message.format(registration_url)
else:
message = get_message(MSG_MOBILE_WORKER_ANDROID_INVITATION, context=(registration_url,),
domain=self.domain)
send_sms(
self.domain,
None,
self.phone_number,
message
)
if self.odk_url:
send_sms(
self.domain,
None,
self.phone_number,
self.get_sms_install_link(self.domain, self.app_id),
)
def expire(self):
self.expiration_date = datetime.utcnow().date() - timedelta(days=1)
self.save()
@classmethod
def get_unexpired_invitations(cls, phone_number):
current_date = datetime.utcnow().date()
return cls.objects.filter(
phone_number=phone_number,
expiration_date__gte=current_date,
registered_date__isnull=True
)
@classmethod
def expire_invitations(cls, phone_number):
"""
Expire all invitations for the given phone number that have not
yet expired.
"""
for invitation in cls.get_unexpired_invitations(phone_number):
invitation.expire()
@classmethod
def by_token(cls, token):
try:
return cls.objects.get(token=token)
except cls.DoesNotExist:
return None
@classmethod
def by_phone(cls, phone_number, expire_duplicates=True):
"""
Look up the unexpired invitation for the given phone number.
In the case of duplicates, only the most recent invitation
is returned.
If expire_duplicates is True, then any duplicates are automatically
expired.
Returns the invitation, or None if no unexpired invitations exist.
"""
phone_number = apply_leniency(phone_number)
result = cls.get_unexpired_invitations(phone_number).order_by('-created_date')
if len(result) == 0:
return None
invitation = result[0]
if expire_duplicates and len(result) > 1:
for i in result[1:]:
i.expire()
return invitation
@classmethod
def get_app_odk_url(cls, domain, app_id):
"""
Get the latest starred build (or latest build if none are
        starred) for the app and return its ODK install URL.
"""
app = get_app(domain, app_id, latest=True)
if not app.copy_of:
# If latest starred build is not found, use the latest build
app = get_app(domain, app_id, latest=True, target='build')
if not app.copy_of:
# If no build is found, return None
return None
return app.get_short_odk_url(with_media=True)
@classmethod
def initiate_workflow(cls, domain, users, app_id=None,
days_until_expiration=30, custom_first_message=None,
android_only=False, require_email=False):
"""
If app_id is passed in, then an additional SMS will be sent to Android
phones containing a link to the latest starred build (or latest
build if no starred build exists) for the app. Once ODK is installed,
it will automatically search for this SMS and install this app.
If app_id is left blank, the additional SMS is not sent, and once
ODK is installed it just skips the automatic app install step.
"""
success_numbers = []
invalid_format_numbers = []
numbers_in_use = []
for user_info in users:
phone_number = apply_leniency(user_info.phone_number)
try:
CommCareMobileContactMixin.validate_number_format(phone_number)
except InvalidFormatException:
invalid_format_numbers.append(phone_number)
continue
if PhoneNumber.by_phone(phone_number, include_pending=True):
numbers_in_use.append(phone_number)
continue
cls.expire_invitations(phone_number)
expiration_date = (datetime.utcnow().date() +
timedelta(days=days_until_expiration))
invitation = cls(
domain=domain,
phone_number=phone_number,
token=uuid.uuid4().hex,
app_id=app_id,
expiration_date=expiration_date,
created_date=datetime.utcnow(),
android_only=android_only,
require_email=require_email,
custom_user_data=user_info.custom_user_data or {},
)
if android_only:
invitation.phone_type = cls.PHONE_TYPE_ANDROID
invitation.save()
invitation.send_step1_sms(custom_first_message)
success_numbers.append(phone_number)
return (success_numbers, invalid_format_numbers, numbers_in_use)
@classmethod
def send_install_link(cls, domain, users, app_id, custom_message=None):
"""
This method sends two SMS to each user: 1) an SMS with the link to the
        Google Play store to install CommCare, and 2) an install SMS for the
given app.
Use this method to reinstall CommCare on a user's phone. The user must
already have a mobile worker account. If the user doesn't yet have a
mobile worker account, use SelfRegistrationInvitation.initiate_workflow()
so that they can set one up as part of the process.
:param domain: the name of the domain this request is for
:param users: a list of SelfRegistrationUserInfo objects
:param app_id: the app_id of the app for which to send the install link
:param custom_message: (optional) a custom message to use when sending the
Google Play URL.
"""
from corehq.apps.sms.api import send_sms, send_sms_to_verified_number
if custom_message:
custom_message = custom_message.format(GOOGLE_PLAY_STORE_COMMCARE_URL)
domain_translated_message = custom_message or get_message(
MSG_REGISTRATION_INSTALL_COMMCARE,
domain=domain,
context=(GOOGLE_PLAY_STORE_COMMCARE_URL,)
)
sms_install_link = cls.get_sms_install_link(domain, app_id)
success_numbers = []
invalid_format_numbers = []
error_numbers = []
for user in users:
try:
CommCareMobileContactMixin.validate_number_format(user.phone_number)
except InvalidFormatException:
invalid_format_numbers.append(user.phone_number)
continue
phone_number = PhoneNumber.by_phone(user.phone_number)
if phone_number:
if phone_number.domain != domain:
error_numbers.append(user.phone_number)
continue
user_translated_message = custom_message or get_message(
MSG_REGISTRATION_INSTALL_COMMCARE,
verified_number=phone_number,
context=(GOOGLE_PLAY_STORE_COMMCARE_URL,)
)
send_sms_to_verified_number(phone_number, user_translated_message)
send_sms_to_verified_number(phone_number, sms_install_link)
else:
send_sms(domain, None, user.phone_number, domain_translated_message)
send_sms(domain, None, user.phone_number, sms_install_link)
success_numbers.append(user.phone_number)
return (success_numbers, invalid_format_numbers, error_numbers)
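# Illustrative sketch of the two entry points above: invite new mobile workers
# by SMS, and push an app install link to workers who already have accounts.
# The domain is a hypothetical placeholder; ``users`` is assumed to be a list
# of SelfRegistrationUserInfo objects as described in the docstrings.
def _example_self_registration(users, app_id):
    invited, bad_format, in_use = SelfRegistrationInvitation.initiate_workflow(
        'example-domain', users, app_id=app_id, days_until_expiration=30)
    sent, bad_format_resend, errors = SelfRegistrationInvitation.send_install_link(
        'example-domain', users, app_id)
    return invited, sent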
class ActiveMobileBackendManager(models.Manager):
def get_queryset(self):
return super(ActiveMobileBackendManager, self).get_queryset().filter(deleted=False)
class SQLMobileBackend(UUIDGeneratorMixin, models.Model):
SMS = 'SMS'
IVR = 'IVR'
TYPE_CHOICES = (
(SMS, ugettext_lazy('SMS')),
(IVR, ugettext_lazy('IVR')),
)
UUIDS_TO_GENERATE = ['couch_id', 'inbound_api_key']
objects = models.Manager()
active_objects = ActiveMobileBackendManager()
# We can't really get rid of this until all the messaging models are in
# postgres. Once that happens we can migrate references to the couch_id
# as a foreign key to postgres id and get rid of this field.
couch_id = models.CharField(max_length=126, db_index=True, unique=True)
backend_type = models.CharField(max_length=3, choices=TYPE_CHOICES, default=SMS)
# This is an api key that the gateway uses when making inbound requests to hq.
# This enforces gateway security and also allows us to tie every inbound request
# to a specific backend.
inbound_api_key = models.CharField(max_length=126, unique=True, db_index=True)
# This tells us which type of backend this is
hq_api_id = models.CharField(max_length=126, null=True)
# Global backends are system owned and can be used by anyone
is_global = models.BooleanField(default=False)
# This is the domain that the backend belongs to, or None for
# global backends
domain = models.CharField(max_length=126, null=True, db_index=True)
# A short name for a backend instance which is referenced when
# setting a case contact's preferred backend
name = models.CharField(max_length=126)
# Simple name to display to users - e.g. "Twilio"
display_name = models.CharField(max_length=126, null=True)
# Optionally, a description of this backend
description = models.TextField(null=True)
# A JSON list of countries that this backend supports.
# This information is displayed in the gateway list UI.
# If this backend represents an international gateway,
# set this to: ["*"]
supported_countries = jsonfield.JSONField(default=list)
# To avoid having many tables with so few records in them, all
# SMS backends are stored in this same table. This field is a
# JSON dict which stores any additional fields that the SMS
# backend subclasses need.
# NOTE: Do not access this field directly, instead use get_extra_fields()
# and set_extra_fields()
extra_fields = jsonfield.JSONField(default=dict)
# For a historical view of sms data, we can't delete backends.
# Instead, set a deleted flag when a backend should no longer be used.
deleted = models.BooleanField(default=False)
# If the backend uses load balancing, this is a JSON list of the
# phone numbers to load balance over.
load_balancing_numbers = jsonfield.JSONField(default=list)
# The phone number which you can text to or call in order to reply
# to this backend
reply_to_phone_number = models.CharField(max_length=126, null=True)
class Meta:
db_table = 'messaging_mobilebackend'
app_label = 'sms'
@quickcache(['self.pk', 'domain'], timeout=5 * 60)
def domain_is_shared(self, domain):
"""
        Returns True if this backend has been shared with the given domain and
        the domain has accepted the invitation.
"""
count = self.mobilebackendinvitation_set.filter(domain=domain, accepted=True).count()
return count > 0
def domain_is_authorized(self, domain):
"""
Returns True if the given domain is authorized to use this backend.
"""
return (self.is_global or
domain == self.domain or
self.domain_is_shared(domain))
@classmethod
def name_is_unique(cls, name, domain=None, backend_id=None):
if domain:
result = cls.objects.filter(
is_global=False,
domain=domain,
name=name,
deleted=False
)
else:
result = cls.objects.filter(
is_global=True,
name=name,
deleted=False
)
result = result.values_list('id', flat=True)
if len(result) == 0:
return True
if len(result) == 1:
return result[0] == backend_id
return False
def get_authorized_domain_list(self):
return (self.mobilebackendinvitation_set.filter(accepted=True)
.order_by('domain').values_list('domain', flat=True))
@classmethod
def get_domain_backends(cls, backend_type, domain, count_only=False, offset=None, limit=None):
"""
Returns all the backends that the given domain has access to (that is,
owned backends, shared backends, and global backends).
"""
domain_owned_backends = models.Q(is_global=False, domain=domain)
domain_shared_backends = models.Q(
is_global=False,
mobilebackendinvitation__domain=domain,
mobilebackendinvitation__accepted=True
)
global_backends = models.Q(is_global=True)
# The left join to MobileBackendInvitation may cause there to be
# duplicates here, so we need to call .distinct()
result = SQLMobileBackend.objects.filter(
(domain_owned_backends | domain_shared_backends | global_backends),
deleted=False,
backend_type=backend_type
).distinct()
if count_only:
return result.count()
result = result.order_by('name').values_list('id', flat=True)
if offset is not None and limit is not None:
result = result[offset:offset + limit]
return [cls.load(pk) for pk in result]
@classmethod
def get_global_backends_for_this_class(cls, backend_type):
return cls.objects.filter(
is_global=True,
deleted=False,
backend_type=backend_type,
hq_api_id=cls.get_api_id()
).all()
@classmethod
def get_global_backend_ids(cls, backend_type, couch_id=False):
id_field = 'couch_id' if couch_id else 'id'
return SQLMobileBackend.active_objects.filter(
backend_type=backend_type,
is_global=True
).values_list(id_field, flat=True)
@classmethod
def get_global_backends(cls, backend_type, count_only=False, offset=None, limit=None):
result = SQLMobileBackend.objects.filter(
is_global=True,
deleted=False,
backend_type=backend_type
)
if count_only:
return result.count()
result = result.order_by('name').values_list('id', flat=True)
if offset is not None and limit is not None:
result = result[offset:offset + limit]
return [cls.load(pk) for pk in result]
@classmethod
def get_domain_default_backend(cls, backend_type, domain, id_only=False):
result = SQLMobileBackendMapping.objects.filter(
is_global=False,
domain=domain,
backend_type=backend_type,
prefix='*'
).values_list('backend_id', flat=True)
if len(result) > 1:
raise cls.MultipleObjectsReturned(
"More than one default backend found for backend_type %s, "
"domain %s" % (backend_type, domain)
)
elif len(result) == 1:
if id_only:
return result[0]
else:
return cls.load(result[0])
else:
return None
@classmethod
def load_default_backend(cls, backend_type, phone_number, domain=None):
"""
Chooses the appropriate backend based on the phone number's
prefix, or returns None if no catch-all backend is configured.
backend_type - SQLMobileBackend.SMS or SQLMobileBackend.IVR
phone_number - the phone number
domain - pass in a domain to choose the default backend from the domain's
configured backends, otherwise leave None to choose from the
system's configured backends
"""
backend_map = SQLMobileBackendMapping.get_prefix_to_backend_map(
backend_type, domain=domain)
backend_id = backend_map.get_backend_id_by_prefix(phone_number)
if backend_id:
return cls.load(backend_id)
return None
@classmethod
def load_default_by_phone_and_domain(cls, backend_type, phone_number, domain=None):
"""
Get the appropriate outbound backend to communicate with phone_number.
backend_type - SQLMobileBackend.SMS or SQLMobileBackend.IVR
phone_number - the phone number
domain - the domain
"""
backend = None
if domain:
backend = cls.load_default_backend(backend_type, phone_number, domain=domain)
if not backend:
backend = cls.load_default_backend(backend_type, phone_number)
if not backend:
raise BadSMSConfigException("No suitable backend found for phone "
"number and domain %s, %s" %
(phone_number, domain))
return backend
@classmethod
@quickcache(['hq_api_id', 'inbound_api_key'], timeout=60 * 60)
def get_backend_info_by_api_key(cls, hq_api_id, inbound_api_key):
"""
Looks up a backend by inbound_api_key and returns a tuple of
(domain, couch_id). Including hq_api_id in the filter is an
implicit way of making sure that the returned backend info belongs
to a backend of that type.
(The entire backend is not returned to reduce the amount of data
needed to be returned by the cache)
Raises cls.DoesNotExist if not found.
"""
result = (cls.active_objects
.filter(hq_api_id=hq_api_id, inbound_api_key=inbound_api_key)
.values_list('domain', 'couch_id'))
if len(result) == 0:
raise cls.DoesNotExist
return result[0]
@classmethod
@quickcache(['backend_id', 'is_couch_id'], timeout=60 * 60)
def get_backend_api_id(cls, backend_id, is_couch_id=False):
filter_args = {'couch_id': backend_id} if is_couch_id else {'pk': backend_id}
result = (cls.active_objects
.filter(**filter_args)
.values_list('hq_api_id', flat=True))
if len(result) == 0:
raise cls.DoesNotExist
return result[0]
@classmethod
@quickcache(['backend_id', 'is_couch_id', 'include_deleted'], timeout=5 * 60)
def load(cls, backend_id, api_id=None, is_couch_id=False, include_deleted=False):
"""
backend_id - the pk of the SQLMobileBackend to load
api_id - if you know the hq_api_id of the SQLMobileBackend, pass it
here for a faster lookup; otherwise, it will be looked up
automatically
is_couch_id - if True, then backend_id should be the couch_id to use
during lookup instead of the postgres model's pk;
we have to support both for a little while until all
foreign keys are migrated over
"""
backend_classes = smsutil.get_backend_classes()
api_id = api_id or cls.get_backend_api_id(backend_id, is_couch_id=is_couch_id)
if api_id not in backend_classes:
raise BadSMSConfigException("Unexpected backend api id found '%s' for "
"backend '%s'" % (api_id, backend_id))
klass = backend_classes[api_id]
if include_deleted:
result = klass.objects
else:
result = klass.active_objects
if is_couch_id:
return result.get(couch_id=backend_id)
else:
return result.get(pk=backend_id)
@classmethod
def get_backend_from_id_and_api_id_result(cls, result):
if len(result) > 0:
return cls.load(result[0]['id'], api_id=result[0]['hq_api_id'])
return None
@classmethod
def get_owned_backend_by_name(cls, backend_type, domain, name):
name = name.strip().upper()
result = cls.active_objects.filter(
is_global=False,
backend_type=backend_type,
domain=domain,
name=name
).values('id', 'hq_api_id')
return cls.get_backend_from_id_and_api_id_result(result)
@classmethod
def get_shared_backend_by_name(cls, backend_type, domain, name):
name = name.strip().upper()
result = cls.active_objects.filter(
is_global=False,
backend_type=backend_type,
mobilebackendinvitation__domain=domain,
mobilebackendinvitation__accepted=True,
name=name
).values('id', 'hq_api_id').order_by('domain')
return cls.get_backend_from_id_and_api_id_result(result)
@classmethod
def get_global_backend_by_name(cls, backend_type, name):
name = name.strip().upper()
result = cls.active_objects.filter(
is_global=True,
backend_type=backend_type,
name=name
).values('id', 'hq_api_id')
return cls.get_backend_from_id_and_api_id_result(result)
@classmethod
def load_by_name(cls, backend_type, domain, name):
"""
Attempts to load the backend with the given name.
If no matching backend is found, a BadSMSConfigException is raised.
backend_type - SQLMobileBackend.SMS or SQLMobileBackend.IVR
domain - the domain
name - the name of the backend (corresponding to SQLMobileBackend.name)
"""
backend = cls.get_owned_backend_by_name(backend_type, domain, name)
if not backend:
backend = cls.get_shared_backend_by_name(backend_type, domain, name)
if not backend:
backend = cls.get_global_backend_by_name(backend_type, name)
if not backend:
raise BadSMSConfigException("Could not find %s backend '%s' from "
"domain '%s'" % (backend_type, name, domain))
return backend
@classmethod
def get_api_id(cls):
"""
This method should return the backend's api id.
"""
raise NotImplementedError("Please implement this method")
@classmethod
def get_generic_name(cls):
"""
This method should return a descriptive name for this backend
(such as "Unicel" or "Tropo"), for use in identifying it to an end user.
"""
raise NotImplementedError("Please implement this method")
@classmethod
def get_form_class(cls):
"""
This method should return a subclass of corehq.apps.sms.forms.BackendForm
"""
raise NotImplementedError("Please implement this method")
@classmethod
def get_available_extra_fields(cls):
"""
Should return a list of field names that are the keys in
the extra_fields dict.
"""
raise NotImplementedError("Please implement this method")
@property
def config(self):
"""
Returns self.get_extra_fields() converted into a namedtuple so that
you can reference self.config.gateway_user_id, for example,
instead of self.get_extra_fields()['gateway_user_id']
"""
BackendConfig = namedtuple('BackendConfig', self.get_available_extra_fields())
return BackendConfig(**self.get_extra_fields())
def get_extra_fields(self):
result = {field: None for field in self.get_available_extra_fields()}
result.update(self.extra_fields)
return result
def set_extra_fields(self, **kwargs):
"""
Only updates the fields that are passed as kwargs, and leaves
the rest untouched.
"""
result = self.get_extra_fields()
for k, v in kwargs.iteritems():
if k not in self.get_available_extra_fields():
raise Exception("Field %s is not an available extra field for %s"
% (k, self.__class__.__name__))
result[k] = v
self.extra_fields = result
def __clear_shared_domain_cache(self, new_domains):
current_domains = self.mobilebackendinvitation_set.values_list('domain', flat=True)
# Clear the cache for domains in new_domains or current_domains, but not both
for domain in set(current_domains) ^ set(new_domains):
self.domain_is_shared.clear(self, domain)
def set_shared_domains(self, domains):
if self.id is None:
raise Exception("Please call .save() on the backend before "
"calling set_shared_domains()")
with transaction.atomic():
self.__clear_shared_domain_cache(domains)
self.mobilebackendinvitation_set.all().delete()
for domain in domains:
MobileBackendInvitation.objects.create(
domain=domain,
accepted=True,
backend=self,
)
def soft_delete(self):
with transaction.atomic():
self.deleted = True
self.__clear_shared_domain_cache([])
self.mobilebackendinvitation_set.all().delete()
for mapping in self.sqlmobilebackendmapping_set.all():
# Delete one at a time so the backend map cache gets cleared
# for the respective domain(s)
mapping.delete()
self.save()
def __clear_caches(self):
if self.pk:
self.load.clear(SQLMobileBackend, self.pk, is_couch_id=False)
self.get_backend_api_id.clear(SQLMobileBackend, self.pk, is_couch_id=False)
if self.couch_id:
self.load.clear(SQLMobileBackend, self.couch_id, is_couch_id=True)
self.get_backend_api_id.clear(SQLMobileBackend, self.couch_id, is_couch_id=True)
def save(self, *args, **kwargs):
self.__clear_caches()
return super(SQLMobileBackend, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
self.__clear_caches()
return super(SQLMobileBackend, self).delete(*args, **kwargs)
class SQLSMSBackend(SQLMobileBackend):
class Meta:
proxy = True
app_label = 'sms'
def get_sms_rate_limit(self):
"""
Override to use rate limiting. Return None to not use rate limiting,
otherwise return the maximum number of SMS that should be sent by
this backend instance in a one minute period.
"""
return None
def send(self, msg, *args, **kwargs):
raise NotImplementedError("Please implement this method.")
@classmethod
def get_opt_in_keywords(cls):
"""
Override to specify a set of opt-in keywords to use for this
backend type.
"""
return []
@classmethod
def get_opt_out_keywords(cls):
"""
Override to specify a set of opt-out keywords to use for this
backend type.
"""
return []
class PhoneLoadBalancingMixin(object):
"""
If you need a backend to balance the outbound SMS load over a set of
phone numbers, use this mixin. To use it:
1) Include this mixin in your backend class.
2) Have the send() method expect an orig_phone_number kwarg, which will
be the phone number to send from. This parameter is always sent in for
instances of PhoneLoadBalancingMixin, even if there's just one phone
number in self.load_balancing_numbers.
3) Have the backend's form class use the LoadBalancingBackendFormMixin to
automatically set the load balancing phone numbers in the UI.
If the backend also uses rate limiting, then each phone number is rate
limited separately as you would expect.
(We could also just define these methods on the backend class itself, but
it's useful in other parts of the framework to check if a backend
is an instance of this mixin for performing various operations.)
"""
def get_load_balance_redis_key(self):
return 'load-balance-phones-for-backend-%s' % self.pk
def get_next_phone_number(self):
if (
not isinstance(self.load_balancing_numbers, list) or
len(self.load_balancing_numbers) == 0
):
raise Exception("Expected load_balancing_numbers to not be "
"empty for backend %s" % self.pk)
if len(self.load_balancing_numbers) == 1:
# If there's just one number, no need to go through the
# process to figure out which one is next.
return self.load_balancing_numbers[0]
redis_key = self.get_load_balance_redis_key()
return load_balance(redis_key, self.load_balancing_numbers)
class BackendMap(object):
def __init__(self, catchall_backend_id, backend_map):
"""
catchall_backend_id - the pk of the backend that is the default if
no prefixes match (can be None if there is no
catch all)
backend_map - a dictionary of {prefix: backend pk} which
maps a phone prefix to the backend which should be
used for that phone prefix
"""
self.catchall_backend_id = catchall_backend_id
self.backend_map_dict = backend_map
self.backend_map_tuples = backend_map.items()
# Sort by length of prefix descending
self.backend_map_tuples.sort(key=lambda x: len(x[0]), reverse=True)
def get_backend_id_by_prefix(self, phone_number):
phone_number = smsutil.strip_plus(phone_number)
for prefix, backend_id in self.backend_map_tuples:
if phone_number.startswith(prefix):
return backend_id
return self.catchall_backend_id
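# Hedged illustration (not part of the original module): BackendMap returns the backend id
# whose prefix is the longest match for the phone number, falling back to the catch-all id.
# The prefixes and ids below are made-up example values.
def _example_prefix_routing():
    example_map = BackendMap(catchall_backend_id=1, backend_map={'256': 2, '25677': 3})
    # '+256771234567' matches the longer prefix '25677', so backend id 3 is returned
    return example_map.get_backend_id_by_prefix('+256771234567')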
class SQLMobileBackendMapping(models.Model):
"""
A SQLMobileBackendMapping instance is used to map SMS or IVR traffic
to a given backend based on phone prefix.
"""
class Meta:
db_table = 'messaging_mobilebackendmapping'
app_label = 'sms'
unique_together = ('domain', 'backend_type', 'prefix')
couch_id = models.CharField(max_length=126, null=True, db_index=True)
# True if this mapping applies globally (system-wide). False if it only applies
# to a domain
is_global = models.BooleanField(default=False)
# The domain for which this mapping is valid; ignored if is_global is True
domain = models.CharField(max_length=126, null=True)
# Specifies whether this mapping is valid for SMS or IVR backends
backend_type = models.CharField(max_length=3, choices=SQLMobileBackend.TYPE_CHOICES)
# The phone prefix, or '*' for catch-all
prefix = models.CharField(max_length=25)
# The backend to use for the given phone prefix
backend = models.ForeignKey('SQLMobileBackend', on_delete=models.CASCADE)
@classmethod
def __set_default_domain_backend(cls, domain, backend_type, backend=None):
fields = dict(
is_global=False,
domain=domain,
backend_type=backend_type,
prefix='*'
)
obj = None
try:
# We can't use get_or_create because backend is a
# required field
obj = cls.objects.get(**fields)
except cls.DoesNotExist:
pass
if not backend:
if obj:
obj.delete()
return
if not obj:
obj = cls(**fields)
obj.backend = backend
obj.save()
@classmethod
def set_default_domain_backend(cls, domain, backend, backend_type=SQLMobileBackend.SMS):
cls.__set_default_domain_backend(domain, backend_type, backend=backend)
@classmethod
def unset_default_domain_backend(cls, domain, backend_type=SQLMobileBackend.SMS):
cls.__set_default_domain_backend(domain, backend_type)
@classmethod
@quickcache(['backend_type', 'domain'], timeout=5 * 60)
def get_prefix_to_backend_map(cls, backend_type, domain=None):
"""
backend_type - SQLMobileBackend.SMS or SQLMobileBackend.IVR
domain - the domain for which to retrieve the backend map, otherwise if left None
the global backend map will be returned.
Returns a BackendMap
"""
if domain:
filter_args = {'backend_type': backend_type, 'is_global': False, 'domain': domain}
else:
filter_args = {'backend_type': backend_type, 'is_global': True}
catchall_backend_id = None
backend_map = {}
for instance in cls.objects.filter(**filter_args):
if instance.prefix == '*':
catchall_backend_id = instance.backend_id
else:
backend_map[instance.prefix] = instance.backend_id
return BackendMap(catchall_backend_id, backend_map)
def __clear_prefix_to_backend_map_cache(self):
self.get_prefix_to_backend_map.clear(self.__class__, self.backend_type, domain=self.domain)
def save(self, *args, **kwargs):
self.__clear_prefix_to_backend_map_cache()
return super(SQLMobileBackendMapping, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
self.__clear_prefix_to_backend_map_cache()
return super(SQLMobileBackendMapping, self).delete(*args, **kwargs)
class MobileBackendInvitation(models.Model):
class Meta:
db_table = 'messaging_mobilebackendinvitation'
app_label = 'sms'
unique_together = ('backend', 'domain')
# The domain that is being invited to share another domain's backend
domain = models.CharField(max_length=126, null=True, db_index=True)
# The backend that is being shared
backend = models.ForeignKey('SQLMobileBackend', on_delete=models.CASCADE)
accepted = models.BooleanField(default=False)
class MigrationStatus(models.Model):
"""
A model to keep track of whether certain messaging migrations have
been run yet or not.
"""
MIGRATION_BACKEND = 'backend'
MIGRATION_BACKEND_MAP = 'backend_map'
MIGRATION_DOMAIN_DEFAULT_BACKEND = 'domain_default_backend'
MIGRATION_LOGS = 'logs'
MIGRATION_PHONE_NUMBERS = 'phone_numbers'
class Meta:
db_table = 'messaging_migrationstatus'
app_label = "sms"
# The name of the migration (one of the MIGRATION_* constants above)
name = models.CharField(max_length=126)
# The timestamp that the migration was run
timestamp = models.DateTimeField(null=True)
@classmethod
def set_migration_completed(cls, name):
obj, created = cls.objects.get_or_create(name=name)
obj.timestamp = datetime.utcnow()
obj.save()
@classmethod
def has_migration_completed(cls, name):
try:
cls.objects.get(name=name)
return True
except cls.DoesNotExist:
return False
from corehq.apps.sms import signals
| bsd-3-clause | 3,594,400,381,136,475,000 | 36.340296 | 104 | 0.62951 | false |
faustedition/faust-base | faust-utils/static-publication/render-transcripts.py | 1 | 6900 | import requests, urllib, os, os.path, sys, tempfile, shutil
from subprocess import call, check_call
latex_header = """\\documentclass[11pt,oneside]{book}
\\usepackage{makeidx}
\\usepackage{graphicx}
\\usepackage[german]{babel}
\\usepackage[utf8]{inputenc}
\usepackage[hmargin=1cm,vmargin=1.5cm]{geometry}
\usepackage{hyperref}
\hypersetup{
colorlinks,
citecolor=black,
filecolor=black,
linkcolor=black,
urlcolor=black
}
\DeclareUnicodeCharacter{00D7}{x}
\DeclareUnicodeCharacter{00A0}{~}
\\begin{document}
\\author{Johann Wolfgang Goethe}
\\title{Faust. Historisch-kritische Ausgabe}
\\date{\\today}
\\maketitle
\\frontmatter
\\setcounter{secnumdepth}{0}
\\setcounter{tocdepth}{1}
\\tableofcontents
\\mainmatter
\\chapter{Handschriften}
"""
latex_footer = """
\\backmatter
\\printindex
\\end{document}
"""
def extract_pages(mu):
result = [];
if mu['type'] == 'page':
# print " seite: " + str(mu['transcript'] if 'transcript' in mu else '--')
result.append(mu)
for child in mu['contents']:
result.extend(extract_pages(child))
return result
def get_pageurls(url):
answer = requests.get(url).json()
answer_pages = extract_pages(answer)
return [a_page['transcript']['source'] for a_page in answer_pages if 'transcript' in a_page]
def get_doc_src(doc_data):
doc_src = doc_data['document-source']
return doc_src if doc_src else "Keine URI"
def quote_filename(filename):
return urllib.quote_plus(filename.encode('utf-8').replace('.', '_') + u'.png').replace('%', '_')
def generate_out_filepath(page_url, tmp_dir):
out_filename = quote_filename(page_url)
return os.path.join(tmp_dir, 'graphics', out_filename)
def render_document(url, tmp_dir):
print "document ", url
for (i, page_url) in enumerate(get_pageurls(url)):
#pagenumbers starting from 1
pagenum = i + 1
out_filepath = generate_out_filepath(page_url, tmp_dir)
print " rendering page ", pagenum, ": ", page_url
if not page_url == 'faust://self/none/':
if not os.path.exists(out_filepath):
print " (rendering to " + out_filepath + ")"
check_call(['phantomjs', 'render-transcript.js', url + '?view=transcript-bare#' + str(i+1), out_filepath])
check_call(['mogrify', '-resize', '6000x6000', out_filepath])
else:
print " (already exists at " + out_filepath + ")"
def latex_escape_text(text):
return text\
.replace('#', '\\#')\
.replace('$', '\\$')\
.replace('%', '\\%')\
.replace('&', '\\&')\
.replace('\\', '\\textbackslash{}')\
.replace('^', '\\textasciicircum{}')\
.replace('_', '\\_')\
.replace('{', '\\{')\
.replace('}', '\\}')\
.replace('~', '\\textasciitilde{}')\
.replace('-', '\\textendash{}')\
.replace(u'\u03B1', '\\ensuremath{\\alpha}')\
.replace(u'\u03B2', '\\ensuremath{\\beta}')\
.replace(u'\u03B3', '\\ensuremath{\\gamma}')\
.replace(u'\u03B4', '\\ensuremath{\\delta}')
def metadata_if_exists(value):
return u'\\noindent{}' + latex_escape_text(value) + u'\n\n' if value and value != "none" else ""
def generate_document_overview(url, doc_data):
result = u''
doc_src = get_doc_src(doc_data)
result = result + u'\clearpage\n'
result = result + u'\\vfill{}\n'
result = result + u'\section{' + latex_escape_text(doc_data['name']) + u'}\n\n\n'
result = result + metadata_if_exists(doc_data['callnumber.wa-faust'])
result = result + metadata_if_exists(doc_data['callnumber.gsa-1'])
result = result + metadata_if_exists(doc_data['callnumber.gsa-2'])
result = result + u'\\begin{verbatim}\n' + doc_src + u'\n\\end{verbatim}\n\n'
num_pages = len(get_pageurls(url))
result = result + str(num_pages) + u' Seiten\n\n'
#result = result + u'\\begin{verbatim}\n'
#if doc_data['note']: result = result + doc_data['note'] + u'\n'
#result = result + u'\\end{verbatim}\n'
result = result + u'\\vfill\n{}'
return result
def generate_latex(manuscript_urls, tmp_dir):
result = ''
for url in manuscript_urls:
try:
doc_data = requests.get(url).json()
result = result + generate_document_overview(url, doc_data)
for (i, page_url) in enumerate(get_pageurls(url)):
pagenum = i + 1
#if pagenum != 1:
result = result + u'\clearpage\n'
result = result + u'\subsection{Seite ' + str(pagenum) + "}\n"
result = result + u'\\vfill{}\n'
# TODO hack
if "self/none" in page_url:
result = result + u"[Leere Seite]"
else:
transcript_graphic_path = generate_out_filepath(page_url, tmp_dir)
if os.path.exists(transcript_graphic_path):
result = result + u'\centering\includegraphics[width=\\linewidth,height=0.9\\textheight,keepaspectratio]{' + transcript_graphic_path + u'}\n'
else:
result = result + u'[Fehler beim generieren des Transkripts]'
except Exception as e:
#result = result + 'Fehler beim Einlesen der Handschriftenbeschreibung \n\n'
print "Error: ", e
return result
def main():
if len(sys.argv) < 3 or len(sys.argv) > 4:
print 'usage: render-transcripts.py manuscript_list pdf_result [tmp_dir]'
print ' tmp_dir caches rendered graphics to be reused'
exit(-1)
manuscript_list = os.path.abspath(sys.argv[1])
pdf_result = os.path.abspath(sys.argv[2])
tmp_dir = os.path.abspath(sys.argv[3]) if len(sys.argv) > 3 else tempfile.mkdtemp()
if not os.path.isdir(tmp_dir):
os.mkdir(tmp_dir)
manuscript_urls = []
for line in open(manuscript_list).read().splitlines():
manuscript_urls.append(line)
for url in manuscript_urls:
try:
render_document(url, tmp_dir)
except Exception as e:
print "Error rendering document: ", e
latex_tmp_dir = tempfile.mkdtemp()
latex_filename = os.path.join(latex_tmp_dir, 'faust.tex')
latex_out = open(latex_filename, 'w')
print "writing latex to " + latex_filename
latex_out.write(latex_header)
latex_out.write(generate_latex(manuscript_urls, tmp_dir).encode('utf-8'))
latex_out.write(latex_footer)
latex_out.close()
os.chdir(latex_tmp_dir)
# twice for toc indexing
check_call(['pdflatex', '-output-directory ' + latex_tmp_dir, latex_filename])
check_call(['pdflatex', '-output-directory ' + latex_tmp_dir, latex_filename])
shutil.copyfile(os.path.join(latex_tmp_dir, "faust.pdf"), pdf_result)
if __name__ == '__main__':
main()
| agpl-3.0 | 1,083,686,129,250,725,600 | 35.125654 | 166 | 0.591739 | false |
callofdutyops/YXH2016724098982 | eye_multi_gpu_train.py | 1 | 9869 | """A binary to train eye using multiple GPU's with synchronous updates.
Just for experimence because I don't have the computer comparable wich GPU computing.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import re
import time
from datetime import datetime
import numpy as np
import tensorflow as tf
import eye_model
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/eye_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 1000000,
"""Number of batches to run.""")
tf.app.flags.DEFINE_integer('num_gpus', 1,
"""How many GPUs to use.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
def tower_loss(scope):
"""Calculate the total loss on a single tower running the eye model.
Args:
scope: unique prefix string identifying the eye tower, e.g. 'tower_0'
Returns:
Tensor of shape [] containing the total loss for a batch of data
"""
# Get images and labels of eye.
images, labels = eye_model.distorted_inputs()
# Build inference Graph.
logits = eye_model.inference(images)
# Build the portion of the Graph calculating the losses. Note that we will
# assemble the total_loss using a custom function below.
_ = eye_model.loss(logits, labels)
# Assemble all of the losses for the current tower only.
losses = tf.get_collection('losses', scope)
# Calculate the total loss for the current tower.
total_loss = tf.add_n(losses, name='total_loss')
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
loss_name = re.sub('%s_[0-9]*/' % eye_model.TOWER_NAME, '', l.op.name)
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.scalar_summary(loss_name + ' (raw)', l)
tf.scalar_summary(loss_name, loss_averages.average(l))
with tf.control_dependencies([loss_averages_op]):
total_loss = tf.identity(total_loss)
return total_loss
def average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(0, grads)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
def train():
"""Train eye images for a number of steps."""
with tf.Graph().as_default(), tf.device('/cpu:0'):
# Create a variable to count the number of train() calls. This equals the
# number of batches processed * FLAGS.num_gpus.
global_step = tf.get_variable(
'global_step', [],
initializer=tf.constant_initializer(0), trainable=False)
# Calculate the learning rate schedule.
num_batches_per_epoch = (eye_model.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN /
FLAGS.batch_size)
decay_steps = int(num_batches_per_epoch * eye_model.NUM_EPOCHS_PER_DECAY)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(eye_model.INITIAL_LEARNING_RATE,
global_step,
decay_steps,
eye_model.LEARNING_RATE_DECAY_FACTOR,
staircase=True)
# Create an optimizer that performs gradient descent.
opt = tf.train.GradientDescentOptimizer(lr)
# Calculate the gradients for each model tower.
tower_grads = []
for i in range(FLAGS.num_gpus):
with tf.device('/gpu:%d' % i):
with tf.name_scope('%s_%d' % (eye_model.TOWER_NAME, i)) as scope:
# Calculate the loss for one tower of the eye model. This function
# constructs the entire eye model but shares the variables across
# all towers.
loss = tower_loss(scope)
# Reuse variables for the next tower.
tf.get_variable_scope().reuse_variables()
# Retain the summaries from the final tower.
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
# Calculate the gradients for the batch of data on this eye tower.
grads = opt.compute_gradients(loss)
# Keep track of the gradients across all towers.
tower_grads.append(grads)
# We must calculate the mean of each gradient. Note that this is the
# synchronization point across all towers.
grads = average_gradients(tower_grads)
# Add a summary to track the learning rate.
summaries.append(tf.scalar_summary('learning_rate', lr))
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
summaries.append(
tf.histogram_summary(var.op.name + '/gradients', grad))
# Apply the gradients to adjust the shared variables.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
summaries.append(tf.histogram_summary(var.op.name, var))
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
eye_model.MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
# Group all updates to into a single train op.
train_op = tf.group(apply_gradient_op, variables_averages_op)
# Create a saver.
saver = tf.train.Saver(tf.all_variables())
# Build the summary operation from the last tower summaries.
summary_op = tf.merge_summary(summaries)
# Build an initialization operation to run below.
init = tf.initialize_all_variables()
# Start running operations on the Graph. allow_soft_placement must be set to
# True to build towers on GPU, as some of the ops do not have GPU
# implementations.
sess = tf.Session(config=tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=FLAGS.log_device_placement))
sess.run(init)
# Start the queue runners.
tf.train.start_queue_runners(sess=sess)
summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)
for step in range(FLAGS.max_steps):
start_time = time.time()
_, loss_value = sess.run([train_op, loss])
duration = time.time() - start_time
assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
if step % 10 == 0:
num_examples_per_step = FLAGS.batch_size * FLAGS.num_gpus
examples_per_sec = num_examples_per_step / duration
sec_per_batch = duration / FLAGS.num_gpus
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print(format_str % (datetime.now(), step, loss_value,
examples_per_sec, sec_per_batch))
if step % 100 == 0:
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str, step)
# Save the model checkpoint periodically.
if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
def main(argv=None): # pylint: disable=unused-argument
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
tf.app.run()
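# Hedged usage note (not in the original file): the flags defined above allow an invocation
# along the lines of, for example:
#     python eye_multi_gpu_train.py --num_gpus=2 --max_steps=100000 --train_dir=/tmp/eye_train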
| mit | -6,208,120,418,257,338,000 | 39.446721 | 88 | 0.608471 | false |
ASU-CompMethodsPhysics-PHY494/final-rendezvous-with-ramageddon | Submission/getcoordinates.py | 1 | 8715 | #Copyright (C) 2016 Jordan Boyd, Ashley Mascareno, Andrew Winhold
# Released under the GNU Public Licence, v3 or any higher version
# Please cite where necessary.
import numpy as np
#
#Units of mass are in solar mass
#Units of time are in years
#Units of position are in AU
#Units of velocity are in AU/year
#
mu = 3.964016 * 9.945193 #AU^3/(solarmass * yr^2)
#Masses in solar masses
mass = np.array([
1., #Sun
0.330104/(1.989e6), #Mercury
4.86732/(1.989e6), #Venus
5.97219/(1.989e6), #Earth
0.641693/(1.989e6), #Mars
1898.19/(1.989e6), #Jupiter
568.319/(1.989e6), #Saturn
86.8103/(1.989e6), #Uranus
102.410/(1.989e6), #Neptune
0.0166/(1.989e6), #Eris
])
#Semi-Major Axis in AU
smAxis = np.array([
0., #Sun
0.38709927, #Mercury
0.72333566, #Venus
1.00000261, #Earth
1.52371034, #Mars
5.200441899, #Jupiter
9.53667594, #Saturn
19.18916464, #Uranus
30.06992276, #Neptune
67.68871444,# Eris
])
# Eccentricity, dimensionless
eccentricity = np.array([
0., #Sun
0.20563593, #Mercury
0.00677672, #Venus
0.01671123, #Earth
0.09339410, #Mars
0.04838624, #Jupiter
0.05386179, #Saturn
0.04725744, #Uranus
0.00859048, #Neptune
0.44068 #Eris
])
#Inclination in degrees
inclination = np.array([
0., #Sun
7.00497902, #Mercury
3.39467605, #Venus
-0.00001531, #Earth
1.84969142, #Mars
1.30439695, #Jupiter
2.48599187, #Saturn
0.77263783, #Uranus
1.77004347, #Neptune
44.0445 #Eris
])
inclination = np.deg2rad(inclination) #converting to radians
#Mean Longitude in degrees
mLong = np.array([
0., #Sun
252.25032350, #Mercury
181.97909950, #Venus
100.46457166, #Earth
-4.55343205, #Mars
34.39644051, #Jupiter
49.95424423, #Saturn
313.23810451, #Uranus
-55.12002969, #Neptune
204.16 #Eris
])
mLong = np.deg2rad(mLong) #converting to radians
#Argument of perihelion in degrees
pLong = np.array([
0., #Sun
77.45779628, #Mercury
131.60246718, #Venus
102.93768193, #Earth
-23.94362959, #Mars
14.72847983, #Jupiter
92.59887831, #Saturn
170.95427630, #Uranus
44.96476227, #Neptune
187.1498689 #Eris ** Found by adding argument of perihelion
#and longitude of the ascending node, per JPL details.
])
pLong = np.deg2rad(pLong) #converting to radians
#Longitude of ascending node in degrees
aLong = np.array([
0., #Sun
48.33076593, #Mercury
76.67984255, #Venus
0.0, #Earth
49.55953891, #Mars
100.47390909, #Jupiter
113.66242448, #Saturn
74.01692503, #Uranus
35.906450258, #Neptune
131.78422574 #Eris
])
aLong = np.deg2rad(aLong) #converting to radians
system = {"Mass":mass, "Semi-Major Axis":smAxis, "Eccentricity":eccentricity, "Inclination":inclination, "Mean Longitude":mLong,"Perihelion Longitude":pLong, "Ascending Longitude":aLong}
def ang_mom(mu, a, e):
""" Define angular momentum for state vector calculation.
Inputs
------
mu : gravitational parameter, units of AU^3/(solarmass * yr^2)
a : semi-major axis, units of AU
e : eccentricity, dimensionless
Outputs
-------
    h : scalar, specific angular momentum of the body, units of AU^2/year
"""
h = np.sqrt(mu*a*(1-e**2))
return h
# True Anomaly (p.112 OMES, eqn 3.10)
def t_anom(E, e):
""" Calculates the true anomaly of a body
angular parameter that defines the position of a body moving along a Keplerian orbit.
It is the angle between the direction of periapsis (closest approach to cent. body)
and the current position of the body, as seen from the main focus of the ellipse
(the point around which the object orbits).
Inputs
------
E : eccentric anomaly, dimensionless
e : eccentricity of orbiting body, dimensionless
Outputs
-------
theta : angle, in radians
"""
theta = 2*np.arctan(np.tan(E/2)*np.sqrt((1-e)/(1+e)))
return theta
#Calculate angular Momentum, amomentum
h = ang_mom(mu, system["Semi-Major Axis"],system["Eccentricity"])
#Calculate mean Anomaly, m_anomaly
m_anomaly = system["Mean Longitude"] - system["Perihelion Longitude"]
#Calculate eccentric Anomaly, E
tol = 1e-5
E = m_anomaly + system["Eccentricity"] * np.sin(m_anomaly)
deltaE = np.zeros_like(E)
count = 0
while np.abs(np.linalg.norm(deltaE)) > tol:
    deltaE = (m_anomaly - (E - system["Eccentricity"] * np.sin(E))) / (1 - system["Eccentricity"] * np.cos(E))
E = E + deltaE
count += 1
if count > 1000:
print("E did not converge to a solution after 1000 iterations.")
break
#Calculate true Anomaly, real_anomaly
real_anomaly = t_anom(E, system["Eccentricity"])
#Calculate argument of perihelion, a_long
a_long = system["Perihelion Longitude"] - system["Ascending Longitude"]
def get_perifocal():
"""Calculates perifocal coordinates from orbital elements.
Perifocal coordinates do not take the z-axis into account, only the x and y orientation of the object.
Outputs
-------
rp : N x 3 array
array of sun-centered coordinates for each object in the system dictionary, units in AU
vp : N x 3 array
velocities of each object in the solar system, units in AU/year
"""
# 4.37 position array
r_ar = np.zeros((len(system["Mass"]),3))
r_ar[:,0] = np.cos(a_long)
r_ar[:,1] = np.sin(a_long)
#4.38 velocity array
v_ar = np.zeros((len(system["Mass"]),3))
v_ar[:,0] = -np.sin(a_long)
v_ar[:,1] = system["Eccentricity"] + np.cos(a_long)
# equations 4.37 and 4.38 in OMES p. 173
rp = np.zeros((len(system["Mass"]),3))
vp = np.zeros((len(system["Mass"]),3))
rp[:,0] = (h**2/mu) * (1/(1 + system["Eccentricity"]*np.cos(a_long))) * r_ar[:,0]
rp[:,1] = (h**2/mu) * (1/(1 + system["Eccentricity"]*np.cos(a_long))) * r_ar[:,1]
vp[1:,0] = (mu/h[1:]) * v_ar[1:,0]
vp[1:,1] = (mu/h[1:]) * v_ar[1:,1]
return rp, vp
def get_heliocentric(r, v):
"""Transforms perifocal coordinates into heliocentric cordinates.
Heliocentric coordinates are oriented with respect to the ecliptic plane of the solar system
and correctly model the solar system.
Outputs
-------
ecliptic_r : N x 3 array
array of sun-centered coordinates for each object in the system dictionary
in heliocentric frame, units of AU
ecliptic_v : N x 3 array
velocities of each object in the solar system in heliocentric frame, units of AU/year
"""
ecliptic_r = np.zeros((len(system["Mass"]),3))
ecliptic_v = np.zeros((len(system["Mass"]),3))
cosw = np.cos(a_long) #small omega
sinw = np.sin(a_long)
cosO = np.cos(system["Ascending Longitude"]) #big omega
sinO = np.sin(system["Ascending Longitude"])
cosI = np.cos(system["Inclination"]) #i
sinI = np.sin(system["Inclination"])
#Equations derived from rotation matrix
ecliptic_r[:,0] = (cosO * cosw - sinO * sinw * cosI) * r[:,0] + (-cosO * sinw - sinO * cosw * cosI) * r[:,1]
ecliptic_r[:,1] = (sinO * cosw + cosO * sinw * cosI) * r[:,0] + (-sinO * sinw + cosO * cosw * cosI) * r[:,1]
ecliptic_r[:,2] = sinw * sinI * r[:,0] + cosw * sinI * r[:,1]
#Equations derived from rotation matrix
ecliptic_v[:,0] = (cosO * cosw - sinO * sinw * cosI) * v[:,0] + (-cosO * sinw - sinO * cosw * cosI) * v[:,1]
ecliptic_v[:,1] = (sinO * cosw + cosO * sinw * cosI) * v[:,0] + (-sinO * sinw + cosO * cosw * cosI) * v[:,1]
ecliptic_v[:,2] = sinw * sinI * v[:,0] + cosw * sinI * v[:,1]
return ecliptic_r, ecliptic_v
def add_Rama(r, v, rama_r, rama_v):
"""
r : positions
v : velocity
theta : true anomaly
"""
r = np.append(r, rama_r, axis=0)
v = np.append(v, rama_v, axis = 0)
return r, v
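# --- Hedged usage sketch (not part of the original script) ---
# Chains the helpers above to obtain heliocentric state vectors for the bodies
# defined in the "system" dictionary; guarded so it only runs when executed directly.
if __name__ == "__main__":
    rp, vp = get_perifocal()
    r, v = get_heliocentric(rp, vp)
    print(r)
    print(v)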
| gpl-3.0 | 376,904,541,612,496,700 | 30.806569 | 186 | 0.562823 | false |
ecowan/mlb-realtime | realtime/url_builder.py | 1 | 1428 | __author__ = 'ecowan'
from time_converter import TimeConverter
from link_finder import LinkFinder
# TODO: Need to handle case where team is not playing that day.
# Currently returns:
'''
Traceback (most recent call last):
File "/usr/local/bin/mlb-realtime", line 9, in <module>
load_entry_point('mlb-realtime==0.0.13', 'console_scripts', 'mlb-realtime')()
File "build/bdist.linux-x86_64/egg/realtime/__main__.py", line 41, in main
File "build/bdist.linux-x86_64/egg/realtime/url_builder.py", line 29, in build_url
TypeError: sequence item 1: expected string, NoneType found
'''
class UrlBuilder:
def __init__(self, time_dict, team_code):
self.base_url = "http://gd2.mlb.com/components/game/mlb/"
self.time_dict = dict((k, self.pad_single_digit(v)) for k,v in time_dict.items())
self.team_code = team_code
def pad_single_digit(self, num):
if num < 10:
return '0' + str(num)
else:
return str(num)
def build_search_url(self):
return self.base_url + "/".join(["year_"+self.time_dict['year'], "month_"+self.time_dict['month'], "day_"+self.time_dict['day']])
def get_gid(self):
return LinkFinder(self.build_search_url(), self.team_code).get_link()
def build_url(self):
search_url = self.build_search_url()
gid = self.get_gid()
return "/".join([search_url, gid]) + "runScoringPlays.plist"
| gpl-2.0 | -1,075,126,945,044,526,100 | 33.829268 | 138 | 0.637955 | false |
wbonnet/lffs | toolkit/setup.py | 1 | 1412 | #
# The contents of this file are subject to the Apache 2.0 license you may not
# use this file except in compliance with the License.
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
#
# Copyright 2016 DFT project (http://www.debianfirmwaretoolkit.org).
# All rights reserved. Use is subject to license terms.
#
# Debian Firmware Toolkit is the new name of Linux Firmware From Scratch
# Copyright 2014 LFFS project (http://www.linuxfirmwarefromscratch.org).
#
#
# Contributors list :
#
# William Bonnet [email protected], [email protected]
#
#
from dft.release import __version__, __author__, __author_email__
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
config = {
'description': 'Debian Firmware Toolkit',
    'long_description': 'DFT is a toolkit used to produce firmware from standard Debian repositories',
'author': __author__,
'url': 'https://github.com/wbonnet/dft/',
'download_url': 'https://github.com/wbonnet/dft/',
'author_email': __author_email__,
'version': __version__,
'install_requires': [ 'pyyaml', 'pyparted' ],
'packages': ['dft'],
'scripts': [ 'bin/dft' ],
'name': 'dft'
}
setup(**config)
| apache-2.0 | 6,004,843,945,266,503,000 | 29.042553 | 105 | 0.697592 | false |
jeremiahyan/odoo | addons/fleet/models/fleet_vehicle_model.py | 1 | 2127 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import _, api, fields, models
class FleetVehicleModel(models.Model):
_name = 'fleet.vehicle.model'
_description = 'Model of a vehicle'
_order = 'name asc'
name = fields.Char('Model name', required=True)
brand_id = fields.Many2one('fleet.vehicle.model.brand', 'Manufacturer', required=True, help='Manufacturer of the vehicle')
category_id = fields.Many2one('fleet.vehicle.model.category', 'Category')
vendors = fields.Many2many('res.partner', 'fleet_vehicle_model_vendors', 'model_id', 'partner_id', string='Vendors')
image_128 = fields.Image(related='brand_id.image_128', readonly=True)
active = fields.Boolean(default=True)
vehicle_type = fields.Selection([('car', 'Car'), ('bike', 'Bike')], default='car', required=True)
transmission = fields.Selection([('manual', 'Manual'), ('automatic', 'Automatic')], 'Transmission', help='Transmission Used by the vehicle')
vehicle_count = fields.Integer(compute='_compute_vehicle_count')
@api.depends('name', 'brand_id')
def name_get(self):
res = []
for record in self:
name = record.name
if record.brand_id.name:
name = record.brand_id.name + '/' + name
res.append((record.id, name))
return res
def _compute_vehicle_count(self):
group = self.env['fleet.vehicle'].read_group(
[('model_id', 'in', self.ids)], ['id', 'model_id'], groupby='model_id', lazy=False,
)
count_by_model = {entry['model_id'][0]: entry['__count'] for entry in group}
for model in self:
model.vehicle_count = count_by_model.get(model.id, 0)
def action_model_vehicle(self):
self.ensure_one()
view = {
'type': 'ir.actions.act_window',
'view_mode': 'kanban,tree,form',
'res_model': 'fleet.vehicle',
'name': _('Vehicles'),
'context': {'search_default_model_id': self.id, 'default_model_id': self.id}
}
return view
| gpl-3.0 | -3,313,334,346,631,041,500 | 41.54 | 144 | 0.607428 | false |
82Flex/DCRM | suit/sortables.py | 1 | 6638 | from copy import deepcopy, copy
from django.contrib import admin
from django.contrib.admin.views.main import ChangeList
from django.contrib.contenttypes.admin import GenericTabularInline, GenericStackedInline
from django.forms import ModelForm, NumberInput
from django.db import models
class SortableModelAdminBase(object):
"""
Base class for SortableTabularInline and SortableModelAdmin
"""
sortable = 'order'
class Media:
js = ('suit/js/suit.sortables.js',)
class SortableListForm(ModelForm):
"""
Just Meta holder class
"""
class Meta:
widgets = {
'order': NumberInput(
attrs={'class': 'hidden-xs-up suit-sortable'})
}
class SortableChangeList(ChangeList):
"""
Class that forces ordering by sortable param only
"""
def get_ordering(self, request, queryset):
if self.model_admin.sortable_is_enabled():
return [self.model_admin.sortable, '-' + self.model._meta.pk.name]
return super(SortableChangeList, self).get_ordering(request, queryset)
class SortableTabularInlineBase(SortableModelAdminBase):
"""
Sortable tabular inline
"""
def __init__(self, *args, **kwargs):
super(SortableTabularInlineBase, self).__init__(*args, **kwargs)
self.ordering = (self.sortable,)
self.fields = self.fields or []
if self.fields and self.sortable not in self.fields:
self.fields = list(self.fields) + [self.sortable]
def formfield_for_dbfield(self, db_field, **kwargs):
if db_field.name == self.sortable:
kwargs['widget'] = SortableListForm.Meta.widgets['order']
return super(SortableTabularInlineBase, self).formfield_for_dbfield(
db_field, **kwargs)
class SortableTabularInline(SortableTabularInlineBase, admin.TabularInline):
pass
class SortableGenericTabularInline(SortableTabularInlineBase,
GenericTabularInline):
pass
class SortableStackedInlineBase(SortableModelAdminBase):
"""
Sortable stacked inline
"""
def __init__(self, *args, **kwargs):
super(SortableStackedInlineBase, self).__init__(*args, **kwargs)
self.ordering = (self.sortable,)
def get_fieldsets(self, *args, **kwargs):
"""
Iterate all fieldsets and make sure sortable is in the first fieldset
        Remove sortable from every other fieldset, if for some reason someone
has added it
"""
fieldsets = super(SortableStackedInlineBase, self).get_fieldsets(*args, **kwargs)
sortable_added = False
for fieldset in fieldsets:
for line in fieldset:
if not line or not isinstance(line, dict):
continue
fields = line.get('fields')
if self.sortable in fields:
fields.remove(self.sortable)
# Add sortable field always as first
if not sortable_added:
fields.insert(0, self.sortable)
sortable_added = True
break
return fieldsets
def formfield_for_dbfield(self, db_field, **kwargs):
if db_field.name == self.sortable:
kwargs['widget'] = deepcopy(SortableListForm.Meta.widgets['order'])
kwargs['widget'].attrs['class'] += ' suit-sortable-stacked'
kwargs['widget'].attrs['rowclass'] = ' suit-sortable-stacked-row'
return super(SortableStackedInlineBase, self).formfield_for_dbfield(db_field, **kwargs)
class SortableStackedInline(SortableStackedInlineBase, admin.StackedInline):
pass
class SortableGenericStackedInline(SortableStackedInlineBase,
GenericStackedInline):
pass
class SortableModelAdmin(SortableModelAdminBase, admin.ModelAdmin):
"""
Sortable change list
"""
def __init__(self, *args, **kwargs):
super(SortableModelAdmin, self).__init__(*args, **kwargs)
# Keep originals for restore
self._original_ordering = copy(self.ordering)
self._original_list_display = copy(self.list_display)
self._original_list_editable = copy(self.list_editable)
self._original_exclude = copy(self.exclude)
self._original_list_per_page = self.list_per_page
self.enable_sortable()
def merge_form_meta(self, form):
"""
Prepare Meta class with order field widget
"""
if not getattr(form, 'Meta', None):
form.Meta = SortableListForm.Meta
if not getattr(form.Meta, 'widgets', None):
form.Meta.widgets = {}
form.Meta.widgets[self.sortable] = SortableListForm.Meta.widgets[
'order']
def get_changelist_form(self, request, **kwargs):
form = super(SortableModelAdmin, self).get_changelist_form(request,
**kwargs)
self.merge_form_meta(form)
return form
def get_changelist(self, request, **kwargs):
return SortableChangeList
def enable_sortable(self):
self.list_per_page = 500
self.ordering = (self.sortable,)
if self.list_display and self.sortable not in self.list_display:
self.list_display = list(self.list_display) + [self.sortable]
self.list_editable = self.list_editable or []
if self.sortable not in self.list_editable:
self.list_editable = list(self.list_editable) + [self.sortable]
self.exclude = self.exclude or []
if self.sortable not in self.exclude:
self.exclude = list(self.exclude) + [self.sortable]
def disable_sortable(self):
if not self.sortable_is_enabled():
return
self.ordering = self._original_ordering
self.list_display = self._original_list_display
self.list_editable = self._original_list_editable
self.exclude = self._original_exclude
self.list_per_page = self._original_list_per_page
def sortable_is_enabled(self):
return self.list_display and self.sortable in self.list_display
def save_model(self, request, obj, form, change):
if not obj.pk:
max_order = obj.__class__.objects.aggregate(
models.Max(self.sortable))
try:
next_order = max_order['%s__max' % self.sortable] + 1
except TypeError:
next_order = 1
setattr(obj, self.sortable, next_order)
super(SortableModelAdmin, self).save_model(request, obj, form, change)
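# Hedged usage sketch (not part of django-suit itself, kept as comments because it relies on
# a project-specific model); "Item" is a hypothetical model with an integer "order" field:
#
#     from django.contrib import admin
#     from myapp.models import Item
#
#     class ItemAdmin(SortableModelAdmin):
#         sortable = 'order'
#         list_display = ('name',)
#
#     admin.site.register(Item, ItemAdmin)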
| agpl-3.0 | 7,824,759,042,278,630,000 | 33.041026 | 95 | 0.622477 | false |
imdjh/quickanswer | bin/myfilter.py | 1 | 3139 | #!/usr/bin/env python
# encoding: utf-8
from pandocfilters import walk, stringify, Para
import re
import json
import sys
def purify(k, v, fmt, meta):
"""
First Step: Remove nonsense from unoconv
"""
if k == 'Span':
return [stringify(v[1])]
inselection = False
def phaseSelection(k, v, fmt, meta):
"""
    Second Step: Focus on selection
"""
global inselection
if k == 'Para':
if isinstance(v[0], dict):
return []
if v[0] == '%%kaishi%%':
# print(v[0])
if inselection:
inselection = False
else:
inselection = True
return []
if not inselection:
return []
countQ = 0
consA = ''
phaseA = False
consJSON = {'a': [], 'r': True, 'q': []}
# If consQ unit is completed
consUnit = False
consQ = {'qNum': 0, 'qc': '', 'qa_1': '',
'qa_2': '', 'qa_3': '', 'qa_4': ''}
def phaseQA(k, v, fmt, meta):
"""
    Third Step: write formatted JSON
"""
global consJSON, consQ, consUnit, phaseA, consA
if k == 'Para':
if isinstance(v[0], dict):
return []
if re.search(u'.*答案.*[A-D]*', v[0]):
# ZhengZhi's answer layout
r = re.findall(u'[A-D]', v[0])
consA = ''.join(r)
elif re.search(u'(\s)*\d(\s)*[\.)。/|\"].*', v[0]):
            # It's a question, possibly spanning multiple lines
phaseA = False
consUnit = False
r = re.split(u'[\.)。/|\"]', v[0].strip())
consQ['qNum'] = r[0].strip() + "." # add delimiter
r.pop(0)
consQ['qc'] = ''.join(r).strip()
elif re.search(u'(\s)*[A-D](\s)*[\.)。/|\"].*', v[0]):
# It's answers, one at a time
phaseA = True
if re.search(u'(\s)*A(\s)*[\.)。/|"].*', v[0]):
consQ['qa_1'] = v[0].strip()
if re.search(u'(\s)*B(\s)*[\.)。/|"].*', v[0]):
consQ['qa_2'] = v[0].strip()
if re.search(u'(\s)*C(\s)*[\.)。/|"].*', v[0]):
consQ['qa_3'] = v[0].strip()
if re.search(u'(\s)*D(\s)*[\.)。/|"].*', v[0]):
consQ['qa_4'] = v[0].strip()
else:
            # Fallback if the question spans multiple lines
consUnit = False
consQ['qc'] += v[0]
if (consQ['qa_1'] and consQ['qa_2'] and consQ['qa_3'] and consQ['qa_4'] and consA):
# If qa_[1-4] is fullfilled
consJSON['q'].append(consQ)
consJSON['a'].append(consA)
consQ = {'qNum': 0, 'qc': '', 'qa_1': '',
'qa_2': '', 'qa_3': '', 'qa_4': ''}
consA = ''
phaseA = False
if __name__ == "__main__":
file_c = open("out.json", "r")
c = json.loads(file_c.read())
purified = walk(c, purify, '', '')
#purifiedJSON = json.dumps(altered, ensure_ascii=False)
mySelections = walk(purified, phaseSelection, '', '')
#selectionJSON = json.dumps(altered, mySelections, ensure_ascii=False)
walk(mySelections, phaseQA, '', '')
f = open('qa.json', 'w+')
json.dump(consJSON, f, ensure_ascii=False)
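# Hedged note (an assumption, not from the original repo): "out.json" is expected to be a
# pandoc JSON AST of the source document, produced beforehand with something like:
#     pandoc input.docx -t json -o out.json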
| mit | 7,108,870,104,143,124,000 | 28.72381 | 87 | 0.459789 | false |
andreymal/mini_fiction | mini_fiction/logic/characters.py | 1 | 3044 | from flask_babel import lazy_gettext
from mini_fiction.logic.adminlog import log_addition, log_changed_fields, log_deletion
from mini_fiction.logic.image import CharacterBundle, cleanup_image, save_image
from mini_fiction.models import Author, Character, CharacterGroup
from mini_fiction.utils.misc import call_after_request as later
from mini_fiction.validation import RawData, ValidationError, Validator
from mini_fiction.validation.sorting import CHARACTER, CHARACTER_FOR_UPDATE
def create(author: Author, data: RawData) -> Character:
data = Validator(CHARACTER).validated(data)
errors = {}
exist_character = Character.get(name=data["name"])
if exist_character:
errors["name"] = [lazy_gettext("Character already exists")]
group = CharacterGroup.get(id=data["group"])
if not group:
errors["group"] = [lazy_gettext("Group not found")]
if errors:
raise ValidationError(errors)
raw_data = data.pop("picture").stream.read()
saved_image = save_image(bundle=CharacterBundle, raw_data=raw_data)
if not saved_image:
raise ValidationError({"picture": ["Cannot save image"]})
character = Character(**data)
character.image = saved_image
character.flush()
log_addition(by=author, what=character)
return character
def update(character: Character, author: Author, data: RawData) -> None:
data = Validator(CHARACTER_FOR_UPDATE).validated(data, update=True)
errors = {}
if "name" in data:
exist_character = Character.get(name=data["name"])
if exist_character and exist_character.id != character.id:
errors["name"] = [lazy_gettext("Character already exists")]
if "group" in data:
group = CharacterGroup.get(id=data["group"])
if not group:
errors["group"] = [lazy_gettext("Group not found")]
if errors:
raise ValidationError(errors)
changed_fields = set()
raw_picture = data.pop("picture", None)
if raw_picture:
old_saved_image = character.image
raw_data = raw_picture.stream.read()
saved_image = save_image(bundle=CharacterBundle, raw_data=raw_data)
if not saved_image:
raise ValidationError({"picture": ["Cannot save image"]})
character.image = saved_image
changed_fields |= {"image_bundle"}
later(lambda: cleanup_image(old_saved_image))
for key, value in data.items():
if key == "group":
if character.group.id != value:
setattr(character, key, value)
changed_fields |= {key}
elif getattr(character, key) != value:
setattr(character, key, value)
changed_fields |= {key}
if changed_fields:
log_changed_fields(by=author, what=character, fields=sorted(changed_fields))
def delete(character: Character, author: Author) -> None:
log_deletion(by=author, what=character)
old_saved_image = character.image
later(lambda: cleanup_image(old_saved_image))
character.delete()
| gpl-3.0 | 4,021,155,636,073,459,700 | 33.202247 | 86 | 0.664258 | false |
openmaraude/geofaker | setup.py | 1 | 1496 | from setuptools import find_packages, setup
import os
import re
PACKAGE = 'geofaker'
DEPENDENCIES = [
'gpxpy'
]
def get_pkgvar(name):
"""Get the value of :param name: from __init__.py.
The package cannot be imported since dependencies might not be installed
yet."""
here = os.path.abspath(os.path.dirname(__file__))
init_path = os.path.join(here, PACKAGE, '__init__.py')
# Cache file content into get_pkgvar.init_content to avoid reading the
# __init__.py file several times.
if not hasattr(get_pkgvar, 'init_content'):
with open(init_path) as handle:
get_pkgvar.init_content = handle.read().splitlines()
for line in get_pkgvar.init_content:
res = re.search(r'^%s\s*=\s*["\'](.*)["\']' % name, line)
if res:
return res.groups()[0]
raise ValueError('%s not found in %s' % (name, init_path))
setup(
name=PACKAGE,
version=get_pkgvar('__version__'),
description=get_pkgvar('__doc__'),
url=get_pkgvar('__homepage__'),
author=get_pkgvar('__author__'),
author_email=get_pkgvar('__contact__'),
license='MIT',
classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
'Programming Language :: Python :: 3'
],
keywords='taxi transportation',
packages=find_packages(),
install_requires=DEPENDENCIES,
entry_points={
'console_scripts': [
'geofaker = geofaker:main',
],
},
)
| mit | 9,050,405,847,044,962,000 | 25.714286 | 76 | 0.600267 | false |
ionitadaniel19/testframeworksevolution | src/tests/utests.py | 1 | 4794 | '''
Created on 01.06.2014
@author: ionitadaniel19
'''
import unittest
import traceback
import os
from config.utilities import load_browser_driver
from selenium.webdriver import FirefoxProfile
from selenium.webdriver import Firefox
from selenium.webdriver import Chrome
from selenium.webdriver import Ie
from config.constants import EXPECTED_ANSWER
import logging,time
class FrameworkTests(unittest.TestCase):
def __init__(self,test,browser_name,url,test_data=None):
super(FrameworkTests,self).__init__(test)
self.test=test
self.browser_name=browser_name
self.url=url
self.driver=None
if self.browser_name=='firefox':
ffp = FirefoxProfile()
ffp.update_preferences()
self.driver = Firefox(firefox_profile=ffp)
elif self.browser_name=='chrome':
chromedriver = load_browser_driver("chromedriver")
os.environ["webdriver.chrome.driver"] = chromedriver
self.driver=Chrome(chromedriver)
elif self.browser_name=='ie':
iedriver = load_browser_driver("IEDriverServer")
os.environ["webdriver.ie.driver"] = iedriver
self.driver=Ie(iedriver)
self.verification = []
self.verification.append("Test %s on browser %s" %(self.test,self.browser_name))
self.test_data=test_data
self.errors=[]
def setUp(self):
"""
set up data used in the tests.
setUp is called before each test function execution.
"""
self.driver.get(self.url)
time.sleep(5)
def tearDown(self):
"""
        tearDown is called after each test method has been invoked.
"""
if self.driver:
try:
time.sleep(2)
self.driver.quit()
except:
print traceback.format_exc()
for item in self.verification:
logging.info(item)
        for err in self.errors:
            logging.error(err)
            self.fail(err)
def test_recordplayback(self):
try:
self.verification.append('Test record and playback')
from linearframework.recordtests import show_answer_record
actual_answer=show_answer_record(self.driver)
self.assertEqual(actual_answer, EXPECTED_ANSWER, 'Actual answer incorrect:%s.Expected answer is:%s' %(actual_answer,EXPECTED_ANSWER))
except Exception,ex:
raise Exception('Test record playback failed with Exception:%s' %ex)
def test_modularframework(self):
try:
self.verification.append('Test modular driven framework')
from modularframework.modulartests import show_answer_modular
actual_answer=show_answer_modular(self.driver)
self.assertEqual(actual_answer, EXPECTED_ANSWER, 'Actual answer incorrect:%s.Expected answer is:%s' %(actual_answer,EXPECTED_ANSWER))
except Exception,ex:
raise Exception('Test modular failed with Exception:%s' %ex)
def test_dataframework(self):
try:
self.verification.append('Test data driven framework')
from datadrivenframework.datatests import show_answer_datadriven
actual_answer,expected_answer=show_answer_datadriven(self.driver,2)
self.assertEqual(actual_answer, expected_answer, 'Actual answer incorrect:%s.Expected answer is:%s' %(actual_answer,expected_answer))
except Exception,ex:
raise Exception('Test data driven failed with Exception:%s' %ex)
def test_keywordframework(self):
try:
self.verification.append('Test keyword driven framework')
from keydrivenframework.keytests import show_answer_keydriven
validate,actual_answer=show_answer_keydriven(self.driver,1)
if validate is False:
self.assertTrue(validate, 'Actual answer incorrect:%s'%actual_answer)
except Exception,ex:
raise Exception('Test keyword failed with Exception:%s.Traceback is %s' %(ex,traceback.format_exc()))
def test_hybridframework(self):
try:
self.verification.append('Test hybrid framework')
from hybridframework.hybridtests import show_answer_hybrid_simple
actual_answer=show_answer_hybrid_simple(self.driver,self.test_data)
self.assertEqual(actual_answer, EXPECTED_ANSWER, 'Actual answer incorrect:%s.Expected answer is:%s' %(actual_answer,EXPECTED_ANSWER))
except Exception,ex:
raise Exception('Test hybrid failed with Exception:%s' %ex)
| mit | 6,501,051,434,895,794,000 | 41.6 | 145 | 0.626617 | false |
kaltura/nginx-vod-module | test/stream_compare.py | 1 | 2741 | from urlparse import urlparse
import manifest_utils
import compare_utils
import stress_base
import http_utils
import random
import time
import re
from stream_compare_params import *
manifest_utils.CHUNK_LIST_ITEMS_TO_COMPARE = CHUNK_LIST_ITEMS_TO_COMPARE
def convertBody(body):
try:
return body.decode('ascii')
except UnicodeDecodeError:
return body[:100].encode('hex')
class TestThread(stress_base.TestThreadBase):
def getURL(self, hostHeader, url):
headers = {}
headers.update(EXTRA_HEADERS)
headers['Host'] = hostHeader
code, headers, body = http_utils.getUrl(url, headers)
if code == 0:
self.writeOutput(body)
return code, headers, body
def compareUrls(self, hostHeader, url1, url2):
for retry in xrange(URL_COMPARE_RETRIES):
if retry != 0:
time.sleep(URL_COMPARE_RETRIES_SLEEP_INTERVAL)
if LOG_LEVEL['UrlCompareLog']:
self.writeOutput('Compare %s with %s (retry %d)' % (url1, url2, retry))
code1, headers1, body1 = self.getURL(hostHeader, url1)
code2, headers2, body2 = self.getURL(hostHeader, url2)
if code1 != code2:
self.writeOutput('Error: got different status codes %s vs %s, url1=%s, url2=%s' % (code1, code2, url1, url2))
continue
headerCompare = compare_utils.compareHeaders(headers1, headers2)
if headerCompare != None:
self.writeOutput(headerCompare)
continue
if str(code1) != '200':
self.writeOutput('Notice: got status code %s, url1=%s, url2=%s' % (code1, url1, url2))
if body1 != body2:
if retry >= URL_COMPARE_RETRIES-1:
severity = "Error"
else:
severity = "Notice"
self.writeOutput('%s: comparison failed, url1=%s, url2=%s\n%s\n%s' % (severity, url1, url2, convertBody(body1), convertBody(body2)))
continue
return code1, headers1, body1
return False
def runTest(self, uri):
hostHeader, uri = uri.split(' ')
uri = uri.replace('@time@', str(int(time.time())))
urlBase1 = random.choice(URL1_BASE)
urlBase2 = random.choice(URL2_BASE)
url1 = urlBase1 + uri
url2 = urlBase2 + uri
self.writeOutput('Info: testing %s %s' % (url1, url2))
compareResult = self.compareUrls(hostHeader, url1, url2)
if compareResult == False:
return False
code, headers, body = compareResult
if str(code) != '200':
return True
mimeType = headers['content-type'][0]
urls = manifest_utils.getManifestUrls(url1.rsplit('/', 1)[0], body, mimeType, {'Host':hostHeader})
urls = map(lambda x: urlBase1 + urlparse(x).path, urls) # the urls may contain the host header
result = True
for url in urls:
if not self.compareUrls(hostHeader, url, url.replace(urlBase1, urlBase2)):
result = False
return result
if __name__ == '__main__':
stress_base.main(TestThread, STOP_FILE)
| agpl-3.0 | -7,451,293,146,518,988,000 | 27.852632 | 136 | 0.68807 | false |
lzamparo/SdA_reduce | utils/dpgmm_test.py | 1 | 3458 | from sklearn.mixture import DPGMM
import sys
import logging, os
from optparse import OptionParser
import numpy as np
from tables import *
from extract_datasets import extract_unlabeled_chunkrange, extract_chunk_sizes
import contextlib,time
@contextlib.contextmanager
def timeit():
t=time.time()
yield
print(time.time()-t,"sec")
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# Parse commandline arguments
op = OptionParser()
op.add_option("--h5file",
dest="inputfile", help="Read unlabeled data from this hdf5 file.")
op.add_option("--size",
dest="size", type="int", help="Use this many chunks of labeled data for the test.")
(opts, args) = op.parse_args()
np.random.seed(0)
# there is a max of 211 chunks of data in sm_rep1, so cap opts.size to this
if opts.size > 211:
opts.size = 211
###############################################################################
# The unlabeled data h5 file
unlabeled_datafile = openFile(opts.inputfile, mode = "r")
# The labeled data h5 file
#labeled_datafile = openFile(labeldata, mode = "r")
# Load the reduced data from a different file
X_unlabeled = extract_unlabeled_chunkrange(unlabeled_datafile, opts.size)
chunk_sizes = extract_chunk_sizes(unlabeled_datafile)
# Extract some of the dataset from the datafile
# X_labeled, labels = extract_labeled_chunkrange(labeled_datafile, opts.size)
# done, close h5 files
#labeled_datafile.close()
unlabeled_datafile.close()
for chunks in np.arange(1, opts.size, step = 3):
# Sample the specified number of points from X_unlabeled
size = np.cumsum(chunk_sizes[:chunks])[-1]
# Fit a Dirichlet process mixture of Gaussians using up to ten components
dpgmm = DPGMM(n_components=10, alpha=10.0, covariance_type='full')
indices = np.arange(X_unlabeled.shape[0])
np.random.shuffle(indices)
X = X_unlabeled[indices[:size],]
print("fitting a model with", size, "data points")
with timeit():
dpgmm.fit(X)
print("Done!")
print("AIC for this model & data: ", dpgmm.aic(X))
print("BIC for this model & data: ", dpgmm.bic(X))
Y_hat = dpgmm.predict(X)
print ("Model assigned points to", np.max(Y_hat), "components")
# How can I best check this out?
#color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
#for i, (clf, title) in enumerate([(gmm, 'GMM'),
#(dpgmm, 'Dirichlet Process GMM')]):
#splot = plt.subplot(2, 1, 1 + i)
#Y_ = clf.predict(X)
#for i, (mean, covar, color) in enumerate(zip(
#clf.means_, clf._get_covars(), color_iter)):
#v, w = linalg.eigh(covar)
#u = w[0] / linalg.norm(w[0])
## as the DP will not use every component it has access to
## unless it needs it, we shouldn't plot the redundant
## components.
#if not np.any(Y_ == i):
#continue
#plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
## Plot an ellipse to show the Gaussian component
#angle = np.arctan(u[1] / u[0])
#angle = 180 * angle / np.pi # convert to degrees
#ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
#ell.set_clip_box(splot.bbox)
#ell.set_alpha(0.5)
#splot.add_artist(ell)
#plt.xlim(-10, 10)
#plt.ylim(-3, 6)
#plt.xticks(())
#plt.yticks(())
#plt.title(title)
#plt.show() | bsd-3-clause | -1,998,240,313,538,520,600 | 31.942857 | 97 | 0.62782 | false |
avanzosc/avanzosc6.1 | avanzosc_inverse_mrp_production/stock_move_ext.py | 1 | 3324 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Avanzosc - Avanced Open Source Consulting
# Copyright (C) 2011 - 2012 Avanzosc <http://www.avanzosc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from datetime import datetime
from osv import osv, fields
import decimal_precision as dp
from tools import float_compare
from tools.translate import _
import netsvc
import time
import tools
class stock_move(osv.osv):
_inherit = 'stock.move'
_columns = {# Percentage
'percentage': fields.float('%', digits=(3,2)),
}
def onchange_quantity(self, cr, uid, ids, product_id, product_qty, product_uom, product_uos):
""" On change of product quantity finds UoM and UoS quantities
@param product_id: Product id
@param product_qty: Changed Quantity of product
@param product_uom: Unit of measure of product
@param product_uos: Unit of sale of product
@return: Dictionary of values
"""
result = {
'product_uos_qty': 0.00
}
if (not product_id) or (product_qty <=0.0):
return {'value': result}
product_obj = self.pool.get('product.product')
uos_coeff = product_obj.read(cr, uid, product_id, ['coef_amount'])
if product_uos and product_uom and (product_uom != product_uos):
result['product_uos_qty'] = product_qty / uos_coeff['coef_amount']
else:
result['product_uos_qty'] = product_qty
return {'value': result}
def onchange_uos_quantity(self, cr, uid, ids, product_id, product_uos_qty, product_uos, product_uom):
""" On change of product quantity finds UoM and UoS quantities
@param product_id: Product id
@param product_uos_qty: Changed UoS Quantity of product
@param product_uom: Unit of measure of product
@param product_uos: Unit of sale of product
@return: Dictionary of values
"""
result = {
'product_qty': 0.00
}
if (not product_id) or (product_uos_qty <=0.0):
return {'value': result}
product_obj = self.pool.get('product.product')
uos_coeff = product_obj.read(cr, uid, product_id, ['coef_amount'])
if product_uos and product_uom and (product_uom != product_uos):
result['product_qty'] = product_uos_qty * uos_coeff['coef_amount']
else:
result['product_qty'] = product_uos_qty
return {'value': result}
stock_move() | agpl-3.0 | 8,336,362,185,875,381,000 | 35.944444 | 105 | 0.59988 | false |
jhpyle/docassemble | docassemble_demo/docassemble/demo/google_drive.py | 1 | 3505 | from docassemble.base.util import DAGoogleAPI, DAFile
import apiclient
api = DAGoogleAPI()
__all__ = ['get_folder_names', 'get_files_in_folder', 'write_file_to_folder', 'download_file']
def get_folder_names():
service = api.drive_service()
items = list()
while True:
response = service.files().list(spaces="drive", fields="nextPageToken, files(id, name)", q="mimeType='application/vnd.google-apps.folder' and sharedWithMe").execute()
for the_file in response.get('files', []):
items.append(the_file)
page_token = response.get('nextPageToken', None)
if page_token is None:
break
return [item['name'] for item in items]
def get_folder_id(folder_name):
service = api.drive_service()
response = service.files().list(spaces="drive", fields="nextPageToken, files(id, name)", q="mimeType='application/vnd.google-apps.folder' and sharedWithMe and name='" + str(folder_name) + "'").execute()
folder_id = None
for item in response.get('files', []):
folder_id = item['id']
return folder_id
def get_file_id(filename, folder_name):
folder_id = get_folder_id(folder_name)
if folder_id is None:
raise Exception("The folder was not found")
service = api.drive_service()
file_id = None
response = service.files().list(spaces="drive", fields="nextPageToken, files(id, name)", q="mimeType!='application/vnd.google-apps.folder' and '" + str(folder_id) + "' in parents and name='" + str(filename) + "'").execute()
for item in response.get('files', []):
file_id = item['id']
return file_id
def get_files_in_folder(folder_name):
folder_id = get_folder_id(folder_name)
if folder_id is None:
raise Exception("The folder was not found")
service = api.drive_service()
items = list()
while True:
response = service.files().list(spaces="drive", fields="nextPageToken, files(id, name)", q="mimeType!='application/vnd.google-apps.folder' and trashed=false and '" + str(folder_id) + "' in parents").execute()
for the_file in response.get('files', []):
items.append(the_file)
page_token = response.get('nextPageToken', None)
if page_token is None:
break
return [item['name'] for item in items]
def write_file_to_folder(path, mimetype, filename, folder_name):
folder_id = get_folder_id(folder_name)
if folder_id is None:
raise Exception("The folder was not found")
service = api.drive_service()
file_metadata = { 'name': filename, 'parents': [folder_id] }
media = apiclient.http.MediaFileUpload(path, mimetype=mimetype)
the_new_file = service.files().create(body=file_metadata,
media_body=media,
fields='id').execute()
return the_new_file.get('id')
def download_file(filename, folder_name):
file_id = get_file_id(filename, folder_name)
if file_id is None:
raise Exception("The file was not found")
the_file = DAFile()
the_file.set_random_instance_name()
the_file.initialize(filename=filename)
service = api.drive_service()
with open(the_file.path(), 'wb') as fh:
response = service.files().get_media(fileId=file_id)
downloader = apiclient.http.MediaIoBaseDownload(fh, response)
done = False
while done is False:
status, done = downloader.next_chunk()
the_file.commit()
return the_file
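# Usage sketch (assumed folder and file names): fetch "notes.txt" from a Drive
# folder named "Client Files" that has been shared with the credentialed
# account, e.g. in interview code:
#     the_file = download_file('notes.txt', 'Client Files')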
| mit | 3,498,028,417,425,838,000 | 41.228916 | 227 | 0.634522 | false |
r03ert0/ldsc | ldscore/regressions.py | 1 | 28907 | '''
(c) 2014 Brendan Bulik-Sullivan and Hilary Finucane
Estimators of heritability and genetic correlation.
Shape convention is (n_snp, n_annot) for all classes.
Last column = intercept.
'''
from __future__ import division
import numpy as np
import pandas as pd
from scipy.stats import norm, chi2
import jackknife as jk
from irwls import IRWLS
from scipy.stats import t as tdist
from collections import namedtuple
np.seterr(divide='raise', invalid='raise')
s = lambda x: remove_brackets(str(np.matrix(x)))
def update_separators(s, ii):
'''s are separators with ii masked. Returns unmasked separators.'''
maplist = np.arange(len(ii) + 1)[np.squeeze(ii)]
mask_to_unmask = lambda i: maplist[i]
t = np.apply_along_axis(mask_to_unmask, 0, s[1:-1])
t = np.hstack(((0), t, (len(ii))))
return t
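# Worked example (assumed values): with ii = [True, False, True, True, False, True]
# and separators s = [0, 1, 2, 3, 4] computed on the masked data, maplist is
# [0, 2, 3, 5], the interior cut points [1, 2, 3] map to [2, 3, 5], and the
# function returns [0, 2, 3, 5, 6], i.e. separators in unmasked SNP indices.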
def p_z_norm(est, se):
'''Convert estimate and se to Z-score and P-value.'''
try:
Z = est / se
except (FloatingPointError, ZeroDivisionError):
Z = float('inf')
P = chi2.sf(Z ** 2, 1, loc=0, scale=1) # 0 if Z=inf
return P, Z
def remove_brackets(x):
'''Get rid of brackets and trailing whitespace in numpy arrays.'''
return x.replace('[', '').replace(']', '').strip()
def append_intercept(x):
'''
Appends an intercept term to the design matrix for a linear regression.
Parameters
----------
x : np.matrix with shape (n_row, n_col)
Design matrix. Columns are predictors; rows are observations.
Returns
-------
x_new : np.matrix with shape (n_row, n_col+1)
Design matrix with intercept term appended.
'''
n_row = x.shape[0]
intercept = np.ones((n_row, 1))
x_new = np.concatenate((x, intercept), axis=1)
return x_new
def remove_intercept(x):
'''Removes the last column.'''
n_col = x.shape[1]
return x[:, 0:n_col - 1]
def gencov_obs_to_liab(gencov_obs, P1, P2, K1, K2):
'''
Converts genetic covariance on the observed scale in an ascertained sample to genetic
covariance on the liability scale in the population
Parameters
----------
gencov_obs : float
Genetic covariance on the observed scale in an ascertained sample.
P1, P2 : float in (0,1)
Prevalences of phenotypes 1,2 in the sample.
K1, K2 : float in (0,1)
Prevalences of phenotypes 1,2 in the population.
Returns
-------
gencov_liab : float
Genetic covariance between liabilities in the population.
Note: if a trait is a QT, set P = K = None.
'''
c1 = 1
c2 = 1
if P1 is not None and K1 is not None:
c1 = np.sqrt(h2_obs_to_liab(1, P1, K1))
if P2 is not None and K2 is not None:
c2 = np.sqrt(h2_obs_to_liab(1, P2, K2))
return gencov_obs * c1 * c2
def h2_obs_to_liab(h2_obs, P, K):
'''
Converts heritability on the observed scale in an ascertained sample to heritability
on the liability scale in the population.
Parameters
----------
h2_obs : float
Heritability on the observed scale in an ascertained sample.
P : float in (0,1)
Prevalence of the phenotype in the sample.
K : float in (0,1)
Prevalence of the phenotype in the population.
Returns
-------
h2_liab : float
Heritability of liability in the population.
'''
if np.isnan(P) and np.isnan(K):
return h2_obs
if K <= 0 or K >= 1:
raise ValueError('K must be in the range (0,1)')
if P <= 0 or P >= 1:
raise ValueError('P must be in the range (0,1)')
thresh = norm.isf(K)
conversion_factor = K ** 2 * \
(1 - K) ** 2 / (P * (1 - P) * norm.pdf(thresh) ** 2)
return h2_obs * conversion_factor
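# Minimal usage sketch (illustrative numbers, not part of ldsc): convert an
# observed-scale h2 of 0.3, estimated in a balanced case/control sample
# (P = 0.5) of a trait with 1% population prevalence (K = 0.01), to the
# liability scale.
def _example_h2_obs_to_liab():
    return h2_obs_to_liab(0.3, 0.5, 0.01)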
class LD_Score_Regression(object):
def __init__(self, y, x, w, N, M, n_blocks, intercept=None, slow=False, step1_ii=None, old_weights=False):
for i in [y, x, w, M, N]:
try:
if len(i.shape) != 2:
raise TypeError('Arguments must be 2D arrays.')
except AttributeError:
raise TypeError('Arguments must be arrays.')
n_snp, self.n_annot = x.shape
if any(i.shape != (n_snp, 1) for i in [y, w, N]):
raise ValueError(
'N, weights and response (z1z2 or chisq) must have shape (n_snp, 1).')
if M.shape != (1, self.n_annot):
raise ValueError('M must have shape (1, n_annot).')
M_tot = float(np.sum(M))
x_tot = np.sum(x, axis=1).reshape((n_snp, 1))
self.constrain_intercept = intercept is not None
self.intercept = intercept
self.n_blocks = n_blocks
tot_agg = self.aggregate(y, x_tot, N, M_tot, intercept)
initial_w = self._update_weights(
x_tot, w, N, M_tot, tot_agg, intercept)
Nbar = np.mean(N) # keep condition number low
x = np.multiply(N, x) / Nbar
if not self.constrain_intercept:
x, x_tot = append_intercept(x), append_intercept(x_tot)
yp = y
else:
yp = y - intercept
self.intercept_se = 'NA'
del y
self.twostep_filtered = None
if step1_ii is not None and self.constrain_intercept:
raise ValueError(
'twostep is not compatible with constrain_intercept.')
elif step1_ii is not None and self.n_annot > 1:
raise ValueError(
'twostep not compatible with partitioned LD Score yet.')
elif step1_ii is not None:
n1 = np.sum(step1_ii)
self.twostep_filtered = n_snp - n1
x1 = x[np.squeeze(step1_ii), :]
yp1, w1, N1, initial_w1 = map(
lambda a: a[step1_ii].reshape((n1, 1)), (yp, w, N, initial_w))
update_func1 = lambda a: self._update_func(
a, x1, w1, N1, M_tot, Nbar, ii=step1_ii)
step1_jknife = IRWLS(
x1, yp1, update_func1, n_blocks, slow=slow, w=initial_w1)
step1_int, _ = self._intercept(step1_jknife)
yp = yp - step1_int
x = remove_intercept(x)
x_tot = remove_intercept(x_tot)
update_func2 = lambda a: self._update_func(
a, x_tot, w, N, M_tot, Nbar, step1_int)
s = update_separators(step1_jknife.separators, step1_ii)
step2_jknife = IRWLS(
x, yp, update_func2, n_blocks, slow=slow, w=initial_w, separators=s)
c = np.sum(np.multiply(initial_w, x)) / \
np.sum(np.multiply(initial_w, np.square(x)))
jknife = self._combine_twostep_jknives(
step1_jknife, step2_jknife, M_tot, c, Nbar)
elif old_weights:
initial_w = np.sqrt(initial_w)
x = IRWLS._weight(x, initial_w)
y = IRWLS._weight(yp, initial_w)
jknife = jk.LstsqJackknifeFast(x, y, n_blocks)
else:
update_func = lambda a: self._update_func(
a, x_tot, w, N, M_tot, Nbar, intercept)
jknife = IRWLS(
x, yp, update_func, n_blocks, slow=slow, w=initial_w)
self.coef, self.coef_cov, self.coef_se = self._coef(jknife, Nbar)
self.cat, self.cat_cov, self.cat_se =\
self._cat(jknife, M, Nbar, self.coef, self.coef_cov)
self.tot, self.tot_cov, self.tot_se = self._tot(self.cat, self.cat_cov)
self.prop, self.prop_cov, self.prop_se =\
self._prop(jknife, M, Nbar, self.cat, self.tot)
self.enrichment, self.M_prop = self._enrichment(
M, M_tot, self.cat, self.tot)
if not self.constrain_intercept:
self.intercept, self.intercept_se = self._intercept(jknife)
self.jknife = jknife
self.tot_delete_values = self._delete_vals_tot(jknife, Nbar, M)
if not self.constrain_intercept:
self.intercept_delete_values = jknife.delete_values[
:, self.n_annot]
self.M = M
@classmethod
def aggregate(cls, y, x, N, M, intercept=None):
if intercept is None:
intercept = cls.__null_intercept__
num = M * (np.mean(y) - intercept)
denom = np.mean(np.multiply(x, N))
return num / denom
def _update_func(self, x, ref_ld_tot, w_ld, N, M, Nbar, intercept=None, ii=None):
raise NotImplementedError
def _delete_vals_tot(self, jknife, Nbar, M):
'''Get delete values for total h2 or gencov.'''
n_annot = self.n_annot
tot_delete_vals = jknife.delete_values[
:, 0:n_annot] # shape (n_blocks, n_annot)
# shape (n_blocks, 1)
tot_delete_vals = np.dot(tot_delete_vals, M.T) / Nbar
return tot_delete_vals
def _coef(self, jknife, Nbar):
'''Get coefficient estimates + cov from the jackknife.'''
n_annot = self.n_annot
coef = jknife.est[0, 0:n_annot] / Nbar
coef_cov = jknife.jknife_cov[0:n_annot, 0:n_annot] / Nbar ** 2
coef_se = np.sqrt(np.diag(coef_cov))
return coef, coef_cov, coef_se
def _cat(self, jknife, M, Nbar, coef, coef_cov):
'''Convert coefficients to per-category h2 or gencov.'''
cat = np.multiply(M, coef)
cat_cov = np.multiply(np.dot(M.T, M), coef_cov)
cat_se = np.sqrt(np.diag(cat_cov))
return cat, cat_cov, cat_se
def _tot(self, cat, cat_cov):
'''Convert per-category h2 to total h2 or gencov.'''
tot = np.sum(cat)
tot_cov = np.sum(cat_cov)
tot_se = np.sqrt(tot_cov)
return tot, tot_cov, tot_se
def _prop(self, jknife, M, Nbar, cat, tot):
'''Convert total h2 and per-category h2 to per-category proportion h2 or gencov.'''
n_annot = self.n_annot
n_blocks = jknife.delete_values.shape[0]
numer_delete_vals = np.multiply(
M, jknife.delete_values[:, 0:n_annot]) / Nbar # (n_blocks, n_annot)
denom_delete_vals = np.sum(
numer_delete_vals, axis=1).reshape((n_blocks, 1))
denom_delete_vals = np.dot(denom_delete_vals, np.ones((1, n_annot)))
prop = jk.RatioJackknife(
cat / tot, numer_delete_vals, denom_delete_vals)
return prop.est, prop.jknife_cov, prop.jknife_se
def _enrichment(self, M, M_tot, cat, tot):
'''Compute proportion of SNPs per-category enrichment for h2 or gencov.'''
M_prop = M / M_tot
enrichment = np.divide(cat, M) / (tot / M_tot)
return enrichment, M_prop
def _intercept(self, jknife):
'''Extract intercept and intercept SE from block jackknife.'''
n_annot = self.n_annot
intercept = jknife.est[0, n_annot]
intercept_se = jknife.jknife_se[0, n_annot]
return intercept, intercept_se
def _combine_twostep_jknives(self, step1_jknife, step2_jknife, M_tot, c, Nbar=1):
'''Combine free intercept and constrained intercept jackknives for --two-step.'''
n_blocks, n_annot = step1_jknife.delete_values.shape
n_annot -= 1
if n_annot > 2:
raise ValueError(
'twostep not yet implemented for partitioned LD Score.')
step1_int, _ = self._intercept(step1_jknife)
est = np.hstack(
(step2_jknife.est, np.array(step1_int).reshape((1, 1))))
delete_values = np.zeros((n_blocks, n_annot + 1))
delete_values[:, n_annot] = step1_jknife.delete_values[:, n_annot]
delete_values[:, 0:n_annot] = step2_jknife.delete_values -\
c * (step1_jknife.delete_values[:, n_annot] -
step1_int).reshape((n_blocks, n_annot)) # check this
pseudovalues = jk.Jackknife.delete_values_to_pseudovalues(
delete_values, est)
jknife_est, jknife_var, jknife_se, jknife_cov = jk.Jackknife.jknife(
pseudovalues)
jknife = namedtuple('jknife',
['est', 'jknife_se', 'jknife_est', 'jknife_var', 'jknife_cov', 'delete_values'])
return jknife(est, jknife_se, jknife_est, jknife_var, jknife_cov, delete_values)
class Hsq(LD_Score_Regression):
__null_intercept__ = 1
def __init__(self, y, x, w, N, M, n_blocks=200, intercept=None, slow=False, twostep=None, old_weights=False):
step1_ii = None
if twostep is not None:
step1_ii = y < twostep
LD_Score_Regression.__init__(self, y, x, w, N, M, n_blocks, intercept=intercept,
slow=slow, step1_ii=step1_ii, old_weights=old_weights)
self.mean_chisq, self.lambda_gc = self._summarize_chisq(y)
if not self.constrain_intercept:
self.ratio, self.ratio_se = self._ratio(
self.intercept, self.intercept_se, self.mean_chisq)
def _update_func(self, x, ref_ld_tot, w_ld, N, M, Nbar, intercept=None, ii=None):
'''
Update function for IRWLS
x is the output of np.linalg.lstsq.
x[0] is the regression coefficients
x[0].shape is (# of dimensions, 1)
the last element of x[0] is the intercept.
intercept is None --> free intercept
intercept is not None --> constrained intercept
'''
hsq = M * x[0][0] / Nbar
if intercept is None:
intercept = max(x[0][1]) # divide by zero error if intercept < 0
else:
if ref_ld_tot.shape[1] > 1:
raise ValueError(
'Design matrix has intercept column for constrained intercept regression!')
ld = ref_ld_tot[:, 0].reshape(w_ld.shape) # remove intercept
w = self.weights(ld, w_ld, N, M, hsq, intercept, ii)
return w
def _summarize_chisq(self, chisq):
'''Compute mean chi^2 and lambda_GC.'''
mean_chisq = np.mean(chisq)
# median and matrix don't play nice
lambda_gc = np.median(np.asarray(chisq)) / 0.4549
return mean_chisq, lambda_gc
def _ratio(self, intercept, intercept_se, mean_chisq):
'''Compute ratio (intercept - 1) / (mean chi^2 -1 ).'''
if mean_chisq > 1:
ratio_se = intercept_se / (mean_chisq - 1)
ratio = (intercept - 1) / (mean_chisq - 1)
else:
ratio = 'NA'
ratio_se = 'NA'
return ratio, ratio_se
def _overlap_output(self, category_names, overlap_matrix, M_annot, M_tot, print_coefficients):
'''LD Score regression summary for overlapping categories.'''
overlap_matrix_prop = np.zeros([self.n_annot,self.n_annot])
for i in range(self.n_annot):
overlap_matrix_prop[i, :] = overlap_matrix[i, :] / M_annot
prop_hsq_overlap = np.dot(
overlap_matrix_prop, self.prop.T).reshape((1, self.n_annot))
prop_hsq_overlap_var = np.diag(
np.dot(np.dot(overlap_matrix_prop, self.prop_cov), overlap_matrix_prop.T))
prop_hsq_overlap_se = np.sqrt(
np.maximum(0, prop_hsq_overlap_var)).reshape((1, self.n_annot))
one_d_convert = lambda x: np.array(x).reshape(np.prod(x.shape))
prop_M_overlap = M_annot / M_tot
enrichment = prop_hsq_overlap / prop_M_overlap
enrichment_se = prop_hsq_overlap_se / prop_M_overlap
overlap_matrix_diff = np.zeros([self.n_annot,self.n_annot])
for i in range(self.n_annot):
if not M_tot == M_annot[0,i]:
overlap_matrix_diff[i, :] = overlap_matrix[i,:]/M_annot[0,i] - \
(M_annot - overlap_matrix[i,:]) / (M_tot-M_annot[0,i])
diff_est = np.dot(overlap_matrix_diff,self.coef)
diff_cov = np.dot(np.dot(overlap_matrix_diff,self.coef_cov),overlap_matrix_diff.T)
diff_se = np.sqrt(np.diag(diff_cov))
diff_p = ['NA' if diff_se[i]==0 else 2*tdist.sf(abs(diff_est[i]/diff_se[i]),self.n_blocks) \
for i in range(self.n_annot)]
df = pd.DataFrame({
'Category': category_names,
'Prop._SNPs': one_d_convert(prop_M_overlap),
'Prop._h2': one_d_convert(prop_hsq_overlap),
'Prop._h2_std_error': one_d_convert(prop_hsq_overlap_se),
'Enrichment': one_d_convert(enrichment),
'Enrichment_std_error': one_d_convert(enrichment_se),
'Enrichment_p':diff_p,
'Coefficient': one_d_convert(self.coef),
'Coefficient_std_error': self.coef_se,
'Coefficient_z-score': one_d_convert(self.coef) / one_d_convert(self.coef_se)
})
if print_coefficients:
df = df[['Category', 'Prop._SNPs', 'Prop._h2', 'Prop._h2_std_error',
'Enrichment','Enrichment_std_error', 'Enrichment_p',
'Coefficient', 'Coefficient_std_error','Coefficient_z-score']]
else:
df = df[['Category', 'Prop._SNPs', 'Prop._h2', 'Prop._h2_std_error',
'Enrichment','Enrichment_std_error', 'Enrichment_p']]
return df
def summary(self, ref_ld_colnames=None, P=None, K=None, overlap=False):
'''Print summary of the LD Score Regression.'''
if P is not None and K is not None:
T = 'Liability'
c = h2_obs_to_liab(1, P, K)
else:
T = 'Observed'
c = 1
out = ['Total ' + T + ' scale h2: ' +
s(c * self.tot) + ' (' + s(c * self.tot_se) + ')']
if self.n_annot > 1:
if ref_ld_colnames is None:
ref_ld_colnames = ['CAT_' + str(i)
for i in xrange(self.n_annot)]
out.append('Categories: ' + ' '.join(ref_ld_colnames))
if not overlap:
out.append(T + ' scale h2: ' + s(c * self.cat))
out.append(T + ' scale h2 SE: ' + s(c * self.cat_se))
out.append('Proportion of SNPs: ' + s(self.M_prop))
out.append('Proportion of h2g: ' + s(self.prop))
out.append('Enrichment: ' + s(self.enrichment))
out.append('Coefficients: ' + s(self.coef))
out.append('Coefficient SE: ' + s(self.coef_se))
out.append('Lambda GC: ' + s(self.lambda_gc))
out.append('Mean Chi^2: ' + s(self.mean_chisq))
if self.constrain_intercept:
out.append(
'Intercept: constrained to {C}'.format(C=s(self.intercept)))
else:
out.append(
'Intercept: ' + s(self.intercept) + ' (' + s(self.intercept_se) + ')')
if self.mean_chisq > 1:
if self.ratio < 0:
out.append(
                        'Ratio < 0 (usually indicates GC correction).')
else:
out.append(
'Ratio: ' + s(self.ratio) + ' (' + s(self.ratio_se) + ')')
else:
out.append('Ratio: NA (mean chi^2 < 1)')
return remove_brackets('\n'.join(out))
def _update_weights(self, ld, w_ld, N, M, hsq, intercept, ii=None):
if intercept is None:
intercept = self.__null_intercept__
return self.weights(ld, w_ld, N, M, hsq, intercept, ii)
@classmethod
def weights(cls, ld, w_ld, N, M, hsq, intercept=None, ii=None):
'''
Regression weights.
Parameters
----------
ld : np.matrix with shape (n_snp, 1)
LD Scores (non-partitioned).
w_ld : np.matrix with shape (n_snp, 1)
LD Scores (non-partitioned) computed with sum r^2 taken over only those SNPs included
in the regression.
N : np.matrix of ints > 0 with shape (n_snp, 1)
Number of individuals sampled for each SNP.
M : float > 0
Number of SNPs used for estimating LD Score (need not equal number of SNPs included in
the regression).
hsq : float in [0,1]
Heritability estimate.
Returns
-------
w : np.matrix with shape (n_snp, 1)
Regression weights. Approx equal to reciprocal of conditional variance function.
'''
M = float(M)
if intercept is None:
intercept = 1
hsq = max(hsq, 0.0)
hsq = min(hsq, 1.0)
ld = np.fmax(ld, 1.0)
w_ld = np.fmax(w_ld, 1.0)
c = hsq * N / M
het_w = 1.0 / (2 * np.square(intercept + np.multiply(c, ld)))
oc_w = 1.0 / w_ld
w = np.multiply(het_w, oc_w)
return w
class Gencov(LD_Score_Regression):
__null_intercept__ = 0
def __init__(self, z1, z2, x, w, N1, N2, M, hsq1, hsq2, intercept_hsq1, intercept_hsq2,
n_blocks=200, intercept_gencov=None, slow=False, twostep=None):
self.intercept_hsq1 = intercept_hsq1
self.intercept_hsq2 = intercept_hsq2
self.hsq1 = hsq1
self.hsq2 = hsq2
self.N1 = N1
self.N2 = N2
y = z1 * z2
step1_ii = None
if twostep is not None:
step1_ii = np.logical_and(z1**2 < twostep, z2**2 < twostep)
LD_Score_Regression.__init__(self, y, x, w, np.sqrt(N1 * N2), M, n_blocks,
intercept=intercept_gencov, slow=slow, step1_ii=step1_ii)
self.p, self.z = p_z_norm(self.tot, self.tot_se)
self.mean_z1z2 = np.mean(np.multiply(z1, z2))
def summary(self, ref_ld_colnames, P=None, K=None):
'''Print summary of the LD Score regression.'''
out = []
if P is not None and K is not None and\
all((i is not None for i in P)) and all((i is not None for i in K)):
T = 'Liability'
c = gencov_obs_to_liab(1, P[0], P[1], K[0], K[1])
else:
T = 'Observed'
c = 1
out.append('Total ' + T + ' scale gencov: ' +
s(self.tot) + ' (' + s(self.tot_se) + ')')
if self.n_annot > 1:
out.append('Categories: ' + str(' '.join(ref_ld_colnames)))
out.append(T + ' scale gencov: ' + s(c * self.cat))
out.append(T + ' scale gencov SE: ' + s(c * self.cat_se))
out.append('Proportion of SNPs: ' + s(self.M_prop))
out.append('Proportion of gencov: ' + s(self.prop))
out.append('Enrichment: ' + s(self.enrichment))
out.append('Mean z1*z2: ' + s(self.mean_z1z2))
if self.constrain_intercept:
out.append(
'Intercept: constrained to {C}'.format(C=s(self.intercept)))
else:
out.append(
'Intercept: ' + s(self.intercept) + ' (' + s(self.intercept_se) + ')')
return remove_brackets('\n'.join(out))
def _update_func(self, x, ref_ld_tot, w_ld, N, M, Nbar, intercept=None, ii=None):
'''
Update function for IRWLS
x is the output of np.linalg.lstsq.
x[0] is the regression coefficients
x[0].shape is (# of dimensions, 1)
the last element of x[0] is the intercept.
'''
rho_g = M * x[0][0] / Nbar
if intercept is None: # if the regression includes an intercept
intercept = x[0][1]
# remove intercept if we have one
ld = ref_ld_tot[:, 0].reshape(w_ld.shape)
if ii is not None:
N1 = self.N1[ii].reshape((w_ld.shape))
N2 = self.N2[ii].reshape((w_ld.shape))
else:
N1 = self.N1
N2 = self.N2
return self.weights(ld, w_ld, N1, N2, np.sum(M), self.hsq1, self.hsq2, rho_g,
intercept, self.intercept_hsq1, self.intercept_hsq2, ii)
def _update_weights(self, ld, w_ld, sqrt_n1n2, M, rho_g, intercept, ii=None):
'''Weight function with the same signature for Hsq and Gencov.'''
w = self.weights(ld, w_ld, self.N1, self.N2, M, self.hsq1, self.hsq2, rho_g,
intercept, self.intercept_hsq1, self.intercept_hsq2)
return w
@classmethod
def weights(cls, ld, w_ld, N1, N2, M, h1, h2, rho_g, intercept_gencov=None,
intercept_hsq1=None, intercept_hsq2=None, ii=None):
'''
Regression weights.
Parameters
----------
ld : np.matrix with shape (n_snp, 1)
LD Scores (non-partitioned)
w_ld : np.matrix with shape (n_snp, 1)
LD Scores (non-partitioned) computed with sum r^2 taken over only those SNPs included
in the regression.
M : float > 0
Number of SNPs used for estimating LD Score (need not equal number of SNPs included in
the regression).
N1, N2 : np.matrix of ints > 0 with shape (n_snp, 1)
Number of individuals sampled for each SNP for each study.
h1, h2 : float in [0,1]
Heritability estimates for each study.
rhog : float in [0,1]
Genetic covariance estimate.
intercept : float
Genetic covariance intercept, on the z1*z2 scale (so should be Ns*rho/sqrt(N1*N2)).
Returns
-------
w : np.matrix with shape (n_snp, 1)
Regression weights. Approx equal to reciprocal of conditional variance function.
'''
M = float(M)
if intercept_gencov is None:
intercept_gencov = 0
if intercept_hsq1 is None:
intercept_hsq1 = 1
if intercept_hsq2 is None:
intercept_hsq2 = 1
h1, h2 = max(h1, 0.0), max(h2, 0.0)
h1, h2 = min(h1, 1.0), min(h2, 1.0)
rho_g = min(rho_g, 1.0)
rho_g = max(rho_g, -1.0)
ld = np.fmax(ld, 1.0)
w_ld = np.fmax(w_ld, 1.0)
a = np.multiply(N1, h1 * ld) / M + intercept_hsq1
b = np.multiply(N2, h2 * ld) / M + intercept_hsq2
sqrt_n1n2 = np.sqrt(np.multiply(N1, N2))
c = np.multiply(sqrt_n1n2, rho_g * ld) / M + intercept_gencov
try:
het_w = 1.0 / (np.multiply(a, b) + np.square(c))
except FloatingPointError: # bizarre error; should never happen
raise FloatingPointError('Why did you set hsq intercept <= 0?')
oc_w = 1.0 / w_ld
w = np.multiply(het_w, oc_w)
return w
class RG(object):
def __init__(self, z1, z2, x, w, N1, N2, M, intercept_hsq1=None, intercept_hsq2=None,
intercept_gencov=None, n_blocks=200, slow=False, twostep=None):
self._negative_hsq = None
n_snp, n_annot = x.shape
hsq1 = Hsq(np.square(z1), x, w, N1, M, n_blocks=n_blocks, intercept=intercept_hsq1,
slow=slow, twostep=twostep)
hsq2 = Hsq(np.square(z2), x, w, N2, M, n_blocks=n_blocks, intercept=intercept_hsq2,
slow=slow, twostep=twostep)
gencov = Gencov(z1, z2, x, w, N1, N2, M, hsq1.tot, hsq2.tot, hsq1.intercept,
hsq2.intercept, n_blocks, intercept_gencov=intercept_gencov, slow=slow,
twostep=twostep)
gencov.N1 = None # save memory
gencov.N2 = None
self.hsq1, self.hsq2, self.gencov = hsq1, hsq2, gencov
if (hsq1.tot <= 0 or hsq2.tot <= 0):
self._negative_hsq = True
self.rg_ratio = self.rg = self.rg_se = 'NA'
self.p = self.z = 'NA'
else:
rg_ratio = np.array(
gencov.tot / np.sqrt(hsq1.tot * hsq2.tot)).reshape((1, 1))
denom_delete_values = np.sqrt(
np.multiply(hsq1.tot_delete_values, hsq2.tot_delete_values))
rg = jk.RatioJackknife(
rg_ratio, gencov.tot_delete_values, denom_delete_values)
self.rg_jknife = float(rg.jknife_est)
self.rg_se = float(rg.jknife_se)
self.rg_ratio = float(rg_ratio)
self.p, self.z = p_z_norm(self.rg_ratio, self.rg_se)
def summary(self, silly=False):
'''Print output of Gencor object.'''
out = []
if self._negative_hsq:
out.append('Genetic Correlation: nan (nan) (h2 out of bounds) ')
out.append('Z-score: nan (nan) (h2 out of bounds)')
out.append('P: nan (nan) (h2 out of bounds)')
out.append('WARNING: One of the h2\'s was out of bounds.')
out.append(
'This usually indicates a data-munging error or that h2 or N is low.')
elif (self.rg_ratio > 1.2 or self.rg_ratio < -1.2) and not silly:
out.append('Genetic Correlation: nan (nan) (rg out of bounds) ')
out.append('Z-score: nan (nan) (rg out of bounds)')
out.append('P: nan (nan) (rg out of bounds)')
out.append('WARNING: rg was out of bounds.')
out.append(
'This usually means that h2 is not significantly different from zero.')
else:
out.append(
'Genetic Correlation: ' + s(self.rg_ratio) + ' (' + s(self.rg_se) + ')')
out.append('Z-score: ' + s(self.z))
out.append('P: ' + s(self.p))
return remove_brackets('\n'.join(out))
| gpl-3.0 | 1,021,471,430,700,087,800 | 38.59863 | 113 | 0.553465 | false |
sergeiliashko/sif | paramsfactory/geometry.py | 1 | 2445 | # -*- coding: utf-8 -*-
import numpy as np
from scipy.spatial import distance
from numpy import sqrt
from numpy import pi
def load_vectorcoordinates_from_txt(inputfile):
coords = np.loadtxt(inputfile)
n,i = coords.shape
return coords.reshape((n,int(i/2),2))
def generate_centercoords(vectorcoords):
# assuming it has (N,m,2) dimensions
return (vectorcoords[:,0] + vectorcoords[:,1])/2.
def generate_vector_weight(vectors):
# assuming it (N,m)
norms = np.apply_along_axis(np.linalg.norm,1,vectors)
return norms/np.min(norms)
def generate_angle_with_Ox(vectorcoords):
vectors = (vectorcoords[:,1] - vectorcoords[:,0])
n,m = vectors.shape
normed_vectors = vectors/np.apply_along_axis(np.linalg.norm,1, vectors).reshape((n,1))
x_axis = np.repeat(0., m)
x_axis[0] = 1.
angles = np.array([], dtype=float)
for unit_vec in normed_vectors:
if unit_vec[1]<0 :
angles = np.append(angles, -np.arccos(np.clip(np.dot(unit_vec,x_axis),-1.0,1.0)))
else:
angles = np.append(angles, +np.arccos(np.clip(np.dot(unit_vec,x_axis),-1.0,1.0)))
return angles
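# Illustrative sketch (assumed coordinates): a segment from (0, 0) to (1, 1)
# makes an angle of +pi/4 with Ox, while (0, 0) to (1, -1) gives -pi/4, since
# the sign of the y-component determines the sign of the returned angle.
def _example_angle_with_ox():
    coords = np.array([[[0., 0.], [1., 1.]],
                       [[0., 0.], [1., -1.]]])
    return generate_angle_with_Ox(coords)  # approximately [pi/4, -pi/4]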
def generate_distance_vectors(xycenterdots, equalhalfs=True):
    # equalhalfs means the lower triangle equals the upper triangle
result = np.array([])
for xy1 in xycenterdots:
for xy2 in xycenterdots:
vector = xy2 - xy1
if vector[0] < 0:
result = np.append(result, np.array([np.arctan(np.divide(vector[1], vector[0]))])+np.pi)
elif vector[1] < 0:
result = np.append(result, np.array([np.arctan(np.divide(vector[1], vector[0]))])+2*np.pi)
else:
result = np.append(result, np.array([np.arctan(np.divide(vector[1], vector[0]))]))
n = len(xycenterdots)
result = result.reshape((n,n))
if equalhalfs:
result[np.tril_indices(n)]=(result[np.tril_indices(n)] + pi)%(2*pi)
return np.nan_to_num(result)
def generate_distances(xycenterdots, lattice_constant):
return distance.cdist(xycenterdots, xycenterdots, 'euclidean')*lattice_constant
def get_kagome_properties(lattice_constant):
r3 = 3/4.; s3 = sqrt(3)/2. #temporary vars
kagome_coords = np.array([[0,s3], [r3,s3/2], [r3,-s3/2], [0,-s3], [-r3,-s3/2], [-r3,s3/2]])
dd = generate_distances(kagome_coords, 2*lattice_constant/sqrt(3.))
uv = generate_distance_vectors(kagome_coords)
return (dd, uv)
| gpl-3.0 | -9,212,511,790,720,546,000 | 38.435484 | 106 | 0.636401 | false |
hyperwd/hwcram | rds/migrations/0001_initial.py | 1 | 1871 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-27 08:05
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('account', '0014_auto_20171014_1523'),
]
operations = [
migrations.CreateModel(
name='Rds',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rds_name', models.CharField(max_length=128, null=True, verbose_name='实例名称')),
('rds_id', models.CharField(max_length=40, null=True, verbose_name='实例ID')),
('rds_status', models.CharField(max_length=20, null=True, verbose_name='状态')),
('region', models.CharField(choices=[('cn-north-1', '华北1'), ('cn-south-1', '华南1'), ('cn-east-2', '华东2')], default='cn-north-1', max_length=32, verbose_name='区域')),
('rds_host', models.GenericIPAddressField(null=True, verbose_name='IP地址')),
('rds_port', models.CharField(max_length=10, null=True, verbose_name='端口')),
('rds_type', models.CharField(max_length=20, null=True, verbose_name='类型')),
('rds_delete_time', models.DateTimeField(null=True, verbose_name='删除时间')),
('account_name', models.CharField(max_length=20, null=True, verbose_name='账户')),
('account', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='account.Account')),
],
options={
'verbose_name': '关系型数据库',
'verbose_name_plural': '关系型数据库',
'db_table': 'hw_rds',
},
),
]
| mit | 1,182,857,725,951,132,000 | 44.923077 | 179 | 0.572306 | false |
jdstregz/sky-scraper | prototypes/prototypeAWS/prototypeAWS/settings.py | 1 | 3478 | # -*- coding: utf-8 -*-
# Scrapy settings for prototypeAWS project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'prototypeAWS'
SPIDER_MODULES = ['prototypeAWS.spiders']
NEWSPIDER_MODULE = 'prototypeAWS.spiders'
#SPLASH_URL = 'http://192.168.59.103:8050'
SPLASH_URL = 'http://localhost:8050/'
DUPEFILTER_CLASS = 'scrapy_splash.SplashAwareDupeFilter'
HTTPCACHE_STORAGE = 'scrapy_splash.SplashAwareFSCacheStorage'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'prototypeAWS (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
SPIDER_MIDDLEWARES = {
'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,
}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
'scrapy_splash.SplashCookiesMiddleware': 723,
'scrapy_splash.SplashMiddleware': 725,
'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'prototypeAWS.pipelines.SomePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| mit | 843,985,632,857,596,800 | 34.489796 | 109 | 0.769695 | false |
Pazitos10/TNT | webapp/app/tntapp/migrations/0002_auto_20160527_1234.py | 1 | 1126 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-27 15:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tntapp', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='alumnomateria',
name='id_alumno',
),
migrations.RemoveField(
model_name='alumnomateria',
name='id_materia',
),
migrations.AddField(
model_name='asistencia',
name='latitud',
field=models.FloatField(null=True),
),
migrations.AddField(
model_name='asistencia',
name='longitud',
field=models.FloatField(null=True),
),
migrations.AlterField(
model_name='asistencia',
name='id_alumno',
field=models.CharField(max_length=50),
),
migrations.DeleteModel(
name='Alumno',
),
migrations.DeleteModel(
name='AlumnoMateria',
),
]
| mit | 6,627,745,366,140,349,000 | 24.590909 | 50 | 0.531083 | false |
tmt514/mtsa-dishes-translator | app/bot/intention_rules/pokemon_rules.py | 1 | 6881 |
from app.models import db, Term, Description
from app.bot.rule import Rule, ForceChangeStateException, transition
from app.bot.reply_generator import ListTemplate, ButtonTemplate, GenericTemplate
STATE_NEW = 'new'
STATE_POKEMON_SEARCH = 'STATE_POKEMON_SEARCH'
STATE_POKEMON_SEARCH_OK = 'STATE_POKEMON_SEARCH_OK'
STATE_HANDLE_MORE = 'STATE_HANDLE_MORE'
PAYLOAD_POKEMON_DESCRIPTION = 'PAYLOAD_POKEMON_DESCRIPTION'
PAYLOAD_POKEMON_SEARCH = 'PAYLOAD_POKEMON_SEARCH'
PAYLOAD_RELATED_POKEMON = 'PAYLOAD_RELATED_POKEMON'
PAYLOAD_MORE = 'PAYLOAD_MORE'
PAYLOAD_CANCEL = 'PAYLOAD_CANCEL'
PAYLOAD_CONTINUE_POKEMON = 'PAYLOAD_CONTINUE_POKEMON'
PAYLOAD_POKEMON_INFO = 'PAYLOAD_POKEMON_INFO'
import pickle
import jieba
import jieba.posseg as pseg
jieba.set_dictionary('app/data/dict.txt.big')
from app.data import POKEMON_REVERSE_INDEX, POKEMON_NAMES_MAPPING
from collections import defaultdict
def compute_docscore(sentence):
docscore = defaultdict(float)
for pair in sentence:
word = pair.word
doclist = POKEMON_REVERSE_INDEX.get(word) or []
for doc, score in doclist:
docscore[doc] += score
docs = [(v, k) for k, v in docscore.items()]
docs.sort(reverse=True)
return docs
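# Illustrative sketch (hypothetical index contents): if POKEMON_REVERSE_INDEX
# maps one segmented word to [('charmander', 1.5)] and another to
# [('charmander', 0.5), ('pikachu', 0.3)], a query containing both words
# returns [(2.0, 'charmander'), (0.3, 'pikachu')], best-scoring entry first.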
class PokemonRules(Rule):
@transition(STATE_NEW, {'quick_reply': {'payload': PAYLOAD_POKEMON_INFO}}, STATE_NEW)
def rule_pokemon_info(self, bot, user, msg, **template_params):
target = msg['quick_reply'].get('target')
if not target:
return True
term = Term.query.filter_by(english=target).first()
reply = GenericTemplate(image_aspect_ratio="square")
photo = term.photos.first()
buttons = ButtonTemplate()
buttons.add_postback_button(title="%s的習性" % term.chinese, payload="%s:%d,%s" % (PAYLOAD_POKEMON_DESCRIPTION, term.id, '習性'))
kwargs = {
"title": term.chinese,
"subtitle": term.english,
"buttons": buttons.button_list,
"default_action": {
"type": "web_url",
"url": "https://wiki.52poke.com/zh-hant/%s" % term.chinese,
}
}
if photo is not None:
kwargs['image_url'] = photo.url
reply.add_element(**kwargs)
reply = reply.generate()
reply['quick_replies'] = [
bot.reply_gen.QUICK_REPLY_CANCEL
]
bot.bot_send_message(user.id, reply)
return True
@transition(STATE_NEW, {'postback': {'payload': PAYLOAD_POKEMON_DESCRIPTION}}, STATE_NEW)
def rule_pokemon_description(self, bot, user, msg, **template_params):
target = msg['postback'].get('target')
if not target:
return True
term, subheading = target.split(",")
term = Term.query.get(int(term))
description = term.descriptions.filter_by(subheading=subheading).first()
if not description:
bot.bot_send_message(user.id, {"text": "很抱歉查無資料噢 >___<"})
return True
bot.bot_send_message(user.id, {"text": description.content})
return True
@transition(STATE_NEW, {'postback': {'payload': PAYLOAD_POKEMON_SEARCH}}, STATE_POKEMON_SEARCH)
def rule_start_pokemon_search(self, bot, user, msg, **template_params):
bot.bot_send_message(user.id, {"text": "請輸入關鍵字查詢寶可夢~"})
return True
@transition(STATE_NEW, {'NLP_decision': STATE_POKEMON_SEARCH}, STATE_POKEMON_SEARCH)
def rule_start_pokemon_search2(self, bot, user, msg, **template_params):
bot.bot_send_message(user.id, {"text": "請輸入關鍵字查詢寶可夢~"})
return True
@transition(STATE_POKEMON_SEARCH, {'text':''}, STATE_POKEMON_SEARCH_OK)
def rule_pokemon_search(self, bot, user, msg, **template_params):
sentence = pseg.cut(msg['text'])
docs = compute_docscore(sentence)
if len(docs) == 0:
bot.bot_send_message(user.id, {"text": "對不起,查無資料 QQ"})
raise ForceChangeStateException(state=STATE_NEW, halt=True)
term = Term.query.filter_by(english=POKEMON_NAMES_MAPPING[docs[0][1]]).first()
user.set_q(msg['text'])
user.set_english(term.english)
user.set_chinese(term.chinese)
reply = {"text": "%s (%s)" % (term.chinese, term.english)}
reply['quick_replies'] = [
bot.reply_gen.make_quick_reply(title="類似寶可夢",
payload=PAYLOAD_RELATED_POKEMON,
image_url="http://emojis.slackmojis.com/emojis/images/1450464069/186/pokeball.png?1450464069"),
bot.reply_gen.make_quick_reply(title="輸入新的查詢", payload=PAYLOAD_CONTINUE_POKEMON),
bot.reply_gen.QUICK_REPLY_MORE,
bot.reply_gen.QUICK_REPLY_CANCEL
]
bot.bot_send_message(user.id, reply)
return True
@transition(STATE_POKEMON_SEARCH_OK, {'quick_reply':{'payload': PAYLOAD_CONTINUE_POKEMON}}, STATE_POKEMON_SEARCH)
def rule_pokemon_search_continue(self, bot, user, msg, **template_params):
bot.bot_send_message(user.id, {"text": "請輸入關鍵字查詢寶可夢~"})
return True
@transition(STATE_POKEMON_SEARCH_OK, {'quick_reply':{'payload': PAYLOAD_CANCEL}}, STATE_NEW)
def rule_pokemon_cancel(self, bot, user, msg, **template_params):
return True
@transition(STATE_POKEMON_SEARCH_OK, {'quick_reply':{'payload': PAYLOAD_MORE}}, STATE_HANDLE_MORE)
def rule_pokemon_more(self, bot, user, msg, **template_params):
user.set_q(user.get_chinese())
return False
@transition(STATE_POKEMON_SEARCH_OK, {'quick_reply':{'payload': PAYLOAD_RELATED_POKEMON}}, STATE_POKEMON_SEARCH_OK)
def rule_pokemon_results(self, bot, user, msg, **template_params):
sentence = pseg.cut(user.get_q())
docs = compute_docscore(sentence)
reply = GenericTemplate()
for i in range(0, min(5, len(docs))):
term = Term.query.filter_by(english=POKEMON_NAMES_MAPPING[docs[i][1]]).first()
photo = term.photos.first()
buttons = ButtonTemplate()
buttons.add_postback_button(title="%s的習性" % term.chinese, payload="%s:%d,%s" % (PAYLOAD_POKEMON_DESCRIPTION, term.id, '習性'))
kwargs = {
"title": term.chinese,
"subtitle": term.english,
"buttons": buttons.button_list,
}
if photo is not None:
kwargs['image_url'] = photo.url
reply.add_element(**kwargs)
reply = reply.generate()
reply['quick_replies'] = [
bot.reply_gen.make_quick_reply(title="輸入新的查詢", payload=PAYLOAD_CONTINUE_POKEMON),
bot.reply_gen.QUICK_REPLY_CANCEL
]
bot.bot_send_message(user.id, reply)
return True
| mit | -8,864,736,756,024,627,000 | 39.017857 | 136 | 0.625465 | false |
pypa/setuptools | setuptools/tests/test_manifest.py | 1 | 18007 | # -*- coding: utf-8 -*-
"""sdist tests"""
import contextlib
import os
import shutil
import sys
import tempfile
import itertools
import io
from distutils import log
from distutils.errors import DistutilsTemplateError
from setuptools.command.egg_info import FileList, egg_info, translate_pattern
from setuptools.dist import Distribution
from setuptools.tests.textwrap import DALS
import pytest
def make_local_path(s):
"""Converts '/' in a string to os.sep"""
return s.replace('/', os.sep)
SETUP_ATTRS = {
'name': 'app',
'version': '0.0',
'packages': ['app'],
}
SETUP_PY = """\
from setuptools import setup
setup(**%r)
""" % SETUP_ATTRS
@contextlib.contextmanager
def quiet():
old_stdout, old_stderr = sys.stdout, sys.stderr
sys.stdout, sys.stderr = io.StringIO(), io.StringIO()
try:
yield
finally:
sys.stdout, sys.stderr = old_stdout, old_stderr
def touch(filename):
open(filename, 'w').close()
# The set of files always in the manifest, including all files in the
# .egg-info directory
default_files = frozenset(map(make_local_path, [
'README.rst',
'MANIFEST.in',
'setup.py',
'app.egg-info/PKG-INFO',
'app.egg-info/SOURCES.txt',
'app.egg-info/dependency_links.txt',
'app.egg-info/top_level.txt',
'app/__init__.py',
]))
translate_specs = [
('foo', ['foo'], ['bar', 'foobar']),
('foo/bar', ['foo/bar'], ['foo/bar/baz', './foo/bar', 'foo']),
# Glob matching
('*.txt', ['foo.txt', 'bar.txt'], ['foo/foo.txt']),
(
'dir/*.txt',
['dir/foo.txt', 'dir/bar.txt', 'dir/.txt'], ['notdir/foo.txt']),
('*/*.py', ['bin/start.py'], []),
('docs/page-?.txt', ['docs/page-9.txt'], ['docs/page-10.txt']),
# Globstars change what they mean depending upon where they are
(
'foo/**/bar',
['foo/bing/bar', 'foo/bing/bang/bar', 'foo/bar'],
['foo/abar'],
),
(
'foo/**',
['foo/bar/bing.py', 'foo/x'],
['/foo/x'],
),
(
'**',
['x', 'abc/xyz', '@nything'],
[],
),
# Character classes
(
'pre[one]post',
['preopost', 'prenpost', 'preepost'],
['prepost', 'preonepost'],
),
(
'hello[!one]world',
['helloxworld', 'helloyworld'],
['hellooworld', 'helloworld', 'hellooneworld'],
),
(
'[]one].txt',
['o.txt', '].txt', 'e.txt'],
['one].txt'],
),
(
'foo[!]one]bar',
['fooybar'],
['foo]bar', 'fooobar', 'fooebar'],
),
]
"""
A spec of inputs for 'translate_pattern' and matches and mismatches
for that input.
"""
match_params = itertools.chain.from_iterable(
zip(itertools.repeat(pattern), matches)
for pattern, matches, mismatches in translate_specs
)
@pytest.fixture(params=match_params)
def pattern_match(request):
return map(make_local_path, request.param)
mismatch_params = itertools.chain.from_iterable(
zip(itertools.repeat(pattern), mismatches)
for pattern, matches, mismatches in translate_specs
)
@pytest.fixture(params=mismatch_params)
def pattern_mismatch(request):
return map(make_local_path, request.param)
def test_translated_pattern_match(pattern_match):
pattern, target = pattern_match
assert translate_pattern(pattern).match(target)
def test_translated_pattern_mismatch(pattern_mismatch):
pattern, target = pattern_mismatch
assert not translate_pattern(pattern).match(target)
class TempDirTestCase:
def setup_method(self, method):
self.temp_dir = tempfile.mkdtemp()
self.old_cwd = os.getcwd()
os.chdir(self.temp_dir)
def teardown_method(self, method):
os.chdir(self.old_cwd)
shutil.rmtree(self.temp_dir)
class TestManifestTest(TempDirTestCase):
def setup_method(self, method):
super(TestManifestTest, self).setup_method(method)
f = open(os.path.join(self.temp_dir, 'setup.py'), 'w')
f.write(SETUP_PY)
f.close()
"""
Create a file tree like:
- LICENSE
- README.rst
- testing.rst
- .hidden.rst
- app/
- __init__.py
- a.txt
- b.txt
- c.rst
- static/
- app.js
- app.js.map
- app.css
- app.css.map
"""
for fname in ['README.rst', '.hidden.rst', 'testing.rst', 'LICENSE']:
touch(os.path.join(self.temp_dir, fname))
# Set up the rest of the test package
test_pkg = os.path.join(self.temp_dir, 'app')
os.mkdir(test_pkg)
for fname in ['__init__.py', 'a.txt', 'b.txt', 'c.rst']:
touch(os.path.join(test_pkg, fname))
# Some compiled front-end assets to include
static = os.path.join(test_pkg, 'static')
os.mkdir(static)
for fname in ['app.js', 'app.js.map', 'app.css', 'app.css.map']:
touch(os.path.join(static, fname))
def make_manifest(self, contents):
"""Write a MANIFEST.in."""
with open(os.path.join(self.temp_dir, 'MANIFEST.in'), 'w') as f:
f.write(DALS(contents))
def get_files(self):
"""Run egg_info and get all the files to include, as a set"""
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
cmd = egg_info(dist)
cmd.ensure_finalized()
cmd.run()
return set(cmd.filelist.files)
def test_no_manifest(self):
"""Check a missing MANIFEST.in includes only the standard files."""
assert (default_files - set(['MANIFEST.in'])) == self.get_files()
def test_empty_files(self):
"""Check an empty MANIFEST.in includes only the standard files."""
self.make_manifest("")
assert default_files == self.get_files()
def test_include(self):
"""Include extra rst files in the project root."""
self.make_manifest("include *.rst")
files = default_files | set([
'testing.rst', '.hidden.rst'])
assert files == self.get_files()
def test_exclude(self):
"""Include everything in app/ except the text files"""
ml = make_local_path
self.make_manifest(
"""
include app/*
exclude app/*.txt
""")
files = default_files | set([ml('app/c.rst')])
assert files == self.get_files()
def test_include_multiple(self):
"""Include with multiple patterns."""
ml = make_local_path
self.make_manifest("include app/*.txt app/static/*")
files = default_files | set([
ml('app/a.txt'), ml('app/b.txt'),
ml('app/static/app.js'), ml('app/static/app.js.map'),
ml('app/static/app.css'), ml('app/static/app.css.map')])
assert files == self.get_files()
def test_graft(self):
"""Include the whole app/static/ directory."""
ml = make_local_path
self.make_manifest("graft app/static")
files = default_files | set([
ml('app/static/app.js'), ml('app/static/app.js.map'),
ml('app/static/app.css'), ml('app/static/app.css.map')])
assert files == self.get_files()
def test_graft_glob_syntax(self):
"""Include the whole app/static/ directory."""
ml = make_local_path
self.make_manifest("graft */static")
files = default_files | set([
ml('app/static/app.js'), ml('app/static/app.js.map'),
ml('app/static/app.css'), ml('app/static/app.css.map')])
assert files == self.get_files()
def test_graft_global_exclude(self):
"""Exclude all *.map files in the project."""
ml = make_local_path
self.make_manifest(
"""
graft app/static
global-exclude *.map
""")
files = default_files | set([
ml('app/static/app.js'), ml('app/static/app.css')])
assert files == self.get_files()
def test_global_include(self):
"""Include all *.rst, *.js, and *.css files in the whole tree."""
ml = make_local_path
self.make_manifest(
"""
global-include *.rst *.js *.css
""")
files = default_files | set([
'.hidden.rst', 'testing.rst', ml('app/c.rst'),
ml('app/static/app.js'), ml('app/static/app.css')])
assert files == self.get_files()
def test_graft_prune(self):
"""Include all files in app/, except for the whole app/static/ dir."""
ml = make_local_path
self.make_manifest(
"""
graft app
prune app/static
""")
files = default_files | set([
ml('app/a.txt'), ml('app/b.txt'), ml('app/c.rst')])
assert files == self.get_files()
class TestFileListTest(TempDirTestCase):
"""
A copy of the relevant bits of distutils/tests/test_filelist.py,
to ensure setuptools' version of FileList keeps parity with distutils.
"""
def setup_method(self, method):
super(TestFileListTest, self).setup_method(method)
self.threshold = log.set_threshold(log.FATAL)
self._old_log = log.Log._log
log.Log._log = self._log
self.logs = []
def teardown_method(self, method):
log.set_threshold(self.threshold)
log.Log._log = self._old_log
super(TestFileListTest, self).teardown_method(method)
def _log(self, level, msg, args):
if level not in (log.DEBUG, log.INFO, log.WARN, log.ERROR, log.FATAL):
raise ValueError('%s wrong log level' % str(level))
self.logs.append((level, msg, args))
def get_logs(self, *levels):
def _format(msg, args):
if len(args) == 0:
return msg
return msg % args
return [_format(msg, args) for level, msg, args
in self.logs if level in levels]
def clear_logs(self):
self.logs = []
def assertNoWarnings(self):
assert self.get_logs(log.WARN) == []
self.clear_logs()
def assertWarnings(self):
assert len(self.get_logs(log.WARN)) > 0
self.clear_logs()
def make_files(self, files):
for file in files:
file = os.path.join(self.temp_dir, file)
dirname, basename = os.path.split(file)
os.makedirs(dirname, exist_ok=True)
open(file, 'w').close()
def test_process_template_line(self):
# testing all MANIFEST.in template patterns
file_list = FileList()
ml = make_local_path
# simulated file list
self.make_files([
'foo.tmp', 'ok', 'xo', 'four.txt',
'buildout.cfg',
# filelist does not filter out VCS directories,
# it's sdist that does
ml('.hg/last-message.txt'),
ml('global/one.txt'),
ml('global/two.txt'),
ml('global/files.x'),
ml('global/here.tmp'),
ml('f/o/f.oo'),
ml('dir/graft-one'),
ml('dir/dir2/graft2'),
ml('dir3/ok'),
ml('dir3/sub/ok.txt'),
])
MANIFEST_IN = DALS("""\
include ok
include xo
exclude xo
include foo.tmp
include buildout.cfg
global-include *.x
global-include *.txt
global-exclude *.tmp
recursive-include f *.oo
recursive-exclude global *.x
graft dir
prune dir3
""")
for line in MANIFEST_IN.split('\n'):
if not line:
continue
file_list.process_template_line(line)
wanted = [
'buildout.cfg',
'four.txt',
'ok',
ml('.hg/last-message.txt'),
ml('dir/graft-one'),
ml('dir/dir2/graft2'),
ml('f/o/f.oo'),
ml('global/one.txt'),
ml('global/two.txt'),
]
file_list.sort()
assert file_list.files == wanted
def test_exclude_pattern(self):
# return False if no match
file_list = FileList()
assert not file_list.exclude_pattern('*.py')
# return True if files match
file_list = FileList()
file_list.files = ['a.py', 'b.py']
assert file_list.exclude_pattern('*.py')
# test excludes
file_list = FileList()
file_list.files = ['a.py', 'a.txt']
file_list.exclude_pattern('*.py')
file_list.sort()
assert file_list.files == ['a.txt']
def test_include_pattern(self):
# return False if no match
file_list = FileList()
self.make_files([])
assert not file_list.include_pattern('*.py')
# return True if files match
file_list = FileList()
self.make_files(['a.py', 'b.txt'])
assert file_list.include_pattern('*.py')
# test * matches all files
file_list = FileList()
self.make_files(['a.py', 'b.txt'])
file_list.include_pattern('*')
file_list.sort()
assert file_list.files == ['a.py', 'b.txt']
def test_process_template_line_invalid(self):
# invalid lines
file_list = FileList()
for action in ('include', 'exclude', 'global-include',
'global-exclude', 'recursive-include',
'recursive-exclude', 'graft', 'prune', 'blarg'):
try:
file_list.process_template_line(action)
except DistutilsTemplateError:
pass
except Exception:
assert False, "Incorrect error thrown"
else:
assert False, "Should have thrown an error"
def test_include(self):
ml = make_local_path
# include
file_list = FileList()
self.make_files(['a.py', 'b.txt', ml('d/c.py')])
file_list.process_template_line('include *.py')
file_list.sort()
assert file_list.files == ['a.py']
self.assertNoWarnings()
file_list.process_template_line('include *.rb')
file_list.sort()
assert file_list.files == ['a.py']
self.assertWarnings()
def test_exclude(self):
ml = make_local_path
# exclude
file_list = FileList()
file_list.files = ['a.py', 'b.txt', ml('d/c.py')]
file_list.process_template_line('exclude *.py')
file_list.sort()
assert file_list.files == ['b.txt', ml('d/c.py')]
self.assertNoWarnings()
file_list.process_template_line('exclude *.rb')
file_list.sort()
assert file_list.files == ['b.txt', ml('d/c.py')]
self.assertWarnings()
def test_global_include(self):
ml = make_local_path
# global-include
file_list = FileList()
self.make_files(['a.py', 'b.txt', ml('d/c.py')])
file_list.process_template_line('global-include *.py')
file_list.sort()
assert file_list.files == ['a.py', ml('d/c.py')]
self.assertNoWarnings()
file_list.process_template_line('global-include *.rb')
file_list.sort()
assert file_list.files == ['a.py', ml('d/c.py')]
self.assertWarnings()
def test_global_exclude(self):
ml = make_local_path
# global-exclude
file_list = FileList()
file_list.files = ['a.py', 'b.txt', ml('d/c.py')]
file_list.process_template_line('global-exclude *.py')
file_list.sort()
assert file_list.files == ['b.txt']
self.assertNoWarnings()
file_list.process_template_line('global-exclude *.rb')
file_list.sort()
assert file_list.files == ['b.txt']
self.assertWarnings()
def test_recursive_include(self):
ml = make_local_path
# recursive-include
file_list = FileList()
self.make_files(['a.py', ml('d/b.py'), ml('d/c.txt'), ml('d/d/e.py')])
file_list.process_template_line('recursive-include d *.py')
file_list.sort()
assert file_list.files == [ml('d/b.py'), ml('d/d/e.py')]
self.assertNoWarnings()
file_list.process_template_line('recursive-include e *.py')
file_list.sort()
assert file_list.files == [ml('d/b.py'), ml('d/d/e.py')]
self.assertWarnings()
def test_recursive_exclude(self):
ml = make_local_path
# recursive-exclude
file_list = FileList()
file_list.files = ['a.py', ml('d/b.py'), ml('d/c.txt'), ml('d/d/e.py')]
file_list.process_template_line('recursive-exclude d *.py')
file_list.sort()
assert file_list.files == ['a.py', ml('d/c.txt')]
self.assertNoWarnings()
file_list.process_template_line('recursive-exclude e *.py')
file_list.sort()
assert file_list.files == ['a.py', ml('d/c.txt')]
self.assertWarnings()
def test_graft(self):
ml = make_local_path
# graft
file_list = FileList()
self.make_files(['a.py', ml('d/b.py'), ml('d/d/e.py'), ml('f/f.py')])
file_list.process_template_line('graft d')
file_list.sort()
assert file_list.files == [ml('d/b.py'), ml('d/d/e.py')]
self.assertNoWarnings()
file_list.process_template_line('graft e')
file_list.sort()
assert file_list.files == [ml('d/b.py'), ml('d/d/e.py')]
self.assertWarnings()
def test_prune(self):
ml = make_local_path
# prune
file_list = FileList()
file_list.files = ['a.py', ml('d/b.py'), ml('d/d/e.py'), ml('f/f.py')]
file_list.process_template_line('prune d')
file_list.sort()
assert file_list.files == ['a.py', ml('f/f.py')]
self.assertNoWarnings()
file_list.process_template_line('prune e')
file_list.sort()
assert file_list.files == ['a.py', ml('f/f.py')]
self.assertWarnings()
| mit | -700,400,924,519,320,600 | 28.96173 | 79 | 0.54862 | false |
wevote/WeVoteServer | issue/models.py | 1 | 44537 | # issue/models.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from django.db import models
from exception.models import handle_exception, handle_record_found_more_than_one_exception, \
handle_record_not_found_exception, handle_record_not_saved_exception
from wevote_settings.models import fetch_next_we_vote_id_issue_integer, fetch_site_unique_id_prefix
import wevote_functions.admin
from wevote_functions.functions import convert_to_int, positive_value_exists
# sort_formula
MOST_LINKED_ORGANIZATIONS = "MOST_LINKED_ORGANIZATIONS"
ALPHABETICAL_ASCENDING = "ALPHABETICAL_ASCENDING"
LINKED = 'LINKED'
UNLINKED = 'UNLINKED'
LINK_CHOICES = (
(LINKED, 'Linked'),
(UNLINKED, 'Unlinked'),
)
# Reason for linking issue
NO_REASON = 'NO_REASON'
LINKED_BY_ORGANIZATION = 'LINKED_BY_ORGANIZATION'
LINKED_BY_WE_VOTE = 'LINKED_BY_WE_VOTE'
AUTO_LINKED_BY_HASHTAG = 'AUTO_LINKED_BY_HASHTAG'
AUTO_LINKED_BY_TEXT = 'AUTO_LINKED_BY_TEXT'
LINKING_REASON_CHOICES = (
(NO_REASON, 'No reason'),
(LINKED_BY_ORGANIZATION, 'Linked by organization'),
(LINKED_BY_WE_VOTE, 'Linked by We Vote'),
(AUTO_LINKED_BY_HASHTAG, 'Auto-linked by hashtag'),
(AUTO_LINKED_BY_TEXT, 'Auto-linked by text'),
)
# Reason linking option is blocked
# NO_REASON = 'NO_REASON' # Defined above
BLOCKED_BY_ORGANIZATION = 'BLOCKED_BY_ORGANIZATION'
BLOCKED_BY_WE_VOTE = 'BLOCKED_BY_WE_VOTE'
FLAGGED_BY_VOTERS = 'FLAGGED_BY_VOTERS'
LINKING_BLOCKED_REASON_CHOICES = (
(NO_REASON, 'No reason'),
(BLOCKED_BY_ORGANIZATION, 'Blocked by organization'),
(BLOCKED_BY_WE_VOTE, 'Blocked by We Vote'),
(FLAGGED_BY_VOTERS, 'Flagged by voters'),
)
# Kinds of lists of suggested organization
# UPDATE_SUGGESTIONS_FROM_TWITTER_IDS_I_FOLLOW = 'UPDATE_SUGGESTIONS_FROM_TWITTER_IDS_I_FOLLOW'
logger = wevote_functions.admin.get_logger(__name__)
class TrackedWordOrPhrase():
# word_or_phrase
# ignored
pass
class HashtagLinkedToIssue():
""" If this hashtag is found in an organization’s Twitter Feed a certain number of times, link an organization to
this issue automatically """
# hashtag_text
# issue_we_vote_id
pass
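    # Illustrative sketch of the intended auto-link rule (this class is currently a stub;
    # tweet_text and AUTO_LINK_THRESHOLD below are assumptions, not real names):
    #   if tweet_text.count(hashtag_text) >= AUTO_LINK_THRESHOLD:
    #       OrganizationLinkToIssueManager().link_organization_to_issue(
    #           organization_we_vote_id, issue_id, issue_we_vote_id, AUTO_LINKED_BY_HASHTAG)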
class IssueListManager(models.Manager):
"""
This is a class to make it easy to retrieve lists of Issues
"""
def fetch_visible_issue_we_vote_ids(self):
issue_we_vote_ids_list = []
results = self.retrieve_issues()
if results['issue_list_found']:
issue_list = results['issue_list']
for issue in issue_list:
issue_we_vote_ids_list.append(issue.we_vote_id)
return issue_we_vote_ids_list
def retrieve_issues(self, sort_formula=None, issue_we_vote_id_list_to_filter=None,
issue_we_vote_id_list_to_exclude=None, require_filter_or_exclude=False,
show_hidden_issues=False, read_only=True):
issue_list = []
issue_list_found = False
success = False
if require_filter_or_exclude and issue_we_vote_id_list_to_filter is None and \
issue_we_vote_id_list_to_exclude is None:
status = 'RETRIEVE_ISSUE_FILTERS_NOT_FOUND'
results = {
'success': success,
'status': status,
'issue_list_found': issue_list_found,
'issue_list': issue_list,
}
return results
try:
if read_only:
issue_queryset = Issue.objects.using('readonly').all()
else:
issue_queryset = Issue.objects.all()
# By default, we only show the issues marked "hide_issue=False"
if not show_hidden_issues:
issue_queryset = issue_queryset.filter(hide_issue=False)
if issue_we_vote_id_list_to_filter is not None:
issue_queryset = issue_queryset.filter(we_vote_id__in=issue_we_vote_id_list_to_filter)
if issue_we_vote_id_list_to_exclude is not None:
issue_queryset = issue_queryset.exclude(we_vote_id__in=issue_we_vote_id_list_to_exclude)
if sort_formula == MOST_LINKED_ORGANIZATIONS:
issue_queryset = issue_queryset.order_by(
'-linked_organization_count', 'we_vote_hosted_image_url_tiny', 'issue_name')
elif sort_formula == ALPHABETICAL_ASCENDING:
issue_queryset = issue_queryset.order_by('issue_name')
else:
issue_queryset = issue_queryset.order_by('issue_name')
issue_list = list(issue_queryset)
if len(issue_list):
issue_list_found = True
status = 'ISSUES_RETRIEVED'
else:
status = 'NO_ISSUES_RETRIEVED'
success = True
except Issue.DoesNotExist:
# No issues found. Not a problem.
status = 'NO_ISSUES_FOUND'
issue_list = []
success = True
except Exception as e:
handle_exception(e, logger=logger)
status = 'FAILED retrieve_all_issues_for_office ' \
'{error} [type: {error_type}]'.format(error=e, error_type=type(e))
results = {
'success': success,
'status': status,
'issue_list_found': issue_list_found,
'issue_list': issue_list,
}
return results
def retrieve_issue_count(self):
try:
issue_queryset = Issue.objects.using('readonly').all()
# We only show the issues marked "hide_issue=False"
issue_queryset = issue_queryset.filter(hide_issue=False)
issue_count = issue_queryset.count()
success = True
status = "ISSUE_COUNT_FOUND"
except Issue.DoesNotExist:
# No issues found. Not a problem.
status = 'NO_ISSUES_FOUND_DoesNotExist'
issue_count = 0
success = True
except Exception as e:
handle_exception(e, logger=logger)
status = 'FAILED retrieve_all_issues_for_office ' \
'{error} [type: {error_type}]'.format(error=e, error_type=type(e))
success = False
issue_count = 0
results = {
'success': success,
'status': status,
'issue_count': issue_count,
}
return results
def fetch_organization_issues_for_display(self, organization_we_vote_id, sort_formula=None,
show_hidden_issues=False):
results = self.retrieve_organization_issues_for_display(organization_we_vote_id, sort_formula,
show_hidden_issues)
return results['issues_display_string']
def fetch_organization_issue_list(self, organization_we_vote_id, sort_formula=None,
show_hidden_issues=False):
results = self.retrieve_organization_issues_for_display(organization_we_vote_id, sort_formula,
show_hidden_issues)
return results['issue_list']
def retrieve_organization_issues_for_display(self, organization_we_vote_id, sort_formula=None,
show_hidden_issues=False):
        issue_list = []
        issue_list_found = False
success = False
status = ""
issues_display_string = ""
if not positive_value_exists(organization_we_vote_id):
status += 'RETRIEVE_ISSUES_ORGANIZATION_NOT_FOUND '
results = {
'success': success,
'status': status,
'issue_list': [],
'issue_list_found': issue_list_found,
'issues_display_string': issues_display_string,
}
return results
organization_link_to_issue_list = OrganizationLinkToIssueList()
issues_list = organization_link_to_issue_list.fetch_issue_we_vote_id_list_by_organization_we_vote_id(
organization_we_vote_id)
if len(issues_list) == 0:
status += 'RETRIEVE_ISSUES_FOR_ORGANIZATION_NO_ISSUES '
results = {
'success': success,
'status': status,
'issue_list': [],
'issue_list_found': issue_list_found,
'issues_display_string': issues_display_string,
}
return results
try:
issue_queryset = Issue.objects.using('readonly').all()
# By default, we only show the issues marked "hide_issue=False"
if not show_hidden_issues:
issue_queryset = issue_queryset.filter(hide_issue=False)
issue_queryset = issue_queryset.filter(we_vote_id__in=issues_list)
if sort_formula == MOST_LINKED_ORGANIZATIONS:
issue_queryset = issue_queryset.order_by(
'-linked_organization_count', 'we_vote_hosted_image_url_tiny', 'issue_name')
elif sort_formula == ALPHABETICAL_ASCENDING:
issue_queryset = issue_queryset.order_by('issue_name')
else:
issue_queryset = issue_queryset.order_by('issue_name')
issue_list = list(issue_queryset)
if len(issue_list):
issue_list_found = True
status += 'RETRIEVE_ISSUES_FOR_ORGANIZATION_ISSUES_RETRIEVED '
for one_issue in issue_list:
issues_display_string += one_issue.issue_name + ", "
issues_display_string = issues_display_string[:-2]
else:
status += 'RETRIEVE_ISSUES_FOR_ORGANIZATION_NO_ISSUES_RETRIEVED '
success = True
except Exception as e:
handle_exception(e, logger=logger)
status = 'FAILED fetch_organization_issues_for_display ' \
'{error} [type: {error_type}]'.format(error=e, error_type=type(e))
results = {
'success': success,
'status': status,
'issue_list': issue_list,
'issue_list_found': issue_list_found,
'issues_display_string': issues_display_string,
}
return results
class Issue(models.Model):
# The we_vote_id identifier is unique across all We Vote sites, and allows us to share our data with other
# organizations
# It starts with "wv" then we add on a database specific identifier like "3v" (WeVoteSetting.site_unique_id_prefix)
# then the string "issue", and then a sequential integer like "123".
# We keep the last value in WeVoteSetting.we_vote_id_last_issue_integer
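    # Illustrative example (hypothetical values): with site_unique_id_prefix "3v" and a next
    # issue integer of 123, save() below would generate the we_vote_id "wv3vissue123".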
we_vote_id = models.CharField(
verbose_name="we vote permanent id of this issue", max_length=255, default=None, null=True,
blank=True, unique=True)
issue_name = models.CharField(verbose_name="name of the issue",
max_length=255, null=True, blank=True, db_index=True)
# The description of the issue.
issue_description = models.TextField(verbose_name="description of the issue",
null=True, blank=True, default="")
issue_followers_count = models.PositiveIntegerField(verbose_name="number of followers of this issue",
null=False, blank=True, default=0)
linked_organization_count = models.PositiveIntegerField(verbose_name="number of organizations linked to the issue",
null=False, blank=True, default=0)
hide_issue = models.BooleanField(default=True) # Do not show issue to voters or partners (admins/volunteers only)
# For audit reasons, would this issue broadly be considered "left" or "right"
considered_left = models.BooleanField(default=False)
considered_right = models.BooleanField(default=False)
# A default image field for hard-coded local images
issue_icon_local_path = models.TextField(
verbose_name='path in web app for the issue icon', blank=True, null=True, default="")
we_vote_hosted_image_url_large = models.URLField(
verbose_name='we vote hosted large image url', blank=True, null=True)
we_vote_hosted_image_url_medium = models.URLField(
verbose_name='we vote hosted medium image url', blank=True, null=True)
we_vote_hosted_image_url_tiny = models.URLField(
verbose_name='we vote hosted tiny image url', blank=True, null=True)
# We override the save function so we can auto-generate we_vote_id
def save(self, *args, **kwargs):
# Even if this data came from another source we still need a unique we_vote_id
if self.we_vote_id:
self.we_vote_id = self.we_vote_id.strip().lower()
if self.we_vote_id == "" or self.we_vote_id is None: # If there isn't a value...
# ...generate a new id
site_unique_id_prefix = fetch_site_unique_id_prefix()
next_local_integer = fetch_next_we_vote_id_issue_integer()
# "wv" = We Vote
# site_unique_id_prefix = a generated (or assigned) unique id for one server running We Vote
# "issue" = tells us this is a unique id for a Issue
# next_integer = a unique, sequential integer for this server - not necessarily tied to database id
self.we_vote_id = "wv{site_unique_id_prefix}issue{next_integer}".format(
site_unique_id_prefix=site_unique_id_prefix,
next_integer=next_local_integer,
)
super(Issue, self).save(*args, **kwargs)
class IssueManager(models.Manager):
def __unicode__(self):
return "IssueManager"
def retrieve_issue_from_id(self, issue_id):
issue_manager = IssueManager()
return issue_manager.retrieve_issue(issue_id)
def retrieve_issue_from_we_vote_id(self, we_vote_id):
issue_id = 0
issue_manager = IssueManager()
return issue_manager.retrieve_issue(issue_id, we_vote_id)
def fetch_issue_id_from_we_vote_id(self, we_vote_id):
issue_id = 0
issue_manager = IssueManager()
results = issue_manager.retrieve_issue(issue_id, we_vote_id)
if results['success']:
return results['issue_id']
return 0
def fetch_issue_name_from_we_vote_id(self, we_vote_id):
issue_id = 0
issue_manager = IssueManager()
results = issue_manager.retrieve_issue(issue_id, we_vote_id)
if results['success']:
return results['issue_name']
return ''
def fetch_issue_we_vote_id_from_id(self, issue_id):
we_vote_id = ''
issue_manager = IssueManager()
results = issue_manager.retrieve_issue(issue_id, we_vote_id)
if results['success']:
return results['issue_we_vote_id']
return ''
def fetch_issue_from_we_vote_id(self, we_vote_id):
issue_id = 0
issue_manager = IssueManager()
results = issue_manager.retrieve_issue(issue_id, we_vote_id)
if results['issue_found']:
return results['issue']
return None
def retrieve_issue_from_issue_name(self, issue_name):
issue_id = 0
we_vote_id = ''
issue_manager = IssueManager()
results = issue_manager.retrieve_issue(issue_id, we_vote_id, issue_name)
return results
# NOTE: searching by all other variables seems to return a list of objects
def retrieve_issue(self, issue_id, issue_we_vote_id=None, issue_name=None):
error_result = False
exception_does_not_exist = False
exception_multiple_object_returned = False
issue_on_stage = Issue()
status = ""
try:
if positive_value_exists(issue_id):
issue_on_stage = Issue.objects.get(id=issue_id)
issue_id = issue_on_stage.id
issue_we_vote_id = issue_on_stage.we_vote_id
issue_name = issue_on_stage.issue_name
issue_found = True
status += "RETRIEVE_ISSUE_FOUND_BY_ID "
elif positive_value_exists(issue_we_vote_id):
issue_on_stage = Issue.objects.get(we_vote_id=issue_we_vote_id)
issue_id = issue_on_stage.id
issue_we_vote_id = issue_on_stage.we_vote_id
issue_name = issue_on_stage.issue_name
issue_found = True
status += "RETRIEVE_ISSUE_FOUND_BY_WE_VOTE_ID "
elif positive_value_exists(issue_name):
issue_on_stage = Issue.objects.get(issue_name=issue_name)
issue_id = issue_on_stage.id
issue_we_vote_id = issue_on_stage.we_vote_id
issue_name = issue_on_stage.issue_name
issue_found = True
status += "RETRIEVE_ISSUE_FOUND_BY_NAME "
else:
issue_found = False
status += "RETRIEVE_ISSUE_SEARCH_INDEX_MISSING "
except Issue.MultipleObjectsReturned as e:
issue_found = False
handle_record_found_more_than_one_exception(e, logger=logger)
exception_multiple_object_returned = True
status += "RETRIEVE_ISSUE_MULTIPLE_OBJECTS_RETURNED "
except Issue.DoesNotExist:
issue_found = False
exception_does_not_exist = True
status += "RETRIEVE_ISSUE_NOT_FOUND "
except Exception as e:
issue_found = False
status += "RETRIEVE_ISSUE_NOT_FOUND_EXCEPTION " + str(e) + " "
results = {
'success': True if convert_to_int(issue_id) > 0 else False,
'status': status,
'error_result': error_result,
'DoesNotExist': exception_does_not_exist,
'MultipleObjectsReturned': exception_multiple_object_returned,
'issue_found': issue_found,
'issue_id': convert_to_int(issue_id),
'issue_name': issue_name,
'issue_we_vote_id': issue_we_vote_id,
'issue': issue_on_stage,
}
return results
def update_or_create_issue(self, issue_we_vote_id, issue_name='', issue_description=''):
"""
Either update or create a issue entry.
"""
exception_multiple_object_returned = False
success = False
new_issue_created = False
issue_on_stage = Issue()
status = ""
updated_issue_values = {
}
if positive_value_exists(issue_we_vote_id):
updated_issue_values["we_vote_id"] = issue_we_vote_id
if positive_value_exists(issue_name):
updated_issue_values["issue_name"] = issue_name
if positive_value_exists(issue_description):
updated_issue_values["issue_description"] = issue_description
# Should we deal with hide_issue?
if not positive_value_exists(issue_name) and not positive_value_exists(issue_we_vote_id):
success = False
status += 'MISSING_ISSUE_NAME_AND_WE_VOTE_ID '
else:
# Check before we try to create a new entry
issue_found = False
try:
issue_on_stage = Issue.objects.get(
we_vote_id__iexact=issue_we_vote_id,
)
issue_found = True
success = True
status += 'ISSUE_FOUND_BY_WE_VOTE_ID '
except Issue.MultipleObjectsReturned as e:
success = False
status += 'MULTIPLE_MATCHING_ISSUES_FOUND_BY_WE_VOTE_ID '
exception_multiple_object_returned = True
except Issue.DoesNotExist:
exception_does_not_exist = True
status += "RETRIEVE_ISSUE_NOT_FOUND_BY_WE_VOTE_ID "
except Exception as e:
status += 'FAILED_TO_RETRIEVE_ISSUE_BY_WE_VOTE_ID ' \
'{error} [type: {error_type}]'.format(error=e, error_type=type(e))
if not issue_found:
try:
issue_on_stage = Issue.objects.get(
issue_name__iexact=issue_name,
)
issue_found = True
success = True
status += 'ISSUE_FOUND_BY_ISSUE_NAME '
except Issue.MultipleObjectsReturned as e:
success = False
status += 'MULTIPLE_MATCHING_ISSUES_FOUND_BY_ISSUE_NAME '
exception_multiple_object_returned = True
except Issue.DoesNotExist:
exception_does_not_exist = True
status += "RETRIEVE_ISSUE_NOT_FOUND_BY_ISSUE_NAME "
except Exception as e:
status += 'FAILED_TO_RETRIEVE_ISSUE_BY_ISSUE_NAME ' \
'{error} [type: {error_type}]'.format(error=e, error_type=type(e))
if issue_found:
# Update record
# Note: When we decide to start updating issue_name elsewhere within We Vote, we should stop
# updating issue_name via subsequent Google Civic imports
try:
new_issue_created = False
issue_updated = False
issue_has_changes = False
for key, value in updated_issue_values.items():
if hasattr(issue_on_stage, key):
issue_has_changes = True
setattr(issue_on_stage, key, value)
if issue_has_changes and positive_value_exists(issue_on_stage.we_vote_id):
issue_on_stage.save()
issue_updated = True
if issue_updated:
success = True
status += "ISSUE_UPDATED "
else:
success = False
status += "ISSUE_NOT_UPDATED "
except Exception as e:
status += 'FAILED_TO_UPDATE_ISSUE ' \
'{error} [type: {error_type}]'.format(error=e, error_type=type(e))
success = False
else:
# Create record
try:
new_issue_created = False
issue_on_stage = Issue.objects.create(
issue_name=issue_name,
issue_description=issue_description)
if positive_value_exists(issue_on_stage.id):
for key, value in updated_issue_values.items():
if hasattr(issue_on_stage, key):
setattr(issue_on_stage, key, value)
issue_on_stage.save()
new_issue_created = True
if new_issue_created:
success = True
status += "ISSUE_CREATED "
else:
success = False
status += "ISSUE_NOT_CREATED "
except Exception as e:
status += 'FAILED_TO_CREATE_ISSUE ' \
'{error} [type: {error_type}]'.format(error=e, error_type=type(e))
success = False
results = {
'success': success,
'status': status,
'MultipleObjectsReturned': exception_multiple_object_returned,
'new_issue_created': new_issue_created,
'issue': issue_on_stage,
}
return results
def reset_issue_image_details(self, issue, issue_icon_local_path=False):
"""
Reset an issue entry with original image details from we vote image.
"""
success = False
status = "ENTERING_RESET_ISSUE_IMAGE_DETAILS"
if issue:
if issue_icon_local_path is not False:
issue.issue_icon_local_path = issue_icon_local_path
issue.we_vote_hosted_image_url_large = None
issue.we_vote_hosted_image_url_medium = None
issue.we_vote_hosted_image_url_tiny = None
issue.save()
success = True
status = "RESET_ISSUE_IMAGE_DETAILS"
results = {
'success': success,
'status': status,
'candidate': issue,
}
return results
class OrganizationLinkToIssue(models.Model):
# This class represent the link between an organization and an issue
# We are relying on built-in Python id field
# The organization's we_vote_id linked to the issue
organization_we_vote_id = models.CharField(
verbose_name="we vote permanent id", max_length=255, null=True, blank=True, unique=False, db_index=True)
# The issue being linked
issue_id = models.PositiveIntegerField(null=True, blank=True)
# we_vote_id of the issue
issue_we_vote_id = models.CharField(
verbose_name="we vote permanent id", max_length=255, null=True, blank=True, unique=False, db_index=True)
# Are the organization and the issue linked?
link_active = models.BooleanField(verbose_name='', default=True)
# AUTO_TAGGED_BY_TEXT, AUTO_TAGGED_BY_HASHTAG, TAGGED_BY_ORGANIZATION, TAGGED_BY_WE_VOTE, NO_REASON
reason_for_link = models.CharField(max_length=25, choices=LINKING_REASON_CHOICES,
default=NO_REASON)
# There are some cases where we want to prevent auto-linking of an issue
link_blocked = models.BooleanField(verbose_name='', default=False)
# FLAGGED_BY_VOTERS, BLOCKED_BY_WE_VOTE, BLOCKED_BY_ORGANIZATION, NOT_BLOCKED
reason_link_is_blocked = models.CharField(max_length=25, choices=LINKING_BLOCKED_REASON_CHOICES,
default=NO_REASON)
# The date the the issue link was modified
date_last_changed = models.DateTimeField(verbose_name='date last changed', null=True, auto_now=True)
def __unicode__(self):
return self.issue_we_vote_id
def is_linked(self):
if self.link_active:
return True
return False
def is_not_linked(self):
return not self.is_linked()
class OrganizationLinkToIssueList(models.Manager):
# A way to retrieve all of the organization and issue linking information
def retrieve_issue_list_by_organization_we_vote_id(self, organization_we_vote_id, show_hidden_issues=False,
read_only=False):
# Retrieve a list of active issues linked to organization
link_issue_list_found = False
link_active = True
link_issue_list = {}
try:
if read_only:
link_issue_query = OrganizationLinkToIssue.objects.using('readonly').all()
else:
link_issue_query = OrganizationLinkToIssue.objects.all()
link_issue_query = link_issue_query.filter(organization_we_vote_id__iexact=organization_we_vote_id)
link_issue_query = link_issue_query.filter(link_active=link_active)
link_issue_list = list(link_issue_query)
if len(link_issue_list):
link_issue_list_found = True
except Exception as e:
pass
if link_issue_list_found:
if show_hidden_issues:
return link_issue_list
else:
link_issue_list_filtered = []
# Get a complete list of visible issues
issue_list_manager = IssueListManager()
visible_issue_we_vote_ids = issue_list_manager.fetch_visible_issue_we_vote_ids()
for link_issue in link_issue_list:
if link_issue.issue_we_vote_id in visible_issue_we_vote_ids:
link_issue_list_filtered.append(link_issue)
return link_issue_list_filtered
else:
link_issue_list = {}
return link_issue_list
def retrieve_issue_blocked_list_by_organization_we_vote_id(self, organization_we_vote_id, read_only=False):
        # Retrieve a list of issues blocked for an organization
link_issue_list_found = False
link_blocked = True
link_issue_list = {}
try:
if read_only:
link_issue_query = OrganizationLinkToIssue.objects.using('readonly').all()
else:
link_issue_query = OrganizationLinkToIssue.objects.all()
link_issue_query = link_issue_query.filter(organization_we_vote_id__iexact=organization_we_vote_id)
link_issue_query = link_issue_query.filter(link_blocked=link_blocked)
link_issue_list = list(link_issue_query)
if len(link_issue_list):
link_issue_list_found = True
except Exception as e:
pass
if link_issue_list_found:
return link_issue_list
else:
link_issue_list = {}
return link_issue_list
def fetch_issue_we_vote_id_list_by_organization_we_vote_id(self, organization_we_vote_id):
link_issue_we_vote_id_list = []
link_issue_list = self.retrieve_issue_list_by_organization_we_vote_id(organization_we_vote_id, read_only=True)
for issue in link_issue_list:
link_issue_we_vote_id_list.append(issue.issue_we_vote_id)
return link_issue_we_vote_id_list
def fetch_organization_we_vote_id_list_by_issue_we_vote_id_list(self, issue_we_vote_id_list):
organization_we_vote_id_list = []
results = self.retrieve_organization_we_vote_id_list_from_issue_we_vote_id_list(
issue_we_vote_id_list) # Already read_only
if results['organization_we_vote_id_list_found']:
organization_we_vote_id_list = results['organization_we_vote_id_list']
return organization_we_vote_id_list
def fetch_issue_count_for_organization(self, organization_id=0, organization_we_vote_id=''):
link_active = True
link_issue_list_count = 0
try:
link_issue_list = OrganizationLinkToIssue.objects.using('readonly').all()
link_issue_list = link_issue_list.filter(organization_we_vote_id__iexact=organization_we_vote_id)
link_issue_list = link_issue_list.filter(link_active=link_active)
link_issue_list_count = link_issue_list.count()
except Exception as e:
pass
return link_issue_list_count
def fetch_organization_count_for_issue(self, issue_we_vote_id=''):
link_active = True
link_issue_list_count = 0
try:
link_issue_list = OrganizationLinkToIssue.objects.using('readonly').all()
link_issue_list = link_issue_list.filter(issue_we_vote_id__iexact=issue_we_vote_id)
link_issue_list = link_issue_list.filter(link_active=link_active)
link_issue_list_count = link_issue_list.count()
except Exception as e:
pass
return link_issue_list_count
def fetch_linked_organization_count(self, issue_we_vote_id):
number_of_organizations_following_this_issue = 0
try:
if positive_value_exists(issue_we_vote_id):
organization_link_to_issue_query = OrganizationLinkToIssue.objects.using('readonly').filter(
issue_we_vote_id__iexact=issue_we_vote_id,
link_active=True
)
number_of_organizations_following_this_issue = organization_link_to_issue_query.count()
except Exception as e:
pass
return number_of_organizations_following_this_issue
def retrieve_organization_we_vote_id_list_from_issue_we_vote_id_list(self, issue_we_vote_id_list):
organization_we_vote_id_list = []
organization_we_vote_id_list_found = False
link_active = True
try:
link_queryset = OrganizationLinkToIssue.objects.using('readonly').all()
# we decided not to use case-insensitivity in favour of '__in'
link_queryset = link_queryset.filter(issue_we_vote_id__in=issue_we_vote_id_list)
link_queryset = link_queryset.filter(link_active=link_active)
link_queryset = link_queryset.values('organization_we_vote_id').distinct()
organization_link_to_issue_results = list(link_queryset)
if len(organization_link_to_issue_results):
organization_we_vote_id_list_found = True
for one_link in organization_link_to_issue_results:
organization_we_vote_id_list.append(one_link['organization_we_vote_id'])
status = 'ORGANIZATION_WE_VOTE_ID_LIST_RETRIEVED '
else:
status = 'NO_ORGANIZATION_WE_VOTE_IDS_RETRIEVED '
except Issue.DoesNotExist:
# No issues found. Not a problem.
status = 'NO_ORGANIZATION_WE_VOTE_IDS_DO_NOT_EXIST '
organization_we_vote_id_list = []
except Exception as e:
handle_exception(e, logger=logger)
status = 'FAILED retrieve_organization_we_vote_id_list_from_issue_we_vote_id_list' \
'{error} [type: {error_type}] '.format(error=e, error_type=type(e))
results = {
'success': True if organization_we_vote_id_list else False,
'status': status,
'organization_we_vote_id_list_found': organization_we_vote_id_list_found,
'organization_we_vote_id_list': organization_we_vote_id_list,
}
return results
class OrganizationLinkToIssueManager(models.Manager):
def __unicode__(self):
return "OrganizationLinkToIssueManager"
def link_organization_to_issue(self, organization_we_vote_id, issue_id, issue_we_vote_id,
reason_for_link=NO_REASON):
link_active = True
link_blocked = False
if reason_for_link is None:
reason_for_link = LINKED_BY_WE_VOTE
reason_for_block = NO_REASON
return self.toggle_issue_link(organization_we_vote_id, issue_id, issue_we_vote_id, link_active, link_blocked,
reason_for_link, reason_for_block)
def unlink_organization_to_issue(self, organization_we_vote_id, issue_id, issue_we_vote_id,
reason_for_unlink=NO_REASON):
link_active = False
link_blocked = False
reason_for_link = NO_REASON
reason_for_block = NO_REASON
return self.toggle_issue_link(organization_we_vote_id, issue_id, issue_we_vote_id, link_active, link_blocked,
reason_for_link, reason_for_block)
def toggle_issue_link(self, organization_we_vote_id, issue_id, issue_we_vote_id, link_active, link_blocked,
reason_for_link=NO_REASON, reason_for_block=NO_REASON):
link_issue_on_stage_found = False
link_issue_on_stage_we_vote_id = 0
link_issue_on_stage = OrganizationLinkToIssue()
status = ''
issue_identifier_exists = positive_value_exists(issue_we_vote_id) or positive_value_exists(issue_id)
if not positive_value_exists(organization_we_vote_id) and not issue_identifier_exists:
results = {
'success': True if link_issue_on_stage_found else False,
'status': 'Insufficient inputs to toggle issue link, try passing ids for organization and issue ',
'link_issue_found': link_issue_on_stage_found,
'issue_we_vote_id': link_issue_on_stage_we_vote_id,
'link_issue': link_issue_on_stage,
}
return results
# First make sure that issue_id is for a valid issue
issue_manager = IssueManager()
if positive_value_exists(issue_id):
results = issue_manager.retrieve_issue(issue_id)
else:
results = issue_manager.retrieve_issue(0, issue_we_vote_id)
if results['issue_found']:
issue = results['issue']
issue_found = True
issue_we_vote_id = issue.we_vote_id
issue_id = issue.id
else:
issue_found = False
# Does a link_issue entry exist from this organization already?
link_issue_id = 0
results = self.retrieve_issue_link(link_issue_id, organization_we_vote_id, issue_id, issue_we_vote_id)
if results['link_issue_found']:
link_issue_on_stage = results['link_issue']
            # Update this link_issue entry with new values - we do not delete it because we may reuse it later
try:
link_issue_on_stage.link_active = link_active
link_issue_on_stage.link_blocked = link_blocked
if link_active:
link_issue_on_stage.reason_for_link = reason_for_link
link_issue_on_stage.reason_link_is_blocked = NO_REASON
else:
link_issue_on_stage.reason_for_link = NO_REASON
link_issue_on_stage.reason_link_is_blocked = reason_for_block
link_issue_on_stage.auto_linked_from_twitter_suggestion = False
                # We don't need to set date_last_changed here because auto_now=True is set on the field
                # link_issue_on_stage.date_last_changed =
link_issue_on_stage.save()
link_issue_on_stage_we_vote_id = link_issue_on_stage.issue_we_vote_id
link_issue_on_stage_found = True
status += 'UPDATE ' + str(link_active)
except Exception as e:
status += 'FAILED_TO_UPDATE ' + str(link_active)
handle_record_not_saved_exception(e, logger=logger, exception_message_optional=status)
elif results['MultipleObjectsReturned']:
logger.warning("link_issue: delete all but one and take it over?")
status += 'TOGGLE_LINKING MultipleObjectsReturned ' + str(link_active)
else:
# Create new link_issue entry
if issue_found:
try:
if positive_value_exists(organization_we_vote_id) \
and positive_value_exists(issue_id) and positive_value_exists(issue_we_vote_id):
link_issue_on_stage = OrganizationLinkToIssue(
organization_we_vote_id=organization_we_vote_id,
issue_id=issue_id,
issue_we_vote_id=issue_we_vote_id,
)
link_issue_on_stage.link_active = link_active
link_issue_on_stage.reason_for_link = reason_for_link
link_issue_on_stage.link_blocked = link_blocked
                        link_issue_on_stage.reason_link_is_blocked = reason_for_block
link_issue_on_stage.save()
link_issue_on_stage_we_vote_id = link_issue_on_stage.issue_we_vote_id
link_issue_on_stage_found = True
status += 'CREATE ' + str(link_active)
else:
status += "ORGANIZATION_LINK_TO_ISSUE_COULD_NOT_BE_CREATED-MISSING_ORGANIZATION "
except Exception as e:
status = 'FAILED_TO_UPDATE ' + str(link_active)
handle_record_not_saved_exception(e, logger=logger, exception_message_optional=status)
else:
status += 'ISSUE_NOT_FOUND_ON_CREATE ' + str(link_active)
if positive_value_exists(link_issue_on_stage_we_vote_id) and issue_found:
# If a link issue was saved, update the linked_organization_count
organization_link_issue_list_manager = OrganizationLinkToIssueList()
linked_organization_count = organization_link_issue_list_manager.fetch_linked_organization_count(
link_issue_on_stage_we_vote_id)
try:
issue.linked_organization_count = linked_organization_count
issue.save()
status += "LINKED_ORGANIZATION_COUNT_UPDATED "
except Exception as e:
pass
results = {
'success': True if link_issue_on_stage_found else False,
'status': status,
'link_issue_found': link_issue_on_stage_found,
'issue_we_vote_id': link_issue_on_stage_we_vote_id,
'link_issue': link_issue_on_stage,
}
return results
def retrieve_issue_link(self, link_issue_id, organization_we_vote_id, issue_id, issue_we_vote_id):
"""
link_issue_id is the identifier for records stored in this table (it is NOT the issue_id)
"""
error_result = False
exception_does_not_exist = False
exception_multiple_object_returned = False
link_issue_on_stage = OrganizationLinkToIssue()
link_issue_on_stage_we_vote_id = 0
try:
if positive_value_exists(link_issue_id):
link_issue_on_stage = OrganizationLinkToIssue.objects.get(id=link_issue_id)
link_issue_on_stage_we_vote_id = link_issue_on_stage.issue_we_vote_id
success = True
status = 'LINK_ISSUE_FOUND_WITH_ID'
elif positive_value_exists(organization_we_vote_id) and positive_value_exists(issue_id):
link_issue_on_stage = OrganizationLinkToIssue.objects.get(
organization_we_vote_id__iexact=organization_we_vote_id,
issue_id=issue_id)
link_issue_on_stage_we_vote_id = link_issue_on_stage.issue_we_vote_id
success = True
status = 'LINK_ISSUE_FOUND_WITH_ORGANIZATION_ID_WE_VOTE_ID_AND_ISSUE_ID'
elif positive_value_exists(organization_we_vote_id) and positive_value_exists(issue_we_vote_id):
link_issue_on_stage = OrganizationLinkToIssue.objects.get(
organization_we_vote_id__iexact=organization_we_vote_id,
issue_we_vote_id__iexact=issue_we_vote_id)
link_issue_on_stage_we_vote_id = link_issue_on_stage.issue_we_vote_id
success = True
status = 'LINK_ISSUE_FOUND_WITH_ORGANIZATION_ID_WE_VOTE_ID_AND_ISSUE_WE_VOTE_ID'
else:
success = False
status = 'LINK_ISSUE_MISSING_REQUIRED_VARIABLES'
except OrganizationLinkToIssue.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
error_result = True
exception_multiple_object_returned = True
success = False
status = 'LINK_ISSUE_NOT_FOUND_MultipleObjectsReturned'
except OrganizationLinkToIssue.DoesNotExist:
error_result = False
exception_does_not_exist = True
success = True
status = 'LINK_ISSUE_NOT_FOUND_DoesNotExist'
if positive_value_exists(link_issue_on_stage_we_vote_id):
link_issue_on_stage_found = True
is_linked = link_issue_on_stage.is_linked()
is_not_linked = link_issue_on_stage.is_not_linked()
else:
link_issue_on_stage_found = False
is_linked = False
is_not_linked = True
results = {
'status': status,
'success': success,
'link_issue_found': link_issue_on_stage_found,
'link_issue_id': link_issue_on_stage_we_vote_id,
'link_issue': link_issue_on_stage,
'is_linked': is_linked,
'is_not_linked': is_not_linked,
'error_result': error_result,
'DoesNotExist': exception_does_not_exist,
'MultipleObjectsReturned': exception_multiple_object_returned,
}
return results
| mit | 7,011,680,556,360,434,000 | 44.676923 | 119 | 0.574964 | false |
obayhan/hddiyari_presentation | engine.py | 1 | 6149 | # -*- coding: utf-8 -*-
import os
import urllib2
import urllib
from mechanize import Browser
from bs4 import BeautifulSoup
import re
from PIL import Image
import pyimgur
favi = "/home/ozgur/mount/media/Series/Conan/ConanTheBarbarian/Conan.the.Barbarian.1982.iNTERNAL.DVDRiP.XViD.CD1-HLS.avi"
fmkv = "/home/ozgur/mount/media/TorrentTemp/All.About.Steve.2009.720p.BluRay.DUAL.x264-CBGB(HDA).mkv"
from hachoir_core.error import HachoirError
from hachoir_core.cmd_line import unicodeFilename
from hachoir_parser import createParser
from hachoir_core.tools import makePrintable
from hachoir_metadata import extractMetadata
from hachoir_core.i18n import getTerminalCharset
from sys import argv, stderr, exit
__author__ = 'ozgur'
__creation_date__ = '11.08.2014' '23:15'
CLIENT_ID = "48fa40a51f1c795"
HDR = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive'}
class MovieInfoFetcher():
def __init__(self):
pass
def getunicode(self, soup):
body = ''
if isinstance(soup, unicode):
            soup = soup.replace('&#39;', "'")
            soup = soup.replace('&quot;', '"')
            soup = soup.replace('&nbsp;', ' ')
body += soup
else:
if not soup.contents:
return ''
con_list = soup.contents
for con in con_list:
body = body + self.getunicode(con)
return body
@staticmethod
def parse_movie_divxplanet(link):
directors = ""
authors = ""
actors = ""
genre = ""
req = urllib2.Request(link, headers=HDR)
page = urllib2.urlopen(req)
soup = BeautifulSoup(page.read())
temp_list = soup.find_all('div', itemprop='director')
for item in temp_list:
directors += item.a.span.text + ", "
temp_list = soup.find_all('div', itemprop='author')
for item in temp_list:
authors += item.a.span.text + ", "
try:
title = soup.find('span', itemprop='alternativeHeadline').text
except:
title = ""
temp_list = soup.find_all('div', itemprop='actor')
for item in temp_list:
actors += item.a.span.text + ", "
temp_list = soup.find_all('span', itemprop='genre')
for item in temp_list:
genre += item.text + ", "
try:
description = soup.find('span', itemprop='description').text
except:
description = ""
retval = {
'directors': directors,
'authors': authors,
'title': title,
'actors': actors,
'genre': genre,
'description': description,
}
return retval
def parse_movie_imdb(self, link):
br = Browser()
br.open(link)
link = br.find_link(url_regex=re.compile(r'/title/tt.*'))
res = br.follow_link(link)
soup = BeautifulSoup(res.read())
movie_title = self.getunicode(soup.find('title'))
rate = soup.find('span', itemprop='ratingValue')
rating = self.getunicode(rate)
actors = []
actors_soup = soup.findAll('a', itemprop='actors')
for i in range(len(actors_soup)):
actors.append(self.getunicode(actors_soup[i]))
des = soup.find('meta', {'name': 'description'})['content']
genre = []
infobar = soup.find('div', {'class': 'infobar'})
r = infobar.find('', {'title': True})['title']
genrelist = infobar.findAll('a', {'href': True})
for i in range(len(genrelist) - 1):
genre.append(self.getunicode(genrelist[i]))
release_date = self.getunicode(genrelist[-1])
print movie_title, rating + '/10.0'
print 'Relase Date:', release_date
print 'Rated', r
print ''
print 'Genre:',
print ', '.join(genre)
print '\nActors:',
print ', '.join(actors)
print '\nDescription:'
print des
class ImageProcessor():
def __init__(self):
pass
def _download_image(self, link, path):
print(link + " >> " + path)
testfile = urllib.URLopener(HDR)
testfile.retrieve(link, path)
testfile.close()
def _resize_image(self, path):
size = 500, 500
im = Image.open(path)
im.thumbnail(size, Image.ANTIALIAS)
im.save(path)
return True
def _upload_image(self, path):
im = pyimgur.Imgur(CLIENT_ID)
uploaded_image = im.upload_image(path, title="HDDiyari")
return uploaded_image.link
def prepare_image(self, link, name):
retval = ""
link = str(link)
if not os.path.exists("temp"):
os.makedirs("temp")
if link != "":
path = os.path.join("temp", name) + ".jpg"
self._download_image(link, path)
self._resize_image(path)
retval = self._upload_image(path)
return retval
class MovieMetadata():
def __init__(self):
pass
def get_movie_metadata(self, filename):
filename, realname = unicodeFilename(filename), filename
# parser = createParser(filename, realname)
parser = createParser(filename, filename)
if not parser:
print >> stderr, "Unable to parse file"
exit(1)
try:
metadata = extractMetadata(parser)
except HachoirError, err:
print "Metadata extraction error: %s" % unicode(err)
metadata = None
if not metadata:
print "Unable to extract metadata"
exit(1)
text = metadata.exportPlaintext()
charset = getTerminalCharset()
retval = ""
for line in text:
retval += makePrintable(line, charset) + u"\n"
return retval
| gpl-2.0 | 8,579,259,287,783,442,000 | 29.745 | 128 | 0.567247 | false |
alirizakeles/tendenci | tendenci/apps/profiles/middleware.py | 1 | 2856 | from django.contrib.auth import logout
from django.conf import settings
from django.contrib.auth.models import User
from tendenci.apps.site_settings.utils import get_setting
class ProfileMiddleware(object):
"""
Appends a profile instance to anonymous users.
Creates a profile for logged in users without one.
"""
def process_request(self, request):
from tendenci.apps.profiles.models import Profile
if request.user.is_anonymous():
request.user.profile = Profile(status=False, status_detail="inactive", user=User(is_staff=False, is_superuser=False, is_active=False))
else:
try:
profile = request.user.profile
except Profile.DoesNotExist:
profile = Profile.objects.create_profile(user=request.user)
class ProfileLanguageMiddleware(object):
"""This middleware should come before django's LocaleMiddleware
"""
if settings.USE_I18N:
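        # Illustrative settings ordering (a sketch; surrounding entries trimmed):
        #   MIDDLEWARE_CLASSES = [
        #       'tendenci.apps.profiles.middleware.ProfileLanguageMiddleware',
        #       'django.middleware.locale.LocaleMiddleware',
        #       ...
        #   ]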
def get_user_language(self, request):
try:
lang = getattr(request.user.profile, 'language')
except:
lang = None
if not lang:
lang = get_setting('site', 'global', 'localizationlanguage')
return lang
def process_request(self, request):
"""check user language and assign it to session or cookie accordingly
"""
user_language = self.get_user_language(request)
if user_language:
if hasattr(request, 'session'):
lang_code_in_session = request.session.get('django_language', None)
if not lang_code_in_session or lang_code_in_session != user_language:
request.session['django_language'] = user_language
else:
lang_code_in_cookie = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
if lang_code_in_cookie and lang_code_in_cookie != user_language:
request.COOKIES[settings.LANGUAGE_COOKIE_NAME] = user_language
def process_response(self, request, response):
"""assign user_language to cookie LANGUAGE_COOKIE_NAME
"""
user_language = self.get_user_language(request)
lang_code_in_cookie = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
if user_language and (not lang_code_in_cookie or user_language != lang_code_in_cookie):
response.set_cookie(settings.LANGUAGE_COOKIE_NAME, user_language)
return response
class ForceLogoutProfileMiddleware(object):
def process_request(self, request):
# this will force logout deactivated user on next request
if request.user.is_authenticated():
if not request.user.is_active:
logout(request)
| gpl-3.0 | 6,919,863,689,373,114,000 | 41 | 146 | 0.620798 | false |
kschoelz/abacuspb | abacuspb/resources/categories.py | 1 | 3350 | from flask import abort
from flask.ext.restful import Resource, reqparse, fields, marshal
from bson.objectid import ObjectId
from abacuspb import db
import pymongo
category_fields = { # Response marshalling fields
'name': fields.String,
'parent_id': fields.String,
'budget_tracked': fields.Boolean,
'uri': fields.Url('category')
}
class CategoryListAPI(Resource):
def __init__(self):
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument('name', type=str, required=True, location='json')
self.reqparse.add_argument('parent_id', type=str, location='json')
self.reqparse.add_argument('budget_tracked', type=bool, location='json')
super(CategoryListAPI, self).__init__()
def get(self):
"""
Get all categories
"""
categories = []
cats = db.categories.find(sort=[('name', pymongo.ASCENDING)])
if cats.count() == 0:
abort(404)
for cat in cats:
categories.append(cat)
return { 'categories': map(lambda t: marshal(t, category_fields), categories) }
def post(self):
"""
Create a new category/sub-category
"""
args = self.reqparse.parse_args()
# Check if category already exists
if db.categories.find({'name': args['name']}).count() != 0:
return { 'message': 'category already exists', 'status': 400 }, 400
# If parent_id set, check if parent exists first
if args['parent_id'] and (db.categories.find({'id': args['parent_id']}).count() == 0):
return { 'message': 'parent category does not exist', 'status': 404}, 404
# Save new category
if not args.has_key('parent_id'): args['parent_id'] = None
category = {
'id': str(ObjectId()),
'name': args['name'],
'parent_id': args['parent_id'],
'budget_tracked': args['budget_tracked']
}
db.categories.insert(category)
return { 'category': marshal(category, category_fields) }, 201
class CategoryAPI(Resource):
def __init__(self):
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument('name', type=str, location='json')
self.reqparse.add_argument('parent_id', type=str, location='json')
self.reqparse.add_argument('budget_tracked', type=bool, location='json')
super(CategoryAPI, self).__init__()
def get(self, id):
"""
Get single category by id
"""
category = db.categories.find_one({'id':id})
if category == None:
abort(404)
return { 'category': marshal(category, category_fields) }
def put(self, id):
"""
Update single category by id
"""
category = db.categories.find_one({'id':id})
if category == None:
abort(404)
args = self.reqparse.parse_args()
for k, v in args.iteritems():
if v != None:
category[k] = v
db.categories.update({'id':id}, category)
return { 'category': marshal(category, category_fields) }
def delete(self, id):
"""
Delete single category by id
"""
if not db.categories.remove({'id':id})['n']:
abort(404)
return { 'result': True }
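# Illustrative API usage (URL prefixes are assumptions; routes are registered elsewhere):
#   GET    /categories          -> list all categories
#   POST   /categories          {"name": "Food", "budget_tracked": true}
#   GET    /categories/<id>     -> single category
#   PUT    /categories/<id>     {"name": "Dining out"}
#   DELETE /categories/<id>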
| gpl-2.0 | -2,139,059,082,744,144,100 | 34.638298 | 94 | 0.571642 | false |
smurfix/MoaT | moat/types/__init__.py | 1 | 2788 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division, unicode_literals
##
## This file is part of MoaT, the Master of all Things.
##
## MoaT is Copyright © 2007-2016 by Matthias Urlichs <[email protected]>,
## it is licensed under the GPLv3. See the file `README.rst` for details,
## including optimistic statements by the author.
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License (included; see the file LICENSE)
## for more details.
##
## This header is auto-generated and may self-destruct at any time,
## courtesy of "make update". The original is in ‘scripts/_boilerplate.py’.
## Thus, do not remove the next line, or insert any blank lines above.
##BP
"""\
This package implements the high-level Type object in MoaT.
Types translate between some sensible internal value and various external
representations. For instance, internally a switch is on or off (True or
False) while on 1wire the bit is 1 or 0, on an MQTT bus it's "on" or "off"
(or "ON and "OFF"), in etcd it's 'true' or 'false', etc.
The internal value shall be the one that makes the most sense to the
computer. For instance, intensity of a dimmable light is between zero and
one. It's the GUI's responsibility to map "float/percentage" to something
nicer.
"""
import os
from etcd_tree import EtcTypes
import logging
logger = logging.getLogger(__name__)
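# Illustrative sketch of the idea in the module docstring (assumed names only; the real
# Type API lives in .base): a type pairs converters between the internal value and one
# external representation, e.g.
#   def bool_to_mqtt(value): return "on" if value else "off"
#   def bool_from_mqtt(payload): return payload.strip().lower() in ("on", "1", "true")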
MODULE_DIR = ('meta','module')
# Type definitions declare data types and bounds for the rest of MoaT.
TYPEDEF_DIR = ('meta','type')
TYPEDEF = ':type'
class _type_names(dict):
def __getitem__(self,k):
while True:
try:
return super().__getitem__(k)
except KeyError:
if '/' not in k:
from .base import Type
return Type
k = k[:k.rindex('/')]
_type_names = _type_names()
def types():
"""Generator for all types known to MoaT. This accesses the code."""
from .base import Type
from moat.script.util import objects
return objects(__name__, Type, filter=lambda x:x.name is not None)
def type_names():
"""Creates a dict which maps type names to its moat.types.*.Type object."""
if not _type_names:
for t in types():
_type_names[t.name] = t
return _type_names
def setup_meta_types(types):
"""Teach an EtcTypes object about MoaT types"""
from .base import TypeDir
types.step(TYPEDEF_DIR).register('**',TYPEDEF, cls=TypeDir)
| gpl-3.0 | 1,698,206,316,585,124,400 | 32.939024 | 82 | 0.71254 | false |
ONSdigital/eq-survey-runner | tests/integration/star_wars/test_confirmation_page.py | 1 | 2093 | from tests.integration.integration_test_case import IntegrationTestCase
class TestConfirmationPage(IntegrationTestCase):
def test_confirmation_page(self):
self.rogue_one_login_and_check_introduction_text()
self.post(action='start_questionnaire')
self.rogue_one_check_character_page()
# Form submission with no errors
self.post({'character-answer': 'Cassian Andor'})
self.assertInUrl('cassian-andor-like-this-page')
# Like page
self.rogue_one_check_description_page()
# Form submission with no errors
self.post({'cassian-andor-like-this-page-answer': 'Yes'})
self.assertInUrl('film-takings')
# Takings page
self.rogue_one_check_takings_page()
# Form submission with no errors
self.post({'film-takings-answer': '900'})
self.assertInUrl('summary')
# Summary page
self.rogue_one_check_confirmation_page()
# Form submission with no errors
self.post()
self.assertInUrl('thank-you')
def rogue_one_login_and_check_introduction_text(self):
self.launchSurvey('0', 'rogue_one')
self.rogue_one_check_introduction_text()
def rogue_one_check_introduction_text(self):
self.assertRegexPage('(?s)Rogue One')
def rogue_one_check_character_page(self):
self.assertInBody('Who do you want to know more about?')
self.assertInBody('Jyn Erso')
self.assertInBody('character-answer-3')
def rogue_one_check_description_page(self):
self.assertInBody('An accomplished Rebel Alliance Intelligence Officer')
self.assertInBody('Do you like this page?')
self.assertInBody('cassian-andor-like-this-page-answer-1')
def rogue_one_check_takings_page(self):
self.assertInBody('In millions, how much do you think this film will take?')
self.assertInBody('film-takings-answer')
def rogue_one_check_confirmation_page(self):
self.assertInBody('Summary')
self.assertInBody('You must submit this survey to complete it')
| mit | -1,796,353,747,892,187,400 | 33.883333 | 84 | 0.66603 | false |
edx-solutions/edx-platform | cms/djangoapps/contentstore/courseware_index.py | 1 | 27678 | """ Code to allow module store to interface with courseware index """
import logging
import re
from abc import ABCMeta, abstractmethod
from datetime import timedelta
from django.conf import settings
from django.urls import resolve
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from eventtracking import tracker
from search.search_engine_base import SearchEngine
from six import add_metaclass, string_types, text_type
from cms.djangoapps.contentstore.course_group_config import GroupConfiguration
from course_modes.models import CourseMode
from openedx.core.lib.courses import course_image_url
from xmodule.annotator_mixin import html_to_text
from xmodule.library_tools import normalize_key_for_search
from xmodule.modulestore import ModuleStoreEnum
# REINDEX_AGE is the default amount of time that we look back for changes
# that might have happened. If we are provided with a time at which the
# indexing is triggered, then we know it is safe to only index items
# recently changed at that time. This is the time period that represents
# how far back from the trigger point to look back in order to index
REINDEX_AGE = timedelta(0, 60) # 60 seconds
log = logging.getLogger('edx.modulestore')
def strip_html_content_to_text(html_content):
""" Gets only the textual part for html content - useful for building text to be searched """
# Removing HTML-encoded non-breaking space characters
text_content = re.sub(r"(\s| |//)+", " ", html_to_text(html_content))
# Removing HTML CDATA
text_content = re.sub(r"<!\[CDATA\[.*\]\]>", "", text_content)
# Removing HTML comments
text_content = re.sub(r"<!--.*-->", "", text_content)
return text_content
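# Rough illustration (a sketch; assumes html_to_text itself drops the markup):
#
#   strip_html_content_to_text(u"<p>Course <!-- draft --> overview</p>")
#   # -> roughly u"Course overview", with non-breaking spaces, CDATA blocks
#   #    and HTML comments removed and whitespace collapsed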
def indexing_is_enabled():
"""
Checks to see if the indexing feature is enabled
"""
return settings.FEATURES.get('ENABLE_COURSEWARE_INDEX', False)
class SearchIndexingError(Exception):
""" Indicates some error(s) occured during indexing """
def __init__(self, message, error_list):
super(SearchIndexingError, self).__init__(message)
self.error_list = error_list
@add_metaclass(ABCMeta)
class SearchIndexerBase(object):
"""
Base class to perform indexing for courseware or library search from different modulestores
"""
__metaclass__ = ABCMeta
INDEX_NAME = None
DOCUMENT_TYPE = None
ENABLE_INDEXING_KEY = None
INDEX_EVENT = {
'name': None,
'category': None
}
@classmethod
def indexing_is_enabled(cls):
"""
Checks to see if the indexing feature is enabled
"""
return settings.FEATURES.get(cls.ENABLE_INDEXING_KEY, False)
@classmethod
@abstractmethod
def normalize_structure_key(cls, structure_key):
""" Normalizes structure key for use in indexing """
@classmethod
@abstractmethod
def _fetch_top_level(cls, modulestore, structure_key):
""" Fetch the item from the modulestore location """
@classmethod
@abstractmethod
def _get_location_info(cls, normalized_structure_key):
""" Builds location info dictionary """
@classmethod
def _id_modifier(cls, usage_id):
""" Modifies usage_id to submit to index """
return usage_id
@classmethod
def remove_deleted_items(cls, searcher, structure_key, exclude_items):
"""
Remove any item that is present in the search index but not present in the
updated list of indexed items. As we find items, we can shorten the set of
items to keep.
"""
response = searcher.search(
doc_type=cls.DOCUMENT_TYPE,
field_dictionary=cls._get_location_info(structure_key),
exclude_dictionary={"id": list(exclude_items)}
)
result_ids = [result["data"]["id"] for result in response["results"]]
searcher.remove(cls.DOCUMENT_TYPE, result_ids)
@classmethod
def index(cls, modulestore, structure_key, triggered_at=None, reindex_age=REINDEX_AGE):
"""
Process course for indexing
Arguments:
modulestore - modulestore object to use for operations
structure_key (CourseKey|LibraryKey) - course or library identifier
triggered_at (datetime) - provides time at which indexing was triggered;
useful for index updates - only things changed recently from that date
(within REINDEX_AGE above ^^) will have their index updated, others skip
updating their index but are still walked through in order to identify
which items may need to be removed from the index
If None, then a full reindex takes place
Returns:
Number of items that have been added to the index
"""
error_list = []
searcher = SearchEngine.get_search_engine(cls.INDEX_NAME)
if not searcher:
return
structure_key = cls.normalize_structure_key(structure_key)
location_info = cls._get_location_info(structure_key)
# Wrap counter in dictionary - otherwise we seem to lose scope inside the embedded function `prepare_item_index`
indexed_count = {
"count": 0
}
# indexed_items is a list of all the items that we wish to remain in the
# index, whether or not we are planning to actually update their index.
# This is used in order to build a query to remove those items not in this
# list - those are ready to be destroyed
indexed_items = set()
# items_index is a list of all the items index dictionaries.
# it is used to collect all indexes and index them using bulk API,
# instead of per item index API call.
items_index = []
def get_item_location(item):
"""
Gets the version agnostic item location
"""
return item.location.version_agnostic().replace(branch=None)
def prepare_item_index(item, skip_index=False, groups_usage_info=None):
"""
Add this item to the items_index and indexed_items list
Arguments:
item - item to add to index, its children will be processed recursively
skip_index - simply walk the children in the tree, the content change is
older than the REINDEX_AGE window and would have been already indexed.
This should really only be passed from the recursive child calls when
this method has determined that it is safe to do so
Returns:
item_content_groups - content groups assigned to indexed item
"""
is_indexable = hasattr(item, "index_dictionary")
item_index_dictionary = item.index_dictionary() if is_indexable else None
# if it's not indexable and it does not have children, then ignore
if not item_index_dictionary and not item.has_children:
return
item_content_groups = None
if item.category == "split_test":
split_partition = item.get_selected_partition()
for split_test_child in item.get_children():
if split_partition:
for group in split_partition.groups:
group_id = text_type(group.id)
child_location = item.group_id_to_child.get(group_id, None)
if child_location == split_test_child.location:
groups_usage_info.update({
text_type(get_item_location(split_test_child)): [group_id],
})
for component in split_test_child.get_children():
groups_usage_info.update({
text_type(get_item_location(component)): [group_id]
})
if groups_usage_info:
item_location = get_item_location(item)
item_content_groups = groups_usage_info.get(text_type(item_location), None)
item_id = text_type(cls._id_modifier(item.scope_ids.usage_id))
indexed_items.add(item_id)
if item.has_children:
# determine if it's okay to skip adding the children herein based upon how recently any may have changed
skip_child_index = skip_index or \
(triggered_at is not None and (triggered_at - item.subtree_edited_on) > reindex_age)
children_groups_usage = []
for child_item in item.get_children():
if modulestore.has_published_version(child_item):
children_groups_usage.append(
prepare_item_index(
child_item,
skip_index=skip_child_index,
groups_usage_info=groups_usage_info
)
)
if None in children_groups_usage:
item_content_groups = None
if skip_index or not item_index_dictionary:
return
item_index = {}
# if it has something to add to the index, then add it
try:
item_index.update(location_info)
item_index.update(item_index_dictionary)
item_index['id'] = item_id
if item.start:
item_index['start_date'] = item.start
item_index['content_groups'] = item_content_groups if item_content_groups else None
item_index.update(cls.supplemental_fields(item))
items_index.append(item_index)
indexed_count["count"] += 1
return item_content_groups
except Exception as err: # pylint: disable=broad-except
# broad exception so that index operation does not fail on one item of many
log.warning(u'Could not index item: %s - %r', item.location, err)
error_list.append(_(u'Could not index item: {}').format(item.location))
try:
with modulestore.branch_setting(ModuleStoreEnum.RevisionOption.published_only):
structure = cls._fetch_top_level(modulestore, structure_key)
groups_usage_info = cls.fetch_group_usage(modulestore, structure)
# First perform any additional indexing from the structure object
cls.supplemental_index_information(modulestore, structure)
# Now index the content
for item in structure.get_children():
prepare_item_index(item, groups_usage_info=groups_usage_info)
searcher.index(cls.DOCUMENT_TYPE, items_index)
cls.remove_deleted_items(searcher, structure_key, indexed_items)
except Exception as err: # pylint: disable=broad-except
# broad exception so that index operation does not prevent the rest of the application from working
log.exception(
u"Indexing error encountered, courseware index may be out of date %s - %r",
structure_key,
err
)
error_list.append(_('General indexing error occurred'))
if error_list:
raise SearchIndexingError('Error(s) present during indexing', error_list)
return indexed_count["count"]
@classmethod
def _do_reindex(cls, modulestore, structure_key):
"""
(Re)index all content within the given structure (course or library),
tracking the fact that a full reindex has taken place
"""
indexed_count = cls.index(modulestore, structure_key)
if indexed_count:
cls._track_index_request(cls.INDEX_EVENT['name'], cls.INDEX_EVENT['category'], indexed_count)
return indexed_count
@classmethod
def _track_index_request(cls, event_name, category, indexed_count):
"""Track content index requests.
Arguments:
event_name (str): Name of the event to be logged.
category (str): category of indexed items
indexed_count (int): number of indexed items
Returns:
None
"""
data = {
"indexed_count": indexed_count,
'category': category,
}
tracker.emit(
event_name,
data
)
@classmethod
def fetch_group_usage(cls, modulestore, structure): # pylint: disable=unused-argument
"""
Base implementation of fetch group usage on course/library.
"""
return None
@classmethod
def supplemental_index_information(cls, modulestore, structure):
"""
Perform any supplemental indexing given that the structure object has
already been loaded. Base implementation performs no operation.
Arguments:
modulestore - modulestore object used during the indexing operation
structure - structure object loaded during the indexing job
Returns:
None
"""
pass
@classmethod
def supplemental_fields(cls, item): # pylint: disable=unused-argument
"""
Any supplemental fields that get added to the index for the specified
item. Base implementation returns an empty dictionary
"""
return {}
class CoursewareSearchIndexer(SearchIndexerBase):
"""
Class to perform indexing for courseware search from different modulestores
"""
INDEX_NAME = "courseware_index"
DOCUMENT_TYPE = "courseware_content"
ENABLE_INDEXING_KEY = 'ENABLE_COURSEWARE_INDEX'
INDEX_EVENT = {
'name': 'edx.course.index.reindexed',
'category': 'courseware_index'
}
UNNAMED_MODULE_NAME = ugettext_lazy("(Unnamed)")
@classmethod
def normalize_structure_key(cls, structure_key):
""" Normalizes structure key for use in indexing """
return structure_key
@classmethod
def _fetch_top_level(cls, modulestore, structure_key):
""" Fetch the item from the modulestore location """
return modulestore.get_course(structure_key, depth=None)
@classmethod
def _get_location_info(cls, normalized_structure_key):
""" Builds location info dictionary """
return {"course": text_type(normalized_structure_key), "org": normalized_structure_key.org}
@classmethod
def do_course_reindex(cls, modulestore, course_key):
"""
(Re)index all content within the given course, tracking the fact that a full reindex has taken place
"""
return cls._do_reindex(modulestore, course_key)
@classmethod
def fetch_group_usage(cls, modulestore, structure):
groups_usage_dict = {}
partitions_info = GroupConfiguration.get_partitions_usage_info(modulestore, structure)
content_group_info = GroupConfiguration.get_content_groups_items_usage_info(
modulestore,
structure
)
for group_info in (partitions_info, content_group_info):
for groups in group_info.values():
for name, group in groups.items():
for module in group:
view, args, kwargs = resolve(module['url']) # pylint: disable=unused-variable
usage_key_string = text_type(kwargs['usage_key_string'])
if groups_usage_dict.get(usage_key_string, None):
groups_usage_dict[usage_key_string].append(name)
else:
groups_usage_dict[usage_key_string] = [name]
return groups_usage_dict
@classmethod
def supplemental_index_information(cls, modulestore, structure):
"""
Perform additional indexing from loaded structure object
"""
CourseAboutSearchIndexer.index_about_information(modulestore, structure)
@classmethod
def supplemental_fields(cls, item):
"""
Add location path to the item object
Once we've established the path of names, the first name is the course
name, and the next 3 names are the navigable path within the edx
application. Notice that we stop at that level because a full path to
deep children would be confusing.
"""
location_path = []
parent = item
while parent is not None:
path_component_name = parent.display_name
if not path_component_name:
path_component_name = text_type(cls.UNNAMED_MODULE_NAME)
location_path.append(path_component_name)
parent = parent.get_parent()
location_path.reverse()
return {
"course_name": location_path[0],
"location": location_path[1:4]
}
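# Minimal trigger sketch (illustrative; assumes a configured modulestore, a
# valid course_key, and that xmodule's modulestore() accessor is importable):
#
#   from xmodule.modulestore.django import modulestore
#
#   indexed = CoursewareSearchIndexer.do_course_reindex(modulestore(), course_key)
#   log.info(u"(Re)indexed %d items for %s", indexed, course_key)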
class LibrarySearchIndexer(SearchIndexerBase):
"""
Base class to perform indexing for library search from different modulestores
"""
INDEX_NAME = "library_index"
DOCUMENT_TYPE = "library_content"
ENABLE_INDEXING_KEY = 'ENABLE_LIBRARY_INDEX'
INDEX_EVENT = {
'name': 'edx.library.index.reindexed',
'category': 'library_index'
}
@classmethod
def normalize_structure_key(cls, structure_key):
""" Normalizes structure key for use in indexing """
return normalize_key_for_search(structure_key)
@classmethod
def _fetch_top_level(cls, modulestore, structure_key):
""" Fetch the item from the modulestore location """
return modulestore.get_library(structure_key, depth=None)
@classmethod
def _get_location_info(cls, normalized_structure_key):
""" Builds location info dictionary """
return {"library": text_type(normalized_structure_key)}
@classmethod
def _id_modifier(cls, usage_id):
""" Modifies usage_id to submit to index """
return usage_id.replace(library_key=(usage_id.library_key.replace(version_guid=None, branch=None)))
@classmethod
def do_library_reindex(cls, modulestore, library_key):
"""
(Re)index all content within the given library, tracking the fact that a full reindex has taken place
"""
return cls._do_reindex(modulestore, library_key)
class AboutInfo(object):
""" About info structure to contain
1) Property name to use
2) Where to add in the index (using flags above)
3) Where to source the properties value
"""
# Bitwise Flags for where to index the information
#
# ANALYSE - states that the property text contains content that we wish to be able to find matched within
# e.g. "joe" should yield a result for "I'd like to drink a cup of joe"
#
# PROPERTY - states that the property text should be a property of the indexed document, to be returned with the
# results: search matches will only be made on exact string matches
# e.g. "joe" will only match on "joe"
#
# We are using bitwise flags because one may want to add the property to EITHER or BOTH parts of the index
# e.g. university name is desired to be analysed, so that a search on "Oxford" will match
# property values "University of Oxford" and "Oxford Brookes University",
# but it is also a useful property, because within a (future) filtered search a user
# may have chosen to filter courses from "University of Oxford"
#
# see https://wiki.python.org/moin/BitwiseOperators for information about bitwise shift operator used below
#
ANALYSE = 1 << 0 # Add the information to the analysed content of the index
PROPERTY = 1 << 1 # Add the information as a property of the object being indexed (not analysed)
def __init__(self, property_name, index_flags, source_from):
self.property_name = property_name
self.index_flags = index_flags
self.source_from = source_from
def get_value(self, **kwargs):
""" get the value for this piece of information, using the correct source """
return self.source_from(self, **kwargs)
def from_about_dictionary(self, **kwargs):
""" gets the value from the kwargs provided 'about_dictionary' """
about_dictionary = kwargs.get('about_dictionary', None)
if not about_dictionary:
raise ValueError("Context dictionary does not contain expected argument 'about_dictionary'")
return about_dictionary.get(self.property_name, None)
def from_course_property(self, **kwargs):
""" gets the value from the kwargs provided 'course' """
course = kwargs.get('course', None)
if not course:
raise ValueError("Context dictionary does not contain expected argument 'course'")
return getattr(course, self.property_name, None)
def from_course_mode(self, **kwargs):
""" fetches the available course modes from the CourseMode model """
course = kwargs.get('course', None)
if not course:
raise ValueError("Context dictionary does not contain expected argument 'course'")
return [mode.slug for mode in CourseMode.modes_for_course(course.id)]
# Source location options - either from the course or the about info
FROM_ABOUT_INFO = from_about_dictionary
FROM_COURSE_PROPERTY = from_course_property
FROM_COURSE_MODE = from_course_mode
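# Quick illustration of the flag arithmetic above (sketch only): a field
# declared with both flags passes both bitwise tests, so it is analysed and
# also returned as a document property.
#
#   info = AboutInfo("title", AboutInfo.ANALYSE | AboutInfo.PROPERTY, AboutInfo.FROM_ABOUT_INFO)
#   assert info.index_flags & AboutInfo.ANALYSE
#   assert info.index_flags & AboutInfo.PROPERTY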
class CourseAboutSearchIndexer(object):
"""
Class to perform indexing of about information from course object
"""
DISCOVERY_DOCUMENT_TYPE = "course_info"
INDEX_NAME = CoursewareSearchIndexer.INDEX_NAME
# List of properties to add to the index - each item in the list is an instance of AboutInfo object
ABOUT_INFORMATION_TO_INCLUDE = [
AboutInfo("advertised_start", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("announcement", AboutInfo.PROPERTY, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("start", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("end", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("effort", AboutInfo.PROPERTY, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("display_name", AboutInfo.ANALYSE, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("overview", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("title", AboutInfo.ANALYSE | AboutInfo.PROPERTY, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("university", AboutInfo.ANALYSE | AboutInfo.PROPERTY, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("number", AboutInfo.ANALYSE | AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("short_description", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("description", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("key_dates", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("video", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("course_staff_short", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("course_staff_extended", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("requirements", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("syllabus", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("textbook", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("faq", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("more_info", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("ocw_links", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("enrollment_start", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("enrollment_end", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("org", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("modes", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_MODE),
AboutInfo("language", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
]
@classmethod
def index_about_information(cls, modulestore, course):
"""
Add the given course to the course discovery index
Arguments:
modulestore - modulestore object to use for operations
course - course object from which to take properties, locate about information
"""
searcher = SearchEngine.get_search_engine(cls.INDEX_NAME)
if not searcher:
return
course_id = text_type(course.id)
course_info = {
'id': course_id,
'course': course_id,
'content': {},
'image_url': course_image_url(course),
}
# load data for all of the 'about' modules for this course into a dictionary
about_dictionary = {
item.location.block_id: item.data
for item in modulestore.get_items(course.id, qualifiers={"category": "about"})
}
about_context = {
"course": course,
"about_dictionary": about_dictionary,
}
for about_information in cls.ABOUT_INFORMATION_TO_INCLUDE:
# Broad exception handler so that a single bad property does not scupper the collection of others
try:
section_content = about_information.get_value(**about_context)
except: # pylint: disable=bare-except
section_content = None
log.warning(
u"Course discovery could not collect property %s for course %s",
about_information.property_name,
course_id,
exc_info=True,
)
if section_content:
if about_information.index_flags & AboutInfo.ANALYSE:
analyse_content = section_content
if isinstance(section_content, string_types):
analyse_content = strip_html_content_to_text(section_content)
course_info['content'][about_information.property_name] = analyse_content
if about_information.index_flags & AboutInfo.PROPERTY:
course_info[about_information.property_name] = section_content
# Broad exception handler to protect around and report problems with indexing
try:
searcher.index(cls.DISCOVERY_DOCUMENT_TYPE, [course_info])
except:
log.exception(
u"Course discovery indexing error encountered, course discovery index may be out of date %s",
course_id,
)
raise
log.debug(
u"Successfully added %s course to the course discovery index",
course_id
)
@classmethod
def _get_location_info(cls, normalized_structure_key):
""" Builds location info dictionary """
return {"course": text_type(normalized_structure_key), "org": normalized_structure_key.org}
@classmethod
def remove_deleted_items(cls, structure_key):
""" Remove item from Course About Search_index """
searcher = SearchEngine.get_search_engine(cls.INDEX_NAME)
if not searcher:
return
response = searcher.search(
doc_type=cls.DISCOVERY_DOCUMENT_TYPE,
field_dictionary=cls._get_location_info(structure_key)
)
result_ids = [result["data"]["id"] for result in response["results"]]
searcher.remove(cls.DISCOVERY_DOCUMENT_TYPE, result_ids)
| agpl-3.0 | -2,806,086,705,523,989,000 | 40.809668 | 120 | 0.629381 | false |
google/init2winit | init2winit/model_lib/adabelief_vgg.py | 1 | 4101 | # coding=utf-8
# Copyright 2021 The init2winit Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Flax implementation of Adabelief VGG.
This module ports the Adabelief implementation of VGG to Flax. The
Adabelief paper and github can be found here:
https://arxiv.org/abs/2010.07468
https://github.com/juntang-zhuang/Adabelief-Optimizer/blob/update_0.2.0/PyTorch_Experiments/classification_cifar10/models/vgg.py
The original VGGNet paper can be found here:
https://arxiv.org/abs/1409.1556
"""
from flax import nn
from init2winit.model_lib import base_model
from init2winit.model_lib import model_utils
import jax.numpy as jnp
from ml_collections.config_dict import config_dict
DEFAULT_HPARAMS = config_dict.ConfigDict(
dict(
num_layers=11, # Must be one of [11, 13, 16, 19]
layer_rescale_factors={},
lr_hparams={
'schedule': 'constant',
'base_lr': 0.2,
},
normalizer='none',
optimizer='momentum',
opt_hparams={
'momentum': 0.9,
},
batch_size=128,
l2_decay_factor=0.0001,
l2_decay_rank_threshold=2,
label_smoothing=None,
rng_seed=-1,
use_shallue_label_smoothing=False,
model_dtype='float32',
grad_clip=None,
))
def classifier(x, num_outputs, dropout_rate, deterministic):
"""Implements the classification portion of the network."""
x = nn.dropout(x, rate=dropout_rate, deterministic=deterministic)
x = nn.Dense(x, 512)
x = nn.relu(x)
x = nn.dropout(x, rate=dropout_rate, deterministic=deterministic)
x = nn.Dense(x, 512)
x = nn.relu(x)
x = nn.Dense(x, num_outputs)
return x
def features(x, num_layers, normalizer, dtype, train):
"""Implements the feature extraction portion of the network."""
layers = _layer_size_options[num_layers]
conv = nn.Conv.partial(bias=False, dtype=dtype)
maybe_normalize = model_utils.get_normalizer(normalizer, train)
for l in layers:
if l == 'M':
x = nn.max_pool(x, window_shape=(2, 2), strides=(2, 2))
else:
x = conv(x, features=l, kernel_size=(3, 3), padding=((1, 1), (1, 1)))
x = maybe_normalize(x)
x = nn.relu(x)
return x
class VGG(nn.Module):
"""Adabelief VGG."""
def apply(self,
x,
num_layers,
num_outputs,
normalizer='none',
dtype='float32',
train=True):
x = features(x, num_layers, normalizer, dtype, train)
x = jnp.reshape(x, (x.shape[0], -1))
x = classifier(x, num_outputs, dropout_rate=0.5, deterministic=not train)
return x
# Specifies the sequence of layers in the feature extraction section of the
# network for a given size.
# The numbers indicate the feature size of a convolutional layer, the
# letter M indicates a max pooling layer.
_layer_size_options = {
11: [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
13: [
64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'
],
16: [
64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512,
512, 512, 'M'
],
19: [
64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512,
'M', 512, 512, 512, 512, 'M'
],
}
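# Naming sanity check (sketch): for num_layers=11 the list above contributes
# 8 convolutional layers (the non-'M' entries) and the classifier adds 3 dense
# layers, giving the 11 weight layers of VGG-11. A module can be instantiated
# the same way AdaBeliefVGGModel does below, e.g. (num_outputs illustrative):
#
#   vgg11 = VGG.partial(num_layers=11, num_outputs=10)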
class AdaBeliefVGGModel(base_model.BaseModel):
def build_flax_module(self):
"""Adabelief VGG."""
return VGG.partial(
num_layers=self.hps.num_layers,
num_outputs=self.hps['output_shape'][-1],
dtype=self.hps.model_dtype,
normalizer=self.hps.normalizer)
| apache-2.0 | 927,639,614,500,274,800 | 29.154412 | 128 | 0.633992 | false |
matthiask/fh-fablib | fh_fablib/deploy.py | 1 | 2280 | from __future__ import unicode_literals
from fabric.api import env, execute, task
from fabric.contrib.project import rsync_project
from fh_fablib import run_local, cd, require_env, run, step
@task(default=True)
@require_env
def deploy(*args):
"""Deploys frontend and backend code to the server if the checking step
did not report any problems"""
step("\nChecking whether we are up to date...")
run_local("git push --dry-run origin %(box_branch)s")
execute("check.deploy")
step("\nCompiling static sources...")
run_local("yarn run prod")
step("\nPushing changes...")
run_local("git push origin %(box_branch)s")
step("\nDeploying new code on server...")
with cd("%(box_domain)s"):
run("git fetch")
run("git merge --ff-only origin/%(box_branch)s")
_do_deploy(args)
@task
@require_env
def direct():
"""Deploys code directly, most useful when Bitbucket is down"""
execute("check.deploy")
step("\nCompiling static sources...")
run_local("yarn run prod")
step("\nPushing changes...")
run_local("git push %(box_remote)s %(box_branch)s:refs/heads/DIRECTDEPLOY")
step("\nDeploying new code on server...")
with cd("%(box_domain)s"):
run("git merge --ff-only DIRECTDEPLOY")
_do_deploy()
run_local("git push %(box_remote)s :refs/heads/DIRECTDEPLOY")
step("\nPLEASE do not forget to push to the source repository anyway!")
def _do_deploy(args=()):
with cd("%(box_domain)s"):
run('find . -name "*.pyc" -delete')
run("venv/bin/pip install -U pip wheel setuptools")
run("venv/bin/pip install -r requirements.txt")
run("venv/bin/python manage.py migrate --noinput")
step("\nUploading static files...")
rsync_project(
local_dir="static/",
remote_dir="%(box_domain)s/static/" % env,
delete=("clear" in args),
)
step("\nCollecting static files...")
with cd("%(box_domain)s"):
run("venv/bin/python manage.py collectstatic --noinput")
step("\nRunning system checks on server...")
with cd("%(box_domain)s"):
run("venv/bin/python manage.py check --deploy")
step("\nRestarting server process...")
execute("server.restart")
execute("git.fetch_remote")
| bsd-3-clause | 5,284,025,365,114,822,000 | 27.5 | 79 | 0.633772 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/troubleshooting_result.py | 1 | 1610 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TroubleshootingResult(Model):
"""Troubleshooting information gained from specified resource.
:param start_time: The start time of the troubleshooting.
:type start_time: datetime
:param end_time: The end time of the troubleshooting.
:type end_time: datetime
:param code: The result code of the troubleshooting.
:type code: str
:param results: Information from troubleshooting.
:type results:
list[~azure.mgmt.network.v2017_09_01.models.TroubleshootingDetails]
"""
_attribute_map = {
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'code': {'key': 'code', 'type': 'str'},
'results': {'key': 'results', 'type': '[TroubleshootingDetails]'},
}
def __init__(self, **kwargs):
super(TroubleshootingResult, self).__init__(**kwargs)
self.start_time = kwargs.get('start_time', None)
self.end_time = kwargs.get('end_time', None)
self.code = kwargs.get('code', None)
self.results = kwargs.get('results', None)
| mit | 3,085,018,237,102,909,400 | 38.268293 | 76 | 0.600621 | false |
ff0000/scarlet | scarlet/assets/tasks.py | 1 | 1893 | from . import get_asset_model, get_image_cropper
from . import settings
def optional_celery(**kparms):
name = kparms.pop('name', None)
def wrapped(func):
def inner(*args, **kw):
return func(*args, **kw)
return inner
if settings.USE_CELERY_DECORATOR:
from celery import task
wrapper = task(**kparms)
elif settings.CELERY:
wrapper = settings.CELERY.task(**kparms)
else:
wrapper = wrapped
return wrapper
@optional_celery(name='assets_ensure_crops')
def ensure_crops(asset_id, *required_crops, **kwargs):
asset = kwargs.pop('asset', None)
if not asset or asset_id:
asset = get_asset_model().objects.get(pk=asset_id)
required_crops = set(required_crops).union(
set(get_image_cropper().required_crops()))
crops = set(asset.imagedetail_set.all().values_list('name', flat=True))
needed = required_crops.difference(crops)
length = len(needed)
detail_mod = asset.imagedetail_set.model
for i, size in enumerate(needed):
last = i == (length-1)
spec = get_image_cropper().create_crop(size, asset.file)
detail_mod.save_crop_spec(asset, spec,
update_version=last)
@optional_celery(name='assets_reset_crops')
def reset_crops(asset_id, asset=None, **kwargs):
if not asset or asset_id:
asset = get_asset_model().objects.get(pk=asset_id)
crops = set(asset.imagedetail_set.values_list('name', flat=True))
crops = crops.union(set(get_image_cropper().required_crops()))
length = len(crops)
detail_mod = asset.imagedetail_set.model
for i, size in enumerate(crops):
last = i == (length-1)
spec = get_image_cropper().create_crop(size, asset.file)
detail_mod.save_crop_spec(asset, spec,
update_version=last)
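# Usage sketch (illustrative crop name): when USE_CELERY_DECORATOR is set or a
# CELERY app is configured, the two functions above are real Celery tasks and
# can be queued, e.g. ensure_crops.delay(asset.pk, 'thumbnail'); without
# Celery they fall back to plain synchronous functions with the same signature.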
| mit | -7,459,464,536,788,910,000 | 32.210526 | 75 | 0.618595 | false |
michalliu/syncthing-silk | generate_syncthing_strings.py | 1 | 2336 | #!/usr/bin/env python
import json
import sys
import os
from collections import OrderedDict
import re
import string
base_path = "syncthing/src/github.com/syncthing/syncthing"
langdir = "gui/assets/lang"
resdir = "app/src/main/res"
resfile = "strings-syncthing.xml"
validKeyChars = string.ascii_letters+string.digits+'_'
def fixup(lst):
nlst = []
for el in lst:
key, val = el
#print(key)
key = key.lower()
key = key.replace(' ','_')
key = key.replace('{%','')
key = key.replace('%}','')
key = ''.join(filter(lambda x: x in validKeyChars, key))
key = re.sub(r'_+',r'_',key)
key = re.sub(r'^_',r'',key)
key = re.sub(r'_$',r'',key)
#print(key)
#print(val)
#val = re.sub(r'\{\{.*?\}\}', r'%s',val)
rep = re.findall(r'\{\{.*?\}\}', val)
ii=1
for s in rep:
val = val.replace(s,'%'+str(ii)+'$s')
ii+=1
val = val.replace("'","\\'")
val = val.replace('"','\\"')
#print(val)
nlst.append((key,val))
return nlst
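# Worked example (hypothetical entry, for illustration only):
#   key   "Editing {%path%}"        -> "editing_path"
#   value "Editing {{editingPath}}" -> "Editing %1$s"
# Keys are lower-cased, underscored and reduced to valid resource-name
# characters; each {{...}} placeholder in the value becomes a positional
# Android format argument (%1$s, %2$s, ...), and quotes are escaped.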
def sort_dict(d):
lst = sorted(fixup(d.items()), key=lambda x: x[0])
return OrderedDict(lst)
def write_opening(f):
f.write('<?xml version="1.0" encoding="utf-8"?>\n')
f.write('<!--AUTO GENERATED RESOURCE DO NOT EDIT!!!!-->\n')
f.write('<resources>\n')
def write_closing(f):
f.write('</resources>')
def write_xml(json, directory):
with open(os.path.join(directory,resfile),mode='w', encoding="utf-8") as f:
write_opening(f)
for key in json.keys():
val = json.get(key)
f.write(' <string name="%s">%s</string>\n' % (key,val))
write_closing(f)
realpath = os.path.join(base_path, langdir);
print(realpath)
for f in os.listdir(realpath):
langfile = os.path.join(realpath, f)
if not langfile.endswith("json"):
continue
print(langfile)
with open(os.path.join(base_path,langdir,f), encoding="utf-8") as jf:
j = json.load(jf);
if f.endswith("en.json"):
f = "lang"
f = re.sub(r'(lang-..)-(..)\.json',r'\1-r\2.json',f)
xmldir=os.path.join(resdir,f.replace("lang","values").replace(".json", ""))
print(xmldir)
if not os.path.exists(xmldir):
os.makedirs(xmldir);
write_xml(sort_dict(j),xmldir)
| gpl-3.0 | 1,702,889,957,937,964,500 | 27.487805 | 79 | 0.552654 | false |
morepath/morepath | morepath/autosetup.py | 1 | 8590 | """
This module defines functionality to automatically configure Morepath.
:func:`morepath.scan`, :func:`morepath.autoscan`
are part of the public API.
"""
import sys
import importscan
import importlib
import pkg_resources
from .error import AutoImportError
def scan(package=None, ignore=None, handle_error=None):
"""Scan package for configuration actions (decorators).
It scans by recursively importing the package and any modules
in it, including any sub-packages.
Register any found directives with their app classes.
:param package: The Python module or package to scan. Optional; if
omitted, the calling package is scanned.
:param ignore: A list of packages to ignore. Optional. Defaults to
``['.test', '.tests']``. See :func:`importscan.scan` for details.
:param handle_error: Optional error handling function. See
:func:`importscan.scan` for details.
"""
if package is None:
package = caller_package()
if ignore is None:
ignore = [".test", ".tests"]
importscan.scan(package, ignore, handle_error)
def autoscan(ignore=None):
"""Automatically load Morepath configuration from packages.
Morepath configuration consists of decorator calls on :class:`App`
instances, i.e. ``@App.view()`` and ``@App.path()``.
This function tries to load needed Morepath configuration from all
packages automatically. This only works if:
- The package is made available using a ``setup.py`` file.
- The package or a dependency of the package includes
``morepath`` in the ``install_requires`` list of the
``setup.py`` file.
- The ``setup.py`` name is the same as the name of the
distributed package or module. For example: if the module
inside the package is named ``myapp`` the package must be named
``myapp`` as well (not ``my-app`` or ``MyApp``).
If the ``setup.py`` name differs from the package name, it's
possible to specify the module morepath should scan using entry
points::
setup(name='some-package',
...
install_requires=[
'setuptools',
'morepath'
],
entry_points={
'morepath': [
'scan = somepackage',
]
})
This function simply recursively imports everything in those packages,
except for test directories.
In addition to calling this function you can also import modules
that use Morepath directives manually, and you can use
:func:`scan` to automatically import everything in a
single package.
Typically called immediately after startup just before the
application starts serving using WSGI.
``autoscan`` always ignores ``.test`` and ``.tests``
sub-packages -- these are assumed never to contain useful Morepath
configuration and are not scanned.
``autoscan`` can fail with an ``ImportError`` when it tries to
scan code that imports an optional dependency that is not
installed. This happens most commonly in test code, which often
relies on test-only dependencies such as ``pytest`` or ``nose``. If
those tests are in a ``.test`` or ``.tests`` sub-package they
are automatically ignored, however.
If you have a special package with such expected import errors,
you can exclude them from ``autoscan`` using the ``ignore``
argument, for instance using ``['special_package']``. You then can
use :func:`scan` for that package, with a custom
``ignore`` argument that excludes the modules that generate import
errors.
See also :func:`scan`.
:param ignore: A list of packages to ignore during scanning.
Optional. If omitted, ``.test`` and ``.tests`` packages are
ignored by default. See :func:`importscan.scan` for
more details.
"""
if ignore is None:
ignore = []
ignore.extend([".test", ".tests"])
for package in morepath_packages():
importscan.scan(package, ignore)
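# A typical call site (sketch; the app name is illustrative): run autoscan()
# once at startup, just before handing the WSGI app to a server.
#
#   import morepath
#
#   morepath.autoscan()
#   application = SomeApp()   # an App subclass defined in a scanned package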
def morepath_packages():
"""Iterable of modules that depend on morepath. Each such module is
imported before it is returned.
If the ``setup.py`` name differs from the name of the distributed
package or module, the import will fail. See
:func:`morepath.autoscan` for more information.
:return: Iterable of modules that depend on Morepath. The modules
are typically the ``__init__.py`` modules of packages as declared
by ``setup.py``.
"""
m = DependencyMap()
m.load()
for distribution in m.relevant_dists("morepath"):
yield import_package(distribution)
def import_package(distribution):
"""
Takes a pkg_resources distribution and loads the module contained
in it, if it matches the rules layed out in :func:`morepath.autoscan`.
"""
try:
return importlib.import_module(get_module_name(distribution))
except ImportError:
raise AutoImportError(distribution.project_name)
class DependencyMap:
"""A registry that tracks dependencies between distributions.
Used by :func:`morepath_packages` to find installed Python distributions
that depend on Morepath, directly or indirectly.
"""
def __init__(self):
self._d = {}
def load(self):
"""Fill the registry with dependency information."""
for dist in pkg_resources.working_set:
for r in dist.requires():
self._d.setdefault(dist.project_name, set()).add(r.project_name)
def depends(self, project_name, on_project_name, visited=None):
"""Check whether project transitively depends on another.
A project depends on another project if it directly or
indirectly requires the other project.
:param project_name: Python distribution name.
:param on_project_name: Python distribution name it depends on.
:return: True if ``project_name`` depends on ``on_project_name``.
"""
dependent_project_names = self._d.get(project_name, set())
if on_project_name in dependent_project_names:
return True
visited = visited or set()
for n in dependent_project_names:
if n in visited:
continue
visited.add(n)
if self.depends(n, on_project_name, visited):
return True
return False
def relevant_dists(self, on_project_name):
"""Iterable of distributions that depend on project.
Dependency is transitive.
:param on_project_name: Python distribution name
:return: iterable of Python distribution objects that depend on
project
"""
for dist in pkg_resources.working_set:
if not self.depends(dist.project_name, on_project_name):
continue
yield dist
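# Example (sketch; distribution names are illustrative): the map answers
# transitive dependency questions, which is what morepath_packages() above
# relies on.
#
#   m = DependencyMap()
#   m.load()
#   m.depends('some-package', 'morepath')   # True if it (indirectly) requires morepath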
def get_module_name(distribution):
"""Determines the module name to import from the given distribution.
If an entry point named ``scan`` is found in the group ``morepath``,
its value is used. If not, the project_name is used.
See :func:`morepath.autoscan` for details and an example.
"""
if hasattr(distribution, "get_entry_map"):
entry_points = distribution.get_entry_map("morepath")
else:
entry_points = None
if entry_points and "scan" in entry_points:
return entry_points["scan"].module_name
# use normal setuptools project name.
# setuptools has the nasty habit to turn _ in package names
# into -. We turn them back again.
return distribution.project_name.replace("-", "_")
# taken from pyramid.path
def caller_module(level=2):
"""Give module where calling function is defined.
:level: levels deep to look up the stack frame
:return: a Python module
"""
module_globals = sys._getframe(level).f_globals
module_name = module_globals.get("__name__") or "__main__"
module = sys.modules[module_name]
return module
def caller_package(level=2):
"""Give package where calling function is defined.
:level: levels deep to look up the stack frame
:return: a Python module (representing the ``__init__.py`` of a package)
"""
# caller_module in arglist for tests
module = caller_module(level + 1)
f = getattr(module, "__file__", "")
if ("__init__.py" in f) or ("__init__$py" in f): # empty at >>>
# Module is a package
return module
# Go up one level to get package
package_name = module.__name__.rsplit(".", 1)[0]
return sys.modules[package_name]
| bsd-3-clause | 3,834,593,244,588,918,300 | 32.554688 | 80 | 0.65553 | false |
mlperf/inference_results_v0.5 | closed/NVIDIA/code/resnet/tensorrt/network_search.py | 1 | 7710 | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorrt as trt
from code.common import logging
class network_search(object):
def __init__(self, network):
self.tensors = set()
self.tensorReads = {} # map: tensor -> list of layers (read)
self.layerWrites = {} # map: layer -> list of tensors (written)
for i in range(network.num_layers):
layer = network.get_layer(i)
if layer.type != trt.LayerType.CONSTANT:
writes = []
self.layerWrites[layer] = writes
for i in range(layer.num_outputs):
tensor = layer.get_output(i)
self.tensors.add(tensor)
writes.append(tensor)
for i in range(layer.num_inputs):
tensor = layer.get_input(i)
self.tensors.add(tensor)
reads = self.tensorReads.get(tensor)
if reads is None:
reads = [layer]
self.tensorReads[tensor] = reads
else:
reads.append(layer)
for tensor in self.tensors:
if self.tensorReads.get(tensor) is None:
self.tensorReads[tensor] = []
@staticmethod
def print_match(pattern, match):
for node in pattern:
key = node["name"]
value = match[key]
if isinstance(value, trt.ILayer):
logging.info(key + "=" + match[key].name)
else:
logging.info(key + "=" + value.__str__())
@staticmethod
def print_matches(pattern, matches):
matchNumber = 1
if isinstance(matches, list):
for match in matches:
logging.info("Match number:", matchNumber)
network_search.print_match(pattern, match)
logging.info("")  # blank separator line between matches
matchNumber = matchNumber+1
else:
network_search.print_match(pattern, matches)
def match_tensor(self, tensor, values):
channels = values.get("channels")
if channels is not None:
if len(tensor.shape)==0:
return False
if channels!=tensor.shape[0]:
return False
return True
def match_convolution_layer(self, convolution_layer, values):
return True
def match_scale_layer(self, scale_layer, values):
return True
def match_activation_layer(self, activation_layer, values):
subtype = values.get("subtype")
if subtype is not None and subtype!=activation_layer.type:
return False
return True
def match_element_wise_layer(self, element_wise_layer, values):
op = values.get("op")
if op is not None and op!=element_wise_layer.op:
return False
return True
def match(self, current, search, key, state):
entry = search[key]
type = entry["type"]
if isinstance(current, trt.ITensor):
if isinstance(type, trt.LayerType):
if len(self.tensorReads[current])!=1:
return False
return self.match(self.tensorReads[current][0], search, key, state)
else:
if not self.match_tensor(current, entry):
return False
children = entry.get("children")
if children is not None:
if isinstance(children, str):
children = [children]
if not self.pair_match(self.tensorReads[current], search, children, state):
return False
# fall through
elif isinstance(current, trt.ILayer):
current.__class__ = trt.ILayer
layerType = current.type
if not isinstance(type, trt.LayerType) or layerType!=type:
return False
#
# For this example, I only need to match a few layer types, if more are required, please extend
#
if layerType==trt.LayerType.CONVOLUTION:
current.__class__ = trt.IConvolutionLayer;
if not self.match_convolution_layer(current, entry):
return False
elif layerType==trt.LayerType.SCALE:
current.__class__ = trt.IScaleLayer
if not self.match_scale_layer(current, entry):
return False
elif layerType==trt.LayerType.ACTIVATION:
current.__class__ = trt.IActivationLayer
if not self.match_activation_layer(current, entry):
return False
elif layerType==trt.LayerType.ELEMENTWISE:
current.__class__ = trt.IElementWiseLayer
if not self.match_element_wise_layer(current, entry):
return False
else:
raise Exception("Layer type not implemented")
children = entry.get("children")
if children is not None:
if isinstance(children, str):
children = [children];
if not self.pair_match(self.layerWrites[current], search, children, state):
return False
# fall through
else:
raise Exception("Unexpected type: " + current.__class__.__name__)
join = state.get(key)
if join is None:
state[key] = current
else:
if join!=current:
return False
return True
def pair_match(self, currentList, search, keyList, state):
# each "key" criteria must uniquely match exactly one "current", i.e., a bijection from keys to currents.
if len(currentList)!=len(keyList):
return False
matchSet = set()
bijectionMap = {}
for key in keyList:
count = 0
for current in currentList:
copy = state.copy()
if self.match(current, search, key, copy):
count = count+1
matchSet.add(current)
bijectionMap[key] = current
if count!=1:
return False
if len(matchSet)!=len(currentList):
return False
for key,current in bijectionMap.items():
self.match(current, search, key, state)
return True
@staticmethod
def search(network, pattern, singleMatch = False):
engine = network_search(network)
patternDictionary = {}
for entry in pattern:
patternDictionary[entry["name"]] = entry
logging.info(patternDictionary)
results = []
for tensor in engine.tensors:
state = {}
if engine.match(tensor, patternDictionary, "input", state):
if singleMatch:
return state
results.append(state)
if singleMatch:
return None
return results
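# Example pattern (sketch only; field names follow the matcher above, layer
# types and linkage are illustrative). It describes an input tensor feeding a
# convolution whose output goes through a scale (e.g. batch-norm) and then an
# activation:
#
#   pattern = [
#       {"name": "input", "type": "tensor", "children": ["conv"]},
#       {"name": "conv", "type": trt.LayerType.CONVOLUTION, "children": "bn"},
#       {"name": "bn", "type": trt.LayerType.SCALE, "children": "relu"},
#       {"name": "relu", "type": trt.LayerType.ACTIVATION},
#   ]
#   matches = network_search.search(network, pattern)
#   network_search.print_matches(pattern, matches)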
| apache-2.0 | 8,873,584,699,447,280,000 | 37.55 | 113 | 0.540208 | false |
google/grr | grr/server/grr_response_server/databases/db.py | 1 | 158455 | #!/usr/bin/env python
"""The GRR relational database abstraction.
This defines the Database abstraction, which defines the methods used by GRR on
a logical relational database model.
"""
import abc
import collections
import re
from typing import Dict
from typing import Generator
from typing import Iterable
from typing import List
from typing import Optional
from typing import Sequence
from typing import Set
from typing import Text
from typing import Tuple
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import time_utils
from grr_response_core.lib.rdfvalues import artifacts as rdf_artifacts
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import client_network as rdf_client_network
from grr_response_core.lib.rdfvalues import client_stats as rdf_client_stats
from grr_response_core.lib.rdfvalues import crypto as rdf_crypto
from grr_response_core.lib.rdfvalues import flows as rdf_flows
from grr_response_core.lib.rdfvalues import stats as rdf_stats
from grr_response_core.lib.util import compatibility
from grr_response_core.lib.util import precondition
from grr_response_server import fleet_utils
from grr_response_server import foreman_rules
from grr_response_server.rdfvalues import cronjobs as rdf_cronjobs
from grr_response_server.rdfvalues import flow_objects as rdf_flow_objects
from grr_response_server.rdfvalues import flow_runner as rdf_flow_runner
from grr_response_server.rdfvalues import hunt_objects as rdf_hunt_objects
from grr_response_server.rdfvalues import objects as rdf_objects
CLIENT_STATS_RETENTION = rdfvalue.Duration.From(31, rdfvalue.DAYS)
# Use 254 as max length for usernames to allow email addresses.
MAX_USERNAME_LENGTH = 254
MAX_LABEL_LENGTH = 100
MAX_ARTIFACT_NAME_LENGTH = 100
MAX_CRON_JOB_ID_LENGTH = 100
MAX_MESSAGE_HANDLER_NAME_LENGTH = 128
_MAX_GRR_VERSION_LENGTH = 100
_MAX_CLIENT_PLATFORM_LENGTH = 100
_MAX_CLIENT_PLATFORM_RELEASE_LENGTH = 200
# Using sys.maxsize may not work with real database implementations. We need
# to have a reasonably large number that can be used to read all the records
# using a particular DB API call.
MAX_COUNT = 1024**3
CLIENT_IDS_BATCH_SIZE = 500000
_EMAIL_REGEX = re.compile(r"[^@]+@([^@]+)$")
MAX_EMAIL_LENGTH = 255
class Error(Exception):
"""Base exception class for DB exceptions."""
# Python exception constructors should be able to handle arbitrary amount
# of arguments if they want to stay pickleable.
# See:
# https://stackoverflow.com/questions/41808912/cannot-unpickle-exception-subclass
#
# Database exceptions have to be pickleable in order to make self-contained
# E2E testing with SharedMemoryDB possible (as SharedMemoryDB server has to
# pass serialized exception objects back to SharedMemoryDB clients running
# as separate processes - see test/grr_response_test/run_self_contained.py for
# more details). Consequently, __init__ passes all the positional arguments
# through and accepts "cause" as a keyword argument.
#
# Exceptions inherited from Error are expected to call Error's constructor
# with all positional arguments they've received and set self.message to
# a custom message (in case they need one).
def __init__(self, *args, **kwargs):
super().__init__(*args)
self.cause = kwargs.get("cause")
self.message = None
def __str__(self):
message = self.message or super().__str__()
if self.cause is not None:
return "%s: %s" % (message, self.cause)
return message
class NotFoundError(Error):
pass
class UnknownArtifactError(NotFoundError):
"""An exception class for errors about unknown artifacts.
Attributes:
name: A name of the non-existing artifact that was referenced.
cause: An (optional) exception instance that triggered this error.
"""
def __init__(self, name, cause=None):
super().__init__(name, cause=cause)
self.name = name
self.message = "Artifact with name '%s' does not exist" % self.name
class DuplicatedArtifactError(Error):
"""An exception class for errors about duplicated artifacts being written.
Attributes:
name: A name of the artifact that was referenced.
cause: An (optional) exception instance that triggered this error.
"""
def __init__(self, name, cause=None):
super().__init__(name, cause=cause)
self.name = name
self.message = "Artifact with name '%s' already exists" % self.name
class UnknownClientError(NotFoundError):
""""An exception class representing errors about uninitialized client.
Attributes:
client_id: An id of the non-existing client that was referenced.
cause: An (optional) exception instance that triggered the unknown client
error.
"""
def __init__(self, client_id, cause=None):
super().__init__(client_id, cause=cause)
self.client_id = client_id
self.message = "Client with id '%s' does not exist" % self.client_id
class AtLeastOneUnknownClientError(UnknownClientError):
def __init__(self, client_ids, cause=None):
super().__init__(client_ids, cause=cause)
self.client_ids = client_ids
self.message = "At least one client in '%s' does not exist" % ",".join(
client_ids)
class UnknownPathError(NotFoundError):
"""An exception class representing errors about unknown paths.
Attributes:
client_id: An id of the client for which the path does not exists.
path_type: A type of the path.
path_id: An id of the path.
"""
def __init__(self, client_id, path_type, components, cause=None):
super().__init__(client_id, path_type, components, cause=cause)
self.client_id = client_id
self.path_type = path_type
self.components = components
self.message = "Path '%s' of type '%s' on client '%s' does not exist"
self.message %= ("/".join(self.components), self.path_type, self.client_id)
class AtLeastOneUnknownPathError(NotFoundError):
"""An exception class raised when one of a set of paths is unknown."""
def __init__(self, client_path_ids, cause=None):
super().__init__(client_path_ids, cause=cause)
self.client_path_ids = client_path_ids
self.message = "At least one of client path ids does not exist: "
self.message += ", ".join(str(cpid) for cpid in self.client_path_ids)
class NotDirectoryPathError(NotFoundError):
"""An exception class raised when a path corresponds to a non-directory."""
def __init__(self, client_id, path_type, components, cause=None):
super().__init__(client_id, path_type, components, cause=cause)
self.client_id = client_id
self.path_type = path_type
self.components = components
self.message = ("Listing descendants of path '%s' of type '%s' on client "
"'%s' that is not a directory")
self.message %= ("/".join(self.components), self.path_type, self.client_id)
class UnknownRuleError(NotFoundError):
pass
class UnknownGRRUserError(NotFoundError):
"""An error thrown when no user is found for a given username."""
def __init__(self, username, cause=None):
super().__init__(username, cause=cause)
self.username = username
self.message = "Cannot find user with username %r" % self.username
class UnknownApprovalRequestError(NotFoundError):
pass
class UnknownCronJobError(NotFoundError):
pass
class UnknownCronJobRunError(NotFoundError):
pass
class UnknownSignedBinaryError(NotFoundError):
"""Exception raised when a signed binary isn't found in the DB."""
def __init__(self, binary_id, cause=None):
"""Initializes UnknownSignedBinaryError.
Args:
binary_id: rdf_objects.SignedBinaryID for the signed binary.
cause: A lower-level Exception raised by the database driver, which might
have more details about the error.
"""
super().__init__(binary_id, cause=cause)
self.binary_id = binary_id
self.message = ("Signed binary of type %s and path %s was not found" %
(self.binary_id.binary_type, self.binary_id.path))
class UnknownFlowError(NotFoundError):
def __init__(self, client_id, flow_id, cause=None):
super().__init__(client_id, flow_id, cause=cause)
self.client_id = client_id
self.flow_id = flow_id
self.message = ("Flow with client id '%s' and flow id '%s' does not exist" %
(self.client_id, self.flow_id))
class UnknownScheduledFlowError(NotFoundError):
"""Raised when a nonexistent ScheduledFlow is accessed."""
def __init__(self, client_id, creator, scheduled_flow_id, cause=None):
super().__init__(client_id, creator, scheduled_flow_id, cause=cause)
self.client_id = client_id
self.creator = creator
self.scheduled_flow_id = scheduled_flow_id
self.message = "ScheduledFlow {}/{}/{} does not exist.".format(
self.client_id, self.creator, self.scheduled_flow_id)
class UnknownHuntError(NotFoundError):
def __init__(self, hunt_id, cause=None):
super().__init__(hunt_id, cause=cause)
self.hunt_id = hunt_id
self.message = "Hunt with hunt id '%s' does not exist" % self.hunt_id
class DuplicatedHuntError(Error):
def __init__(self, hunt_id, cause=None):
message = "Hunt with hunt id '{}' already exists".format(hunt_id)
super().__init__(message, cause=cause)
self.hunt_id = hunt_id
class UnknownHuntOutputPluginStateError(NotFoundError):
def __init__(self, hunt_id, state_index):
super().__init__(hunt_id, state_index)
self.hunt_id = hunt_id
self.state_index = state_index
self.message = ("Hunt output plugin state for hunt '%s' with "
"index %d does not exist" %
(self.hunt_id, self.state_index))
class AtLeastOneUnknownFlowError(NotFoundError):
def __init__(self, flow_keys, cause=None):
super().__init__(flow_keys, cause=cause)
self.flow_keys = flow_keys
self.message = ("At least one flow with client id/flow_id in '%s' "
"does not exist" % (self.flow_keys))
class UnknownFlowRequestError(NotFoundError):
"""Raised when a flow request is not found."""
def __init__(self, client_id, flow_id, request_id, cause=None):
super().__init__(client_id, flow_id, request_id, cause=cause)
self.client_id = client_id
self.flow_id = flow_id
self.request_id = request_id
self.message = (
"Flow request %d for flow with client id '%s' and flow id '%s' "
"does not exist" % (self.request_id, self.client_id, self.flow_id))
class AtLeastOneUnknownRequestError(NotFoundError):
def __init__(self, request_keys, cause=None):
super().__init__(request_keys, cause=cause)
self.request_keys = request_keys
self.message = ("At least one request with client id/flow_id/request_id in "
"'%s' does not exist" % (self.request_keys))
class ParentHuntIsNotRunningError(Error):
"""Exception indicating that a hunt-induced flow is not processable."""
def __init__(self, client_id, flow_id, hunt_id, hunt_state):
super().__init__(client_id, flow_id, hunt_id, hunt_state)
self.client_id = client_id
self.flow_id = flow_id
self.hunt_id = hunt_id
self.hunt_state = hunt_state
self.message = (
"Parent hunt %s of the flow with client id '%s' and "
"flow id '%s' is not running: %s" %
(self.hunt_id, self.client_id, self.flow_id, self.hunt_state))
class HuntOutputPluginsStatesAreNotInitializedError(Error):
"""Exception indicating that hunt output plugin states weren't initialized."""
def __init__(self, hunt_obj):
super().__init__(hunt_obj)
self.hunt_obj = hunt_obj
self.message = ("Hunt %r has output plugins but no output plugins states. "
"Make sure it was created with hunt.CreateHunt and not "
"simply written to the database." % self.hunt_obj)
class ConflictingUpdateFlowArgumentsError(Error):
"""Raised when UpdateFlow is called with conflicting parameter."""
def __init__(self, client_id, flow_id, param_name):
super().__init__(client_id, flow_id, param_name)
self.client_id = client_id
self.flow_id = flow_id
self.param_name = param_name
self.message = ("Conflicting parameter when updating flow "
"%s (client %s). Can't call UpdateFlow with "
"flow_obj and %s passed together." %
(flow_id, client_id, param_name))
class FlowExistsError(Error):
"""Raised when an insertion fails because the Flow already exists."""
def __init__(self, client_id, flow_id):
super().__init__("Flow {}/{} already exists.".format(client_id, flow_id))
self.client_id = client_id
self.flow_id = flow_id
class StringTooLongError(ValueError):
"""Validation error raised if a string is too long."""
# TODO(user): migrate to Python 3 enums as soon as Python 3 is default.
class HuntFlowsCondition(object):
"""Constants to be used with ReadHuntFlows/CountHuntFlows methods."""
UNSET = 0
FAILED_FLOWS_ONLY = 1
SUCCEEDED_FLOWS_ONLY = 2
COMPLETED_FLOWS_ONLY = 3
FLOWS_IN_PROGRESS_ONLY = 4
CRASHED_FLOWS_ONLY = 5
@classmethod
def MaxValue(cls):
return cls.CRASHED_FLOWS_ONLY
HuntCounters = collections.namedtuple("HuntCounters", [
"num_clients",
"num_successful_clients",
"num_failed_clients",
"num_clients_with_results",
"num_crashed_clients",
"num_running_clients",
"num_results",
"total_cpu_seconds",
"total_network_bytes_sent",
])
FlowStateAndTimestamps = collections.namedtuple("FlowStateAndTimestamps", [
"flow_state",
"create_time",
"last_update_time",
])
class ClientPath(object):
"""An immutable class representing certain path on a given client.
Attributes:
client_id: A client to which the path belongs to.
path_type: A type of the path.
components: A tuple of path components.
basename: A basename of the path.
path_id: A path id of the path (corresponding to the path components).
"""
def __init__(self, client_id, path_type, components):
precondition.ValidateClientId(client_id)
_ValidateEnumType(path_type, rdf_objects.PathInfo.PathType)
_ValidatePathComponents(components)
self._repr = (client_id, path_type, tuple(components))
@classmethod
def OS(cls, client_id, components):
path_type = rdf_objects.PathInfo.PathType.OS
return cls(client_id=client_id, path_type=path_type, components=components)
@classmethod
def TSK(cls, client_id, components):
path_type = rdf_objects.PathInfo.PathType.TSK
return cls(client_id=client_id, path_type=path_type, components=components)
@classmethod
def NTFS(cls, client_id, components):
path_type = rdf_objects.PathInfo.PathType.NTFS
return cls(client_id=client_id, path_type=path_type, components=components)
@classmethod
def Registry(cls, client_id, components):
path_type = rdf_objects.PathInfo.PathType.REGISTRY
return cls(client_id=client_id, path_type=path_type, components=components)
@classmethod
def Temp(cls, client_id, components):
path_type = rdf_objects.PathInfo.PathType.TEMP
return cls(client_id=client_id, path_type=path_type, components=components)
@classmethod
def FromPathSpec(cls, client_id, path_spec):
path_info = rdf_objects.PathInfo.FromPathSpec(path_spec)
return cls.FromPathInfo(client_id, path_info)
@classmethod
def FromPathInfo(cls, client_id, path_info):
return cls(
client_id=client_id,
path_type=path_info.path_type,
components=tuple(path_info.components))
@property
def client_id(self):
return self._repr[0]
@property
def path_type(self):
return self._repr[1]
@property
def components(self):
return self._repr[2]
@property
def path_id(self) -> rdf_objects.PathID:
return rdf_objects.PathID.FromComponents(self.components)
@property
def vfs_path(self):
return rdf_objects.ToCategorizedPath(self.path_type, self.components)
@property
def basename(self):
return self.components[-1]
def __eq__(self, other):
if not isinstance(other, ClientPath):
return NotImplemented
return self._repr == other._repr # pylint: disable=protected-access
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self._repr)
def Path(self):
return "/".join(self.components)
def __repr__(self):
return "<%s client_id=%r path_type=%r components=%r>" % (
compatibility.GetName(
self.__class__), self.client_id, self.path_type, self.components)
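# Illustrative usage sketch (not part of the original module): building a
# ClientPath with the OS() convenience constructor and reading its derived
# properties. The client id below is a made-up example value.
#
#   path = ClientPath.OS("C.ea3b2b71840d6fa7", ("usr", "bin", "grr"))
#   path.basename   # -> "grr"
#   path.Path()     # -> "usr/bin/grr"
#   path.path_id    # -> rdf_objects.PathID derived from the components
#
# Because ClientPath is immutable and hashable, instances can be used as
# dictionary keys, e.g. when mapping client paths to PathInfo objects as
# ReadLatestPathInfosWithHashBlobReferences() does below.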
class ClientPathHistory(object):
"""A class representing stat and hash history for some path."""
def __init__(self):
self.stat_entries = {}
self.hash_entries = {}
def AddStatEntry(self, timestamp, stat_entry):
precondition.AssertType(timestamp, rdfvalue.RDFDatetime)
precondition.AssertType(stat_entry, rdf_client_fs.StatEntry)
self.stat_entries[timestamp] = stat_entry
def AddHashEntry(self, timestamp, hash_entry):
precondition.AssertType(timestamp, rdfvalue.RDFDatetime)
precondition.AssertType(hash_entry, rdf_crypto.Hash)
self.hash_entries[timestamp] = hash_entry
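# Illustrative usage sketch (not part of the original module): accumulating
# stat and hash history for a single path, keyed by collection time. The
# timestamps and entry values below are placeholders.
#
#   history = ClientPathHistory()
#   history.AddStatEntry(rdfvalue.RDFDatetime.Now(),
#                        rdf_client_fs.StatEntry(st_size=42))
#   history.AddHashEntry(rdfvalue.RDFDatetime.Now(),
#                        rdf_crypto.Hash(sha256=b"\x00" * 32))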
class Database(metaclass=abc.ABCMeta):
"""The GRR relational database abstraction."""
unchanged = "__unchanged__"
@abc.abstractmethod
def Now(self) -> rdfvalue.RDFDatetime:
"""Retrieves current time as reported by the database."""
@abc.abstractmethod
def WriteArtifact(self, artifact):
"""Writes new artifact to the database.
Args:
artifact: An `rdf_artifacts.Artifact` instance to write.
"""
# TODO(hanuszczak): Consider removing this method if it proves to be useless
# after the artifact registry refactoring.
@abc.abstractmethod
def ReadArtifact(self, name):
"""Looks up an artifact with given name from the database.
Args:
name: A name of the artifact to return.
Raises:
UnknownArtifactError: If an artifact with given name does not exist.
"""
@abc.abstractmethod
def ReadAllArtifacts(self):
"""Lists all artifacts that are stored in the database.
Returns:
A list of artifacts stored in the database.
"""
@abc.abstractmethod
def DeleteArtifact(self, name):
"""Deletes an artifact with given name from the database.
Args:
name: A name of the artifact to delete.
Raises:
UnknownArtifactError: If an artifact with given name does not exist.
"""
@abc.abstractmethod
def WriteClientMetadata(
self,
client_id,
certificate=None,
fleetspeak_enabled=None,
first_seen=None,
last_ping=None,
last_clock=None,
last_ip=None,
last_foreman=None,
fleetspeak_validation_info: Optional[Dict[str, str]] = None):
"""Write metadata about the client.
Updates one or more client metadata fields for the given client_id. Any of
the data fields can be left as None, and in this case are not changed.
Args:
client_id: A GRR client id string, e.g. "C.ea3b2b71840d6fa7".
certificate: If set, should be an rdfvalues.crypto.RDFX509 protocol
buffer. Normally only set during initial client record creation.
fleetspeak_enabled: A bool, indicating whether the client is connecting
through Fleetspeak. Normally only set during initial client record
creation.
first_seen: An rdfvalue.Datetime, indicating the first time the client
contacted the server.
last_ping: An rdfvalue.Datetime, indicating the last time the client
contacted the server.
last_clock: An rdfvalue.Datetime, indicating the last client clock time
reported to the server.
last_ip: An rdfvalues.client.NetworkAddress, indicating the last observed
ip address for the client.
last_foreman: An rdfvalue.Datetime, indicating the last time that the
client sent a foreman message to the server.
fleetspeak_validation_info: A dict with validation info from Fleetspeak.
"""
def DeleteClient(self, client_id):
"""Deletes a client with all associated metadata.
This method is a stub. Deletion is not yet supported.
Args:
client_id: A GRR client id string, e.g. "C.ea3b2b71840d6fa7".
"""
# TODO: Cascaded deletion of data is only implemented in MySQL
# yet. When the functionality for deleting clients is required, make sure to
# delete all associated metadata (history, stats, flows, messages, ...).
raise NotImplementedError("Deletion of Clients is not yet implemented.")
@abc.abstractmethod
def MultiReadClientMetadata(self, client_ids):
"""Reads ClientMetadata records for a list of clients.
Note: client ids not found in the database will be omitted from the
resulting map.
Args:
client_ids: A collection of GRR client id strings, e.g.
["C.ea3b2b71840d6fa7", "C.ea3b2b71840d6fa8"]
Returns:
A map from client_id to rdfvalues.object.ClientMetadata.
"""
def ReadClientMetadata(self, client_id):
"""Reads the ClientMetadata record for a single client.
Args:
client_id: A GRR client id string, e.g. "C.ea3b2b71840d6fa7".
Returns:
An rdfvalues.object.ClientMetadata object.
Raises:
UnknownClientError: if no client with corresponding id was found.
"""
result = self.MultiReadClientMetadata([client_id])
try:
return result[client_id]
except KeyError:
raise UnknownClientError(client_id)
@abc.abstractmethod
def WriteClientSnapshot(self, client):
"""Writes new client snapshot.
Writes a new snapshot of the client to the client history, typically saving
the results of an interrogate flow.
Args:
client: An rdfvalues.objects.ClientSnapshot. Will be saved at the
"current" timestamp.
Raises:
UnknownClientError: The client_id is not known yet.
"""
@abc.abstractmethod
def MultiReadClientSnapshot(self, client_ids):
"""Reads the latest client snapshots for a list of clients.
Args:
client_ids: a collection of GRR client ids, e.g. ["C.ea3b2b71840d6fa7",
"C.ea3b2b71840d6fa8"]
Returns:
A map from client_id to rdfvalues.objects.ClientSnapshot.
"""
def ReadClientSnapshot(self, client_id):
"""Reads the latest client snapshot for a single client.
Args:
client_id: A GRR client id string, e.g. "C.ea3b2b71840d6fa7".
Returns:
An rdfvalues.objects.ClientSnapshot object.
"""
return self.MultiReadClientSnapshot([client_id]).get(client_id)
@abc.abstractmethod
def MultiReadClientFullInfo(self, client_ids, min_last_ping=None):
"""Reads full client information for a list of clients.
Note: client ids not found in the database will be omitted from the
resulting map.
Args:
client_ids: a collection of GRR client ids, e.g. ["C.ea3b2b71840d6fa7",
"C.ea3b2b71840d6fa8"]
min_last_ping: If not None, only the clients with last ping time greater
than min_last_ping will be returned.
Returns:
A map from client ids to `ClientFullInfo` instance.
"""
def ReadClientFullInfo(self, client_id):
"""Reads full client information for a single client.
Args:
client_id: A GRR client id string, e.g. "C.ea3b2b71840d6fa7".
Returns:
A `ClientFullInfo` instance for given client.
Raises:
UnknownClientError: if no client with such id was found.
"""
result = self.MultiReadClientFullInfo([client_id])
try:
return result[client_id]
except KeyError:
raise UnknownClientError(client_id)
def ReadAllClientIDs(self,
min_last_ping=None,
batch_size=CLIENT_IDS_BATCH_SIZE):
"""Yields lists of client ids for all clients in the database.
Args:
min_last_ping: If provided, only ids for clients with a last-ping
timestamp newer than (or equal to) the given value will be returned.
batch_size: Integer, specifying the number of client ids to be queried at
a time.
Yields:
Lists of client IDs.
"""
for results in self.ReadClientLastPings(
min_last_ping=min_last_ping, batch_size=batch_size):
yield list(results.keys())
@abc.abstractmethod
def ReadClientLastPings(self,
min_last_ping=None,
max_last_ping=None,
fleetspeak_enabled=None,
batch_size=CLIENT_IDS_BATCH_SIZE):
"""Yields dicts of last-ping timestamps for clients in the DB.
Args:
min_last_ping: The minimum timestamp to fetch from the DB.
max_last_ping: The maximum timestamp to fetch from the DB.
fleetspeak_enabled: If set to True, only return data for
Fleetspeak-enabled clients. If set to False, only return ids for
non-Fleetspeak-enabled clients. If not set, return ids for both
Fleetspeak-enabled and non-Fleetspeak-enabled clients.
batch_size: Integer, specifying the number of client pings to be queried
at a time.
Yields:
Dicts mapping client ids to their last-ping timestamps.
"""
@abc.abstractmethod
def WriteClientSnapshotHistory(self, clients):
"""Writes the full history for a particular client.
Args:
clients: A list of client objects representing snapshots in time. Each
object should have a `timestamp` attribute specifying at which point
this snapshot was taken. All clients should have the same client id.
Raises:
AttributeError: If some client does not have a `timestamp` attribute.
TypeError: If clients are not instances of `objects.ClientSnapshot`.
ValueError: If client list is empty or clients have non-uniform ids.
"""
@abc.abstractmethod
def ReadClientSnapshotHistory(self, client_id, timerange=None):
"""Reads the full history for a particular client.
Args:
client_id: A GRR client id string, e.g. "C.ea3b2b71840d6fa7".
timerange: Should be either a tuple of (from, to) or None. "from" and "to"
should be rdfvalue.RDFDatetime or None values (from==None means "all
records up to 'to'", to==None means all records from 'from'). If both
"to" and "from" are None or the timerange itself is None, all history
items are fetched. Note: "from" and "to" are inclusive: i.e. a from <=
time <= to condition is applied.
Returns:
A list of rdfvalues.objects.ClientSnapshot, newest snapshot first.
"""
@abc.abstractmethod
def WriteClientStartupInfo(self, client_id, startup_info):
"""Writes a new client startup record.
Args:
client_id: A GRR client id string, e.g. "C.ea3b2b71840d6fa7".
startup_info: An rdfvalues.client.StartupInfo object. Will be saved at the
"current" timestamp.
Raises:
UnknownClientError: The client_id is not known yet.
"""
@abc.abstractmethod
def ReadClientStartupInfo(self,
client_id: str) -> Optional[rdf_client.StartupInfo]:
"""Reads the latest client startup record for a single client.
Args:
client_id: A GRR client id string, e.g. "C.ea3b2b71840d6fa7".
Returns:
An rdfvalues.client.StartupInfo object.
"""
@abc.abstractmethod
def ReadClientStartupInfoHistory(self, client_id, timerange=None):
"""Reads the full startup history for a particular client.
Args:
client_id: A GRR client id string, e.g. "C.ea3b2b71840d6fa7".
timerange: Should be either a tuple of (from, to) or None. "from" and "to"
should be rdfvalue.RDFDatetime or None values (from==None means "all
records up to 'to'", to==None means all records from 'from'). If both
"to" and "from" are None or the timerange itself is None, all history
items are fetched. Note: "from" and "to" are inclusive: i.e. a from <=
time <= to condition is applied.
Returns:
A list of rdfvalues.client.StartupInfo objects sorted by timestamp,
newest entry first.
"""
@abc.abstractmethod
def WriteClientCrashInfo(self, client_id, crash_info):
"""Writes a new client crash record.
Args:
client_id: A GRR client id string, e.g. "C.ea3b2b71840d6fa7".
crash_info: An rdfvalues.objects.ClientCrash object. Will be saved at the
"current" timestamp.
Raises:
UnknownClientError: The client_id is not known yet.
"""
@abc.abstractmethod
def ReadClientCrashInfo(self, client_id):
"""Reads the latest client crash record for a single client.
Args:
client_id: A GRR client id string, e.g. "C.ea3b2b71840d6fa7".
Returns:
An rdfvalues.client.ClientCrash object.
"""
@abc.abstractmethod
def ReadClientCrashInfoHistory(self, client_id):
"""Reads the full crash history for a particular client.
Args:
client_id: A GRR client id string, e.g. "C.ea3b2b71840d6fa7".
Returns:
A list of rdfvalues.client.ClientCrash objects sorted by timestamp,
newest entry first.
"""
@abc.abstractmethod
def AddClientKeywords(self, client_id: Text,
keywords: Iterable[Text]) -> None:
"""Associates the provided keywords with the client.
Args:
client_id: A GRR client id string, e.g. "C.ea3b2b71840d6fa7".
keywords: An iterable container of keyword strings to write.
Raises:
UnknownClientError: The client_id is not known yet.
"""
@abc.abstractmethod
def ListClientsForKeywords(
self,
keywords: Iterable[Text],
start_time: Optional[rdfvalue.RDFDatetime] = None
) -> Dict[Text, List[Text]]:
"""Lists the clients associated with keywords.
Args:
keywords: An iterable container of keyword strings to look for.
start_time: If set, should be an rdfvalue.RDFDatetime and the function will
only return keywords associated after this time.
Returns:
A dict mapping each provided keyword to a potentially empty list of client
ids.
"""
@abc.abstractmethod
def RemoveClientKeyword(self, client_id: Text, keyword: Text) -> None:
"""Removes the association of a particular client to a keyword.
Args:
client_id: A GRR client id string, e.g. "C.ea3b2b71840d6fa7".
keyword: The keyword to delete.
"""
@abc.abstractmethod
def AddClientLabels(self, client_id: Text, owner: Text,
labels: List[Text]) -> None:
"""Attaches a user label to a client.
Args:
client_id: A GRR client id string, e.g. "C.ea3b2b71840d6fa7".
owner: Username string that owns the created labels.
labels: The labels to attach as a list of strings.
"""
@abc.abstractmethod
def MultiReadClientLabels(
self,
client_ids: List[Text]) -> Dict[Text, List[rdf_objects.ClientLabel]]:
"""Reads the user labels for a list of clients.
Args:
client_ids: a collection of GRR client ids, e.g. ["C.ea3b2b71840d6fa7",
"C.ea3b2b71840d6fa8"]
Returns:
A map from client_id to a list of rdfvalue.objects.ClientLabel,
sorted by owner, label name.
"""
def ReadClientLabels(self, client_id: Text) -> List[rdf_objects.ClientLabel]:
"""Reads the user labels for a given client.
Args:
client_id: A GRR client id string, e.g. "C.ea3b2b71840d6fa7".
Returns:
A list of rdfvalue.objects.ClientLabel for the given client,
sorted by owner, label name.
"""
return self.MultiReadClientLabels([client_id])[client_id]
@abc.abstractmethod
def RemoveClientLabels(self, client_id: Text, owner: Text,
labels: List[Text]) -> None:
"""Removes a list of user labels from a given client.
Args:
client_id: A GRR client id string, e.g. "C.ea3b2b71840d6fa7".
owner: Username string that owns the labels that should be removed.
labels: The labels to remove as a list of strings.
"""
@abc.abstractmethod
def ReadAllClientLabels(self) -> List[rdf_objects.ClientLabel]:
"""Lists all client labels known to the system.
Returns:
A list of rdfvalue.objects.ClientLabel values.
"""
@abc.abstractmethod
def WriteClientStats(self, client_id: Text,
stats: rdf_client_stats.ClientStats) -> None:
"""Stores a ClientStats instance.
If stats.create_time is unset, a copy of stats with create_time = now()
will be stored.
Stats are not stored if create_time is older than the retention period
db.CLIENT_STATS_RETENTION.
Any existing entry with identical client_id and create_time will be
overwritten.
Args:
client_id: A GRR client id string, e.g. "C.ea3b2b71840d6fa7".
stats: an instance of rdfvalues.client_stats.ClientStats
"""
@abc.abstractmethod
def ReadClientStats(
self,
client_id: Text,
min_timestamp: Optional[rdfvalue.RDFDatetime] = None,
max_timestamp: Optional[rdfvalue.RDFDatetime] = None
) -> List[rdf_client_stats.ClientStats]:
"""Reads ClientStats for a given client and optional time range.
Args:
client_id: A GRR client id string, e.g. "C.ea3b2b71840d6fa7".
min_timestamp: minimum rdfvalue.RDFDateTime (inclusive). If None,
ClientStats since the retention date will be returned.
max_timestamp: maximum rdfvalue.RDFDateTime (inclusive). If None,
ClientStats up to the current time will be returned.
Returns:
A list of rdfvalues.client_stats.ClientStats instances, sorted by
create_time.
"""
@abc.abstractmethod
def DeleteOldClientStats(
self,
yield_after_count: int,
retention_time: Optional[rdfvalue.RDFDatetime] = None
) -> Generator[int, None, None]:
"""Deletes ClientStats older than a given timestamp.
This function yields after deleting at most `yield_after_count` ClientStats.
Args:
yield_after_count: A positive integer, representing the maximum number of
deleted entries, after which this function must yield to allow
heartbeats.
retention_time: An RDFDateTime representing the oldest create_time of
ClientStats that remains after deleting all older entries. If not
specified, defaults to Now() - db.CLIENT_STATS_RETENTION.
Yields:
The number of ClientStats that were deleted since the last yield.
"""
@abc.abstractmethod
def CountClientVersionStringsByLabel(
self, day_buckets: Set[int]) -> fleet_utils.FleetStats:
"""Computes client-activity stats for all GRR versions in the DB.
Stats are aggregated across the given time buckets, e.g. if the buckets
are {1, 7, 30}, stats will be calculated for 1-day-active, 7-day-active
and 30-day-active clients (according to clients' last-ping timestamps).
Args:
day_buckets: A set of integers, where each represents an n-day-active
bucket.
Returns:
A FleetStats object containing the results.
"""
@abc.abstractmethod
def CountClientPlatformsByLabel(
self, day_buckets: Set[int]) -> fleet_utils.FleetStats:
"""Computes client-activity stats for all client platforms in the DB.
Stats are aggregated across the given time buckets, e.g. if the buckets
are {1, 7, 30}, stats will be calculated for 1-day-active, 7-day-active
and 30-day-active clients (according to clients' last-ping timestamps).
Args:
day_buckets: A set of integers, where each represents an n-day-active
bucket.
Returns:
A FleetStats object containing the results.
"""
@abc.abstractmethod
def CountClientPlatformReleasesByLabel(
self, day_buckets: Set[int]) -> fleet_utils.FleetStats:
"""Computes client-activity stats for client OS-release strings in the DB.
Stats are aggregated across the given time buckets, e.g. if the buckets
are {1, 7, 30}, stats will be calculated for 1-day-active, 7-day-active
and 30-day-active clients (according to clients' last-ping timestamps).
Args:
day_buckets: A set of integers, where each represents an n-day-active
bucket.
Returns:
A FleetStats object containing the results.
"""
@abc.abstractmethod
def WriteForemanRule(self, rule):
"""Writes a foreman rule to the database.
Args:
rule: A foreman.ForemanRule object.
"""
@abc.abstractmethod
def RemoveForemanRule(self, hunt_id):
"""Removes a foreman rule from the database.
Args:
hunt_id: Hunt id of the rule that should be removed.
Raises:
UnknownRuleError: No rule with the given hunt_id exists.
"""
@abc.abstractmethod
def ReadAllForemanRules(self):
"""Reads all foreman rules from the database.
Returns:
A list of foreman.ForemanCondition objects.
"""
@abc.abstractmethod
def RemoveExpiredForemanRules(self):
"""Removes all expired foreman rules from the database."""
@abc.abstractmethod
def WriteGRRUser(self,
username,
password=None,
ui_mode=None,
canary_mode=None,
user_type=None,
email=None):
"""Writes user object for a user with a given name.
If a user with the given username exists, it is overwritten.
Args:
username: Name of a user to insert/update.
password: If set, should be a string with a new encrypted user password.
ui_mode: If set, should be a GUISettings.UIMode enum.
canary_mode: If not None, should be a boolean indicating user's preferred
canary mode setting.
user_type: GRRUser.UserType enum describing user type (unset, standard or
admin).
email: If set, E-Mail address overriding the default
<username>@<Logging.domain>.
"""
@abc.abstractmethod
def ReadGRRUser(self, username):
"""Reads a user object corresponding to a given name.
Args:
username: Name of a user.
Returns:
A rdfvalues.objects.GRRUser object.
Raises:
UnknownGRRUserError: if there's no user corresponding to the given name.
"""
@abc.abstractmethod
def ReadGRRUsers(self, offset=0, count=None):
"""Reads GRR users with optional pagination, sorted by username.
Args:
offset: An integer specifying an offset to be used when reading results.
count: Maximum number of users to return. If not provided, all users will
be returned (respecting offset).
Returns:
A list of `objects.GRRUser` objects.
Raises:
ValueError: if offset or count are negative.
"""
@abc.abstractmethod
def CountGRRUsers(self):
"""Returns the total count of GRR users."""
@abc.abstractmethod
def DeleteGRRUser(self, username):
"""Deletes the user and all related metadata with the given username.
Args:
username: Username identifying the user.
Raises:
UnknownGRRUserError: if there is no user corresponding to the given name.
"""
@abc.abstractmethod
def WriteApprovalRequest(self, approval_request):
"""Writes an approval request object.
Args:
approval_request: rdfvalues.objects.ApprovalRequest object. Note:
approval_id and timestamps provided inside the argument object will be
ignored. Values generated by the database will be used instead.
Returns:
approval_id: String identifying newly created approval request.
Approval id is unique among approval ids for the same
username. I.e. there can be no 2 approvals with the same id
for the same username.
"""
@abc.abstractmethod
def ReadApprovalRequest(self, requestor_username, approval_id):
"""Reads an approval request object with a given id.
Args:
requestor_username: Username of the user who has requested the approval.
approval_id: String identifying approval request object.
Returns:
rdfvalues.objects.ApprovalRequest object.
Raises:
UnknownApprovalRequestError: if there's no corresponding approval request
object.
"""
@abc.abstractmethod
def ReadApprovalRequests(
self,
requestor_username,
approval_type,
subject_id=None,
include_expired=False) -> Sequence[rdf_objects.ApprovalRequest]:
"""Reads approval requests of a given type for a given user.
Args:
requestor_username: Username of the user who has requested the approval.
approval_type: Type of approvals to list.
subject_id: String identifying the subject (client id, hunt id or cron job
id). If not None, only approval requests for this subject will be
returned.
include_expired: If True, will also yield already expired approvals.
Returns:
A list of rdfvalues.objects.ApprovalRequest objects.
"""
@abc.abstractmethod
def GrantApproval(self, requestor_username, approval_id, grantor_username):
"""Grants approval for a given request using given username.
Args:
requestor_username: Username of the user who has requested the approval.
approval_id: String identifying approval request object.
grantor_username: String with a username of a user granting the approval.
"""
def IterateAllClientsFullInfo(self, min_last_ping=None, batch_size=50000):
"""Iterates over all available clients and yields full info protobufs.
Args:
min_last_ping: If not None, only the clients with last-ping timestamps
newer than (or equal to) min_last_ping will be returned.
batch_size: Always reads <batch_size> client full infos at a time.
Yields:
An rdfvalues.objects.ClientFullInfo object for each client in the db.
"""
for batch in self.ReadAllClientIDs(
min_last_ping=min_last_ping, batch_size=batch_size):
res = self.MultiReadClientFullInfo(batch)
for full_info in res.values():
yield full_info
def IterateAllClientSnapshots(self, min_last_ping=None, batch_size=50000):
"""Iterates over all available clients and yields client snapshot objects.
Args:
min_last_ping: If provided, only snapshots for clients with last-ping
timestamps newer than (or equal to) the given value will be returned.
batch_size: Always reads <batch_size> snapshots at a time.
Yields:
An rdfvalues.objects.ClientSnapshot object for each client in the db.
"""
for batch in self.ReadAllClientIDs(
min_last_ping=min_last_ping, batch_size=batch_size):
res = self.MultiReadClientSnapshot(batch)
for snapshot in res.values():
if snapshot:
yield snapshot
@abc.abstractmethod
def ReadPathInfo(self, client_id, path_type, components, timestamp=None):
"""Retrieves a path info record for a given path.
The `timestamp` parameter specifies for what moment in time the path
information is to be retrieved. For example, if (using abstract time units)
at time 1 the path was in state A, at time 5 it was observed to be in state
B, and at time 8 it was in state C, then retrieving the information at time
6 yields B.
Args:
client_id: An identifier string for a client.
path_type: A type of a path to retrieve path information for.
components: A tuple of path components of a path to retrieve path
information for.
timestamp: A moment in time for which we want to retrieve the information.
If none is provided, the latest known path information is returned.
Returns:
An `rdf_objects.PathInfo` instance.
"""
@abc.abstractmethod
def ReadPathInfos(self, client_id, path_type, components_list):
"""Retrieves path info records for given paths.
Args:
client_id: An identifier string for a client.
path_type: A type of a path to retrieve path information for.
components_list: An iterable of tuples of path components corresponding to
paths to retrieve path information for.
Returns:
A dictionary mapping path components to `rdf_objects.PathInfo` instances.
"""
def ListChildPathInfos(self,
client_id,
path_type,
components,
timestamp=None):
"""Lists path info records that correspond to children of given path.
Args:
client_id: An identifier string for a client.
path_type: A type of a path to retrieve path information for.
components: A tuple of path components of a path to retrieve child path
information for.
timestamp: If set, lists only descendants that existed at that timestamp.
Returns:
A list of `rdf_objects.PathInfo` instances sorted by path components.
"""
return self.ListDescendantPathInfos(
client_id, path_type, components, max_depth=1, timestamp=timestamp)
@abc.abstractmethod
def ListDescendantPathInfos(self,
client_id,
path_type,
components,
timestamp=None,
max_depth=None):
"""Lists path info records that correspond to descendants of given path.
Args:
client_id: An identifier string for a client.
path_type: A type of a path to retrieve path information for.
components: A tuple of path components of a path to retrieve descendant
path information for.
timestamp: If set, lists only descendants that existed at that timestamp.
max_depth: If set, the maximum number of generations to descend, otherwise
unlimited.
Returns:
A list of `rdf_objects.PathInfo` instances sorted by path components.
"""
@abc.abstractmethod
def WritePathInfos(self, client_id, path_infos):
"""Writes a collection of path_info records for a client.
If any records are already present in the database, they will be merged -
see db_path_utils.MergePathInfo.
Args:
client_id: The client of interest.
path_infos: A list of rdfvalue.objects.PathInfo records.
"""
@abc.abstractmethod
def MultiWritePathInfos(self, path_infos):
"""Writes a collection of path info records for specified clients.
Args:
path_infos: A dictionary mapping client ids to `rdf_objects.PathInfo`
instances.
"""
@abc.abstractmethod
def ReadPathInfosHistories(
self,
client_id: Text,
path_type: rdf_objects.PathInfo.PathType,
components_list: Iterable[Sequence[Text]],
cutoff: Optional[rdfvalue.RDFDatetime] = None
) -> Dict[Sequence[Text], Sequence[rdf_objects.PathInfo]]:
"""Reads a collection of hash and stat entries for given paths.
Args:
client_id: An identifier string for a client.
path_type: A type of a path to retrieve path history information for.
components_list: An iterable of tuples of path components corresponding to
paths to retrieve path information for.
cutoff: An optional timestamp cutoff up to which the history entries are
collected.
Returns:
A dictionary mapping path components to lists of `rdf_objects.PathInfo`
ordered by timestamp in ascending order.
"""
def ReadPathInfoHistory(
self,
client_id: Text,
path_type: rdf_objects.PathInfo.PathType,
components: Sequence[Text],
cutoff: Optional[rdfvalue.RDFDatetime] = None
) -> Sequence[rdf_objects.PathInfo]:
"""Reads a collection of hash and stat entry for given path.
Args:
client_id: An identifier string for a client.
path_type: A type of a path to retrieve path history for.
components: A tuple of path components corresponding to path to retrieve
information for.
cutoff: An optional timestamp cutoff up to which the history entries are
collected.
Returns:
A list of `rdf_objects.PathInfo` ordered by timestamp in ascending order.
"""
histories = self.ReadPathInfosHistories(
client_id=client_id,
path_type=path_type,
components_list=[components],
cutoff=cutoff)
return histories[components]
@abc.abstractmethod
def ReadLatestPathInfosWithHashBlobReferences(self,
client_paths,
max_timestamp=None):
"""Returns PathInfos that have corresponding HashBlobReferences.
Args:
client_paths: ClientPath objects pointing to files.
max_timestamp: If not specified, then for every path simply the latest
PathInfo that has a matching HashBlobReference entry will be returned.
If specified, should be an rdfvalue.RDFDatetime, then the latest
PathInfo with a timestamp less or equal to max_timestamp will be
returned for every path.
Returns:
A dictionary mapping client paths to PathInfo objects. Every client path
from the client_paths argument is guaranteed to be a key in the resulting
dictionary. If a particular path won't have a PathInfo with a
corresponding HashBlobReference entry, None will be used as a dictionary
value.
"""
@abc.abstractmethod
def WriteUserNotification(self, notification):
"""Writes a notification for a given user.
Args:
notification: objects.UserNotification object to be written.
"""
@abc.abstractmethod
def ReadUserNotifications(self, username, state=None, timerange=None):
"""Reads notifications scheduled for a user within a given timerange.
Args:
username: Username identifying the user.
state: If set, only return the notifications with a given state attribute.
timerange: Should be either a tuple of (from, to) or None. "from" and "to"
should be rdfvalue.RDFDatetime or None values (from==None means "all
records up to 'to'", to==None means all records from 'from'). If both
"to" and "from" are None or the timerange itself is None, all
notifications are fetched. Note: "from" and "to" are inclusive: i.e. a
from <= time <= to condition is applied.
Returns:
List of objects.UserNotification objects.
"""
@abc.abstractmethod
def UpdateUserNotifications(self, username, timestamps, state=None):
"""Updates existing user notification objects.
Args:
username: Username identifying the user.
timestamps: List of timestamps of the notifications to be updated.
state: objects.UserNotification.State enum value to be written into the
notifications objects.
"""
@abc.abstractmethod
def ReadAPIAuditEntries(
self,
username: Optional[Text] = None,
router_method_names: Optional[List[Text]] = None,
min_timestamp: Optional[rdfvalue.RDFDatetime] = None,
max_timestamp: Optional[rdfvalue.RDFDatetime] = None
) -> List[rdf_objects.APIAuditEntry]:
"""Returns audit entries stored in the database.
The event log is sorted according to their timestamp (with the oldest
recorded event being first).
Args:
username: username associated with the audit entries
router_method_names: list of names of router methods
min_timestamp: minimum rdfvalue.RDFDateTime (inclusive)
max_timestamp: maximum rdfvalue.RDFDateTime (inclusive)
Returns:
List of `rdfvalues.objects.APIAuditEntry` instances.
"""
@abc.abstractmethod
def CountAPIAuditEntriesByUserAndDay(
self,
min_timestamp: Optional[rdfvalue.RDFDatetime] = None,
max_timestamp: Optional[rdfvalue.RDFDatetime] = None
) -> Dict[Tuple[Text, rdfvalue.RDFDatetime], int]:
"""Returns audit entry counts grouped by user and calendar day.
Examples:
>>> db.REL_DB.CountAPIAuditEntriesByUserAndDay()
{("sampleuser", RDFDateTime("2019-02-02 00:00:00")): 5}
Args:
min_timestamp: minimum rdfvalue.RDFDateTime (inclusive)
max_timestamp: maximum rdfvalue.RDFDateTime (inclusive)
Returns:
A dictionary mapping tuples of usernames and datetimes to counts.
- The dictionary has no entry if the count is zero for a day and user.
- The RDFDateTime only contains date information. The time part is always
midnight in UTC.
"""
@abc.abstractmethod
def WriteAPIAuditEntry(self, entry):
"""Writes an audit entry to the database.
Args:
entry: An `audit.APIAuditEntry` instance.
"""
@abc.abstractmethod
def WriteMessageHandlerRequests(self, requests):
"""Writes a list of message handler requests to the database.
Args:
requests: List of objects.MessageHandlerRequest.
"""
@abc.abstractmethod
def ReadMessageHandlerRequests(self):
"""Reads all message handler requests from the database.
Returns:
A list of objects.MessageHandlerRequest, sorted by timestamp,
newest first.
"""
@abc.abstractmethod
def DeleteMessageHandlerRequests(self, requests):
"""Deletes a list of message handler requests from the database.
Args:
requests: List of objects.MessageHandlerRequest.
"""
@abc.abstractmethod
def RegisterMessageHandler(self, handler, lease_time, limit=1000):
"""Registers a message handler to receive batches of messages.
Args:
handler: Method, which will be called repeatedly with lists of leased
objects.MessageHandlerRequest. Required.
lease_time: rdfvalue.Duration indicating how long the lease should
be valid. Required.
limit: Limit for the number of leased requests to give one execution of
handler.
"""
@abc.abstractmethod
def UnregisterMessageHandler(self, timeout=None):
"""Unregisters any registered message handler.
Args:
timeout: A timeout in seconds for joining the handler thread.
"""
@abc.abstractmethod
def WriteCronJob(self, cronjob):
"""Writes a cronjob to the database.
Args:
cronjob: A cronjobs.CronJob object.
"""
def ReadCronJob(self, cronjob_id):
"""Reads a cronjob from the database.
Args:
cronjob_id: The id of the cron job to read.
Returns:
A list of cronjobs.CronJob objects.
Raises:
UnknownCronJobError: A cron job with the given id does not exist.
"""
return self.ReadCronJobs(cronjob_ids=[cronjob_id])[0]
@abc.abstractmethod
def ReadCronJobs(self, cronjob_ids=None):
"""Reads all cronjobs from the database.
Args:
cronjob_ids: A list of cronjob ids to read. If not set, returns all cron
jobs in the database.
Returns:
A list of cronjobs.CronJob objects.
Raises:
UnknownCronJobError: A cron job for at least one of the given ids
does not exist.
"""
@abc.abstractmethod
def EnableCronJob(self, cronjob_id):
"""Enables a cronjob.
Args:
cronjob_id: The id of the cron job to enable.
Raises:
UnknownCronJobError: A cron job with the given id does not exist.
"""
@abc.abstractmethod
def DisableCronJob(self, cronjob_id):
"""Disables a cronjob.
Args:
cronjob_id: The id of the cron job to disable.
Raises:
UnknownCronJobError: A cron job with the given id does not exist.
"""
@abc.abstractmethod
def DeleteCronJob(self, cronjob_id):
"""Deletes a cronjob along with all its runs.
Args:
cronjob_id: The id of the cron job to delete.
Raises:
UnknownCronJobError: A cron job with the given id does not exist.
"""
@abc.abstractmethod
def UpdateCronJob(self,
cronjob_id,
last_run_status=unchanged,
last_run_time=unchanged,
current_run_id=unchanged,
state=unchanged,
forced_run_requested=unchanged):
"""Updates run information for an existing cron job.
Args:
cronjob_id: The id of the cron job to update.
last_run_status: A CronJobRunStatus object.
last_run_time: The last time a run was started for this cron job.
current_run_id: The id of the currently active run.
state: The state dict for stateful cron jobs.
forced_run_requested: A boolean indicating if a forced run is pending for
this job.
Raises:
UnknownCronJobError: A cron job with the given id does not exist.
"""
@abc.abstractmethod
def LeaseCronJobs(self, cronjob_ids=None, lease_time=None):
"""Leases all available cron jobs.
Args:
cronjob_ids: A list of cronjob ids that should be leased. If None, all
available cronjobs will be leased.
lease_time: rdfvalue.Duration indicating how long the lease should
be valid.
Returns:
A list of cronjobs.CronJob objects that were leased.
"""
@abc.abstractmethod
def ReturnLeasedCronJobs(self, jobs):
"""Makes leased cron jobs available for leasing again.
Args:
jobs: A list of leased cronjobs.
Raises:
ValueError: If not all of the cronjobs are leased.
"""
@abc.abstractmethod
def WriteCronJobRun(self, run_object):
"""Stores a cron job run object in the database.
Args:
run_object: A rdf_cronjobs.CronJobRun object to store.
"""
@abc.abstractmethod
def ReadCronJobRuns(self, job_id):
"""Reads all cron job runs for a given job id.
Args:
job_id: Runs will be returned for the job with the given id.
Returns:
A list of rdf_cronjobs.CronJobRun objects.
"""
@abc.abstractmethod
def ReadCronJobRun(self, job_id, run_id):
"""Reads a single cron job run from the db.
Args:
job_id: The job_id of the run to be read.
run_id: The run_id of the run to be read.
Returns:
An rdf_cronjobs.CronJobRun object.
"""
@abc.abstractmethod
def DeleteOldCronJobRuns(self, cutoff_timestamp):
"""Deletes cron job runs that are older than cutoff_timestamp.
Args:
cutoff_timestamp: This method deletes all runs that were started before
cutoff_timestamp.
Returns:
The number of deleted runs.
"""
@abc.abstractmethod
def WriteHashBlobReferences(self, references_by_hash):
"""Writes blob references for a given set of hashes.
Every file known to GRR has a history of PathInfos. Every PathInfo has a
hash_entry corresponding to a known hash of a file (or a downloaded part
of the file) at a given moment.
GRR collects files by collecting individual data blobs from the client.
Thus, in the end a file's contents may be described as a sequence of blobs.
Using WriteHashBlobReferences we key this sequence of blobs not with the
file name, but rather with a hash identifying file contents.
This way for any given PathInfo we can look at the hash and say whether
we have corresponding contents of the file by using ReadHashBlobReferences.
Args:
references_by_hash: A dict where SHA256HashID objects are keys and lists
of BlobReference objects are values.
"""
@abc.abstractmethod
def ReadHashBlobReferences(self, hashes):
"""Reads blob references of a given set of hashes.
Every file known to GRR has a history of PathInfos. Every PathInfo has a
hash_entry corresponding to a known hash of a file (or a downloaded part
of the file) at a given moment.
GRR collects files by collecting individual data blobs from the client.
Thus, in the end a file's contents may be described as a sequence of blobs.
We key this sequence of blobs not with the file name, but rather with a
hash identifying file contents.
This way for any given PathInfo we can look at the hash and say whether
we have corresponding contents of the file by using ReadHashBlobReferences.
Args:
hashes: An iterable of SHA256HashID objects.
Returns:
A dict where SHA256HashID objects are keys and iterables of BlobReference
objects are values. If no blob references are found for a certain hash,
None will be used as a value instead of a list.
"""
# If we send a message unsuccessfully to a client five times, we just give up
# and remove the message to avoid endless repetition of some broken action.
CLIENT_MESSAGES_TTL = 5
@abc.abstractmethod
def WriteClientActionRequests(self, requests):
"""Writes messages that should go to the client to the db.
Args:
requests: A list of ClientActionRequest objects to write.
"""
@abc.abstractmethod
def LeaseClientActionRequests(self, client_id, lease_time=None, limit=None):
"""Leases available client action requests for the client with the given id.
Args:
client_id: The client for which the requests should be leased.
lease_time: rdfvalue.Duration indicating how long the lease should
be valid.
limit: Lease at most <limit> requests. If set, must be less than 10000.
Default is 5000.
Returns:
A list of ClientActionRequest objects.
"""
@abc.abstractmethod
def ReadAllClientActionRequests(self, client_id):
"""Reads all client action requests available for a given client_id.
Args:
client_id: The client for which the requests should be read.
Returns:
A list of ClientActionRequest objects.
"""
@abc.abstractmethod
def DeleteClientActionRequests(self, requests):
"""Deletes a list of client action requests from the db.
Args:
requests: A list of ClientActionRequest objects to delete.
"""
@abc.abstractmethod
def WriteFlowObject(self, flow_obj, allow_update=True):
"""Writes a flow object to the database.
Args:
flow_obj: An rdf_flow_objects.Flow object to write.
allow_update: If False, raises AlreadyExistsError if the flow already
exists in the database. If True, the flow will be updated.
Raises:
AlreadyExistsError: The flow already exists and allow_update is False.
UnknownClientError: The client with the flow's client_id does not exist.
"""
@abc.abstractmethod
def ReadFlowObject(self, client_id, flow_id):
"""Reads a flow object from the database.
Args:
client_id: The client id on which this flow is running.
flow_id: The id of the flow to read.
Returns:
An rdf_flow_objects.Flow object.
"""
@abc.abstractmethod
def ReadAllFlowObjects(
self,
client_id: Optional[Text] = None,
min_create_time: Optional[rdfvalue.RDFDatetime] = None,
max_create_time: Optional[rdfvalue.RDFDatetime] = None,
include_child_flows: bool = True,
) -> List[rdf_flow_objects.Flow]:
"""Returns all flow objects.
Args:
client_id: The client id.
min_create_time: the minimum creation time (inclusive)
max_create_time: the maximum creation time (inclusive)
include_child_flows: include child flows in the results. If False, only
parent flows are returned.
Returns:
A list of rdf_flow_objects.Flow objects.
"""
@abc.abstractmethod
def ReadChildFlowObjects(self, client_id, flow_id):
"""Reads flow objects that were started by a given flow from the database.
Args:
client_id: The client id on which the flows are running.
flow_id: The id of the parent flow.
Returns:
A list of rdf_flow_objects.Flow objects.
"""
@abc.abstractmethod
def LeaseFlowForProcessing(self, client_id, flow_id, processing_time):
"""Marks a flow as being processed on this worker and returns it.
Args:
client_id: The client id on which this flow is running.
flow_id: The id of the flow to read.
processing_time: Duration that the worker has to finish processing before
the flow is considered stuck.
Raises:
ValueError: The flow is already marked as being processed.
ParentHuntIsNotRunningError: If the flow's parent hunt is stopped or
completed.
Returns:
An rdf_flow_objects.Flow object.
"""
@abc.abstractmethod
def ReleaseProcessedFlow(self, flow_obj):
"""Releases a flow that the worker was processing to the database.
This method will check if there are currently more requests ready for
processing. If there are, the flow will not be written to the database and
the method will return false.
Args:
flow_obj: The rdf_flow_objects.Flow object to return.
Returns:
A boolean indicating if it was possible to return the flow to the
database. If there are currently more requests ready to being processed,
this method will return false and the flow will not be written.
"""
@abc.abstractmethod
def UpdateFlow(self,
client_id,
flow_id,
flow_obj=unchanged,
flow_state=unchanged,
client_crash_info=unchanged,
pending_termination=unchanged,
processing_on=unchanged,
processing_since=unchanged,
processing_deadline=unchanged):
"""Updates flow objects in the database.
Args:
client_id: The client id on which this flow is running.
flow_id: The id of the flow to update.
flow_obj: An updated rdf_flow_objects.Flow object.
flow_state: An updated rdf_flow_objects.Flow.FlowState value.
client_crash_info: A rdf_client.ClientCrash object to store with the flow.
pending_termination: An rdf_flow_objects.PendingFlowTermination object.
Indicates that this flow is scheduled for termination.
processing_on: Worker this flow is currently processed on.
processing_since: Timestamp when the worker started processing this flow.
processing_deadline: Time after which this flow will be considered stuck
if processing hasn't finished.
"""
@abc.abstractmethod
def UpdateFlows(self, client_id_flow_id_pairs, pending_termination=unchanged):
"""Updates flow objects in the database.
Args:
client_id_flow_id_pairs: An iterable with tuples of (client_id, flow_id)
identifying flows to update.
pending_termination: An rdf_flow_objects.PendingFlowTermination object.
Indicates that this flow is scheduled for termination.
"""
@abc.abstractmethod
def WriteFlowRequests(self, requests):
"""Writes a list of flow requests to the database.
Args:
requests: List of rdf_flow_objects.FlowRequest objects.
"""
@abc.abstractmethod
def UpdateIncrementalFlowRequests(self, client_id: str, flow_id: str,
next_response_id_updates: Dict[int, int]):
"""Updates next response ids of given requests.
Used to update incremental requests (requests with a callback_state
specified) after each new batch of responses is processed.
Args:
client_id: The client id on which the flow is running.
flow_id: The flow id of the flow with requests to update.
next_response_id_updates: A map from request ids to new "next_response_id"
values.
"""
@abc.abstractmethod
def DeleteFlowRequests(self, requests):
"""Deletes a list of flow requests from the database.
Note: This also deletes all corresponding responses.
Args:
requests: List of rdf_flow_objects.FlowRequest objects.
"""
@abc.abstractmethod
def WriteFlowResponses(
self, responses: Iterable[rdf_flow_objects.FlowMessage]) -> None:
"""Writes FlowMessages and updates corresponding requests.
This method not only stores the list of responses given in the database but
also updates flow status information at the same time. Specifically, it
updates all corresponding flow requests, setting the needs_processing flag
in case all expected responses are available in the database after this call
and, in case the request the flow is currently waiting on becomes available
for processing, it also writes a FlowProcessingRequest to notify the worker.
Args:
responses: List of rdf_flow_objects.FlowMessage rdfvalues to write.
"""
@abc.abstractmethod
def ReadAllFlowRequestsAndResponses(self, client_id, flow_id):
"""Reads all requests and responses for a given flow from the database.
Args:
client_id: The client id on which this flow is running.
flow_id: The id of the flow to read requests and responses for.
Returns:
A list of tuples (request, dict mapping response_id to response) for each
request in the db.
"""
@abc.abstractmethod
def DeleteAllFlowRequestsAndResponses(self, client_id, flow_id):
"""Deletes all requests and responses for a given flow from the database.
Args:
client_id: The client id on which this flow is running.
flow_id: The id of the flow to delete requests and responses for.
"""
@abc.abstractmethod
def ReadFlowRequestsReadyForProcessing(self,
client_id,
flow_id,
next_needed_request=None):
"""Reads all requests for a flow that can be processed by the worker.
There are 2 kinds of requests that are going to be returned by this call:
1. Completed requests. These are requests that received all the
responses, including the status message, and their
"needs_processing" attribute is set to True.
2. Incremental requests. These are requests that have the callback state
specified (via the "callback_state" attribute) and are not yet
completed.
Completed requests are going to be returned with all the corresponding
responses. Incremental requests are going to be returned with new
responses only (that is, with responses having ids greater or equal to
request's 'next_response_id' attribute).
Args:
client_id: The client id on which this flow is running.
flow_id: The id of the flow to read requests for.
next_needed_request: The next request id that the flow needs to process.
Returns:
A dict mapping flow request id to tuples (request,
sorted list of responses for the request).
"""
@abc.abstractmethod
def WriteFlowProcessingRequests(self, requests):
"""Writes a list of flow processing requests to the database.
Args:
requests: List of rdf_flows.FlowProcessingRequest.
"""
@abc.abstractmethod
def ReadFlowProcessingRequests(self):
"""Reads all flow processing requests from the database.
Returns:
A list of rdf_flows.FlowProcessingRequest, sorted by timestamp,
newest first.
"""
@abc.abstractmethod
def AckFlowProcessingRequests(self, requests):
"""Acknowledges and deletes flow processing requests.
Args:
requests: List of rdf_flows.FlowProcessingRequest.
"""
@abc.abstractmethod
def DeleteAllFlowProcessingRequests(self):
"""Deletes all flow processing requests from the database."""
@abc.abstractmethod
def RegisterFlowProcessingHandler(self, handler):
"""Registers a handler to receive flow processing messages.
Args:
handler: Method, which will be called repeatedly with lists of
rdf_flows.FlowProcessingRequest. Required.
"""
@abc.abstractmethod
def UnregisterFlowProcessingHandler(self, timeout=None):
"""Unregisters any registered flow processing handler.
Args:
timeout: A timeout in seconds for joining the handler thread.
"""
@abc.abstractmethod
def WriteFlowResults(self, results):
"""Writes flow results for a given flow.
Args:
results: An iterable with FlowResult rdfvalues.
"""
@abc.abstractmethod
def ReadFlowResults(self,
client_id,
flow_id,
offset,
count,
with_tag=None,
with_type=None,
with_substring=None):
"""Reads flow results of a given flow using given query options.
If both with_tag and with_type and/or with_substring arguments are provided,
they will be applied using AND boolean operator.
Args:
client_id: The client id on which this flow is running.
flow_id: The id of the flow to read results for.
offset: An integer specifying an offset to be used when reading results.
"offset" is applied after with_tag/with_type/with_substring filters are
applied.
count: Number of results to read. "count" is applied after
with_tag/with_type/with_substring filters are applied.
with_tag: (Optional) When specified, should be a string. Only results
having specified tag will be returned.
with_type: (Optional) When specified, should be a string. Only results of
a specified type will be returned.
with_substring: (Optional) When specified, should be a string. Only
results having the specified string as a substring in their serialized
form will be returned.
Returns:
A list of FlowResult values sorted by timestamp in ascending order.
"""
@abc.abstractmethod
def CountFlowResults(self, client_id, flow_id, with_tag=None, with_type=None):
"""Counts flow results of a given flow using given query options.
If both with_tag and with_type arguments are provided, they will be applied
using AND boolean operator.
Args:
client_id: The client id on which the flow is running.
flow_id: The id of the flow to count results for.
with_tag: (Optional) When specified, should be a string. Only results
having specified tag will be accounted for.
with_type: (Optional) When specified, should be a string. Only results of
a specified type will be accounted for.
Returns:
A number of flow results of a given flow matching given query options.
"""
@abc.abstractmethod
def CountFlowResultsByType(self, client_id, flow_id):
"""Returns counts of flow results grouped by result type.
Args:
client_id: The client id on which the flow is running.
flow_id: The id of the flow to count results for.
Returns:
A dictionary of "type name" => <number of items>.
"""
@abc.abstractmethod
def CountFlowErrorsByType(self, client_id: str,
flow_id: str) -> Dict[str, int]:
"""Returns counts of flow errors grouped by error type.
Args:
client_id: The client id on which the flow is running.
flow_id: The id of the flow to count errors for.
Returns:
A dictionary of "type name" => <number of items>.
"""
@abc.abstractmethod
def WriteFlowErrors(self, errors: Iterable[rdf_flow_objects.FlowError]):
"""Writes flow errors for a given flow.
Args:
errors: An iterable with FlowError rdfvalues.
"""
@abc.abstractmethod
def ReadFlowErrors(
self,
client_id: str,
flow_id: str,
offset: int,
count: int,
with_tag: Optional[str] = None,
with_type: Optional[str] = None) -> List[rdf_flow_objects.FlowError]:
"""Reads flow errors of a given flow using given query options.
    If both with_tag and with_type arguments are provided, they will be applied
    using the AND boolean operator.
    Args:
      client_id: The client id on which this flow is running.
      flow_id: The id of the flow to read errors for.
      offset: An integer specifying an offset to be used when reading errors.
        "offset" is applied after the with_tag/with_type filters are applied.
      count: Number of errors to read. "count" is applied after the
        with_tag/with_type filters are applied.
with_tag: (Optional) When specified, should be a string. Only errors
having specified tag will be returned.
with_type: (Optional) When specified, should be a string. Only errors of a
specified type will be returned.
Returns:
A list of FlowError values sorted by timestamp in ascending order.
"""
@abc.abstractmethod
def CountFlowErrors(self,
client_id: str,
flow_id: str,
with_tag: Optional[str] = None,
with_type: Optional[str] = None) -> int:
"""Counts flow errors of a given flow using given query options.
    If both with_tag and with_type arguments are provided, they will be applied
    using the AND boolean operator.
Args:
client_id: The client id on which the flow is running.
flow_id: The id of the flow to count errors for.
with_tag: (Optional) When specified, should be a string. Only errors
having specified tag will be accounted for.
with_type: (Optional) When specified, should be a string. Only errors of a
specified type will be accounted for.
Returns:
A number of flow errors of a given flow matching given query options.
"""
@abc.abstractmethod
def WriteFlowLogEntries(self, entries):
"""Writes flow log entries for a given flow.
Args:
entries: An iterable of FlowLogEntry values.
"""
@abc.abstractmethod
def ReadFlowLogEntries(self,
client_id,
flow_id,
offset,
count,
with_substring=None):
"""Reads flow log entries of a given flow using given query options.
Args:
client_id: The client id on which the flow is running.
flow_id: The id of the flow to read log entries for.
offset: An integer specifying an offset to be used when reading log
entries. "offset" is applied after the with_substring filter is applied
(if specified).
count: Number of log entries to read. "count" is applied after the
with_substring filter is applied (if specified).
with_substring: (Optional) When specified, should be a string. Only log
entries having the specified string as a message substring will be
returned.
Returns:
A list of FlowLogEntry values sorted by timestamp in ascending order.
"""
@abc.abstractmethod
def CountFlowLogEntries(self, client_id, flow_id):
"""Returns number of flow log entries of a given flow.
Args:
client_id: The client id on which the flow is running.
flow_id: The id of the flow to count log entries for.
Returns:
Number of flow log entries of a given flow.
"""
@abc.abstractmethod
def WriteFlowOutputPluginLogEntries(self, entries):
"""Writes flow output plugin log entries for a given flow.
Args:
entries: An iterable of FlowOutputPluginLogEntry values.
"""
@abc.abstractmethod
def ReadFlowOutputPluginLogEntries(self,
client_id,
flow_id,
output_plugin_id,
offset,
count,
with_type=None):
"""Reads flow output plugin log entries.
Args:
client_id: The client id on which the flow is running.
flow_id: The id of the flow to read log entries for.
output_plugin_id: The id of an output plugin with logs to be read.
offset: An integer specifying an offset to be used when reading log
entries. "offset" is applied after the with_type filter is applied (if
specified).
count: Number of log entries to read. "count" is applied after the
with_type filter is applied (if specified).
with_type: (Optional) When specified, should have a
FlowOutputPluginLogEntry.LogEntryType value. Output will be limited to
entries with a given type.
Returns:
A list of FlowOutputPluginLogEntry values sorted by timestamp in ascending
order.
"""
@abc.abstractmethod
def CountFlowOutputPluginLogEntries(self,
client_id,
flow_id,
output_plugin_id,
with_type=None):
"""Returns the number of flow output plugin log entries of a given flow.
Args:
client_id: The client id on which the flow is running.
flow_id: The id of the flow to count output plugin log entries for.
output_plugin_id: The id of an output plugin with logs to be read. NOTE:
REL_DB code uses strings for output plugin ids for consistency (as all
other DB ids are strings). At the moment plugin_id in the database is
simply a stringified index of the plugin in Flow/Hunt.output_plugins
list.
with_type: (Optional) When specified, should have a
FlowOutputPluginLogEntry.LogEntryType value. Only records of a given
type will be counted.
Returns:
Number of output log entries.
"""
@abc.abstractmethod
def ReadHuntOutputPluginLogEntries(self,
hunt_id,
output_plugin_id,
offset,
count,
with_type=None):
"""Reads hunt output plugin log entries.
Args:
hunt_id: The hunt id of a hunt with the flows to read output plugins log
entries from.
output_plugin_id: The id of an output plugin with logs to be read. NOTE:
REL_DB code uses strings for output plugin ids for consistency (as all
other DB ids are strings). At the moment plugin_id in the database is
simply a stringified index of the plugin in Flow/Hunt.output_plugins
list.
offset: An integer specifying an offset to be used when reading log
entries. "offset" is applied after the with_type filter is applied (if
specified).
count: Number of log entries to read. "count" is applied after the
with_type filter is applied (if specified).
with_type: (Optional) When specified, should have a
FlowOutputPluginLogEntry.LogEntryType value. Output will be limited to
entries with a given type.
Returns:
A list of FlowOutputPluginLogEntry values sorted by timestamp in ascending
order.
"""
@abc.abstractmethod
def CountHuntOutputPluginLogEntries(self,
hunt_id,
output_plugin_id,
with_type=None):
"""Returns number of hunt output plugin log entries of a given hunt.
Args:
hunt_id: The hunt id of a hunt with output plugins log entries to be
counted.
output_plugin_id: The id of an output plugin with logs to be read. NOTE:
REL_DB code uses strings for output plugin ids for consistency (as all
other DB ids are strings). At the moment plugin_id in the database is
simply a stringified index of the plugin in Flow/Hunt.output_plugins
list.
with_type: (Optional) When specified, should have a
FlowOutputPluginLogEntry.LogEntryType value. Only records of a given
type will be counted.
Returns:
Number of output plugin log entries.
"""
@abc.abstractmethod
def WriteHuntObject(self, hunt_obj):
"""Writes a hunt object to the database.
Args:
hunt_obj: An rdf_hunt_objects.Hunt object to write.
"""
@abc.abstractmethod
def UpdateHuntObject(self,
hunt_id,
duration=None,
client_rate=None,
client_limit=None,
hunt_state=None,
hunt_state_comment=None,
start_time=None,
num_clients_at_start_time=None):
"""Updates the hunt object by applying the update function.
Each keyword argument when set to None, means that that corresponding value
shouldn't be updated.
Args:
hunt_id: Id of the hunt to be updated.
duration: A maximum allowed running time duration of the flow.
client_rate: Number correpsonding to hunt's client rate.
client_limit: Number corresponding hunt's client limit.
hunt_state: New Hunt.HuntState value.
hunt_state_comment: String correpsonding to a hunt state comment.
start_time: RDFDatetime corresponding to a start time of the hunt.
num_clients_at_start_time: Integer corresponding to a number of clients at
start time.
"""
@abc.abstractmethod
def ReadHuntOutputPluginsStates(self, hunt_id):
"""Reads all hunt output plugins states of a given hunt.
Args:
hunt_id: Id of the hunt.
Returns:
An iterable of rdf_flow_runner.OutputPluginState objects.
Raises:
      UnknownHuntError: if a hunt with a given hunt id does not exist.
"""
@abc.abstractmethod
def WriteHuntOutputPluginsStates(self, hunt_id, states):
"""Writes hunt output plugin states for a given hunt.
Args:
hunt_id: Id of the hunt.
states: An iterable with rdf_flow_runner.OutputPluginState objects.
Raises:
      UnknownHuntError: if a hunt with a given hunt id does not exist.
"""
@abc.abstractmethod
def UpdateHuntOutputPluginState(self, hunt_id, state_index, update_fn):
"""Updates hunt output plugin state for a given output plugin.
Args:
hunt_id: Id of the hunt to be updated.
state_index: Index of a state in ReadHuntOutputPluginsStates-returned
list.
update_fn: A function accepting a (descriptor, state) arguments, where
descriptor is OutputPluginDescriptor and state is an AttributedDict. The
function is expected to return a modified state (it's ok to modify it
in-place).
Returns:
      An updated AttributedDict object corresponding to an updated plugin state
(result of the update_fn function call).
Raises:
      UnknownHuntError: if a hunt with a given hunt id does not exist.
UnknownHuntOutputPluginStateError: if a state with a given index does
not exist.
"""
@abc.abstractmethod
def DeleteHuntObject(self, hunt_id):
"""Deletes a hunt object with a given id.
Args:
hunt_id: Id of the hunt to be deleted.
"""
@abc.abstractmethod
def ReadHuntObject(self, hunt_id):
"""Reads a hunt object from the database.
Args:
hunt_id: The id of the hunt to read.
Raises:
UnknownHuntError: if there's no hunt with the corresponding id.
Returns:
An rdf_hunt_objects.Hunt object.
"""
@abc.abstractmethod
def ReadHuntObjects(self,
offset,
count,
with_creator=None,
created_after=None,
with_description_match=None):
"""Reads hunt objects from the database.
Args:
offset: An integer specifying an offset to be used when reading hunt
objects.
count: Number of hunt objects to read.
with_creator: When specified, should be a string corresponding to a GRR
username. Only hunts created by the matching user will be returned.
created_after: When specified, should be a rdfvalue.RDFDatetime. Only
hunts with create_time after created_after timestamp will be returned.
with_description_match: When specified, will only return hunts with
descriptions containing a given substring.
Returns:
A list of rdf_hunt_objects.Hunt objects sorted by create_time in
descending order.
"""
@abc.abstractmethod
def ListHuntObjects(self,
offset,
count,
with_creator=None,
created_after=None,
with_description_match=None):
"""Reads metadata for hunt objects from the database.
Args:
offset: An integer specifying an offset to be used when reading hunt
metadata.
count: Number of hunt metadata objects to read.
with_creator: When specified, should be a string corresponding to a GRR
username. Only metadata for hunts created by the matching user will be
returned.
created_after: When specified, should be a rdfvalue.RDFDatetime. Only
metadata for hunts with create_time after created_after timestamp will
be returned.
with_description_match: When specified, will only return metadata for
hunts with descriptions containing a given substring.
Returns:
A list of rdf_hunt_objects.HuntMetadata objects sorted by create_time in
descending order.
"""
@abc.abstractmethod
def ReadHuntLogEntries(self, hunt_id, offset, count, with_substring=None):
"""Reads hunt log entries of a given hunt using given query options.
Args:
hunt_id: The id of the hunt to read log entries for.
offset: An integer specifying an offset to be used when reading log
entries. "offset" is applied after the with_substring filter is applied
(if specified).
count: Number of log entries to read. "count" is applied after the
with_substring filter is applied (if specified).
with_substring: (Optional) When specified, should be a string. Only log
entries having the specified string as a message substring will be
returned.
Returns:
A list of FlowLogEntry values sorted by timestamp in ascending order.
"""
@abc.abstractmethod
def CountHuntLogEntries(self, hunt_id):
"""Returns number of hunt log entries of a given hunt.
Args:
hunt_id: The id of the hunt to count log entries for.
Returns:
Number of hunt log entries of a given hunt.
"""
@abc.abstractmethod
def ReadHuntResults(self,
hunt_id,
offset,
count,
with_tag=None,
with_type=None,
with_substring=None,
with_timestamp=None):
"""Reads hunt results of a given hunt using given query options.
    If more than one of the with_tag, with_type and with_substring arguments is
    provided, they will be applied using the AND boolean operator.
Args:
hunt_id: The id of the hunt to read results for.
offset: An integer specifying an offset to be used when reading results.
"offset" is applied after with_tag/with_type/with_substring filters are
applied.
count: Number of results to read. "count" is applied after
with_tag/with_type/with_substring filters are applied.
with_tag: (Optional) When specified, should be a string. Only results
having specified tag will be returned.
with_type: (Optional) When specified, should be a string. Only results of
a specified type will be returned.
with_substring: (Optional) When specified, should be a string. Only
results having the specified string as a substring in their serialized
form will be returned.
      with_timestamp: (Optional) When specified, should be an
        rdfvalue.RDFDatetime. Only results with a given timestamp will be
        returned.
Returns:
A list of FlowResult values sorted by timestamp in ascending order.
"""
@abc.abstractmethod
def CountHuntResults(self, hunt_id, with_tag=None, with_type=None):
"""Counts hunt results of a given hunt using given query options.
    If both with_tag and with_type arguments are provided, they will be applied
    using the AND boolean operator.
Args:
hunt_id: The id of the hunt to count results for.
with_tag: (Optional) When specified, should be a string. Only results
having specified tag will be accounted for.
with_type: (Optional) When specified, should be a string. Only results of
a specified type will be accounted for.
Returns:
A number of hunt results of a given hunt matching given query options.
"""
@abc.abstractmethod
def CountHuntResultsByType(self, hunt_id):
"""Returns counts of items in hunt results grouped by type.
Args:
hunt_id: The id of the hunt to count results for.
Returns:
A dictionary of "type name" => <number of items>.
"""
@abc.abstractmethod
def ReadHuntFlows(self,
hunt_id,
offset,
count,
filter_condition=HuntFlowsCondition.UNSET):
"""Reads hunt flows matching given conditins.
If more than one condition is specified, all of them have to be fulfilled
for a particular flow object to be returned (i.e. they're applied with AND).
Args:
      hunt_id: The id of the hunt to read flows for.
      offset: An integer specifying an offset to be used when reading flows.
        "offset" is applied after the filter_condition is applied.
      count: Number of flows to read. "count" is applied after the
        filter_condition is applied.
filter_condition: One of HuntFlowsCondition constants describing a
condition to filter ReadHuntFlows results.
Returns:
A list of Flow objects.
"""
@abc.abstractmethod
def CountHuntFlows(self, hunt_id, filter_condition=HuntFlowsCondition.UNSET):
"""Counts hunt flows matching given conditions.
If more than one condition is specified, all of them have to be fulfilled
for a particular flow object to be returned (i.e. they're applied with AND).
Args:
      hunt_id: The id of the hunt to count flows for.
filter_condition: One of HuntFlowsCondition constants describing a
condition to influence CountHuntFlows results.
Returns:
A number of flows matching the specified condition.
"""
@abc.abstractmethod
def ReadHuntCounters(self, hunt_id):
"""Reads hunt counters.
Args:
hunt_id: The id of the hunt to read counters for.
Returns:
HuntCounters object.
"""
@abc.abstractmethod
def ReadHuntClientResourcesStats(self, hunt_id):
"""Read hunt client resources stats.
Args:
      hunt_id: The id of the hunt to read client resources stats for.
Returns:
rdf_stats.ClientResourcesStats object.
"""
@abc.abstractmethod
def ReadHuntFlowsStatesAndTimestamps(self, hunt_id):
"""Reads hunt flows states and timestamps.
Args:
      hunt_id: The id of the hunt to read flow states and timestamps for.
Returns:
An iterable of FlowStateAndTimestamps objects (in no particular
sorting order).
"""
@abc.abstractmethod
def WriteSignedBinaryReferences(self, binary_id, references):
"""Writes blob references for a signed binary to the DB.
Args:
binary_id: rdf_objects.SignedBinaryID for the binary.
references: rdf_objects.BlobReferences for the given binary.
"""
@abc.abstractmethod
def ReadSignedBinaryReferences(self, binary_id):
"""Reads blob references for the signed binary with the given id.
Args:
binary_id: rdf_objects.SignedBinaryID for the binary.
Returns:
A tuple of the signed binary's rdf_objects.BlobReferences and an
RDFDatetime representing the time when the references were written to the
DB.
"""
@abc.abstractmethod
def ReadIDsForAllSignedBinaries(self):
"""Returns ids for all signed binaries in the DB."""
@abc.abstractmethod
def DeleteSignedBinaryReferences(
self,
binary_id: rdf_objects.SignedBinaryID,
) -> None:
"""Deletes blob references for the given signed binary from the DB.
Does nothing if no entry with the given id exists in the DB.
Args:
binary_id: An id of the signed binary to delete.
"""
@abc.abstractmethod
def WriteClientGraphSeries(self, graph_series, client_label, timestamp=None):
"""Writes the provided graphs to the DB with the given client label.
Args:
graph_series: rdf_stats.ClientGraphSeries containing aggregated data for a
particular type of client report.
client_label: Client label by which data in the graph series was
aggregated.
timestamp: RDFDatetime for the graph series. This will be used for
graphing data in the graph series. If not provided, the current
timestamp will be used.
"""
@abc.abstractmethod
def ReadAllClientGraphSeries(self,
client_label,
report_type,
time_range=None):
"""Reads graph series for the given label and report-type from the DB.
Args:
client_label: Client label for which to return data.
report_type: rdf_stats.ClientGraphSeries.ReportType of data to read from
the DB.
time_range: A TimeRange specifying the range of timestamps to read. If not
provided, all timestamps in the DB will be considered.
Returns:
A dict mapping timestamps to graph-series. The timestamps
represent when the graph-series were written to the DB.
"""
@abc.abstractmethod
def ReadMostRecentClientGraphSeries(self, client_label, report_type):
"""Fetches the latest graph series for a client-label from the DB.
Args:
client_label: Client label for which to return data.
report_type: rdf_stats.ClientGraphSeries.ReportType of the graph series to
return.
Returns:
The graph series for the given label and report type that was last
written to the DB, or None if no series for that label and report-type
exist.
"""
@abc.abstractmethod
def WriteYaraSignatureReference(
self,
blob_id: rdf_objects.BlobID,
username: Text,
) -> None:
"""Marks the specified blob id as a YARA signature.
Args:
blob_id: An identifier of a blob that is to be marked as YARA signature.
      username: The name of the GRR user that uploaded the signature.
"""
@abc.abstractmethod
def VerifyYaraSignatureReference(
self,
blob_id: rdf_objects.BlobID,
) -> bool:
"""Verifies whether the specified blob is a YARA signature.
Args:
blob_id: An identifier of a blob to verify.
Returns:
`True` if the blob identifier refers to a YARA signature.
"""
@abc.abstractmethod
def WriteScheduledFlow(
self, scheduled_flow: rdf_flow_objects.ScheduledFlow) -> None:
"""Inserts or updates the ScheduledFlow in the database.
Args:
scheduled_flow: the ScheduledFlow to insert.
Raises:
UnknownClientError: if no client with client_id exists.
UnknownGRRUserError: if creator does not exist as user.
"""
@abc.abstractmethod
def DeleteScheduledFlow(self, client_id: str, creator: str,
scheduled_flow_id: str) -> None:
"""Deletes the ScheduledFlow from the database.
Args:
client_id: The ID of the client of the ScheduledFlow.
creator: The username of the user who created the ScheduledFlow.
scheduled_flow_id: The ID of the ScheduledFlow.
Raises:
UnknownScheduledFlowError: if no such ScheduledFlow exists.
"""
@abc.abstractmethod
def ListScheduledFlows(
self, client_id: str,
creator: str) -> Sequence[rdf_flow_objects.ScheduledFlow]:
"""Lists all ScheduledFlows for the client and creator."""
class DatabaseValidationWrapper(Database):
"""Database wrapper that validates the arguments."""
def __init__(self, delegate: Database):
super().__init__()
self.delegate = delegate
def Now(self) -> rdfvalue.RDFDatetime:
return self.delegate.Now()
def WriteArtifact(self, artifact):
precondition.AssertType(artifact, rdf_artifacts.Artifact)
if not artifact.name:
raise ValueError("Empty artifact name")
_ValidateStringLength("Artifact names", artifact.name,
MAX_ARTIFACT_NAME_LENGTH)
return self.delegate.WriteArtifact(artifact)
def ReadArtifact(self, name):
precondition.AssertType(name, Text)
return self._PatchArtifact(self.delegate.ReadArtifact(name))
def ReadAllArtifacts(self):
return list(map(self._PatchArtifact, self.delegate.ReadAllArtifacts()))
def DeleteArtifact(self, name):
precondition.AssertType(name, Text)
return self.delegate.DeleteArtifact(name)
# TODO: This patching behaviour can be removed once we are sure
# that all artifacts have been properly migrated to use Python 3 serialized
# representation.
def _PatchArtifact(
self, artifact: rdf_artifacts.Artifact) -> rdf_artifacts.Artifact:
"""Patches artifact to not contain byte-string source attributes."""
patched = False
for source in artifact.sources:
attributes = source.attributes.ToDict()
unicode_attributes = compatibility.UnicodeJson(attributes)
if attributes != unicode_attributes:
source.attributes = unicode_attributes
patched = True
if patched:
self.DeleteArtifact(str(artifact.name))
self.WriteArtifact(artifact)
return artifact
def WriteClientMetadata(
self,
client_id,
certificate=None,
fleetspeak_enabled=None,
first_seen=None,
last_ping=None,
last_clock=None,
last_ip=None,
last_foreman=None,
fleetspeak_validation_info: Optional[Dict[str, str]] = None):
precondition.ValidateClientId(client_id)
precondition.AssertOptionalType(certificate, rdf_crypto.RDFX509Cert)
precondition.AssertOptionalType(fleetspeak_enabled, bool)
precondition.AssertOptionalType(first_seen, rdfvalue.RDFDatetime)
precondition.AssertOptionalType(last_ping, rdfvalue.RDFDatetime)
precondition.AssertOptionalType(last_clock, rdfvalue.RDFDatetime)
precondition.AssertOptionalType(last_ip, rdf_client_network.NetworkAddress)
precondition.AssertOptionalType(last_foreman, rdfvalue.RDFDatetime)
if fleetspeak_validation_info is not None:
precondition.AssertDictType(fleetspeak_validation_info, str, str)
return self.delegate.WriteClientMetadata(
client_id,
certificate=certificate,
fleetspeak_enabled=fleetspeak_enabled,
first_seen=first_seen,
last_ping=last_ping,
last_clock=last_clock,
last_ip=last_ip,
last_foreman=last_foreman,
fleetspeak_validation_info=fleetspeak_validation_info)
def DeleteClient(self, client_id):
precondition.ValidateClientId(client_id)
return self.delegate.DeleteClient(client_id)
def MultiReadClientMetadata(self, client_ids):
_ValidateClientIds(client_ids)
return self.delegate.MultiReadClientMetadata(client_ids)
def WriteClientSnapshot(self, snapshot):
precondition.AssertType(snapshot, rdf_objects.ClientSnapshot)
_ValidateStringLength("GRR Version", snapshot.GetGRRVersionString(),
_MAX_GRR_VERSION_LENGTH)
_ValidateStringLength("Platform", snapshot.knowledge_base.os,
_MAX_CLIENT_PLATFORM_LENGTH)
_ValidateStringLength("Platform Release", snapshot.Uname(),
_MAX_CLIENT_PLATFORM_RELEASE_LENGTH)
return self.delegate.WriteClientSnapshot(snapshot)
def MultiReadClientSnapshot(self, client_ids):
_ValidateClientIds(client_ids)
return self.delegate.MultiReadClientSnapshot(client_ids)
def MultiReadClientFullInfo(self, client_ids, min_last_ping=None):
_ValidateClientIds(client_ids)
return self.delegate.MultiReadClientFullInfo(
client_ids, min_last_ping=min_last_ping)
def ReadClientLastPings(self,
min_last_ping=None,
max_last_ping=None,
fleetspeak_enabled=None,
batch_size=CLIENT_IDS_BATCH_SIZE):
precondition.AssertOptionalType(min_last_ping, rdfvalue.RDFDatetime)
precondition.AssertOptionalType(max_last_ping, rdfvalue.RDFDatetime)
precondition.AssertOptionalType(fleetspeak_enabled, bool)
precondition.AssertType(batch_size, int)
if batch_size < 1:
raise ValueError(
"batch_size needs to be a positive integer, got {}".format(
batch_size))
return self.delegate.ReadClientLastPings(
min_last_ping=min_last_ping,
max_last_ping=max_last_ping,
fleetspeak_enabled=fleetspeak_enabled,
batch_size=batch_size)
def WriteClientSnapshotHistory(self, clients):
if not clients:
raise ValueError("Clients are empty")
client_id = None
for client in clients:
precondition.AssertType(client, rdf_objects.ClientSnapshot)
if client.timestamp is None:
raise AttributeError("Client without a `timestamp` attribute")
client_id = client_id or client.client_id
if client.client_id != client_id:
message = "Unexpected client id '%s' instead of '%s'"
raise ValueError(message % (client.client_id, client_id))
return self.delegate.WriteClientSnapshotHistory(clients)
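  # The checks above mean a history batch must be non-empty, every snapshot
  # must carry a timestamp, and all snapshots must belong to one client;
  # otherwise ValueError (or AttributeError for a missing timestamp) is raised
  # before the delegate is called.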
def ReadClientSnapshotHistory(self, client_id, timerange=None):
precondition.ValidateClientId(client_id)
if timerange is not None:
_ValidateTimeRange(timerange)
return self.delegate.ReadClientSnapshotHistory(
client_id, timerange=timerange)
def WriteClientStartupInfo(self, client_id, startup_info):
precondition.AssertType(startup_info, rdf_client.StartupInfo)
precondition.ValidateClientId(client_id)
return self.delegate.WriteClientStartupInfo(client_id, startup_info)
def ReadClientStartupInfo(self,
client_id: str) -> Optional[rdf_client.StartupInfo]:
precondition.ValidateClientId(client_id)
return self.delegate.ReadClientStartupInfo(client_id)
def ReadClientStartupInfoHistory(self, client_id, timerange=None):
precondition.ValidateClientId(client_id)
if timerange is not None:
_ValidateTimeRange(timerange)
return self.delegate.ReadClientStartupInfoHistory(
client_id, timerange=timerange)
def WriteClientCrashInfo(self, client_id, crash_info):
precondition.AssertType(crash_info, rdf_client.ClientCrash)
precondition.ValidateClientId(client_id)
return self.delegate.WriteClientCrashInfo(client_id, crash_info)
def ReadClientCrashInfo(self, client_id):
precondition.ValidateClientId(client_id)
return self.delegate.ReadClientCrashInfo(client_id)
def ReadClientCrashInfoHistory(self, client_id):
precondition.ValidateClientId(client_id)
return self.delegate.ReadClientCrashInfoHistory(client_id)
def AddClientKeywords(self, client_id: Text,
keywords: Iterable[Text]) -> None:
precondition.ValidateClientId(client_id)
precondition.AssertIterableType(keywords, Text)
return self.delegate.AddClientKeywords(client_id, keywords)
def ListClientsForKeywords(
self,
keywords: Iterable[Text],
start_time: Optional[rdfvalue.RDFDatetime] = None
) -> Dict[Text, List[Text]]:
precondition.AssertIterableType(keywords, Text)
keywords = set(keywords)
if start_time:
_ValidateTimestamp(start_time)
result = self.delegate.ListClientsForKeywords(
keywords, start_time=start_time)
precondition.AssertDictType(result, Text, List)
for value in result.values():
precondition.AssertIterableType(value, Text)
return result
def RemoveClientKeyword(self, client_id: Text, keyword: Text) -> None:
precondition.ValidateClientId(client_id)
precondition.AssertType(keyword, Text)
return self.delegate.RemoveClientKeyword(client_id, keyword)
def AddClientLabels(self, client_id: Text, owner: Text,
labels: List[Text]) -> None:
precondition.ValidateClientId(client_id)
_ValidateUsername(owner)
for label in labels:
_ValidateLabel(label)
return self.delegate.AddClientLabels(client_id, owner, labels)
def MultiReadClientLabels(
self,
client_ids: List[Text]) -> Dict[Text, List[rdf_objects.ClientLabel]]:
_ValidateClientIds(client_ids)
result = self.delegate.MultiReadClientLabels(client_ids)
precondition.AssertDictType(result, Text, List)
for value in result.values():
precondition.AssertIterableType(value, rdf_objects.ClientLabel)
return result
def RemoveClientLabels(self, client_id: Text, owner: Text,
labels: List[Text]) -> None:
precondition.ValidateClientId(client_id)
for label in labels:
_ValidateLabel(label)
return self.delegate.RemoveClientLabels(client_id, owner, labels)
def ReadAllClientLabels(self) -> List[rdf_objects.ClientLabel]:
result = self.delegate.ReadAllClientLabels()
precondition.AssertIterableType(result, rdf_objects.ClientLabel)
return result
def WriteClientStats(self, client_id: Text,
stats: rdf_client_stats.ClientStats) -> None:
precondition.ValidateClientId(client_id)
precondition.AssertType(stats, rdf_client_stats.ClientStats)
self.delegate.WriteClientStats(client_id, stats)
def ReadClientStats(
self,
client_id: Text,
min_timestamp: Optional[rdfvalue.RDFDatetime] = None,
max_timestamp: Optional[rdfvalue.RDFDatetime] = None
) -> List[rdf_client_stats.ClientStats]:
precondition.ValidateClientId(client_id)
if min_timestamp is None:
min_timestamp = rdfvalue.RDFDatetime.Now() - CLIENT_STATS_RETENTION
else:
_ValidateTimestamp(min_timestamp)
if max_timestamp is None:
max_timestamp = rdfvalue.RDFDatetime.Now()
else:
_ValidateTimestamp(max_timestamp)
return self.delegate.ReadClientStats(client_id, min_timestamp,
max_timestamp)
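  # Note: when no bounds are given, the read window defaults to
  # [Now() - CLIENT_STATS_RETENTION, Now()], mirroring the retention period
  # used by DeleteOldClientStats below.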
def DeleteOldClientStats(
self,
yield_after_count: int,
retention_time: Optional[rdfvalue.RDFDatetime] = None
) -> Generator[int, None, None]:
if retention_time is None:
retention_time = rdfvalue.RDFDatetime.Now() - CLIENT_STATS_RETENTION
else:
_ValidateTimestamp(retention_time)
precondition.AssertType(yield_after_count, int)
if yield_after_count < 1:
raise ValueError("yield_after_count must be >= 1. Got %r" %
(yield_after_count,))
for deleted_count in self.delegate.DeleteOldClientStats(
yield_after_count, retention_time):
yield deleted_count
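  # Illustrative use of the generator interface (sketch): deletion happens in
  # batches and the caller can report progress between yields.
  #
  #   total = 0
  #   for deleted in db.DeleteOldClientStats(yield_after_count=1000):
  #     total += deleted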
def WriteForemanRule(self, rule):
precondition.AssertType(rule, foreman_rules.ForemanCondition)
if not rule.hunt_id:
raise ValueError("Foreman rule has no hunt_id: %s" % rule)
return self.delegate.WriteForemanRule(rule)
def CountClientVersionStringsByLabel(
self, day_buckets: Set[int]) -> fleet_utils.FleetStats:
_ValidateClientActivityBuckets(day_buckets)
return self.delegate.CountClientVersionStringsByLabel(day_buckets)
def CountClientPlatformsByLabel(
self, day_buckets: Set[int]) -> fleet_utils.FleetStats:
_ValidateClientActivityBuckets(day_buckets)
return self.delegate.CountClientPlatformsByLabel(day_buckets)
def CountClientPlatformReleasesByLabel(
self, day_buckets: Set[int]) -> fleet_utils.FleetStats:
_ValidateClientActivityBuckets(day_buckets)
return self.delegate.CountClientPlatformReleasesByLabel(day_buckets)
def RemoveForemanRule(self, hunt_id):
_ValidateHuntId(hunt_id)
return self.delegate.RemoveForemanRule(hunt_id)
def ReadAllForemanRules(self):
return self.delegate.ReadAllForemanRules()
def RemoveExpiredForemanRules(self):
return self.delegate.RemoveExpiredForemanRules()
def WriteGRRUser(self,
username,
password=None,
ui_mode=None,
canary_mode=None,
user_type=None,
email=None):
_ValidateUsername(username)
if password is not None and not isinstance(password, rdf_crypto.Password):
password_str = password
password = rdf_crypto.Password()
password.SetPassword(password_str)
if email is not None:
_ValidateEmail(email)
return self.delegate.WriteGRRUser(
username,
password=password,
ui_mode=ui_mode,
canary_mode=canary_mode,
user_type=user_type,
email=email)
def ReadGRRUser(self, username):
_ValidateUsername(username)
return self.delegate.ReadGRRUser(username)
def ReadGRRUsers(self, offset=0, count=None):
if offset < 0:
raise ValueError("offset has to be non-negative.")
if count is not None and count < 0:
raise ValueError("count has to be non-negative or None.")
return self.delegate.ReadGRRUsers(offset=offset, count=count)
def CountGRRUsers(self):
return self.delegate.CountGRRUsers()
def DeleteGRRUser(self, username):
_ValidateUsername(username)
return self.delegate.DeleteGRRUser(username)
def WriteApprovalRequest(self, approval_request):
precondition.AssertType(approval_request, rdf_objects.ApprovalRequest)
_ValidateUsername(approval_request.requestor_username)
_ValidateApprovalType(approval_request.approval_type)
if approval_request.grants:
message = "Approval request with grants already set: {}"
raise ValueError(message.format(approval_request))
return self.delegate.WriteApprovalRequest(approval_request)
def ReadApprovalRequest(self, requestor_username, approval_id):
_ValidateUsername(requestor_username)
_ValidateApprovalId(approval_id)
return self.delegate.ReadApprovalRequest(requestor_username, approval_id)
def ReadApprovalRequests(
self,
requestor_username,
approval_type,
subject_id=None,
include_expired=False) -> Sequence[rdf_objects.ApprovalRequest]:
_ValidateUsername(requestor_username)
_ValidateApprovalType(approval_type)
if subject_id is not None:
_ValidateStringId("approval subject id", subject_id)
return self.delegate.ReadApprovalRequests(
requestor_username,
approval_type,
subject_id=subject_id,
include_expired=include_expired)
def GrantApproval(self, requestor_username, approval_id, grantor_username):
_ValidateUsername(requestor_username)
_ValidateApprovalId(approval_id)
_ValidateUsername(grantor_username)
return self.delegate.GrantApproval(requestor_username, approval_id,
grantor_username)
def ReadPathInfo(self, client_id, path_type, components, timestamp=None):
precondition.ValidateClientId(client_id)
_ValidateEnumType(path_type, rdf_objects.PathInfo.PathType)
_ValidatePathComponents(components)
if timestamp is not None:
_ValidateTimestamp(timestamp)
return self.delegate.ReadPathInfo(
client_id, path_type, components, timestamp=timestamp)
def ReadPathInfos(self, client_id, path_type, components_list):
precondition.ValidateClientId(client_id)
_ValidateEnumType(path_type, rdf_objects.PathInfo.PathType)
precondition.AssertType(components_list, list)
for components in components_list:
_ValidatePathComponents(components)
return self.delegate.ReadPathInfos(client_id, path_type, components_list)
def ListChildPathInfos(self,
client_id,
path_type,
components,
timestamp=None):
precondition.ValidateClientId(client_id)
_ValidateEnumType(path_type, rdf_objects.PathInfo.PathType)
_ValidatePathComponents(components)
precondition.AssertOptionalType(timestamp, rdfvalue.RDFDatetime)
return self.delegate.ListChildPathInfos(
client_id, path_type, components, timestamp=timestamp)
def ListDescendantPathInfos(self,
client_id,
path_type,
components,
timestamp=None,
max_depth=None):
precondition.ValidateClientId(client_id)
_ValidateEnumType(path_type, rdf_objects.PathInfo.PathType)
_ValidatePathComponents(components)
precondition.AssertOptionalType(timestamp, rdfvalue.RDFDatetime)
precondition.AssertOptionalType(max_depth, int)
return self.delegate.ListDescendantPathInfos(
client_id,
path_type,
components,
timestamp=timestamp,
max_depth=max_depth)
def FindPathInfoByPathID(self, client_id, path_type, path_id, timestamp=None):
precondition.ValidateClientId(client_id)
if timestamp is not None:
_ValidateTimestamp(timestamp)
return self.delegate.FindPathInfoByPathID(
client_id, path_type, path_id, timestamp=timestamp)
def FindPathInfosByPathIDs(self, client_id, path_type, path_ids):
precondition.ValidateClientId(client_id)
return self.delegate.FindPathInfosByPathIDs(client_id, path_type, path_ids)
def WritePathInfos(self, client_id, path_infos):
precondition.ValidateClientId(client_id)
_ValidatePathInfos(path_infos)
return self.delegate.WritePathInfos(client_id, path_infos)
def MultiWritePathInfos(self, path_infos):
precondition.AssertType(path_infos, dict)
for client_id, client_path_infos in path_infos.items():
precondition.ValidateClientId(client_id)
_ValidatePathInfos(client_path_infos)
return self.delegate.MultiWritePathInfos(path_infos)
def InitPathInfos(self, client_id, path_infos):
precondition.ValidateClientId(client_id)
_ValidatePathInfos(path_infos)
return self.delegate.InitPathInfos(client_id, path_infos)
def MultiInitPathInfos(self, path_infos):
precondition.AssertType(path_infos, dict)
for client_id, client_path_infos in path_infos.items():
precondition.ValidateClientId(client_id)
_ValidatePathInfos(client_path_infos)
return self.delegate.MultiInitPathInfos(path_infos)
def ClearPathHistory(self, client_id, path_infos):
precondition.ValidateClientId(client_id)
_ValidatePathInfos(path_infos)
return self.delegate.ClearPathHistory(client_id, path_infos)
def MultiClearPathHistory(self, path_infos):
precondition.AssertType(path_infos, dict)
for client_id, client_path_infos in path_infos.items():
precondition.ValidateClientId(client_id)
_ValidatePathInfos(client_path_infos)
return self.delegate.MultiClearPathHistory(path_infos)
def MultiWritePathHistory(self, client_path_histories):
precondition.AssertType(client_path_histories, dict)
for client_path, client_path_history in client_path_histories.items():
precondition.AssertType(client_path, ClientPath)
precondition.AssertType(client_path_history, ClientPathHistory)
self.delegate.MultiWritePathHistory(client_path_histories)
def FindDescendentPathIDs(self,
client_id,
path_type,
path_id,
max_depth=None):
precondition.ValidateClientId(client_id)
return self.delegate.FindDescendentPathIDs(
client_id, path_type, path_id, max_depth=max_depth)
def WriteUserNotification(self, notification):
precondition.AssertType(notification, rdf_objects.UserNotification)
_ValidateUsername(notification.username)
_ValidateNotificationType(notification.notification_type)
_ValidateNotificationState(notification.state)
return self.delegate.WriteUserNotification(notification)
def ReadUserNotifications(self, username, state=None, timerange=None):
_ValidateUsername(username)
if timerange is not None:
_ValidateTimeRange(timerange)
if state is not None:
_ValidateNotificationState(state)
return self.delegate.ReadUserNotifications(
username, state=state, timerange=timerange)
def ReadPathInfosHistories(
self,
client_id: Text,
path_type: rdf_objects.PathInfo.PathType,
components_list: Iterable[Sequence[Text]],
cutoff: Optional[rdfvalue.RDFDatetime] = None
) -> Dict[Sequence[Text], Sequence[rdf_objects.PathInfo]]:
precondition.ValidateClientId(client_id)
_ValidateEnumType(path_type, rdf_objects.PathInfo.PathType)
precondition.AssertType(components_list, list)
for components in components_list:
_ValidatePathComponents(components)
precondition.AssertOptionalType(cutoff, rdfvalue.RDFDatetime)
return self.delegate.ReadPathInfosHistories(
client_id=client_id,
path_type=path_type,
components_list=components_list,
cutoff=cutoff)
def ReadLatestPathInfosWithHashBlobReferences(self,
client_paths,
max_timestamp=None):
precondition.AssertIterableType(client_paths, ClientPath)
precondition.AssertOptionalType(max_timestamp, rdfvalue.RDFDatetime)
return self.delegate.ReadLatestPathInfosWithHashBlobReferences(
client_paths, max_timestamp=max_timestamp)
def UpdateUserNotifications(self, username, timestamps, state=None):
_ValidateNotificationState(state)
return self.delegate.UpdateUserNotifications(
username, timestamps, state=state)
def ReadAPIAuditEntries(
self,
username: Optional[Text] = None,
router_method_names: Optional[List[Text]] = None,
min_timestamp: Optional[rdfvalue.RDFDatetime] = None,
max_timestamp: Optional[rdfvalue.RDFDatetime] = None
) -> List[rdf_objects.APIAuditEntry]:
return self.delegate.ReadAPIAuditEntries(
username=username,
router_method_names=router_method_names,
min_timestamp=min_timestamp,
max_timestamp=max_timestamp)
def CountAPIAuditEntriesByUserAndDay(
self,
min_timestamp: Optional[rdfvalue.RDFDatetime] = None,
max_timestamp: Optional[rdfvalue.RDFDatetime] = None
) -> Dict[Tuple[Text, rdfvalue.RDFDatetime], int]:
precondition.AssertOptionalType(min_timestamp, rdfvalue.RDFDatetime)
precondition.AssertOptionalType(max_timestamp, rdfvalue.RDFDatetime)
return self.delegate.CountAPIAuditEntriesByUserAndDay(
min_timestamp=min_timestamp, max_timestamp=max_timestamp)
def WriteAPIAuditEntry(self, entry):
precondition.AssertType(entry, rdf_objects.APIAuditEntry)
return self.delegate.WriteAPIAuditEntry(entry)
def WriteMessageHandlerRequests(self, requests):
precondition.AssertIterableType(requests, rdf_objects.MessageHandlerRequest)
for request in requests:
_ValidateMessageHandlerName(request.handler_name)
return self.delegate.WriteMessageHandlerRequests(requests)
def DeleteMessageHandlerRequests(self, requests):
return self.delegate.DeleteMessageHandlerRequests(requests)
def ReadMessageHandlerRequests(self):
return self.delegate.ReadMessageHandlerRequests()
def RegisterMessageHandler(self, handler, lease_time, limit=1000):
if handler is None:
raise ValueError("handler must be provided")
_ValidateDuration(lease_time)
return self.delegate.RegisterMessageHandler(
handler, lease_time, limit=limit)
def UnregisterMessageHandler(self, timeout=None):
return self.delegate.UnregisterMessageHandler(timeout=timeout)
def WriteCronJob(self, cronjob):
precondition.AssertType(cronjob, rdf_cronjobs.CronJob)
_ValidateCronJobId(cronjob.cron_job_id)
return self.delegate.WriteCronJob(cronjob)
def ReadCronJob(self, cronjob_id):
_ValidateCronJobId(cronjob_id)
return self.delegate.ReadCronJob(cronjob_id)
def ReadCronJobs(self, cronjob_ids=None):
if cronjob_ids is not None:
for cronjob_id in cronjob_ids:
_ValidateCronJobId(cronjob_id)
return self.delegate.ReadCronJobs(cronjob_ids=cronjob_ids)
def EnableCronJob(self, cronjob_id):
_ValidateCronJobId(cronjob_id)
return self.delegate.EnableCronJob(cronjob_id)
def DisableCronJob(self, cronjob_id):
_ValidateCronJobId(cronjob_id)
return self.delegate.DisableCronJob(cronjob_id)
def DeleteCronJob(self, cronjob_id):
_ValidateCronJobId(cronjob_id)
return self.delegate.DeleteCronJob(cronjob_id)
def UpdateCronJob(self,
cronjob_id,
last_run_status=Database.unchanged,
last_run_time=Database.unchanged,
current_run_id=Database.unchanged,
state=Database.unchanged,
forced_run_requested=Database.unchanged):
_ValidateCronJobId(cronjob_id)
if current_run_id is not None and current_run_id != Database.unchanged:
_ValidateCronJobRunId(current_run_id)
return self.delegate.UpdateCronJob(
cronjob_id,
last_run_status=last_run_status,
last_run_time=last_run_time,
current_run_id=current_run_id,
state=state,
forced_run_requested=forced_run_requested)
def LeaseCronJobs(self, cronjob_ids=None, lease_time=None):
if cronjob_ids:
for cronjob_id in cronjob_ids:
_ValidateCronJobId(cronjob_id)
_ValidateDuration(lease_time)
return self.delegate.LeaseCronJobs(
cronjob_ids=cronjob_ids, lease_time=lease_time)
def ReturnLeasedCronJobs(self, jobs):
for job in jobs:
precondition.AssertType(job, rdf_cronjobs.CronJob)
return self.delegate.ReturnLeasedCronJobs(jobs)
def WriteCronJobRun(self, run_object):
precondition.AssertType(run_object, rdf_cronjobs.CronJobRun)
return self.delegate.WriteCronJobRun(run_object)
def ReadCronJobRun(self, job_id, run_id):
_ValidateCronJobId(job_id)
_ValidateCronJobRunId(run_id)
return self.delegate.ReadCronJobRun(job_id, run_id)
def ReadCronJobRuns(self, job_id):
_ValidateCronJobId(job_id)
return self.delegate.ReadCronJobRuns(job_id)
def DeleteOldCronJobRuns(self, cutoff_timestamp):
_ValidateTimestamp(cutoff_timestamp)
return self.delegate.DeleteOldCronJobRuns(cutoff_timestamp)
def WriteHashBlobReferences(self, references_by_hash):
for h, refs in references_by_hash.items():
_ValidateSHA256HashID(h)
precondition.AssertIterableType(refs, rdf_objects.BlobReference)
self.delegate.WriteHashBlobReferences(references_by_hash)
def ReadHashBlobReferences(self, hashes):
precondition.AssertIterableType(hashes, rdf_objects.SHA256HashID)
return self.delegate.ReadHashBlobReferences(hashes)
def WriteClientActionRequests(self, requests):
for request in requests:
precondition.AssertType(request, rdf_flows.ClientActionRequest)
return self.delegate.WriteClientActionRequests(requests)
def LeaseClientActionRequests(self, client_id, lease_time=None, limit=5000):
precondition.ValidateClientId(client_id)
_ValidateDuration(lease_time)
precondition.AssertType(limit, int)
if limit >= 10000:
raise ValueError("Limit of %d is too high.")
return self.delegate.LeaseClientActionRequests(
client_id, lease_time=lease_time, limit=limit)
def ReadAllClientActionRequests(self, client_id):
precondition.ValidateClientId(client_id)
return self.delegate.ReadAllClientActionRequests(client_id)
def DeleteClientActionRequests(self, requests):
for request in requests:
precondition.AssertType(request, rdf_flows.ClientActionRequest)
return self.delegate.DeleteClientActionRequests(requests)
def WriteFlowObject(self, flow_obj, allow_update=True):
precondition.AssertType(flow_obj, rdf_flow_objects.Flow)
precondition.AssertType(allow_update, bool)
return self.delegate.WriteFlowObject(flow_obj, allow_update=allow_update)
def ReadFlowObject(self, client_id, flow_id):
precondition.ValidateClientId(client_id)
precondition.ValidateFlowId(flow_id)
return self.delegate.ReadFlowObject(client_id, flow_id)
def ReadAllFlowObjects(
self,
client_id: Optional[Text] = None,
min_create_time: Optional[rdfvalue.RDFDatetime] = None,
max_create_time: Optional[rdfvalue.RDFDatetime] = None,
include_child_flows: bool = True,
) -> List[rdf_flow_objects.Flow]:
if client_id is not None:
precondition.ValidateClientId(client_id)
precondition.AssertOptionalType(min_create_time, rdfvalue.RDFDatetime)
precondition.AssertOptionalType(max_create_time, rdfvalue.RDFDatetime)
return self.delegate.ReadAllFlowObjects(
client_id=client_id,
min_create_time=min_create_time,
max_create_time=max_create_time,
include_child_flows=include_child_flows)
def ReadChildFlowObjects(self, client_id, flow_id):
precondition.ValidateClientId(client_id)
precondition.ValidateFlowId(flow_id)
return self.delegate.ReadChildFlowObjects(client_id, flow_id)
def LeaseFlowForProcessing(self, client_id, flow_id, processing_time):
precondition.ValidateClientId(client_id)
precondition.ValidateFlowId(flow_id)
_ValidateDuration(processing_time)
return self.delegate.LeaseFlowForProcessing(client_id, flow_id,
processing_time)
def ReleaseProcessedFlow(self, flow_obj):
precondition.AssertType(flow_obj, rdf_flow_objects.Flow)
return self.delegate.ReleaseProcessedFlow(flow_obj)
def UpdateFlow(self,
client_id,
flow_id,
flow_obj=Database.unchanged,
flow_state=Database.unchanged,
client_crash_info=Database.unchanged,
pending_termination=Database.unchanged,
processing_on=Database.unchanged,
processing_since=Database.unchanged,
processing_deadline=Database.unchanged):
precondition.ValidateClientId(client_id)
precondition.ValidateFlowId(flow_id)
if flow_obj != Database.unchanged:
precondition.AssertType(flow_obj, rdf_flow_objects.Flow)
if flow_state != Database.unchanged:
raise ConflictingUpdateFlowArgumentsError(client_id, flow_id,
"flow_state")
if flow_state != Database.unchanged:
_ValidateEnumType(flow_state, rdf_flow_objects.Flow.FlowState)
if client_crash_info != Database.unchanged:
precondition.AssertType(client_crash_info, rdf_client.ClientCrash)
if pending_termination != Database.unchanged:
precondition.AssertType(pending_termination,
rdf_flow_objects.PendingFlowTermination)
if processing_since != Database.unchanged:
if processing_since is not None:
_ValidateTimestamp(processing_since)
if processing_deadline != Database.unchanged:
if processing_deadline is not None:
_ValidateTimestamp(processing_deadline)
return self.delegate.UpdateFlow(
client_id,
flow_id,
flow_obj=flow_obj,
flow_state=flow_state,
client_crash_info=client_crash_info,
pending_termination=pending_termination,
processing_on=processing_on,
processing_since=processing_since,
processing_deadline=processing_deadline)
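  # Note: flow_obj and flow_state are mutually exclusive here; passing both a
  # full flow object and a separate flow_state raises
  # ConflictingUpdateFlowArgumentsError before the delegate is reached.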
def UpdateFlows(self,
client_id_flow_id_pairs,
pending_termination=Database.unchanged):
for client_id, flow_id in client_id_flow_id_pairs:
precondition.ValidateClientId(client_id)
precondition.ValidateFlowId(flow_id)
if pending_termination != Database.unchanged:
precondition.AssertType(pending_termination,
rdf_flow_objects.PendingFlowTermination)
return self.delegate.UpdateFlows(
client_id_flow_id_pairs, pending_termination=pending_termination)
def WriteFlowRequests(self, requests):
precondition.AssertIterableType(requests, rdf_flow_objects.FlowRequest)
return self.delegate.WriteFlowRequests(requests)
def UpdateIncrementalFlowRequests(self, client_id: str, flow_id: str,
next_response_id_updates: Dict[int, int]):
precondition.ValidateClientId(client_id)
precondition.ValidateFlowId(flow_id)
precondition.AssertDictType(next_response_id_updates, int, int)
return self.delegate.UpdateIncrementalFlowRequests(
client_id, flow_id, next_response_id_updates)
def DeleteFlowRequests(self, requests):
precondition.AssertIterableType(requests, rdf_flow_objects.FlowRequest)
return self.delegate.DeleteFlowRequests(requests)
def WriteFlowResponses(
self, responses: Iterable[rdf_flow_objects.FlowMessage]) -> None:
precondition.AssertIterableType(responses, rdf_flow_objects.FlowMessage)
return self.delegate.WriteFlowResponses(responses)
def ReadAllFlowRequestsAndResponses(self, client_id, flow_id):
precondition.ValidateClientId(client_id)
precondition.ValidateFlowId(flow_id)
return self.delegate.ReadAllFlowRequestsAndResponses(client_id, flow_id)
def DeleteAllFlowRequestsAndResponses(self, client_id, flow_id):
precondition.ValidateClientId(client_id)
precondition.ValidateFlowId(flow_id)
return self.delegate.DeleteAllFlowRequestsAndResponses(client_id, flow_id)
def ReadFlowRequestsReadyForProcessing(self,
client_id,
flow_id,
next_needed_request=None):
precondition.ValidateClientId(client_id)
precondition.ValidateFlowId(flow_id)
if next_needed_request is None:
raise ValueError("next_needed_request must be provided.")
return self.delegate.ReadFlowRequestsReadyForProcessing(
client_id, flow_id, next_needed_request=next_needed_request)
def WriteFlowProcessingRequests(self, requests):
precondition.AssertIterableType(requests, rdf_flows.FlowProcessingRequest)
return self.delegate.WriteFlowProcessingRequests(requests)
def ReadFlowProcessingRequests(self):
return self.delegate.ReadFlowProcessingRequests()
def AckFlowProcessingRequests(self, requests):
precondition.AssertIterableType(requests, rdf_flows.FlowProcessingRequest)
return self.delegate.AckFlowProcessingRequests(requests)
def DeleteAllFlowProcessingRequests(self):
return self.delegate.DeleteAllFlowProcessingRequests()
def RegisterFlowProcessingHandler(self, handler):
if handler is None:
raise ValueError("handler must be provided")
return self.delegate.RegisterFlowProcessingHandler(handler)
def UnregisterFlowProcessingHandler(self, timeout=None):
return self.delegate.UnregisterFlowProcessingHandler(timeout=timeout)
def WriteFlowResults(self, results):
for r in results:
precondition.AssertType(r, rdf_flow_objects.FlowResult)
precondition.ValidateClientId(r.client_id)
precondition.ValidateFlowId(r.flow_id)
if r.HasField("hunt_id") and r.hunt_id:
_ValidateHuntId(r.hunt_id)
return self.delegate.WriteFlowResults(results)
def ReadFlowResults(self,
client_id,
flow_id,
offset,
count,
with_tag=None,
with_type=None,
with_substring=None):
precondition.ValidateClientId(client_id)
precondition.ValidateFlowId(flow_id)
precondition.AssertOptionalType(with_tag, Text)
precondition.AssertOptionalType(with_type, Text)
precondition.AssertOptionalType(with_substring, Text)
return self.delegate.ReadFlowResults(
client_id,
flow_id,
offset,
count,
with_tag=with_tag,
with_type=with_type,
with_substring=with_substring)
def CountFlowResults(
self,
client_id,
flow_id,
with_tag=None,
with_type=None,
):
precondition.ValidateClientId(client_id)
precondition.ValidateFlowId(flow_id)
precondition.AssertOptionalType(with_tag, Text)
precondition.AssertOptionalType(with_type, Text)
return self.delegate.CountFlowResults(
client_id, flow_id, with_tag=with_tag, with_type=with_type)
def CountFlowResultsByType(
self,
client_id,
flow_id,
):
precondition.ValidateClientId(client_id)
precondition.ValidateFlowId(flow_id)
return self.delegate.CountFlowResultsByType(client_id, flow_id)
def CountFlowErrorsByType(self, client_id: str,
flow_id: str) -> Dict[str, int]:
precondition.ValidateClientId(client_id)
precondition.ValidateFlowId(flow_id)
return self.delegate.CountFlowErrorsByType(client_id, flow_id)
def WriteFlowErrors(self, errors: Iterable[rdf_flow_objects.FlowError]):
for r in errors:
precondition.AssertType(r, rdf_flow_objects.FlowError)
precondition.ValidateClientId(r.client_id)
precondition.ValidateFlowId(r.flow_id)
if r.HasField("hunt_id") and r.hunt_id:
_ValidateHuntId(r.hunt_id)
return self.delegate.WriteFlowErrors(errors)
def ReadFlowErrors(
self,
client_id: str,
flow_id: str,
offset: int,
count: int,
with_tag: Optional[str] = None,
with_type: Optional[str] = None) -> List[rdf_flow_objects.FlowError]:
precondition.ValidateClientId(client_id)
precondition.ValidateFlowId(flow_id)
precondition.AssertOptionalType(with_tag, Text)
precondition.AssertOptionalType(with_type, Text)
return self.delegate.ReadFlowErrors(
client_id,
flow_id,
offset,
count,
with_tag=with_tag,
with_type=with_type)
def CountFlowErrors(self,
client_id: str,
flow_id: str,
with_tag: Optional[str] = None,
with_type: Optional[str] = None) -> int:
precondition.ValidateClientId(client_id)
precondition.ValidateFlowId(flow_id)
precondition.AssertOptionalType(with_tag, Text)
precondition.AssertOptionalType(with_type, Text)
return self.delegate.CountFlowErrors(
client_id, flow_id, with_tag=with_tag, with_type=with_type)
def WriteFlowLogEntries(self, entries):
for e in entries:
precondition.ValidateClientId(e.client_id)
precondition.ValidateFlowId(e.flow_id)
if e.HasField("hunt_id") and e.hunt_id:
_ValidateHuntId(e.hunt_id)
precondition.AssertIterableType(entries, rdf_flow_objects.FlowLogEntry)
return self.delegate.WriteFlowLogEntries(entries)
def ReadFlowLogEntries(self,
client_id,
flow_id,
offset,
count,
with_substring=None):
precondition.ValidateClientId(client_id)
precondition.ValidateFlowId(flow_id)
precondition.AssertOptionalType(with_substring, Text)
return self.delegate.ReadFlowLogEntries(
client_id, flow_id, offset, count, with_substring=with_substring)
def CountFlowLogEntries(self, client_id, flow_id):
precondition.ValidateClientId(client_id)
precondition.ValidateFlowId(flow_id)
return self.delegate.CountFlowLogEntries(client_id, flow_id)
def WriteFlowOutputPluginLogEntries(self, entries):
for e in entries:
precondition.AssertType(e, rdf_flow_objects.FlowOutputPluginLogEntry)
precondition.ValidateClientId(e.client_id)
precondition.ValidateFlowId(e.flow_id)
if e.hunt_id:
_ValidateHuntId(e.hunt_id)
return self.delegate.WriteFlowOutputPluginLogEntries(entries)
def ReadFlowOutputPluginLogEntries(self,
client_id,
flow_id,
output_plugin_id,
offset,
count,
with_type=None):
precondition.ValidateClientId(client_id)
precondition.ValidateFlowId(flow_id)
_ValidateOutputPluginId(output_plugin_id)
if with_type is not None:
_ValidateEnumType(with_type,
rdf_flow_objects.FlowOutputPluginLogEntry.LogEntryType)
return self.delegate.ReadFlowOutputPluginLogEntries(
client_id,
flow_id,
output_plugin_id,
offset,
count,
with_type=with_type)
def CountFlowOutputPluginLogEntries(self,
client_id,
flow_id,
output_plugin_id,
with_type=None):
precondition.ValidateClientId(client_id)
precondition.ValidateFlowId(flow_id)
_ValidateOutputPluginId(output_plugin_id)
return self.delegate.CountFlowOutputPluginLogEntries(
client_id, flow_id, output_plugin_id, with_type=with_type)
def ReadHuntOutputPluginLogEntries(self,
hunt_id,
output_plugin_id,
offset,
count,
with_type=None):
_ValidateHuntId(hunt_id)
_ValidateOutputPluginId(output_plugin_id)
if with_type is not None:
_ValidateEnumType(with_type,
rdf_flow_objects.FlowOutputPluginLogEntry.LogEntryType)
return self.delegate.ReadHuntOutputPluginLogEntries(
hunt_id, output_plugin_id, offset, count, with_type=with_type)
def CountHuntOutputPluginLogEntries(self,
hunt_id,
output_plugin_id,
with_type=None):
_ValidateHuntId(hunt_id)
_ValidateOutputPluginId(output_plugin_id)
if with_type is not None:
_ValidateEnumType(with_type,
rdf_flow_objects.FlowOutputPluginLogEntry.LogEntryType)
return self.delegate.CountHuntOutputPluginLogEntries(
hunt_id, output_plugin_id, with_type=with_type)
def WriteHuntObject(self, hunt_obj):
precondition.AssertType(hunt_obj, rdf_hunt_objects.Hunt)
if hunt_obj.hunt_state != rdf_hunt_objects.Hunt.HuntState.PAUSED:
raise ValueError("Creation of hunts in non-paused state is not allowed.")
self.delegate.WriteHuntObject(hunt_obj)
def UpdateHuntObject(self,
hunt_id,
duration=None,
client_rate=None,
client_limit=None,
hunt_state=None,
hunt_state_comment=None,
start_time=None,
num_clients_at_start_time=None):
"""Updates the hunt object by applying the update function."""
_ValidateHuntId(hunt_id)
precondition.AssertOptionalType(duration, rdfvalue.Duration)
precondition.AssertOptionalType(client_rate, (float, int))
precondition.AssertOptionalType(client_limit, int)
if hunt_state is not None:
_ValidateEnumType(hunt_state, rdf_hunt_objects.Hunt.HuntState)
precondition.AssertOptionalType(hunt_state_comment, str)
precondition.AssertOptionalType(start_time, rdfvalue.RDFDatetime)
precondition.AssertOptionalType(num_clients_at_start_time, int)
return self.delegate.UpdateHuntObject(
hunt_id,
duration=duration,
client_rate=client_rate,
client_limit=client_limit,
hunt_state=hunt_state,
hunt_state_comment=hunt_state_comment,
start_time=start_time,
num_clients_at_start_time=num_clients_at_start_time)
def ReadHuntOutputPluginsStates(self, hunt_id):
_ValidateHuntId(hunt_id)
return self.delegate.ReadHuntOutputPluginsStates(hunt_id)
def WriteHuntOutputPluginsStates(self, hunt_id, states):
if not states:
return
_ValidateHuntId(hunt_id)
precondition.AssertIterableType(states, rdf_flow_runner.OutputPluginState)
self.delegate.WriteHuntOutputPluginsStates(hunt_id, states)
def UpdateHuntOutputPluginState(self, hunt_id, state_index, update_fn):
_ValidateHuntId(hunt_id)
precondition.AssertType(state_index, int)
return self.delegate.UpdateHuntOutputPluginState(hunt_id, state_index,
update_fn)
def DeleteHuntObject(self, hunt_id):
_ValidateHuntId(hunt_id)
return self.delegate.DeleteHuntObject(hunt_id)
def ReadHuntObject(self, hunt_id):
_ValidateHuntId(hunt_id)
return self.delegate.ReadHuntObject(hunt_id)
def ReadHuntObjects(self,
offset,
count,
with_creator=None,
created_after=None,
with_description_match=None):
precondition.AssertOptionalType(offset, int)
precondition.AssertOptionalType(count, int)
precondition.AssertOptionalType(with_creator, Text)
precondition.AssertOptionalType(created_after, rdfvalue.RDFDatetime)
precondition.AssertOptionalType(with_description_match, Text)
return self.delegate.ReadHuntObjects(
offset,
count,
with_creator=with_creator,
created_after=created_after,
with_description_match=with_description_match)
def ListHuntObjects(self,
offset,
count,
with_creator=None,
created_after=None,
with_description_match=None):
precondition.AssertOptionalType(offset, int)
precondition.AssertOptionalType(count, int)
precondition.AssertOptionalType(with_creator, Text)
precondition.AssertOptionalType(created_after, rdfvalue.RDFDatetime)
precondition.AssertOptionalType(with_description_match, Text)
return self.delegate.ListHuntObjects(
offset,
count,
with_creator=with_creator,
created_after=created_after,
with_description_match=with_description_match)
def ReadHuntLogEntries(self, hunt_id, offset, count, with_substring=None):
_ValidateHuntId(hunt_id)
precondition.AssertOptionalType(with_substring, Text)
return self.delegate.ReadHuntLogEntries(
hunt_id, offset, count, with_substring=with_substring)
def CountHuntLogEntries(self, hunt_id):
_ValidateHuntId(hunt_id)
return self.delegate.CountHuntLogEntries(hunt_id)
def ReadHuntResults(self,
hunt_id,
offset,
count,
with_tag=None,
with_type=None,
with_substring=None,
with_timestamp=None):
_ValidateHuntId(hunt_id)
precondition.AssertOptionalType(with_tag, Text)
precondition.AssertOptionalType(with_type, Text)
precondition.AssertOptionalType(with_substring, Text)
precondition.AssertOptionalType(with_timestamp, rdfvalue.RDFDatetime)
return self.delegate.ReadHuntResults(
hunt_id,
offset,
count,
with_tag=with_tag,
with_type=with_type,
with_substring=with_substring,
with_timestamp=with_timestamp)
def CountHuntResults(self, hunt_id, with_tag=None, with_type=None):
_ValidateHuntId(hunt_id)
precondition.AssertOptionalType(with_tag, Text)
precondition.AssertOptionalType(with_type, Text)
return self.delegate.CountHuntResults(
hunt_id, with_tag=with_tag, with_type=with_type)
def CountHuntResultsByType(self, hunt_id):
_ValidateHuntId(hunt_id)
return self.delegate.CountHuntResultsByType(hunt_id)
def ReadHuntFlows(self,
hunt_id,
offset,
count,
filter_condition=HuntFlowsCondition.UNSET):
_ValidateHuntId(hunt_id)
_ValidateHuntFlowCondition(filter_condition)
return self.delegate.ReadHuntFlows(
hunt_id, offset, count, filter_condition=filter_condition)
def CountHuntFlows(self, hunt_id, filter_condition=HuntFlowsCondition.UNSET):
_ValidateHuntId(hunt_id)
_ValidateHuntFlowCondition(filter_condition)
return self.delegate.CountHuntFlows(
hunt_id, filter_condition=filter_condition)
def ReadHuntCounters(self, hunt_id):
_ValidateHuntId(hunt_id)
return self.delegate.ReadHuntCounters(hunt_id)
def ReadHuntClientResourcesStats(self, hunt_id):
_ValidateHuntId(hunt_id)
return self.delegate.ReadHuntClientResourcesStats(hunt_id)
def ReadHuntFlowsStatesAndTimestamps(self, hunt_id):
_ValidateHuntId(hunt_id)
return self.delegate.ReadHuntFlowsStatesAndTimestamps(hunt_id)
def WriteSignedBinaryReferences(self, binary_id, references):
precondition.AssertType(binary_id, rdf_objects.SignedBinaryID)
precondition.AssertType(references, rdf_objects.BlobReferences)
if not references.items:
raise ValueError("No actual blob references provided.")
self.delegate.WriteSignedBinaryReferences(binary_id, references)
def ReadSignedBinaryReferences(self, binary_id):
precondition.AssertType(binary_id, rdf_objects.SignedBinaryID)
return self.delegate.ReadSignedBinaryReferences(binary_id)
def ReadIDsForAllSignedBinaries(self):
return self.delegate.ReadIDsForAllSignedBinaries()
def DeleteSignedBinaryReferences(
self,
binary_id: rdf_objects.SignedBinaryID,
) -> None:
precondition.AssertType(binary_id, rdf_objects.SignedBinaryID)
return self.delegate.DeleteSignedBinaryReferences(binary_id)
def WriteClientGraphSeries(self, graph_series, client_label, timestamp=None):
precondition.AssertType(graph_series, rdf_stats.ClientGraphSeries)
_ValidateLabel(client_label)
if timestamp is None:
timestamp = rdfvalue.RDFDatetime.Now()
else:
precondition.AssertType(timestamp, rdfvalue.RDFDatetime)
if (graph_series.report_type ==
rdf_stats.ClientGraphSeries.ReportType.UNKNOWN):
raise ValueError("Report-type for graph series must be set.")
self.delegate.WriteClientGraphSeries(
graph_series, client_label, timestamp=timestamp)
def ReadAllClientGraphSeries(self,
client_label,
report_type,
time_range=None):
_ValidateLabel(client_label)
if (report_type == rdf_stats.ClientGraphSeries.ReportType.UNKNOWN or
str(report_type) not in rdf_stats.ClientGraphSeries.ReportType.enum_dict
):
raise ValueError("Invalid report type given: %s" % report_type)
precondition.AssertOptionalType(time_range, time_utils.TimeRange)
return self.delegate.ReadAllClientGraphSeries(
client_label, report_type, time_range=time_range)
def ReadMostRecentClientGraphSeries(self, client_label, report_type):
_ValidateLabel(client_label)
if (report_type == rdf_stats.ClientGraphSeries.ReportType.UNKNOWN or
str(report_type) not in rdf_stats.ClientGraphSeries.ReportType.enum_dict
):
raise ValueError("Invalid report type given: %s" % report_type)
return self.delegate.ReadMostRecentClientGraphSeries(
client_label, report_type)
def WriteYaraSignatureReference(
self,
blob_id: rdf_objects.BlobID,
username: Text,
) -> None:
_ValidateBlobID(blob_id)
_ValidateUsername(username)
return self.delegate.WriteYaraSignatureReference(blob_id, username)
def VerifyYaraSignatureReference(
self,
blob_id: rdf_objects.BlobID,
) -> bool:
_ValidateBlobID(blob_id)
return self.delegate.VerifyYaraSignatureReference(blob_id)
def WriteScheduledFlow(
self, scheduled_flow: rdf_flow_objects.ScheduledFlow) -> None:
_ValidateStringId("scheduled_flow_id", scheduled_flow.scheduled_flow_id)
_ValidateUsername(scheduled_flow.creator)
precondition.ValidateClientId(scheduled_flow.client_id)
return self.delegate.WriteScheduledFlow(scheduled_flow)
def DeleteScheduledFlow(self, client_id: str, creator: str,
scheduled_flow_id: str) -> None:
precondition.ValidateClientId(client_id)
_ValidateUsername(creator)
_ValidateStringId("scheduled_flow_id", scheduled_flow_id)
return self.delegate.DeleteScheduledFlow(client_id, creator,
scheduled_flow_id)
def ListScheduledFlows(
self, client_id: str,
creator: str) -> Sequence[rdf_flow_objects.ScheduledFlow]:
precondition.ValidateClientId(client_id)
_ValidateUsername(creator)
return self.delegate.ListScheduledFlows(client_id, creator)
def _ValidateEnumType(value, expected_enum_type):
if value not in expected_enum_type.reverse_enum:
message = "Expected one of `%s` but got `%s` instead"
raise TypeError(message % (expected_enum_type.reverse_enum, value))
def _ValidateStringId(typename, value):
precondition.AssertType(value, Text)
if not value:
message = "Expected %s `%s` to be non-empty" % (typename, value)
raise ValueError(message)
def _ValidateClientIds(client_ids):
precondition.AssertIterableType(client_ids, Text)
for client_id in client_ids:
precondition.ValidateClientId(client_id)
def _ValidateOutputPluginId(output_plugin_id):
_ValidateStringId("output_plugin_id", output_plugin_id)
def _ValidateHuntId(hunt_id):
_ValidateStringId("hunt_id", hunt_id)
def _ValidateCronJobId(cron_job_id):
_ValidateStringId("cron_job_id", cron_job_id)
_ValidateStringLength("cron_job_id", cron_job_id, MAX_CRON_JOB_ID_LENGTH)
def _ValidateCronJobRunId(cron_job_run_id):
_ValidateStringId("cron_job_run_id", cron_job_run_id)
  # Raises ValueError if cron_job_run_id is not a valid hexadecimal string.
int(cron_job_run_id, 16)
if len(cron_job_run_id) != 8:
raise ValueError("Invalid cron job run id: %s" % cron_job_run_id)
def _ValidateApprovalId(approval_id):
_ValidateStringId("approval_id", approval_id)
def _ValidateApprovalType(approval_type):
if (approval_type ==
rdf_objects.ApprovalRequest.ApprovalType.APPROVAL_TYPE_NONE):
raise ValueError("Unexpected approval type: %s" % approval_type)
def _ValidateStringLength(name, string, max_length):
if len(string) > max_length:
raise StringTooLongError(
"{} can have at most {} characters, got {}.".format(
name, max_length, len(string)))
def _ValidateUsername(username):
_ValidateStringId("username", username)
_ValidateStringLength("Usernames", username, MAX_USERNAME_LENGTH)
def _ValidateLabel(label):
_ValidateStringId("label", label)
_ValidateStringLength("Labels", label, MAX_LABEL_LENGTH)
def _ValidatePathInfo(path_info):
precondition.AssertType(path_info, rdf_objects.PathInfo)
if not path_info.path_type:
raise ValueError("Expected path_type to be set, got: %s" %
path_info.path_type)
def _ValidatePathInfos(path_infos):
"""Validates a sequence of path infos."""
precondition.AssertIterableType(path_infos, rdf_objects.PathInfo)
validated = set()
for path_info in path_infos:
_ValidatePathInfo(path_info)
path_key = (path_info.path_type, path_info.GetPathID())
if path_key in validated:
message = "Conflicting writes for path: '{path}' ({path_type})".format(
path="/".join(path_info.components), path_type=path_info.path_type)
raise ValueError(message)
if path_info.HasField("hash_entry"):
if path_info.hash_entry.sha256 is None:
message = "Path with hash entry without SHA256: {}".format(path_info)
raise ValueError(message)
validated.add(path_key)
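# Illustration of the duplicate check above (hypothetical values, not part of the
# original module): passing two PathInfo objects that share the same path_type and
# the same components (e.g. ("etc", "passwd")) in one batch produces the same
# (path_type, path_id) key twice and raises the ValueError above.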
def _ValidatePathComponents(components):
precondition.AssertIterableType(components, Text)
def _ValidateNotificationType(notification_type):
if notification_type is None:
raise ValueError("notification_type can't be None")
if notification_type == rdf_objects.UserNotification.Type.TYPE_UNSET:
raise ValueError("notification_type can't be TYPE_UNSET")
def _ValidateNotificationState(notification_state):
if notification_state is None:
raise ValueError("notification_state can't be None")
if notification_state == rdf_objects.UserNotification.State.STATE_UNSET:
raise ValueError("notification_state can't be STATE_UNSET")
def _ValidateTimeRange(timerange):
"""Parses a timerange argument and always returns non-None timerange."""
if len(timerange) != 2:
raise ValueError("Timerange should be a sequence with 2 items.")
(start, end) = timerange
precondition.AssertOptionalType(start, rdfvalue.RDFDatetime)
precondition.AssertOptionalType(end, rdfvalue.RDFDatetime)
def _ValidateClosedTimeRange(time_range):
"""Checks that a time-range has both start and end timestamps set."""
time_range_start, time_range_end = time_range
_ValidateTimestamp(time_range_start)
_ValidateTimestamp(time_range_end)
if time_range_start > time_range_end:
raise ValueError("Invalid time-range: %d > %d." %
(time_range_start.AsMicrosecondsSinceEpoch(),
time_range_end.AsMicrosecondsSinceEpoch()))
def _ValidateDuration(duration):
precondition.AssertType(duration, rdfvalue.Duration)
def _ValidateTimestamp(timestamp):
precondition.AssertType(timestamp, rdfvalue.RDFDatetime)
def _ValidateClientPathID(client_path_id):
precondition.AssertType(client_path_id, rdf_objects.ClientPathID)
def _ValidateBlobReference(blob_ref):
precondition.AssertType(blob_ref, rdf_objects.BlobReference)
def _ValidateBlobID(blob_id):
precondition.AssertType(blob_id, rdf_objects.BlobID)
def _ValidateBytes(value):
precondition.AssertType(value, bytes)
def _ValidateSHA256HashID(sha256_hash_id):
precondition.AssertType(sha256_hash_id, rdf_objects.SHA256HashID)
def _ValidateHuntFlowCondition(value):
if value < 0 or value > HuntFlowsCondition.MaxValue():
raise ValueError("Invalid hunt flow condition: %r" % value)
def _ValidateMessageHandlerName(name):
_ValidateStringLength("MessageHandler names", name,
MAX_MESSAGE_HANDLER_NAME_LENGTH)
def _ValidateClientActivityBuckets(buckets):
precondition.AssertType(buckets, (set, frozenset))
precondition.AssertIterableType(buckets, int)
if not buckets:
raise ValueError("At least one bucket must be provided.")
def _ValidateEmail(email):
_ValidateStringLength("email", email, MAX_EMAIL_LENGTH)
if email and not _EMAIL_REGEX.match(email):
raise ValueError("Invalid E-Mail address: {}".format(email))
| apache-2.0 | -7,030,527,289,619,377,000 | 33.855917 | 83 | 0.682983 | false |
rossburton/barnum | convert_data.py | 1 | 3586 | #!/usr/bin/python2.5
"""
This application converts the various text files stored in the source-data
directory into a pickled python object to be used by the random data
generator scripts
Copyright (C) 2007 Chris Moffitt
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
import csv
import string
import cPickle as pickle
import random
import os
data_dir = "source-data"
simple_files_to_process = ['street-names.txt', 'street-types.txt', 'latin-words.txt',
'email-domains.txt', 'job-titles.txt', 'company-names.txt',
'company-types.txt','nounlist.txt']
def load_files():
# Process Zip Codes
all_zips = {}
reader = csv.reader(open(os.path.join(data_dir,"zip-codes.txt"), "rb"))
for row in reader:
data = [string.capwords(row[3]), row[4]]
all_zips[row[0]] = data
output = open('source-data.pkl', 'wb')
pickle.dump(all_zips, output)
#Process area codes
area_code_file = open(os.path.join(data_dir,"area-codes.txt"), "rb")
state_area_codes = {}
for line in area_code_file:
clean_line = line.replace(' ','').rstrip('\n')
state_area_codes[line.split(':')[0]] = clean_line[3:].split(',')
pickle.dump(state_area_codes, output)
area_code_file.close()
#Process Last Names
last_names = []
last_name_file = open(os.path.join(data_dir,"last-name.txt"),"rb")
for line in last_name_file:
clean_line = line.rstrip('\n')
last_names.append(string.capwords(clean_line.split(' ')[0]))
pickle.dump(last_names, output)
last_name_file.close()
#Process Male First Names
male_first_names = []
male_first_name_file = open(os.path.join(data_dir,"male-first-name.txt"),"rb")
for line in male_first_name_file:
clean_line = line.rstrip('\n')
male_first_names.append(string.capwords(clean_line.split(' ')[0]))
pickle.dump(male_first_names, output)
male_first_name_file.close()
#Process Female First Names
female_first_names = []
female_first_name_file = open(os.path.join(data_dir,"female-first-name.txt"),"rb")
for line in female_first_name_file:
clean_line = line.rstrip('\n')
female_first_names.append(string.capwords(clean_line.split(' ')[0]))
pickle.dump(female_first_names, output)
female_first_name_file.close()
#Process the simple files
for f in simple_files_to_process:
temp = []
sample_file = open(os.path.join(data_dir, f), "rb")
for line in sample_file:
clean_line = line.rstrip('\n')
temp.append(clean_line)
pickle.dump(temp, output)
sample_file.close()
temp = []
output.close()
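# Reader-side note (a sketch of an assumption, not part of the original script):
# 'source-data.pkl' holds consecutive pickle records, so a consumer must call
# pickle.load() repeatedly, in the same order the dumps above were written
# (zip codes, area codes, last names, male/female first names, then each simple list).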
if __name__ == "__main__":
response = string.lower(raw_input("Type 'yes' to reload the data from source files and create a new source file: "))
if response == 'yes':
load_files()
| gpl-2.0 | -4,699,246,975,335,165,000 | 34.86 | 120 | 0.648355 | false |
joekari/dxbot | dxbot.py | 1 | 22341 | #!/usr/bin/env python
#TODO--------------TODO#
#----------------------#
#Strip nicks from quotes when saving
#Create function to connect to a page and get source instead of repeating every time
#Integrate with pastebin for dumping quotes
#----------------------#
#TODO--------------TODO#
import socket, random, sys, time, string, urllib2, re, HTMLParser, urllib
from calendar import monthrange
import dicewords
from quotes import *
from reminders import *
from time import strftime
#Connection and login information
HOST = ""
PORT = 6667
NICK = "dx_bot"
USERNAME = "dx_bot"
REALNAME = "dx_bot"
CHAN = ""
PASS = ""
#Global storage variables
read=""
lastNick = ""
lastMessage = ""
names = ""
""" Gets configuration info from a config file """
def getConfig() :
global HOST
global PORT
global NICK
global USERNAME
global REALNAME
global CHAN
global PASS
with open('config.txt') as fo:
for line in fo:
config = line.split(', ')
HOST = config[0]
PORT = int(config[1])
NICK = config[2]
USERNAME = config[3]
REALNAME = config[4]
CHAN = config[5]
PASS = config[6]
fo.close()
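# Expected config.txt layout (hypothetical example, inferred from the parsing above):
# a single line of comma-plus-space separated fields, e.g.
#   irc.example.com, 6667, dx_bot, dx_bot, dx_bot, #general, secretpass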
""" Connects to the server, identifies, then connects to channel """
def connect():
irc.connect((HOST,PORT))
time.sleep(1)
irc.send("PASS :%s\r\n" % PASS)
irc.send("NICK :%s\r\n" % NICK)
irc.send("USER :%s * * :%s\r\n" % (USERNAME, REALNAME))
irc.send("JOIN :%s\r\n" % CHAN)
""" Helper function to determine whether a value is an integer """
def isInt(s):
try:
int(s)
return True
except ValueError:
return False
""" Used for generic printing to console. Adds timestamp. """
def log(output):
time = strftime("%H:%M:%S")
print ''.join([time," - ",output,"\r\n"])
def getUrlContents(url):
req = urllib2.Request(url)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.0')
response = urllib2.urlopen(req)
source = response.read()
response.close()
return source
"""
Dissects a given message into useful individual parts.
Probably doesn't need to be its own class. I think it was larger originally
"""
class IrcMessage:
def __init__(self, msg):
self.prefix, self.command, self.args = self.parsemsg(msg)
def parsemsg(self, message):
trailing = 0
prefix = 0
#This will be hit by non-PING messages the bot receives
if message[0] == ":":
#This split leaves <NICK>!<NICK>@irc.tinyspeck.com and <MESSAGE TYPE> <CHANNEL> :<MESSAGE>
message = message[1:].split(' ', 1)
prefix = message[0]
message = message[1]
#This denotes an actual message portion of a string
if " :" in message:
#This split leaves <MESSAGE TYPE> <CHANNEL> and <MESSAGE>
message = message.split(" :", 1)
trailing = message[1]
message = message[0]
#Splits into a list that looks like [<MESSAGE TYPE>, <CHANNEL>]
args = message.split()
#Pops <MESSAGE TYPE> off
command = args.pop(0)
#If there is actual text, add it to the end of arguments to have format [<CHANNEL>, <MESSAGE>]
if trailing != 0:
args.append(trailing)
return prefix, command, args
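        # Worked example (hypothetical raw line, not taken from the code above):
        #   ":alice!alice@irc.tinyspeck.com PRIVMSG #general :$say hi"
        # parses to prefix  = "alice!alice@irc.tinyspeck.com"
        #           command = "PRIVMSG"
        #           args    = ["#general", "$say hi"]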
""" Splits message into nick, channel, message, command, args """
class Privmsg:
def __init__(self, msg) :
try :
#Prefix looks like this: <NICK>!<NICK>@irc.tinyspeck.com <MESSAGE TYPE> <CHANNEL> <NICK> :<MESSAGE>
#So splitting on the !, gives the nick at the beginning
self.nick = msg.prefix.split("!", 1)[0]
#See IrcMessage to see how we know 0 and 1 are channel and message respectively
self.channel = msg.args[0]
self.message = msg.args[1]
#In python .split(None) will split on one or more whitespace characters
#If we split on the first whitespace and all commands are one word, then the first item should be the command
self.command, self.args = self.message.split(None, 1)
self.true = 1
except :
#This case should happen when the message is only a single string with no whitespace, so all of the message is command
self.command = msg.args[1]
self.args = None
self.true = 0
#If the current message isn't a command and isn't a PM to the bot
if not (self.command).startswith("$") and self.channel == "#general":
#Save to global tracking variables. This is necessary for the $grab function to save the prev message as a quote
global lastNick
global lastMessage
lastNick = self.nick
lastMessage = self.message
#BasicCommands will check
BasicCommands(self.nick, self.channel, self.message, self.command, self.args)
"""
Checks which (if any) commands are called and calls them
Could probably split the functions themselves to their own file to clean this up a little
"""
class BasicCommands:
def __init__ (self, nick, channel, message, command, args) :
#New class, so set everything in self for ease of use
self.nick = nick
self.channel = channel
self.message = message
self.command = command
self.args = args
if self.command == "$do" :
self.do()
elif self.command == "$mtg" :
self.mtg()
elif self.command == "$say" :
self.say()
elif self.command == "$commands" or self.command == "$help" :
self.commands()
elif self.command == "$member" :
self.memberquery()
elif self.command == "$quote" :
self.quotes()
elif self.command == "$dump" :
self.dump()
elif self.command == "$grab" :
self.grab()
elif self.command == "$remindme" :
self.remindme()
elif self.command == "/giphy" :
self.giphy()
elif self.command == "$stuff" :
dicenum = ""
output = ""
            #Build 6 diceware words: each key is five simulated dice rolls (digits 1-6)
for i in range(0,6):
for j in range(0,5):
dicenum = dicenum + str(random.randint(1, 6))
#The dicenum references a string in the dicewords dict
#Append the new string to whatever was there before
output = ' '.join([output, dicewords.dicewords[dicenum]])
dicenum = ""
#Send the gibberish to everyone
irc.send('PRIVMSG %s :%s\r\n' % (self.channel, output))
#If there were no commands, check to see if the message contained a link
elif self.channel == "#general" :
#Jacked this from somewhere on the internet.. it's a pretty effective regex for detecting a url in a string
pattern = re.compile('((http|https|ftp)\://|)([a-zA-Z0-9\.\-]+(\:[a-zA-Z0-9\.&%\$\-]+)*@)?((25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])|([a-zA-Z0-9\-]+\.)*[a-zA-Z0-9\-]+\.[a-zA-Z]{2,4})(\:[0-9]+)?(/[^/][a-zA-Z0-9\.\,\?\'\\/\+&%\$#\=~_\-@]*)*')
urls = re.findall(pattern, self.message, flags=0)
#If there were actually any urls found
if len(urls) != 0 :
for u in urls :
x = u[0]
y = u[3]
                    z = u[4]
title = ""
s = ""
try :
s = "http://" + u[4] + u[11]
title = self.grabTitle(s)
except :
s = x + u[4] + u[11]
self.grabTitle(s)
title = self.grabTitle(s)
self.shortenUrl(title, s)
""" Private messages the user who sent the command a list of other commands and the format to call them """
def commands(self) :
if self.args == None :
irc.send("PRIVMSG %s :$say <text> will make the bot print your text\r\n" % self.nick)
irc.send("PRIVMSG %s :$do <text> will make the bot perform a /me of your text\r\n" % self.nick)
irc.send("PRIVMSG %s :$skincode will message you a LoL skincode\r\n" % self.nick)
irc.send("PRIVMSG %s :$mtg <card> will link you to a MTG card\r\n" % self.nick)
irc.send("PRIVMSG %s :$stuff will print out a random string\r\n" % self.nick)
# irc.send("PRIVMSG %s :$dump will message you all stored quotes\r\n" % self.nick)
irc.send("PRIVMSG %s :$quote will print a random stored quote\r\n" % self.nick)
irc.send("PRIVMSG %s :$quote # will print how many quotes are stored\r\n" % self.nick)
irc.send("PRIVMSG %s :$quote <#> will print the numbered quote\r\n" % self.nick)
irc.send("PRIVMSG %s :$quote <nickname> will print a random stored quote from that user\r\n" % self.nick)
irc.send("PRIVMSG %s :$quote <nickname> <string> will store a new quote\r\n" % self.nick)
irc.send("PRIVMSG %s :$grab will store the last non-command message as a quote\r\n" % self.nick)
else :
log("Extra args")
""" Makes the bot do a /me of whatever is sent """
def do(self) :
if self.args == None :
irc.send("PRIVMSG %s :$do <text> is the correct format\r\n" % self.nick)
else :
irc.send("PRIVMSG %s :\x01ACTION %s\x01\r\n" % (self.channel, self.args))
""" Kind of a helper function to grab the title of the page that is passed in """
def grabTitle(self, url) :
try:
req = urllib2.Request(url)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.0')
response = urllib2.urlopen(req)
source = response.read()
response.close()
match = re.compile('<title>(.+?)</title>').findall(source)
return match
except:
log("grab title except")
return "except"
""" Generates various links for a specfic mtg card """
def mtg(self) :
try:
#Slidshocking Krow is a dumb joke card, so we of course have a special case for it
if self.args.lower() == 'slidshocking krow' :
irc.send("PRIVMSG %s :<Slidshocking Krow> [http://is.gd/WVZvnI] (https://i.imgur.com/AWTQ0bM.jpg)\r\n" % self.channel)
else :
#Gatherer always has the same start, so we really only have to worry about the actual card name being in the right format
card_url = "http://gatherer.wizards.com/Pages/Card/Details.aspx?name=" + urllib.quote_plus(self.args)
source = getUrlContents(card_url)
#This is the element the card name is stored in.. this isn't the best way to do it. But it checks that element, grabs copy and if it isn't a bad match the card must exist
match = re.compile('<span id="ctl00_ctl00_ctl00_MainContent_SubContent_SubContentHeader_subtitleDisplay"(?: style="font-size:.+?")?>(.+?)</span>').findall(source)
if match != []:
#If you called $mtg with no arguments it shouldn't actually get this far, but hey.. better safe than sorry
if self.args == None :
irc.send("PRIVMSG &s :Provide a card name\r\n", self.nick)
else :
try :
#Set title so we can pass it to the shortener later
title = self.args
#Probably don't need temp2 to be unique, but oh well
temp2 = self.args
#This site is the one that has the spoiler image
card_url2 = "http://magiccards.info/query?q=" + urllib.quote_plus(temp2)
source = getUrlContents(card_url2)
#Gets the link to all of the images that are on the page
match2 = re.compile('<img src="(.+?).jpg').findall(source)
if match != [] and self.args != None:
#Save the link to the first image, because it should match our card the best (hopefully)
image_url = match2[0]+".jpg"
self.shortenUrl(title, card_url, image_url)
except :
log("Unable to generate card URLs")
else:
irc.send("PRIVMSG %s :Invalid card name, check your spelling\r\n" % (self.channel))
except:
log("mtg except")
return "except"
""" The bot will say whatever is sent after $say. Also works through private message, because why not? """
def say(self) :
if self.args == None :
irc.send("PRIVMSG %s :$say <text> is the correct format\r\n" % self.nick)
else :
irc.send("PRIVMSG %s :%s\r\n" % (CHAN, self.args))
""" Prints a shortened url and the title. If coming from $mtg, will also include some additional links """
def shortenUrl(self, match, url, image=None) :
try:
giphy = re.compile("http://giphy");
if giphy.match(url) == None :
url = url.replace("https://", '').replace("http://", '').replace("ftp://", '')
hostnme = url.split('/')[0]
rip = socket.gethostbyname(hostnme)
encode = urllib.quote_plus(url)
temp = string.replace(encode, '+', '%2B')
#is.gd is the url shortening site we use since it prints out the shortened url in an easy to grab way
url = "http://is.gd/create.php?url=" + temp
source = getUrlContents(url)
#This is where the shortened url is actually contained in the page
value = re.compile('id="short_url" value="(.+?)"').findall(source)
html_parser = HTMLParser.HTMLParser()
if match == self.args :
for new_url in value :
#If there is an image, that means the function was called from mtg
if image != None :
#This tries to find the first page when searching tcgplayer for the card name and returns the correct card about 95% of the time
try :
url = "www.google.com/search?q=site:http://tcgplayer.com+" + html_parser.unescape(match.replace("·","|").lower().title()).replace(" ", "+")
except :
url = "www.google.com/search?q=site:http://tcgplayer.com+" + match.replace(" ", "+")
src = getUrlContents(url)
#This is where the link to the tcgplayer page for any returned cards are
link = re.compile('<h3 class="r"><a href="\/url\?q=(.+?)&.+"').findall(src)
try :
#Tries to convert the title to a better format
irc.send("PRIVMSG %s :<%s> [%s] (%s) {%s}\r\n" % (self.channel, html_parser.unescape(match.replace("·","|").lower().title()), new_url, image, link[0]))
except :
#If that doesn't work, just use whatever is there
irc.send("PRIVMSG %s :<%s> [%s] (%s) {%s}\r\n" % (self.channel, match, new_url, image, link[0]))
else :
#For everything else just send the title and the shortened url, convert if possible
try :
irc.send("PRIVMSG %s :<%s> [%s]\r\n" % (self.channel, html_parser.unescape(match.replace("·","|").lower().title()), new_url))
except :
irc.send("PRIVMSG %s :<%s> [%s]\r\n" % (self.channel, match, new_url))
else :
#If there isn't a title we don't want a blank bar, so throw a default in
if match == [] :
match = ['Title not found']
#There were a few edge cases where multiple things would be found.. the first title is always right
match = [match[0]]
#This should only happen once, but just in case
for title in match :
for new_url in value :
try :
#Tries to convert the title to a better format
if image != None :
irc.send("PRIVMSG %s :<%s> [%s] (%s) {%s}\r\n" % (self.channel, html_parser.unescape(title.replace("·","|").lower.title()), new_url, image))
else :
irc.send("PRIVMSG %s :<%s> [%s]\r\n" % (self.channel, html_parser.unescape(title.replace("·","|")), new_url))
except :
#Or just ust what is there
if image != None :
irc.send("PRIVMSG %s :<%s> [%s] (%s) {%s}\r\n" % (self.channel, title, new_url, image))
else :
irc.send("PRIVMSG %s :<%s> [%s]\r\n" % (self.channel, title, new_url))
except:
log("shortenURL except")
""" Handles adding specific quotes, or printing specific quotes """
def quotes(self) :
quotesLength = len(quotesList)
#If there are arguments we need to split (mostly relevant for adding quote)
if self.args != None :
split = (self.args).split()
#If there are no args defualt behavior is to grab a random quote
if self.args == None :
#Only relevant when there have not been any quotes saved yet
if quotesLength != 0:
quotesIndex = random.randint(0, (quotesLength - 1))
irc.send("PRIVMSG %s :%s - \"%s\"\r\n" % (self.channel, quotesList[quotesIndex][0].title(), quotesList[quotesIndex][1]))
#$quote # is a request for the number of quotes that exist
elif self.args == "#":
irc.send("PRIVMSG %s :There are %s quotes saved.\r\n" % (self.channel, quotesLength))
#$quote <NUMBER> is a request for a specific quote based on it's number
elif isInt(self.args) :
try:
irc.send("PRIVMSG %s :%s - \"%s\"\r\n" % (self.channel, quotesList[int(self.args) - 1][0].title(), quotesList[int(self.args) - 1][1]))
except:
irc.send("PRIVMSG %s :There aren't that many quotes\r\n" % self.channel)
#If there is an argument that isn't # or a number, then it should be a request for a quote from a specific user
elif len(split) == 1 :
noQuotesFoundForUser = True
for person in userQuotes:
if (person[0]).lower() == (self.args).lower():
#The person list has a sublist that is [<NAME>, [<QUOTE>, <QUOTE>, ...]]
irc.send("PRIVMSG %s :%s - \"%s\"\r\n" % (self.channel, person[0].title(), person[1][random.randint(0, (len(person[1]) - 1))]))
noQuotesFoundForUser = False
if noQuotesFoundForUser:
irc.send("PRIVMSG %s :There are no quotes from that user\r\n" % self.channel)
#If there is more than one argument, then it should be a user adding a quote
elif len(split) > 1 :
global names
isUserValid = False
#Make sure the user exists for real
for name in names:
if split[0].lower() == name.lower():
isUserValid = True
#If it is, the first string will be the name, the rest will be the quote itself
if isUserValid:
addQuote(split[0], split[1:])
irc.send("PRIVMSG %s :Quote added\r\n" % self.channel)
else:
irc.send("PRIVMSG %s :Jordan is a dummy. Also, not a valid user.\r\n" % self.channel)
""" Handles printing multiple quotes """
def dump(self) :
#Get length of the list containing quotes, to make it easier to loop
quotesLength = len(quotesList)
#If there are no arguments, all quotes are dumped
if self.args == None:
################################
# Dump is temporarily disabled while I integrate pastebin
################################
# for i in range(0, quotesLength):
# irc.send("PRIVMSG %s : %s - \"%s\"\r\n" % (self.nick, quotesList[i][0].title(), quotesList[i][1]))
#The only other option is to print for a user, so do that
else:
for i in range(0, quotesLength):
if quotesList[i][0].lower() == self.args.lower():
irc.send("PRIVMSG %s : #%s %s - \"%s\"\r\n" % (self.nick, str(i + 1), quotesList[i][0].title(), quotesList[i][1]))
                    time.sleep(0.5)
""" Saves previous non-command message as a quote """
def grab(self) :
global names
doesUserExist = False
#Loop through the list of names and make sure the previous nick is a valid user
for name in names:
if lastNick.lower() == name.lower():
doesUserExist = True
#If the user was found, add the quote
if doesUserExist:
addQuote(lastNick.lower(), [lastMessage])
irc.send("PRIVMSG %s :Quote added\r\n" % self.channel)
else:
irc.send("PRIVMSG %s :That is not a valid user.\r\n" % self.channel)
def remindme(self) :
if self.args == None :
irc.send("PRIVMSG %s :Invalid format.\r\n" % self.channel)
else :
split = self.args.split(' ')
valid = True
if len(split) >= 3 :
date = split[0]
time = split[1]
message = ' '.join(split[2:]).lstrip()
                if date == 'tomorrow' :
                    # strftime returns strings, so work with ints for the rollover logic
                    month = int(strftime('%m'))
                    day = int(strftime('%d')) + 1
                    year = int(strftime('%Y'))
                    # monthrange() returns (weekday of the 1st, number of days in the month)
                    monthMax = monthrange(year, month)[1]
                    if day > monthMax :
                        day = 1
                        month = month + 1
                        if month > 12 :
                            month = 1
                            year = year + 1
                    date = '%02d/%02d/%02d' % (month, day, year % 100)
elif '/' not in date or re.match('^\d{2}\/\d{2}\/\d{2}$', date, flags=re.IGNORECASE) == None :
valid = False
irc.send("PRIVMSG %s :Date format is incorrect. It should be mm/dd/yy.\r\n" % self.channel)
if ':' not in time or re.match('\d\d:\d\d', time) == None :
valid = False
irc.send("PRIVMSG %s :Time format is incorrect. It should be hh:mm in 24-hour format.\r\n" % self.channel)
if valid :
dateSplit = date.split('/')
timeSplit = time.split(':')
if int(dateSplit[0]) > 12 or int(dateSplit[0]) < 1 or int(dateSplit[1]) > 31 or int(dateSplit[1]) < 1 or int(dateSplit[2]) < 1 :
valid = False
irc.send("PRIVMSG %s :Invalid date.\r\n" % self.channel)
if int(timeSplit[0]) > 24 or int(timeSplit[0]) < 0 or int(timeSplit[1]) > 59 or int(timeSplit[0]) < 0 :
valid = False
irc.send("PRIVMSG %s :Invalid time.\r\n" % self.channel)
if valid :
addReminder(self.nick, date, time, message)
irc.send("PRIVMSG %s :Reminder added.\r\n" % self.channel)
else :
irc.send("PRIVMSG %s :Not enough arguments.\r\n" % self.channel)
isBotTryingToJoin = True
counter = 0
irc = socket.socket()
connect()
#requests a list of the names/nicks from the channel
irc.send("NAMES %s" % (CHAN,))
while True:
try:
read = read + irc.recv(1024)
except socket.timeout, e:
#pretty sure this block doesn't work even a little bit
err = e.args[0]
if err == 'timed out':
            time.sleep(1)
counter = counter + 1
if counter > 100:
connect()
continue
else:
temp = read.split("\r\n")
        #split() leaves any trailing partial message (not yet terminated by \r\n) as
        #the last element; keep it in the buffer so the next recv() can complete it
read = temp.pop()
for msg in temp:
log(msg)
#Split message apart into useful components
msg = IrcMessage(msg)
#PING messages from the server require a PONG response or else the user will be timed out after a while
if msg.command == "PING" :
irc.send("PONG %s\r\n" % (msg.args[0],))
reminders = checkReminders()
if reminders != False :
for reminder in reminders :
irc.send("PRIVMSG %s :@%s %s\r\n" % (CHAN, reminder[1], reminder[4]))
removeReminder(reminder[0])
#This is for the pretty useless reconnect
counter = 0
#PRIVMSG are general messages to the channel and therefore we should check them for commands
elif msg.command == "PRIVMSG" :
Privmsg(msg)
#JOIN triggers when the bot joins the channel..
#Doesn't work for other users because slack doesn't fully disconnect users, just sets as away or something
elif msg.command == "JOIN" :
#Apparently these 2 lines make the 3rd work, but I don't have the slightest idea why
msg.nick = msg.prefix.split("!", 1)[0]
irc.send("PRIVMSG %s :Type $commands to see the bot's commands\r\n" % msg.nick)
#irc.send("PRIVMSG %s :%s\r\n" % ("crypt", "Booting."))
#This joins the channel, but I think is redundant since connect() exists
elif isBotTryingToJoin and msg.command == "396" and msg.args[1] == "user/crypt/bot/dxbot" :
irc.send("JOIN :%s\r\n" % (CHAN,))
isBotTryingToJoin = False
#353 is the response from the NAMES request from earlier.. this saves it to a variable we can use for quote stuff
elif msg.command == "353":
names = msg.args[3].rstrip().split() | mit | -1,552,409,198,266,731,000 | 40.145488 | 464 | 0.63614 | false |
mflu/openvstorage_centos | webapps/api/backend/decorators.py | 1 | 14128 | # Copyright 2014 CloudFounders NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contains various decorators
"""
import math
import re
import inspect
import time
from ovs.dal.lists.userlist import UserList
from rest_framework.response import Response
from toolbox import Toolbox
from rest_framework.exceptions import PermissionDenied, NotAuthenticated, NotAcceptable, Throttled
from rest_framework import status
from django.http import Http404
from django.conf import settings
from ovs.dal.exceptions import ObjectNotFoundException
from backend.serializers.serializers import FullSerializer
from ovs.log.logHandler import LogHandler
from ovs.extensions.storage.volatilefactory import VolatileFactory
from ovs.extensions.generic.volatilemutex import VolatileMutex
from ovs.dal.hybrids.log import Log
logger = LogHandler('api')
regex = re.compile('^(.*; )?version=(?P<version>([0-9]+|\*)?)(;.*)?$')
def required_roles(roles):
"""
Role validation decorator
"""
def wrap(f):
"""
Wrapper function
"""
def new_function(*args, **kw):
"""
Wrapped function
"""
request = args[1]
if not hasattr(request, 'user') or not hasattr(request, 'client'):
raise NotAuthenticated()
user = UserList.get_user_by_username(request.user.username)
if user is None:
raise NotAuthenticated()
if not Toolbox.is_token_in_roles(request.token, roles):
raise PermissionDenied('This call requires roles: %s' % (', '.join(roles)))
return f(*args, **kw)
new_function.__name__ = f.__name__
new_function.__module__ = f.__module__
return new_function
return wrap
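# Hypothetical usage sketch (not part of the original module): the decorator is
# meant to wrap API view methods, e.g.
#
#     @required_roles(['read', 'write'])
#     def create(self, request, *args, **kwargs):
#         ...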
def load(object_type=None, min_version=settings.VERSION[0], max_version=settings.VERSION[-1]):
"""
Parameter discovery decorator
"""
def wrap(f):
"""
Wrapper function
"""
def _try_parse(value):
"""
Tries to parse a value to a pythonic value
"""
if value == 'true':
return True
if value == 'false':
return False
return value
def new_function(self, request, **kwargs):
"""
Wrapped function
"""
new_kwargs = {}
# Find out the arguments of the decorated function
function_info = inspect.getargspec(f)
if function_info.defaults is None:
mandatory_vars = function_info.args[1:]
optional_vars = []
else:
mandatory_vars = function_info.args[1:-len(function_info.defaults)]
optional_vars = function_info.args[len(mandatory_vars) + 1:]
# Check versioning
version = regex.match(request.META['HTTP_ACCEPT']).groupdict()['version']
versions = (max(min_version, settings.VERSION[0]), min(max_version, settings.VERSION[-1]))
if version == '*': # If accepting all versions, it defaults to the highest one
version = versions[1]
version = int(version)
if version < versions[0] or version > versions[1]:
raise NotAcceptable('API version requirements: {0} <= <version> <= {1}. Got {2}'.format(versions[0], versions[1], version))
if 'version' in mandatory_vars:
new_kwargs['version'] = version
mandatory_vars.remove('version')
# Fill request parameter, if available
if 'request' in mandatory_vars:
new_kwargs['request'] = request
mandatory_vars.remove('request')
# Fill main object, if required
if 'pk' in kwargs and object_type is not None:
typename = object_type.__name__.lower()
try:
instance = object_type(kwargs['pk'])
if typename in mandatory_vars:
new_kwargs[typename] = instance
mandatory_vars.remove(typename)
except ObjectNotFoundException:
raise Http404()
# Fill mandatory parameters
for name in mandatory_vars:
if name in kwargs:
new_kwargs[name] = kwargs[name]
else:
if name not in request.DATA:
if name not in request.QUERY_PARAMS:
raise NotAcceptable('Invalid data passed: {0} is missing'.format(name))
new_kwargs[name] = _try_parse(request.QUERY_PARAMS[name])
else:
new_kwargs[name] = _try_parse(request.DATA[name])
# Try to fill optional parameters
for name in optional_vars:
if name in kwargs:
new_kwargs[name] = kwargs[name]
else:
if name in request.DATA:
new_kwargs[name] = _try_parse(request.DATA[name])
elif name in request.QUERY_PARAMS:
new_kwargs[name] = _try_parse(request.QUERY_PARAMS[name])
# Call the function
return f(self, **new_kwargs)
new_function.__name__ = f.__name__
new_function.__module__ = f.__module__
return new_function
return wrap
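# Hypothetical usage sketch (not part of the original module): combined with a DAL
# hybrid type, @load() injects the object referenced by the 'pk' URL parameter and
# any request parameters named in the method signature, e.g.
#
#     @load(VMachine)
#     def retrieve(self, request, vmachine, version):
#         return Response(FullSerializer(VMachine, instance=vmachine).data)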
def return_list(object_type, default_sort=None):
"""
List decorator
"""
def wrap(f):
"""
Wrapper function
"""
def new_function(self, request, *args, **kwargs):
"""
Wrapped function
"""
_ = self
# 1. Pre-loading request data
sort = request.QUERY_PARAMS.get('sort')
if sort is None and default_sort is not None:
sort = default_sort
sort = None if sort is None else [s for s in reversed(sort.split(','))]
page = request.QUERY_PARAMS.get('page')
page = int(page) if page is not None and page.isdigit() else None
contents = request.QUERY_PARAMS.get('contents')
contents = None if contents is None else contents.split(',')
# 2. Construct hints for decorated function (so it can provide full objects if required)
if 'hints' not in kwargs:
kwargs['hints'] = {}
kwargs['hints']['full'] = sort is not None or contents is not None
# 3. Fetch data
data_list = f(self, request=request, *args, **kwargs)
guid_list = isinstance(data_list, list) and len(data_list) > 0 and isinstance(data_list[0], basestring)
# 4. Sorting
if sort is not None:
if guid_list is True:
data_list = [object_type(guid) for guid in data_list]
guid_list = False # The list is converted to objects
for sort_item in sort:
desc = sort_item[0] == '-'
field = sort_item[1 if desc else 0:]
data_list.sort(key=lambda e: Toolbox.extract_key(e, field), reverse=desc)
# 5. Paging
items_pp = 10
total_items = len(data_list)
page_metadata = {'total_items': total_items,
'current_page': 1,
'max_page': 1,
'start_number': min(1, total_items),
'end_number': total_items}
if page is not None:
max_page = int(math.ceil(total_items / (items_pp * 1.0)))
if page > max_page:
page = max_page
if page == 0:
start_number = -1
end_number = 0
else:
start_number = (page - 1) * items_pp # Index - e.g. 0 for page 1, 10 for page 2
end_number = start_number + items_pp # Index - e.g. 10 for page 1, 20 for page 2
data_list = data_list[start_number: end_number]
page_metadata = dict(page_metadata.items() + {'current_page': max(1, page),
'max_page': max(1, max_page),
'start_number': start_number + 1,
'end_number': min(total_items, end_number)}.items())
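            # Worked example (hypothetical numbers): with total_items=25, items_pp=10
            # and page=2, start_number becomes 10 and end_number 20, so items 11-20
            # are returned and the metadata reports start_number=11, end_number=20.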
# 6. Serializing
if contents is not None:
if guid_list is True:
data_list = [object_type(guid) for guid in data_list]
data = FullSerializer(object_type, contents=contents, instance=data_list, many=True).data
else:
if guid_list is False:
data_list = [item.guid for item in data_list]
data = data_list
result = {'data': data,
'_paging': page_metadata,
'_contents': contents,
'_sorting': [s for s in reversed(sort)] if sort else sort}
# 7. Building response
return Response(result, status=status.HTTP_200_OK)
new_function.__name__ = f.__name__
new_function.__module__ = f.__module__
return new_function
return wrap
def return_object(object_type):
"""
Object decorator
"""
def wrap(f):
"""
Wrapper function
"""
def new_function(self, request, *args, **kwargs):
"""
Wrapped function
"""
_ = self
# 1. Pre-loading request data
contents = request.QUERY_PARAMS.get('contents')
contents = None if contents is None else contents.split(',')
# 5. Serializing
obj = f(self, request, *args, **kwargs)
return Response(FullSerializer(object_type, contents=contents, instance=obj).data, status=status.HTTP_200_OK)
new_function.__name__ = f.__name__
new_function.__module__ = f.__module__
return new_function
return wrap
def return_task():
"""
Object decorator
"""
def wrap(f):
"""
Wrapper function
"""
def new_function(self, *args, **kwargs):
"""
Wrapped function
"""
_ = self
task = f(self, *args, **kwargs)
return Response(task.id, status=status.HTTP_200_OK)
new_function.__name__ = f.__name__
new_function.__module__ = f.__module__
return new_function
return wrap
def limit(amount, per, timeout):
"""
Rate-limits the decorated call
"""
def wrap(f):
"""
Wrapper function
"""
def new_function(self, request, *args, **kwargs):
"""
Wrapped function
"""
now = time.time()
key = 'ovs_api_limit_{0}.{1}_{2}'.format(
f.__module__, f.__name__,
request.META['HTTP_X_REAL_IP']
)
client = VolatileFactory.get_client()
mutex = VolatileMutex(key)
try:
mutex.acquire()
rate_info = client.get(key, {'calls': [],
'timeout': None})
active_timeout = rate_info['timeout']
if active_timeout is not None:
if active_timeout > now:
raise Throttled(wait=active_timeout - now)
else:
rate_info['timeout'] = None
rate_info['calls'] = [call for call in rate_info['calls'] if call > (now - per)] + [now]
calls = len(rate_info['calls'])
if calls > amount:
rate_info['timeout'] = now + timeout
client.set(key, rate_info)
raise Throttled(wait=timeout)
client.set(key, rate_info)
finally:
mutex.release()
return f(self, request, *args, **kwargs)
new_function.__name__ = f.__name__
new_function.__module__ = f.__module__
return new_function
return wrap
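# Hypothetical usage sketch (not part of the original module): allow at most 60
# calls per client IP in a sliding 60-second window, then throttle for 5 minutes.
#
#     @limit(amount=60, per=60, timeout=300)
#     def list(self, request, *args, **kwargs):
#         ...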
def log():
"""
Task logger
"""
def wrap(f):
"""
Wrapper function
"""
def new_function(self, request, *args, **kwargs):
"""
Wrapped function
"""
# Log the call
log_entry = Log()
log_entry.source = 'API'
log_entry.module = f.__module__
log_entry.method = f.__name__
log_entry.method_args = list(args)
log_entry.method_kwargs = kwargs
log_entry.time = time.time()
log_entry.user = getattr(request, 'client').user if hasattr(request, 'client') else None
log_entry.metadata = {'meta': dict((str(key), str(value)) for key, value in request.META.iteritems()),
'request': dict((str(key), str(value)) for key, value in request.REQUEST.iteritems()),
'cookies': dict((str(key), str(value)) for key, value in request.COOKIES.iteritems())}
log_entry.save()
# Call the function
return f(self, request, *args, **kwargs)
new_function.__name__ = f.__name__
new_function.__module__ = f.__module__
return new_function
return wrap
| apache-2.0 | -4,991,424,155,239,962,000 | 36.474801 | 139 | 0.517058 | false |
gjbex/vsc-tools-lib | tests/test/pbsnodes_test.py | 1 | 3222 | #!/usr/bin/env python
'''module to test the vsc.pbs.pbsnodes.PbsnodesParser class'''
import io, sys, unittest
from vsc.pbs.pbsnodes import PbsnodesParser
class PbsnodesParserTest(unittest.TestCase):
'''Tests for the pbsnodes output parser'''
def test_parsing_with_messages(self):
file_name = 'tests/test/data/pbsnodes_message.txt'
nr_nodes = 1
warning_start = '### warning: message ERROR'
warning_end = 'cleaned up on node r5i0n6\n'
hostname = 'r5i0n6'
loadave = '0.10'
netload = '250792032533'
state = 'free'
nr_gpus = 0
parser = PbsnodesParser(is_verbose=True)
with open(file_name, 'r') as pbsnodes_file:
os_stderr = sys.stderr
sys.stderr = io.StringIO()
node_infos = parser.parse_file(pbsnodes_file)
warning_msg = sys.stderr.getvalue()
sys.stderr = os_stderr
self.assertEqual(nr_nodes, len(node_infos))
self.assertEqual(warning_start,
warning_msg[:len(warning_start)])
self.assertEqual(warning_end,
warning_msg[-len(warning_end):])
node_info = node_infos[0]
self.assertEqual(hostname, node_info.hostname)
self.assertEqual(loadave, node_info.status['loadave'])
self.assertEqual(netload, node_info.status['netload'])
self.assertEqual(netload, node_info.status['netload'])
self.assertEqual(state, node_info.state)
self.assertEqual(nr_gpus, node_info.gpus)
self.assertEqual(0, len(node_info.gpu_states))
def test_parsing(self):
file_name = 'tests/test/data/pbsnodes.txt'
nr_nodes = 173
np = 20
rack_str = 'r'
parser = PbsnodesParser()
with open(file_name, 'r') as pbsnodes_file:
node_infos = parser.parse_file(pbsnodes_file)
self.assertEqual(nr_nodes, len(node_infos))
for node_info in node_infos:
self.assertTrue(np <= node_info.np)
self.assertTrue(node_info.hostname.startswith(rack_str))
def test_parsing_gpu_node(self):
file_name = 'tests/test/data/pbsnodes_gpu.txt'
nr_nodes = 1
np = 36
hostname = 'r22g35'
memory = 192494548*1024
cpuload = 3.02/36
nr_jobs = 3
nr_gpus = 4
gpu_states = ['Exclusive', 'Exclusive', 'Exclusive', 'Unallocated',]
parser = PbsnodesParser()
with open(file_name, 'r') as pbsnodes_file:
node_infos = parser.parse_file(pbsnodes_file)
self.assertEqual(nr_nodes, len(node_infos))
node_info = node_infos[0]
self.assertEqual(np, node_info.np)
self.assertEqual(node_info.hostname, hostname)
self.assertEqual(node_info.memory, memory)
self.assertEqual(node_info.cpuload, cpuload)
self.assertEqual(len(node_info.jobs), nr_jobs)
self.assertEqual(4, len(node_info.gpu_status))
self.assertEqual('38%', node_info.gpu_status[3]['gpu_utilization'])
self.assertEqual('0%', node_info.gpu_status[0]['gpu_utilization'])
self.assertEqual(nr_gpus, node_info.gpus)
self.assertEqual(gpu_states, node_info.gpu_states)
| lgpl-3.0 | 1,442,642,939,536,923,100 | 40.307692 | 76 | 0.612042 | false |
tensorflow/graphics | tensorflow_graphics/geometry/transformation/look_at.py | 1 | 3703 | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module implements OpenGL lookAt functionalities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_graphics.math import vector
from tensorflow_graphics.util import export_api
from tensorflow_graphics.util import shape
from tensorflow_graphics.util import type_alias
def right_handed(camera_position: type_alias.TensorLike,
                 look_at: type_alias.TensorLike,
                 up_vector: type_alias.TensorLike,
name: str = "right_handed") -> tf.Tensor:
"""Builds a right handed look at view matrix.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
camera_position: A tensor of shape `[A1, ..., An, 3]`, where the last
dimension represents the 3D position of the camera.
look_at: A tensor of shape `[A1, ..., An, 3]`, with the last dimension
storing the position where the camera is looking at.
up_vector: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
defines the up vector of the camera.
name: A name for this op. Defaults to 'right_handed'.
Raises:
    ValueError: if all the inputs are not of the same shape, or if any input
      is of an unsupported shape.
Returns:
A tensor of shape `[A1, ..., An, 4, 4]`, containing right handed look at
matrices.
"""
with tf.name_scope(name):
camera_position = tf.convert_to_tensor(value=camera_position)
look_at = tf.convert_to_tensor(value=look_at)
up_vector = tf.convert_to_tensor(value=up_vector)
shape.check_static(
tensor=camera_position,
tensor_name="camera_position",
has_dim_equals=(-1, 3))
shape.check_static(
tensor=look_at, tensor_name="look_at", has_dim_equals=(-1, 3))
shape.check_static(
tensor=up_vector, tensor_name="up_vector", has_dim_equals=(-1, 3))
shape.compare_batch_dimensions(
tensors=(camera_position, look_at, up_vector),
last_axes=-2,
tensor_names=("camera_position", "look_at", "up_vector"),
broadcast_compatible=False)
z_axis = tf.linalg.l2_normalize(look_at - camera_position, axis=-1)
horizontal_axis = tf.linalg.l2_normalize(
vector.cross(z_axis, up_vector), axis=-1)
vertical_axis = vector.cross(horizontal_axis, z_axis)
batch_shape = tf.shape(input=horizontal_axis)[:-1]
zeros = tf.zeros(
shape=tf.concat((batch_shape, (3,)), axis=-1),
dtype=horizontal_axis.dtype)
one = tf.ones(
shape=tf.concat((batch_shape, (1,)), axis=-1),
dtype=horizontal_axis.dtype)
matrix = tf.concat(
(horizontal_axis, -vector.dot(horizontal_axis, camera_position),
vertical_axis, -vector.dot(vertical_axis, camera_position), -z_axis,
vector.dot(z_axis, camera_position), zeros, one),
axis=-1)
matrix_shape = tf.shape(input=matrix)
output_shape = tf.concat((matrix_shape[:-1], (4, 4)), axis=-1)
return tf.reshape(matrix, shape=output_shape)
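# Illustrative call (hypothetical values, not part of the original module):
#   eye = ((0.0, 0.0, 0.0),)
#   center = ((0.0, 0.0, 1.0),)
#   world_up = ((0.0, 1.0, 0.0),)
#   right_handed(eye, center, world_up)  # -> tensor of shape (1, 4, 4)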
# API contains all public functions and classes.
__all__ = export_api.get_functions_and_classes()
| apache-2.0 | -4,471,860,227,454,976,500 | 37.572917 | 80 | 0.678639 | false |
BrainTech/openbci | obci/acquisition/tests/test_manual_tags_sending.py | 1 | 1547 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# OpenBCI - framework for Brain-Computer Interfaces based on EEG signal
# Project was initiated by Magdalena Michalska and Krzysztof Kulewski
# as part of their MSc theses at the University of Warsaw.
# Copyright (C) 2008-2009 Krzysztof Kulewski and Magdalena Michalska
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author:
# Mateusz Kruszyński <[email protected]>
from tags import tagger
import time
TAGGER = tagger.get_tagger()
import random
def run():
while True:
i = raw_input()
print("LEN i: "+str(len(i)))
if len(i) == 0:
i = "XYZ"
i = i.strip()
t= time.time()
print("SEND TAG name"+i+" with time: "+repr(t))
TAGGER.send_tag(t, t+3.0*random.random()+1.0, i, {'a':1, 'b':2.2, 'c':'napis_jakis', 'annotation':"ANNOTACJA JAKAS"})
if __name__ == "__main__":
print("Type anything to send tag!!!")
run()
| gpl-3.0 | 2,332,366,705,002,808,000 | 34.136364 | 125 | 0.677878 | false |
3dfxsoftware/cbss-addons | mrp_workorder_lot/wizard/__init__.py | 1 | 1407 | #!/usr/bin/python
# -*- encoding: utf-8 -*-
###############################################################################
# Module Writen to OpenERP, Open Source Management Solution
# Copyright (C) OpenERP Venezuela (<http://openerp.com.ve>).
# All Rights Reserved
############# Credits #########################################################
# Coded by: Katherine Zaoral <[email protected]>
# Planified by: Katherine Zaoral <[email protected]>
# Audited by: Humberto Arocha <[email protected]>
###############################################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
import mrp_consume_produce
| gpl-2.0 | -6,196,814,492,123,642,000 | 55.28 | 79 | 0.55295 | false |
muLAn-project/muLAn | muLAn/plottypes/datalign.py | 1 | 19958 | # -*-coding:Utf-8 -*
# ----------------------------------------------------------------------
# Align data without a model
# ----------------------------------------------------------------------
# External libraries
# ----------------------------------------------------------------------
import sys
import os
# Full path of this file
full_path_here = os.path.realpath(__file__)
text = full_path_here.split('/')
a = ''
i = 0
while i < len(text)-1:
a = a + text[i] + '/'
i = i + 1
full_path = a
#filename = full_path + '../' + '.pythonexternallibpath'
#file = open(filename, 'r')
#for line in file:
# path_lib_ext=line
#file.close()
#if path_lib_ext != 'None':
# sys.path.insert(0, path_lib_ext[:-1])
# ----------------------------------------------------------------------
# Packages
# ----------------------------------------------------------------------
import os
import sys
import glob
import numpy as np
import pandas as pd
from scipy import stats
import ConfigParser as cp
import bokeh.layouts as blyt
import statsmodels.api as sm
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
import bokeh.plotting as bplt
from bokeh.models import HoverTool, TapTool, ColumnDataSource, OpenURL
from bokeh.models.widgets import DateFormatter, NumberFormatter, DataTable, TableColumn
import bokeh.io as io
#
# ======================================================================
# To align the data without a model.
# ======================================================================
def help():
text = "Non parametric alignement of the data."
return text
def communicate(cfg, verbose, text):
if cfg.getint('Modelling', 'Verbose') >= verbose:
print text
# ----------------------------------------------------------------------
def plot(cfgsetup=False, models=False, model_param=False, time_serie=False,\
obs_properties=False, options=False, interpol_method=False):
"""
    To align the data without a model.
"""
# ----------------------------------------------------------------------
# Arnaud's method
# ----------------------------------------------------------------------
def align(photometryData, tel_ref, obs_name, cfg):
cond = (photometryData['reject'] == 0) &\
(photometryData['obs'] == tel_ref)
ref_date = photometryData[cond]['dates'].values
ref_mag = photometryData[cond]['magnitude'].values
ref_flux = np.power(10,-ref_mag/2.5)
# Read lc to calibrate
cond = (photometryData['reject'] == 0) &\
(photometryData['obs'] == obs_name)
obs_date = photometryData[cond]['dates'].values
obs_mag = photometryData[cond]['magnitude'].values
obs_flux = np.power(10,-obs_mag/2.5)
# Plot LC
# fig1 = plt.figure(1)
# plt.subplot(211)
# plt.plot(ref_date,ref_flux,'+r')
# plt.subplot(212)
# plt.plot(obs_date,obs_flux,'+g')
# plt.savefig("LC.pdf")
# plt.close(1)
        ## Interpolation between the light curves
datmin = np.max([np.min(ref_date), np.min(obs_date)])
datmax = np.min([np.max(ref_date), np.max(obs_date)])
# ref_ind = np.where((ref_date >= datmin) & (ref_date <= datmax))
obs_ind = np.where((obs_date >= datmin) & (obs_date <= datmax))
# ref_fselec = ref_flux[ref_ind]
obs_fselec = obs_flux[obs_ind]
##
# t_com = np.concatenate([ref_date[ref_ind],obs_date[obs_ind]])
t_com = obs_date[obs_ind]
##
ref_int = interp1d(ref_date,ref_flux)
ref_fint = ref_int(t_com)
# obs_int = interp1d(obs_date,obs_flux)
# obs_fint = obs_int(t_com)
if len(t_com) < 3:
text = " %3d data: WARNING no data plotted" % len(t_com)
communicate(cfg, 1, text)
return [ref_date,ref_flux,obs_date,np.zeros_like(ref_fint)]
else:
text = " %3d data" % len(t_com)
communicate(cfg, 1, text)
## Plot LC interpol
# fig1 = plt.figure(3)
# plt.subplot(211)
# plt.title('Reference interpolee')
# plt.plot(t_com,ref_fint,'+r')
# plt.subplot(212)
# plt.title('Observatoire interpole')
# plt.plot(t_com,obs_fint,'+g')
# plt.savefig("LCinterp.pdf")
# plt.close(3)
        ## Compute the regressions
fig2 = plt.figure(2)
xreg = np.linspace(np.amin(ref_fint),np.amax(ref_fint),2)
##
        ## Linear regression method (optionally with sigma-clipping)
##
        # a1, b1, r_value, p_value, std_err = stats.linregress(ref_fint,obs_fint) # linear regression, single pass
        a1, b1, r_value, p_value, std_err = stats.linregress(ref_fint,obs_fselec) # linear regression, single pass
        ## Plot the result
plt.subplot(211)
plt.title('Least square')
plt.plot(xreg,a1*xreg+b1,'-k')
# plt.plot(ref_fint,obs_fint,'or')
plt.plot(ref_fint,obs_fselec,'or')
##
        ## RLM (robust linear model) method
##
rlmx = sm.add_constant(ref_fint)
# rlmy = obs_fint
rlmy = obs_fselec
rlm_model = sm.RLM(rlmy,rlmx,M=sm.robust.norms.HuberT())
rlm_results = rlm_model.fit()
[b2, a2] = rlm_results.params
        ## Plot the result
plt.subplot(212)
plt.title('RLM')
plt.plot(xreg,a2*xreg+b2,'-k')
# plt.plot(ref_fint,obs_fint,'or')
plt.plot(ref_fint,obs_fselec,'or')
filename = cfgsetup.get('FullPaths', 'Event')\
+ cfgsetup.get('RelativePaths', 'Plots')\
+ obs_name + '_reglin.pdf'
plt.savefig(filename)
plt.close(2)
        ## Plot the aligned light curves for all the data
        obs_fluxali1 = (obs_flux-b1)/a1 # method 1: least squares
        obs_fluxali2 = (obs_flux-b2)/a2 # method 2: RLM
##
# fig1 = plt.figure(4)
# ax1 = plt.subplot(211)
# plt.title('Least square')
# plt.plot(ref_date,ref_flux,'or',obs_date,obs_fluxali1,'og')
# ax2 = plt.subplot(212)
# plt.title('RLM')
# plt.plot(ref_date,ref_flux,'or',obs_date,obs_fluxali2,'og')
# plt.savefig("LCalign.pdf")
# plt.close(4)
cond = (photometryData['reject'] == 0) &\
(photometryData['obs'] == obs_name)
photometryData.loc[cond, 'mag_align'] = -2.5*np.log10(obs_fluxali2)
cond2 = pd.isnull(photometryData['mag_align'])
if np.any(np.array(cond2)):
photometryData.loc[cond, 'mag_align'] = int(0)
cond = (photometryData['reject'] == 0) &\
(photometryData['obs'] == tel_ref)
photometryData.loc[cond, 'mag_align'] = -2.5*np.log10(ref_flux)
return 0
# ----------------------------------------------------------------------
# Loading data full information.
# ----------------------------------------------------------------------
# Call function to align data - WRAP
# ----------------------------------------------------------------------
tel_list = np.array(obs_properties['key'])
tel_col = np.array(['#' + a for a in obs_properties['colour']])
tel_ref = np.where(tel_list == cfgsetup.get('Observatories', 'Reference'))[0][0]
text = " Telescope de reference : " + obs_properties['name'][tel_ref]
communicate(cfgsetup, 1, text)
obs_list = np.delete(tel_list, tel_ref)
tel_col = np.delete(tel_col, tel_ref)
photometryData = pd.DataFrame({})
for key in time_serie:
photometryData[key]=time_serie[key]
photometryData['mag_align'] = 0
photometryData['reject'] = 0
photometryData['color'] = '#000000'
photometryData['obs_leg'] = '?'
photometryData['alpha'] = 0.7
    # Interactive RLM plot
for i in xrange(len(obs_list)):
obs_name = obs_list[i]
text = " %s" % obs_name
communicate(cfgsetup, 1, text)
align(photometryData, tel_list[tel_ref], obs_name, cfgsetup)
# ----------------------------------------------------------------------
# Create an html webpage
# ----------------------------------------------------------------------
filename = cfgsetup.get('FullPaths', 'Event')\
+ cfgsetup.get('RelativePaths', 'Plots')\
+ cfgsetup.get('Controls', 'Archive') + '-datalign.html'
bplt.output_file(filename)
fig = np.array([])
# ..................................................................
# Plot 0
# ..................................................................
# cond = photometryData['obs'] == 'moa-i'
# photometryData = photometryData.loc[cond]
observatories = np.unique(photometryData['obs'])
for i in xrange(len(observatories)):
cond = photometryData['obs'] == observatories[i]
photometryData.loc[cond, 'color'] = '#' +\
obs_properties['colour'][np.where(np.array(obs_properties['key']) == observatories[i])[0][0]]
photometryData.loc[cond, 'obs_leg'] = \
obs_properties['name'][np.where(np.array(obs_properties['key']) == observatories[i])[0][0]]
cond = photometryData['reject'] == 0
photometryData = photometryData[cond]
source = ColumnDataSource(photometryData)
hover0 = HoverTool(
tooltips=[
("ID", "@id{int}"),
("Obs", "@obs"),
("Date", "@dates{1.11}"),
("Mag", "@mag_align{1.111}"),
("Err", "@err_magn{1.111}"),
("Seeing", "@seeing{1.11}"),
("Bkg", "@background{1.11}"),
]
)
# Plot the data
tools = ["save", "pan", "box_zoom", "wheel_zoom", "reset", "tap", hover0]
cond = (photometryData['reject'] == 0)
# xmin = np.min(photometryData[cond]['dates'].values)
# xmax = np.max(photometryData[cond]['dates'].values)
xmin = float(options.split('-')[0].strip())
xmax = float(options.split('-')[1].strip())
ymin = np.min(photometryData[cond].mag_align.values)
ymax = np.max(photometryData[cond].mag_align.values)
fig = np.append(fig,\
bplt.figure(toolbar_location="above", plot_width=1200, plot_height=600, x_range=(xmin, xmax), y_range=(ymax, ymin),\
title=None, min_border=10, min_border_left=50, tools=tools))
fig[0].circle('dates', 'mag_align', size=8, color='color', alpha='alpha', source=source)
# Plot the errorbars
for i in xrange(len(observatories)):
cond = photometryData['obs'] == observatories[i]
fig[0].circle(0, 0, size=8, color=photometryData[cond]['color'].values[0], alpha=0.8, legend=photometryData[cond]['obs_leg'].values[0])
x = photometryData[cond]['dates']
y = photometryData[cond]['mag_align']
color = "#" + obs_properties['colour'][np.where(np.array(obs_properties['key']) == observatories[i])[0][0]]
err_xs = []
err_ys = []
err_xs2 = []
err_ys2 = []
err_alpha = []
for x, y, yerr, alpha_c in zip(x, y, photometryData[cond]['err_magn'], photometryData[cond]['alpha']):
err_xs.append((x, x))
err_xs2.append((x-5, x+5))
err_ys.append((y - yerr, y + yerr))
err_ys2.append((y - yerr, y - yerr))
err_alpha.append(alpha_c)
fig[0].multi_line(err_xs, err_ys, color=color, alpha=err_alpha)
# Layout
# ^^^^^^
fig[0].xaxis.axis_label = 'HJD - 2,450,000'
fig[0].yaxis.axis_label = 'Magnitude'
fig[0].xaxis.axis_label_text_font = 'helvetica'
fig[0].yaxis.axis_label_text_font = 'helvetica'
fig[0].xaxis.axis_label_text_font_size = '10pt'
fig[0].yaxis.axis_label_text_font_size = '10pt'
fig[0].min_border_top = 10
fig[0].min_border_bottom = 0
fig[0].min_border_left = 0
fig[0].xgrid.grid_line_color = None
fig[0].ygrid.grid_line_color = None
# ..................................................................
# Plot 1
# ..................................................................
# Plot the residus
hover1 = HoverTool(
tooltips=[
("ID", "@ID{int}"),
("Obs", "@obs"),
("Date", "@dates{1.11}"),
("Mag", "@mag_align{1.111}"),
("Err", "@err_magn{1.111}"),
("Seeing", "@seeing{1.11}"),
("Bkg", "@background{int}"),
]
)
tools = ["save", "pan", "box_zoom", "wheel_zoom", "reset", "tap", hover1]
fig = np.append(fig,\
bplt.figure(toolbar_location="above", plot_width=fig[0].plot_width, plot_height=400, x_range=fig[0].x_range,
title=None, min_border=10, min_border_left=50, tools=tools))
fig[1].circle('dates', 'seeing', size=8, color='color', alpha='alpha', source=source)
# Layout
# ^^^^^^
fig[1].xaxis.axis_label = 'HJD - 2,450,000'
fig[1].yaxis.axis_label = 'Seeing'
fig[1].xaxis.axis_label_text_font = 'helvetica'
fig[1].yaxis.axis_label_text_font = 'helvetica'
fig[1].xaxis.axis_label_text_font_size = '10pt'
fig[1].yaxis.axis_label_text_font_size = '10pt'
fig[1].min_border_top = 10
fig[1].min_border_bottom = 0
fig[1].min_border_left = 0
fig[1].xgrid.grid_line_color = None
fig[1].ygrid.grid_line_color = None
# ..................................................................
# Plot 2
# ..................................................................
# Plot the residus
hover2 = HoverTool(
tooltips=[
("ID", "@ID{int}"),
("Obs", "@obs"),
("Date", "@dates{1.11}"),
("Mag", "@mag_align{1.111}"),
("Err", "@err_magn{1.111}"),
("Seeing", "@seeing{1.11}"),
("Bkg", "@background{int}"),
]
)
tools = ["save", "pan", "box_zoom", "wheel_zoom", "reset", "tap", hover2]
fig = np.append(fig,\
bplt.figure(toolbar_location="above", plot_width=fig[0].plot_width, plot_height=400, x_range=fig[0].x_range,
title=None, min_border=10, min_border_left=50, tools=tools))
fig_curr = fig[2]
fig_curr.circle('dates', 'background', size=8, color='color', alpha='alpha', source=source)
# Layout
# ^^^^^^
fig_curr.xaxis.axis_label = 'HJD - 2,450,000'
fig_curr.yaxis.axis_label = 'Background'
fig_curr.xaxis.axis_label_text_font = 'helvetica'
fig_curr.yaxis.axis_label_text_font = 'helvetica'
fig_curr.xaxis.axis_label_text_font_size = '10pt'
fig_curr.yaxis.axis_label_text_font_size = '10pt'
fig_curr.min_border_top = 10
fig_curr.min_border_bottom = 0
fig_curr.min_border_left = 0
fig_curr.xgrid.grid_line_color = None
fig_curr.ygrid.grid_line_color = None
# ..................................................................
# Table
# ..................................................................
columns = [
TableColumn(field="id", title="Data ID", width=50),
TableColumn(field="obs", title="Observatory", width=50),
TableColumn(field="dates", title="Date", width=100),
TableColumn(field="mag_align", title="Magnitude (Output code)", width=100),
TableColumn(field="err_magn", title="Err_Magnitude", width=100),
TableColumn(field="seeing", title="Seeing", width=100),
TableColumn(field="background", title="Background", width=100),
]
data_table = DataTable(source=source, columns=columns, width=1200, height=280)
# Save
# ^^^^
final = blyt.column(fig[0], fig[1], fig[2], blyt.WidgetBox(data_table))
bplt.save(final)
# ------------------------------------------------------------------
# Modify the html page
# ------------------------------------------------------------------
filename = cfgsetup.get('FullPaths', 'Event')\
+ cfgsetup.get('RelativePaths', 'Plots')\
+ cfgsetup.get('Controls', 'Archive') + '-datalign.html'
title = cfgsetup.get('EventDescription',
'Name') + ' - Datalign'
file = open(filename, 'r')
file_new = ''
for line in file:
# print line.strip()[:7]
if line.strip()[:7] == '<title>':
file_new = file_new \
+ ' <style type="text/css">\n' \
+ ' p.p1 {margin: 0.0px 0.0px 0.0px 0.0px; line-height: 43.0px; font: 36.0px "Lucida Grande"; color: #000000; -webkit-text-stroke: #000000}\n' \
+ ' p.p2 {margin: 0.0px 0.0px 0.0px 0.0px; line-height: 21.0px; font: 18.0px "Lucida Grande"; color: #000000; -webkit-text-stroke: #000000}\n' \
+ ' p.p3 {margin: 0.0px 0.0px 0.0px 0.0px; line-height: 15.0px; font: 12.0px "Lucida Grande"; color: #000000; -webkit-text-stroke: #000000}\n' \
+ ' p.p4 {margin: 0.0px 0.0px 0.0px 0.0px; line-height: 17.0px; font: 14.0px "Lucida Grande"; color: #000000; -webkit-text-stroke: #000000; min-height: 17.0px}\n' \
+ ' p.p5 {margin: 0.0px 0.0px 0.0px 0.0px; line-height: 14.0px; font: 12.0px Times; color: #000000; -webkit-text-stroke: #000000; min-height: 14.0px}\n' \
+ ' p.p6 {margin: 0.0px 0.0px 12.0px 0.0px; line-height: 14.0px; font: 12.0px Times; color: #000000; -webkit-text-stroke: #000000; min-height: 14.0px}\n' \
+ ' p.p7 {margin: 0.0px 0.0px 0.0px 0.0px; line-height: 17.0px; font: 14.0px "Lucida Grande"; color: #000000; -webkit-text-stroke: #000000}\n' \
+ ' span.s1 {font-kerning: none}\n' \
+ ' span.s10 {font: 14.0px "Lucida Grande"; color: #585858}\n' \
+ ' hr {\n' \
+ ' display: block;\n' \
+ ' margin-top: 0.5em;\n' \
+ ' margin-bottom: 0.5em;\n' \
+ ' margin-left: auto;\n' \
+ ' margin-right: auto;\n' \
+ ' border-style: inset;\n' \
+ ' border-width: 1px;\n' \
+ ' }\n' \
+ ' </style>\n' \
+ ' <title>' + 'muLAn ' + cfgsetup.get('EventDescription', 'Name')[4:] + '- Datalign</title>\n' \
+ ' <meta name="Author" content="Clement Ranc">\n'
elif line.strip()[:6] == '<body>':
file_new = file_new \
+ ' <body>\n\n' \
+ '<p class="p1"><span class="s1"><b>' + title + '</b></span></p>\n' \
+ '<p class="p2"><span class="s1"><br>\n' \
+ '</span></p>\n'
elif line.strip()[:7] == '</body>':
file_new = file_new \
+ ' <BR>\n' \
+ ' <hr>\n' \
+ ' <BR>\n' \
+ ' <footer>\n' \
+ ' <p class="p7"><span class="s10">Modelling and page by muLAn (MicroLensing Analysis software).</span></p>\n' \
+ ' <BR>\n' \
+ ' <BR>\n' \
+ ' <BR>\n' \
+ ' <BR>\n' \
+ ' <BR>\n' \
+ ' </footer>\n' \
+ ' </body>\n'
else:
file_new = file_new + line
file.close()
file = open(filename, 'w')
file.write(file_new)
file.close()
#
# ======================================================================
# Main
# ======================================================================
if (__name__ == "__main__"):
sys.exit("Option not supported yet.")
| mit | 2,147,246,113,030,534,700 | 38.916 | 194 | 0.477904 | false |
AbhiAgarwal/django-report-utils | report_utils/mixins.py | 1 | 24116 | from six import BytesIO, StringIO, text_type, string_types
from django.http import HttpResponse
from django.contrib.contenttypes.models import ContentType
from django.db.models.fields.related import ReverseManyRelatedObjectsDescriptor
from django.db.models import Avg, Count, Sum, Max, Min
from openpyxl.workbook import Workbook
from openpyxl.writer.excel import save_virtual_workbook
from openpyxl.cell import get_column_letter
from openpyxl.styles import Font
import csv
import re
from collections import namedtuple
from decimal import Decimal
from numbers import Number
from functools import reduce
import datetime
from report_utils.model_introspection import (
get_relation_fields_from_model,
get_properties_from_model,
get_direct_fields_from_model,
get_model_from_path_string,
get_custom_fields_from_model,
)
DisplayField = namedtuple(
"DisplayField",
"path path_verbose field field_verbose aggregate total group choices field_type",
)
def generate_filename(title, ends_with):
title = title.split('.')[0]
    title = title.replace(' ', '_')
title += ('_' + datetime.datetime.now().strftime("%m%d_%H%M"))
if not title.endswith(ends_with):
title += ends_with
return title
class DataExportMixin(object):
def build_sheet(self, data, ws, sheet_name='report', header=None, widths=None):
first_row = 1
column_base = 1
ws.title = re.sub(r'\W+', '', sheet_name)[:30]
if header:
for i, header_cell in enumerate(header):
cell = ws.cell(row=first_row, column=i+column_base)
cell.value = header_cell
cell.font = Font(bold=True)
if widths:
ws.column_dimensions[get_column_letter(i+1)].width = widths[i]
for row in data:
for i in range(len(row)):
item = row[i]
# If item is a regular string
if isinstance(item, str):
# Change it to a unicode string
try:
row[i] = text_type(item)
except UnicodeDecodeError:
row[i] = text_type(item.decode('utf-8', 'ignore'))
elif type(item) is dict:
row[i] = text_type(item)
try:
ws.append(row)
except ValueError as e:
ws.append([e.message])
except:
ws.append(['Unknown Error'])
def build_xlsx_response(self, wb, title="report"):
""" Take a workbook and return a xlsx file response """
title = generate_filename(title, '.xlsx')
myfile = BytesIO()
myfile.write(save_virtual_workbook(wb))
response = HttpResponse(
myfile.getvalue(),
content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
response['Content-Disposition'] = 'attachment; filename=%s' % title
response['Content-Length'] = myfile.tell()
return response
def build_csv_response(self, wb, title="report"):
""" Take a workbook and return a csv file response """
title = generate_filename(title, '.csv')
myfile = StringIO()
sh = wb.get_active_sheet()
c = csv.writer(myfile)
for r in sh.rows:
c.writerow([cell.value for cell in r])
response = HttpResponse(
myfile.getvalue(),
content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename=%s' % title
response['Content-Length'] = myfile.tell()
return response
def list_to_workbook(self, data, title='report', header=None, widths=None):
""" Create just a openpxl workbook from a list of data """
wb = Workbook()
title = re.sub(r'\W+', '', title)[:30]
if isinstance(data, dict):
i = 0
for sheet_name, sheet_data in data.items():
if i > 0:
wb.create_sheet()
ws = wb.worksheets[i]
self.build_sheet(
sheet_data, ws, sheet_name=sheet_name, header=header)
i += 1
else:
ws = wb.worksheets[0]
self.build_sheet(data, ws, header=header, widths=widths)
return wb
def list_to_xlsx_file(self, data, title='report', header=None, widths=None):
""" Make 2D list into a xlsx response for download
data can be a 2d array or a dict of 2d arrays
like {'sheet_1': [['A1', 'B1']]}
returns a StringIO file
"""
wb = self.list_to_workbook(data, title, header, widths)
if not title.endswith('.xlsx'):
title += '.xlsx'
myfile = BytesIO()
myfile.write(save_virtual_workbook(wb))
return myfile
def list_to_csv_file(self, data, title='report', header=None, widths=None):
""" Make a list into a csv response for download.
"""
wb = self.list_to_workbook(data, title, header, widths)
if not title.endswith('.csv'):
title += '.csv'
myfile = StringIO()
sh = wb.get_active_sheet()
c = csv.writer(myfile)
for r in sh.rows:
c.writerow([cell.value for cell in r])
return myfile
def list_to_xlsx_response(self, data, title='report', header=None,
widths=None):
""" Make 2D list into a xlsx response for download
data can be a 2d array or a dict of 2d arrays
like {'sheet_1': [['A1', 'B1']]}
"""
wb = self.list_to_workbook(data, title, header, widths)
return self.build_xlsx_response(wb, title=title)
def list_to_csv_response(self, data, title='report', header=None,
widths=None):
""" Make 2D list into a csv response for download data.
"""
wb = self.list_to_workbook(data, title, header, widths)
return self.build_csv_response(wb, title=title)
def add_aggregates(self, queryset, display_fields):
agg_funcs = {
'Avg': Avg, 'Min': Min, 'Max': Max, 'Count': Count, 'Sum': Sum
}
for display_field in display_fields:
if display_field.aggregate:
func = agg_funcs[display_field.aggregate]
full_name = display_field.path + display_field.field
queryset = queryset.annotate(func(full_name))
return queryset
def report_to_list(self, queryset, display_fields, user, property_filters=[], preview=False):
""" Create list from a report with all data filtering.
queryset: initial queryset to generate results
display_fields: list of field references or DisplayField models
user: requesting user
property_filters: ???
preview: return only first 50 rows
Returns list, message in case of issues.
"""
model_class = queryset.model
def can_change_or_view(model):
""" Return True iff `user` has either change or view permission
for `model`.
"""
try:
model_name = model._meta.model_name
except AttributeError:
# Needed for Django 1.4.* (LTS).
model_name = model._meta.module_name
app_label = model._meta.app_label
can_change = user.has_perm(app_label + '.change_' + model_name)
can_view = user.has_perm(app_label + '.view_' + model_name)
return can_change or can_view
if not can_change_or_view(model_class):
return [], 'Permission Denied'
if isinstance(display_fields, list):
# Convert list of strings to DisplayField objects.
new_display_fields = []
for display_field in display_fields:
field_list = display_field.split('__')
field = field_list[-1]
path = '__'.join(field_list[:-1])
if path:
path += '__' # Legacy format to append a __ here.
new_model = get_model_from_path_string(model_class, path)
model_field = new_model._meta.get_field_by_name(field)[0]
choices = model_field.choices
new_display_fields.append(DisplayField(
path, '', field, '', '', None, None, choices, ''
))
display_fields = new_display_fields
# Build group-by field list.
group = [df.path + df.field for df in display_fields if df.group]
# To support group-by with multiple fields, we turn all the other
# fields into aggregations. The default aggregation is `Max`.
if group:
for field in display_fields:
if (not field.group) and (not field.aggregate):
field.aggregate = 'Max'
message = ""
objects = self.add_aggregates(queryset, display_fields)
# Display Values
display_field_paths = []
property_list = {}
custom_list = {}
display_totals = {}
for i, display_field in enumerate(display_fields):
model = get_model_from_path_string(model_class, display_field.path)
if display_field.field_type == "Invalid":
continue
if not model or can_change_or_view(model):
display_field_key = display_field.path + display_field.field
if display_field.field_type == "Property":
property_list[i] = display_field_key
elif display_field.field_type == "Custom Field":
custom_list[i] = display_field_key
elif display_field.aggregate == "Avg":
display_field_key += '__avg'
elif display_field.aggregate == "Max":
display_field_key += '__max'
elif display_field.aggregate == "Min":
display_field_key += '__min'
elif display_field.aggregate == "Count":
display_field_key += '__count'
elif display_field.aggregate == "Sum":
display_field_key += '__sum'
if display_field.field_type not in ('Property', 'Custom Field'):
display_field_paths.append(display_field_key)
if display_field.total:
display_totals[display_field_key] = Decimal(0)
else:
message += 'Error: Permission denied on access to {0}.'.format(
display_field.name
)
def increment_total(display_field_key, val):
""" Increment display total by `val` if given `display_field_key` in
`display_totals`.
"""
if display_field_key in display_totals:
if isinstance(val, bool):
# True: 1, False: 0
display_totals[display_field_key] += Decimal(val)
elif isinstance(val, Number):
display_totals[display_field_key] += Decimal(str(val))
elif val:
display_totals[display_field_key] += Decimal(1)
# Select pk for primary and m2m relations in order to retrieve objects
# for adding properties to report rows. Group-by queries do not support
# Property nor Custom Field filters.
if not group:
display_field_paths.insert(0, 'pk')
m2m_relations = []
for position, property_path in property_list.items():
property_root = property_path.split('__')[0]
root_class = model_class
try:
property_root_class = getattr(root_class, property_root)
except AttributeError: # django-hstore schema compatibility
continue
if type(property_root_class) == ReverseManyRelatedObjectsDescriptor:
display_field_paths.insert(1, '%s__pk' % property_root)
m2m_relations.append(property_root)
if group:
values = objects.values(*group)
values = self.add_aggregates(values, display_fields)
filtered_report_rows = [
[row[field] for field in display_field_paths]
for row in values
]
for row in filtered_report_rows:
for pos, field in enumerate(display_field_paths):
increment_total(field, row[pos])
else:
filtered_report_rows = []
values_and_properties_list = []
values_list = objects.values_list(*display_field_paths)
for row in values_list:
row = list(row)
values_and_properties_list.append(row[1:])
obj = None # we will get this only if needed for more complex processing
#related_objects
remove_row = False
# filter properties (remove rows with excluded properties)
for property_filter in property_filters:
if not obj:
obj = model_class.objects.get(pk=row.pop(0))
root_relation = property_filter.path.split('__')[0]
if root_relation in m2m_relations:
pk = row[0]
if pk is not None:
# a related object exists
m2m_obj = getattr(obj, root_relation).get(pk=pk)
val = reduce(getattr, [property_filter.field], m2m_obj)
else:
val = None
else:
if property_filter.field_type == 'Custom Field':
for relation in property_filter.path.split('__'):
if hasattr(obj, root_relation):
obj = getattr(obj, root_relation)
val = obj.get_custom_value(property_filter.field)
else:
val = reduce(getattr, (property_filter.path + property_filter.field).split('__'), obj)
if property_filter.filter_property(val):
remove_row = True
values_and_properties_list.pop()
break
if not remove_row:
for i, field in enumerate(display_field_paths[1:]):
increment_total(field, row[i + 1])
for position, display_property in property_list.items():
if not obj:
obj = model_class.objects.get(pk=row.pop(0))
relations = display_property.split('__')
root_relation = relations[0]
if root_relation in m2m_relations:
pk = row.pop(0)
if pk is not None:
# a related object exists
m2m_obj = getattr(obj, root_relation).get(pk=pk)
val = reduce(getattr, relations[1:], m2m_obj)
else:
val = None
else:
# Could error if a related field doesn't exist
try:
val = reduce(getattr, relations, obj)
except AttributeError:
val = None
values_and_properties_list[-1].insert(position, val)
increment_total(display_property, val)
for position, display_custom in custom_list.items():
if not obj:
obj = model_class.objects.get(pk=row.pop(0))
val = obj.get_custom_value(display_custom)
values_and_properties_list[-1].insert(position, val)
increment_total(display_custom, val)
filtered_report_rows.append(values_and_properties_list[-1])
if preview and len(filtered_report_rows) == 50:
break
# Sort results if requested.
if hasattr(display_fields, 'filter'):
defaults = {
None: text_type,
datetime.date: lambda: datetime.date(datetime.MINYEAR, 1, 1),
datetime.datetime: lambda: datetime.datetime(datetime.MINYEAR, 1, 1),
}
# Order sort fields in reverse order so that ascending, descending
# sort orders work together (based on Python's stable sort). See
# http://stackoverflow.com/questions/6666748/ for details.
sort_fields = display_fields.filter(sort__gt=0).order_by('-sort')
sort_values = sort_fields.values_list('position', 'sort_reverse')
for pos, reverse in sort_values:
column = (row[pos] for row in filtered_report_rows)
type_col = (type(val) for val in column if val is not None)
field_type = next(type_col, None)
default = defaults.get(field_type, field_type)()
filtered_report_rows = sorted(
filtered_report_rows,
key=lambda row: self.sort_helper(row[pos], default),
reverse=reverse,
)
values_and_properties_list = filtered_report_rows
# Build mapping from display field position to choices list.
choice_lists = {}
for df in display_fields:
if df.choices and hasattr(df, 'choices_dict'):
df_choices = df.choices_dict
# Insert blank and None as valid choices.
df_choices[''] = ''
df_choices[None] = ''
choice_lists[df.position] = df_choices
# Build mapping from display field position to format.
display_formats = {}
for df in display_fields:
if hasattr(df, 'display_format') and df.display_format:
display_formats[df.position] = df.display_format
def formatter(value, style):
# Convert value to Decimal to apply numeric formats.
try:
value = Decimal(value)
except Exception:
pass
try:
return style.string.format(value)
except ValueError:
return value
# Iterate rows and convert values by choice lists and field formats.
final_list = []
for row in values_and_properties_list:
row = list(row)
for position, choice_list in choice_lists.items():
try:
row[position] = text_type(choice_list[row[position]])
except Exception:
row[position] = text_type(row[position])
for pos, style in display_formats.items():
row[pos] = formatter(row[pos], style)
final_list.append(row)
values_and_properties_list = final_list
if display_totals:
display_totals_row = []
fields_and_properties = list(display_field_paths[0 if group else 1:])
for position, value in property_list.items():
fields_and_properties.insert(position, value)
for field in fields_and_properties:
display_totals_row.append(display_totals.get(field, ''))
# Add formatting to display totals.
for pos, style in display_formats.items():
display_totals_row[pos] = formatter(display_totals_row[pos], style)
values_and_properties_list.append(
['TOTALS'] + (len(fields_and_properties) - 1) * ['']
)
values_and_properties_list.append(display_totals_row)
return values_and_properties_list, message
def sort_helper(self, value, default):
if value is None:
value = default
if isinstance(value, string_types):
value = value.lower()
return value
class GetFieldsMixin(object):
def get_fields(self, model_class, field_name='', path='', path_verbose=''):
""" Get fields and meta data from a model
:param model_class: A django model class
:param field_name: The field name to get sub fields from
:param path: path of our field in format
field_name__second_field_name__ect__
:param path_verbose: Human readable version of above
:returns: Returns fields and meta data about such fields
fields: Django model fields
custom_fields: fields from django-custom-field if installed
properties: Any properties the model has
path: Our new path
path_verbose: Our new human readable path
:rtype: dict
"""
fields = get_direct_fields_from_model(model_class)
properties = get_properties_from_model(model_class)
custom_fields = get_custom_fields_from_model(model_class)
app_label = model_class._meta.app_label
if field_name != '':
field = model_class._meta.get_field_by_name(field_name)
if path_verbose:
path_verbose += "::"
# TODO: need actual model name to generate choice list (not pluralized field name)
# - maybe store this as a separate value?
if field[3] and hasattr(field[0], 'm2m_reverse_field_name'):
path_verbose += field[0].m2m_reverse_field_name()
else:
path_verbose += field[0].name
path += field_name
path += '__'
if field[2]: # Direct field
try:
new_model = field[0].related.parent_model
except AttributeError:
new_model = field[0].related.model
path_verbose = new_model.__name__.lower()
else: # Indirect related field
try:
new_model = field[0].related_model
except AttributeError: # Django 1.7
new_model = field[0].model
path_verbose = new_model.__name__.lower()
fields = get_direct_fields_from_model(new_model)
custom_fields = get_custom_fields_from_model(new_model)
properties = get_properties_from_model(new_model)
app_label = new_model._meta.app_label
return {
'fields': fields,
'custom_fields': custom_fields,
'properties': properties,
'path': path,
'path_verbose': path_verbose,
'app_label': app_label,
}
def get_related_fields(self, model_class, field_name, path="", path_verbose=""):
""" Get fields for a given model """
if field_name:
field = model_class._meta.get_field_by_name(field_name)
if field[2]:
# Direct field
try:
new_model = field[0].related.parent_model()
except AttributeError:
new_model = field[0].related.model
else:
# Indirect related field
if hasattr(field[0], 'related_model'): # Django>=1.8
new_model = field[0].related_model
else:
new_model = field[0].model()
if path_verbose:
path_verbose += "::"
path_verbose += field[0].name
path += field_name
path += '__'
else:
new_model = model_class
new_fields = get_relation_fields_from_model(new_model)
model_ct = ContentType.objects.get_for_model(new_model)
return (new_fields, model_ct, path)
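# Added illustration (a minimal sketch, not part of the original module):
# wiring DataExportMixin into a Django view. "Book", the field paths and the
# view name are hypothetical; report_to_list() accepts a plain list of field
# references, as handled above.
#
# from django.views.generic import View
#
# class BookExportView(DataExportMixin, View):
#     def get(self, request, *args, **kwargs):
#         qs = Book.objects.all()
#         fields = ['title', 'author__name', 'published']
#         data, message = self.report_to_list(qs, fields, request.user)
#         return self.list_to_xlsx_response(data, title='books', header=fields)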
| bsd-3-clause | 7,736,433,183,464,891,000 | 38.729819 | 114 | 0.533878 | false |
gloaec/trifle | src/trifle/anyconfig/backend/base.py | 1 | 4833 | #
# Copyright (C) 2012, 2013 Satoru SATOH <ssato @ redhat.com>
# License: MIT
#
from trifle.anyconfig.compat import StringIO
from trifle.anyconfig.globals import LOGGER as logging
import trifle.anyconfig.mergeabledict as D
import trifle.anyconfig.utils as U
import os.path
import os
SUPPORTED = False
def mk_opt_args(keys, kwargs):
"""
Make optional kwargs valid and optimized for each backend.
:param keys: optional argument names
    :param kwargs: keyword arguments to process
>>> mk_opt_args(("aaa", ), dict(aaa=1, bbb=2))
{'aaa': 1}
>>> mk_opt_args(("aaa", ), dict(bbb=2))
{}
"""
def filter_kwargs(kwargs):
for k in keys:
if k in kwargs:
yield (k, kwargs[k])
return dict((k, v) for k, v in filter_kwargs(kwargs))
def mk_dump_dir_if_not_exist(f):
"""
Make dir to dump f if that dir does not exist.
:param f: path of file to dump
"""
dumpdir = os.path.dirname(f)
if not os.path.exists(dumpdir):
logging.debug("Creating output dir as it's not found: " + dumpdir)
os.makedirs(dumpdir)
class ConfigParser(object):
_type = None
_priority = 0 # 0 (lowest priority) .. 99 (highest priority)
_extensions = []
_container = D.MergeableDict
_supported = False
_load_opts = []
_dump_opts = []
@classmethod
def type(cls):
return cls._type
@classmethod
def priority(cls):
return cls._priority
@classmethod
def extensions(cls):
return cls._extensions
@classmethod
def supports(cls, config_file=None):
if config_file is None:
return cls._supported
else:
return cls._supported and \
U.get_file_extension(config_file) in cls._extensions
@classmethod
def container(cls):
return cls._container
@classmethod
def set_container(cls, container):
cls._container = container
@classmethod
def load_impl(cls, config_fp, **kwargs):
"""
:param config_fp: Config file object
:param kwargs: backend-specific optional keyword parameters :: dict
:return: dict object holding config parameters
"""
raise NotImplementedError("Inherited class should implement this")
@classmethod
def loads(cls, config_content, **kwargs):
"""
:param config_content: Config file content
:param kwargs: optional keyword parameters to be sanitized :: dict
:return: cls.container() object holding config parameters
"""
config_fp = StringIO(config_content)
create = cls.container().create
return create(cls.load_impl(config_fp,
**mk_opt_args(cls._load_opts, kwargs)))
@classmethod
def load(cls, config_path, **kwargs):
"""
:param config_path: Config file path
:param kwargs: optional keyword parameters to be sanitized :: dict
:return: cls.container() object holding config parameters
"""
create = cls.container().create
return create(cls.load_impl(open(config_path),
**mk_opt_args(cls._load_opts, kwargs)))
@classmethod
def dumps_impl(cls, data, **kwargs):
"""
:param data: Data to dump :: dict
:param kwargs: backend-specific optional keyword parameters :: dict
:return: string represents the configuration
"""
raise NotImplementedError("Inherited class should implement this")
@classmethod
def dump_impl(cls, data, config_path, **kwargs):
"""
:param data: Data to dump :: dict
:param config_path: Dump destination file path
:param kwargs: backend-specific optional keyword parameters :: dict
"""
open(config_path, "w").write(cls.dumps_impl(data, **kwargs))
@classmethod
def dumps(cls, data, **kwargs):
"""
:param data: Data to dump :: cls.container()
:param kwargs: optional keyword parameters to be sanitized :: dict
:return: string represents the configuration
"""
convert_to = cls.container().convert_to
return cls.dumps_impl(convert_to(data),
**mk_opt_args(cls._dump_opts, kwargs))
@classmethod
def dump(cls, data, config_path, **kwargs):
"""
:param data: Data to dump :: cls.container()
:param config_path: Dump destination file path
:param kwargs: optional keyword parameters to be sanitized :: dict
"""
convert_to = cls.container().convert_to
mk_dump_dir_if_not_exist(config_path)
cls.dump_impl(convert_to(data), config_path,
**mk_opt_args(cls._dump_opts, kwargs))
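# Added illustration (a minimal sketch, not part of the original backends):
# a concrete backend only has to override load_impl and dumps_impl. It uses
# the standard library json module; the class name and _type are assumptions.
import json as _json_example
class JsonExampleParser(ConfigParser):
    _type = "json-example"
    _extensions = ["json"]
    _supported = True
    @classmethod
    def load_impl(cls, config_fp, **kwargs):
        # Parse the already-open file object into a plain dict.
        return _json_example.load(config_fp)
    @classmethod
    def dumps_impl(cls, data, **kwargs):
        # Serialize the converted dict back to a JSON string.
        return _json_example.dumps(data, **kwargs)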
# vim:sw=4:ts=4:et:
| gpl-3.0 | 8,987,969,855,856,452,000 | 28.114458 | 75 | 0.602938 | false |
gratefulfrog/ArduGuitar | Ardu2/design/POC-3_MAX395/pyboard/DraftDevt/acc.py | 1 | 4083 | # acc.py
# tests of accelerometers
# currently seems to work with the values as they are!
from pyb import Accel, LED, delay, millis
red = LED(1)
green = LED(2)
yellow = LED(3)
blue = LED(4)
def xtest(p=5,m=-5, d= 20, timeOut= 1000):
"""
this uses the pyboard leds to indicate movement in the x,y,z directions.
x-axis: the shorter dimension of the board (BLUE led)
y-axis: the longer dimension of the board (YELLOW led)
z-axis: vertical (GREEN led)
How it works:
1. define Zones, when the acceleration is greater than a positive threshold
or less than a negative threshold. These thresholds are the arguments
'p' and 'm',
2. the zones are 1 if >= pos threshold, -1 if <= neg threshold,
and 0, i.e. the deadband, otherwise.
    3. base values for the accelerations are taken to determine the 'zero'.
4. a vector of last times a led was changed is maintained for timeouts
5. the 'd' argument is the delay between accelerometer readings.
6. the timeOut is the time to wait before turning off a led for which
there has been no activity.
Loop:
0. wait for the appropriate time to pass,
1. if any new value is out of range, skip the iteration
2. for each axis value,
0. find its zone, after subtracting the base val
1. If we are in a detection zone not the deadband,
0. if it has changed zones, then:
toggle led,
update timeout timer,
update last zone
2. if its timeout has expired, then
turn off the corresponding led
"""
a = Accel()
delay(50)
global red,green, yellow, blue
leds = [[blue,0],[yellow,0],[green,0]]
ledsOff(leds)
zp = [0,0,0]
zc = [0,0,0]
base= [0,0,0]
vc= [0,0,0]
init = False
while not init:
delay(5)
init = readAcc(a,base)
t = millis()
lastActionTime = [t,t,t]
print ('Initialized!')
while True:
delay(d)
if not readAcc(a,vc): # then read error, skip this iteration
print ('continuing...')
continue
for i in range(3):
zc[i] = DetectionZone(vc[i]-base[i],p,m)
if zc[i]: # we are in a detection zone
if zc[i] != zp[i]:
toggle(leds[i])
lastActionTime[i] = millis()
zp[i] = zc[i]
if millis()-lastActionTime[i] > timeOut:
off(leds[i])
lastActionTime[i] = millis()
def readAcc(ac, valVect,):
"""
reads ac in 3-axis,
if all values are ok, updates valVect & returns True
if not returns False and does not update
"""
vc = [ac.x(),ac.y(),ac.z()]
if any([v>31 or v< -32 for v in vc]): # error!
return False
else:
for i in range(3):
valVect[i]=vc[i]
return True
def ledsOff(ls):
[off(l) for l in ls]
def off(ldPair):
ldPair[0].off()
ldPair[1] = 0
print(str(ldPair) + ': off')
def DetectionZone(val, posLim, negLim):
res = 0
if val >= posLim:
res = 1
elif val <= negLim:
res = -1
return res
def toggle(ldPair):
if ldPair[1]: # it's on, turn off
ldPair[0].off()
ldPair[1] = 0
print(str(ldPair) + ': toggle: off')
else: # it's off, turn on
ldPair[0].on()
ldPair[1] = 1
print(str(ldPair) + ': toggle: on')
def sign(x):
if x==0:
return 1
else:
return x/abs(x)
"""
Test function for ShaeControl1
def testXAc():
led = [LED(4),0]
def oFunc():
led[0].off()
led[1]=0
print('oFunc: off')
def tFunc():
if led[1]:
led[1]=0
led[0].off()
print('tFunc: off')
else:
led[1]=1
led[0].on()
print('tFunc: on')
a = Accel()
sc = ShakeControl1(a.x,tFunc,oFunc)
while True:
sc.update()
"""
| gpl-2.0 | -9,215,248,256,882,599,000 | 27.552448 | 79 | 0.533676 | false |
rwl/PyCIM | CIM15/IEC61970/Generation/GenerationDynamics/SteamSupply.py | 1 | 3051 | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61970.Core.PowerSystemResource import PowerSystemResource
class SteamSupply(PowerSystemResource):
"""Steam supply for steam turbineSteam supply for steam turbine
"""
def __init__(self, steamSupplyRating=0.0, SteamTurbines=None, *args, **kw_args):
"""Initialises a new 'SteamSupply' instance.
@param steamSupplyRating: Rating of steam supply
@param SteamTurbines: Steam turbines may have steam supplied by a steam supply
"""
#: Rating of steam supply
self.steamSupplyRating = steamSupplyRating
self._SteamTurbines = []
self.SteamTurbines = [] if SteamTurbines is None else SteamTurbines
super(SteamSupply, self).__init__(*args, **kw_args)
_attrs = ["steamSupplyRating"]
_attr_types = {"steamSupplyRating": float}
_defaults = {"steamSupplyRating": 0.0}
_enums = {}
_refs = ["SteamTurbines"]
_many_refs = ["SteamTurbines"]
def getSteamTurbines(self):
"""Steam turbines may have steam supplied by a steam supply
"""
return self._SteamTurbines
def setSteamTurbines(self, value):
for p in self._SteamTurbines:
filtered = [q for q in p.SteamSupplys if q != self]
            p._SteamSupplys = filtered
for r in value:
if self not in r._SteamSupplys:
r._SteamSupplys.append(self)
self._SteamTurbines = value
SteamTurbines = property(getSteamTurbines, setSteamTurbines)
def addSteamTurbines(self, *SteamTurbines):
for obj in SteamTurbines:
if self not in obj._SteamSupplys:
obj._SteamSupplys.append(self)
self._SteamTurbines.append(obj)
def removeSteamTurbines(self, *SteamTurbines):
for obj in SteamTurbines:
if self in obj._SteamSupplys:
obj._SteamSupplys.remove(self)
self._SteamTurbines.remove(obj)
| mit | -1,539,395,530,458,653,400 | 39.68 | 86 | 0.691577 | false |
mjourdan/paperwork | src/paperwork/frontend/util/scanner.py | 1 | 3924 | # Paperwork - Using OCR to grep dead trees the easy way
# Copyright (C) 2014 Jerome Flesch
#
# Paperwork is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Paperwork is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Paperwork. If not, see <http://www.gnu.org/licenses/>.
import logging
import re
import pyinsane.abstract_th as pyinsane
logger = logging.getLogger(__name__)
def _set_scanner_opt(scanner_opt_name, scanner_opt, possible_values):
value = possible_values[0]
regexs = [re.compile(x, flags=re.IGNORECASE) for x in possible_values]
if (scanner_opt.constraint_type ==
pyinsane.SaneConstraintType.STRING_LIST):
value = None
for regex in regexs:
for constraint in scanner_opt.constraint:
if regex.match(constraint):
value = constraint
break
if value is not None:
break
if value is None:
raise pyinsane.SaneException(
"%s are not a valid values for option %s"
% (str(possible_values), scanner_opt_name))
logger.info("Setting scanner option '%s' to '%s'"
% (scanner_opt_name, str(value)))
scanner_opt.value = value
def set_scanner_opt(scanner_opt_name, scanner_opt, possible_values):
"""
Set one of the scanner options
Arguments:
scanner_opt_name --- for verbose
scanner_opt --- the scanner option (its value, its constraints, etc)
possible_values --- a list of values considered valid (the first one
being the preferred one)
"""
if not scanner_opt.capabilities.is_active():
logger.warning("Unable to set scanner option '%s':"
" Option is not active"
% scanner_opt_name)
return False
# WORKAROUND(Jflesch): For some reason, my crappy scanner returns
# I/O errors randomly for fun
for t in xrange(0, 5):
try:
_set_scanner_opt(scanner_opt_name, scanner_opt, possible_values)
break
except Exception, exc:
logger.warning("Warning: Failed to set scanner option"
" %s=%s: %s (try %d/5)"
% (scanner_opt_name, possible_values, str(exc), t))
return True
def __set_scan_area_pos(options, opt_name, select_value_func, missing_options):
if opt_name not in options:
missing_options.append(opt_name)
else:
if not options[opt_name].capabilities.is_active():
logger.warning("Unable to set scanner option '%s':"
" Option is not active" % opt_name)
return
constraint = options[opt_name].constraint
if isinstance(constraint, tuple):
value = select_value_func(constraint[0], constraint[1])
else: # is an array
value = select_value_func(constraint)
options[opt_name].value = value
def maximize_scan_area(scanner):
opts = scanner.options
missing_opts = []
__set_scan_area_pos(opts, "tl-x", min, missing_opts)
__set_scan_area_pos(opts, "tl-y", min, missing_opts)
__set_scan_area_pos(opts, "br-x", max, missing_opts)
__set_scan_area_pos(opts, "br-y", max, missing_opts)
if missing_opts:
logger.warning("Failed to maximize the scan area. Missing options: %s"
% ", ".join(missing_opts))
| gpl-3.0 | 8,677,825,845,826,105,000 | 36.730769 | 79 | 0.610601 | false |
ogrady/GoodMorning | audio.py | 1 | 4200 | import os
import random
import pygame as pg
import util
'''
Audio components.
version: 1.0
author: Daniel O'Grady
'''
MAX_SOUND_CHANNELS = 4
class AudioException(Exception):
    '''
    Raised for invalid audio configuration. Defined locally as a minimal
    fallback (assumption): the module uses this name but never imports or
    defines it.
    '''
    pass
class AudioMixer(object):
'''
Mixes ambient and effect sounds, where ambient
sounds actually loop indefinitely while effects
are just queued. There can be multiple effect channels
to create a cacophony of effects for when you just really
can't get out of bed...
'''
def __init__(self, basedir = ".", sound_groups = []):
'''
Constructor
        basedir: base directory for sound files (kept for backwards
                 compatibility; unused by this constructor)
        sound_groups: list of lists of sound file paths; the first group is
                      the ambient pool, each further group feeds one effect
                      channel
'''
channels = len(sound_groups)
if channels < 2:
raise AudioException("Invalid channel count '%d', expected at least 2 channels" % (channels,))
if channels > MAX_SOUND_CHANNELS:
raise AudioException("Too many audio channels (%d), expected a maximum of %d channels" % (channels, util.MAX_SOUND_CHANNELS))
self.ambients = sound_groups[0] #self.load_sounds("/".join((basedir, ambient_dir)))
self.sounds = sound_groups[1:]
# self.sounds = self.load_sounds("/".join((basedir, sound_dir)))
pg.mixer.init(channels = channels)
self.ambient_chan = pg.mixer.Channel(0)
self.sound_chans = [pg.mixer.Channel(i) for i in range(1,channels)]
self.ambient_chan.set_endevent(util.Event.SOUND_ENDED.value)
for i in range(0, len(self.sound_chans)):
self.sound_chans[i].set_endevent(util.Event.SOUND_ENDED.value + i + 1) # skip i = 0 as it is the ambient channel
if not self.sounds:
raise AudioException("No sounds were loaded")
if not self.ambients:
raise AudioException("No ambients were loaded")
def start(self):
util.PygameEventListener.instance.dispatcher.add_listener(self)
self.mix()
def stop(self):
util.PygameEventListener.instance.dispatcher.remove_listener(self)
for c in self.sound_chans:
c.stop()
self.ambient_chan.stop()
def load_sounds_from_dir(self, dir, extensions = (".ogg", ".wav")):
'''
        Collects all files with one of the passed extensions from a
        directory and returns the resulting list of file paths.
This method is obsolete.
'''
sounds = list(map(lambda f: "%s/%s" % (dir, f), [f for f in os.listdir(dir) if f.endswith(extensions)]))
if not sounds:
raise AudioException("No audio files could be loaded from '%s' with extensions %s" % (dir,extensions))
return sounds
def next_sound(self, channel = 0, fade_ms = 1000):
'''
Plays the next random sound in the
given channel.
'''
if not (0 <= channel < len(self.sound_chans)):
raise AudioException("Invalid channel %s, must be between 0 and %s" % (channel,len(self.sound_chans)))
s = pg.mixer.Sound(random.choice(self.sounds[channel]))
self.sound_chans[channel].play(s, fade_ms = fade_ms)
def next_ambient(self, fade_ms = 10000):
'''
Plays a random ambient, looping forever.
'''
s = pg.mixer.Sound(random.choice(self.ambients))
self.ambient_chan.play(s, loops = -1, fade_ms = fade_ms)
def mix(self):
'''
Plays a random, looping ambient and a
random effect in each effect channel.
'''
self.next_ambient()
for i in range(0, len(self.sound_chans)):
self.next_sound(channel = i)
def on_pygame_event(self, e):
if e.type >= util.Event.SOUND_ENDED.value and e.type <= util.Event.SOUND_ENDED.value + len(self.sound_chans):
self.next_sound(e.type - util.Event.SOUND_ENDED.value - 1) # -1 to compensate for the 0st channel which is ambient
class Mute(AudioMixer):
def mix(self):
pass
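if __name__ == '__main__':
    # Added illustration (a minimal sketch, not part of the original module):
    # group 0 is the looping ambient pool, each further group feeds one
    # effect channel. The file paths are placeholders.
    groups = [
        ["sounds/ambient/birds.ogg", "sounds/ambient/rain.ogg"],
        ["sounds/fx/bell.ogg", "sounds/fx/chime.ogg"],
        ["sounds/fx/rooster.ogg"],
    ]
    mixer = AudioMixer(sound_groups=groups)
    mixer.start()  # loops a random ambient and queues one random effect per channel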
| gpl-3.0 | 7,531,481,858,215,642,000 | 39 | 137 | 0.615 | false |
hippysurfer/geotypes | tests/WKTParser_Test.py | 1 | 3634 | #!/usr/bin/python
import TestConfig
import _OGAbstractFactory
import _WKTParser
import _OGGeoTypeFactory
################################################################################
# Copyright (c) QinetiQ Plc 2003
#
# Licensed under the LGPL. For full license details see the LICENSE file.
################################################################################
class OGTestFactory(_OGAbstractFactory.OGAbstractFactory):
def __init__(self):
print "__init__"
def abortGeometryCollection(self):
print "abortGeometryCollection"
def abortLinearRing(self):
print "abortLinearRing"
def abortLineString(self):
print "abortLineString"
def abortMultiLineString(self):
print "abortMultiLineString"
def abortMultiPoint(self):
print "abortMultiPoint"
def abortMultiPolygon(self):
print "abortMultiPolygon"
def abortPoint(self):
print "abortPoint"
def abortPolygon(self):
print "abortPolygon"
def abortUnit(self):
print "abortUnit"
def abortWork(self):
print "abortWork"
def addPoints(self,x,y):
print "addPoints x=%f, y=%f" % (x,y)
def addPoints3D(self,x,y,z):
print "addPoints3D x=%f, y=%f, z=%f" % (x,y,z)
def beginGeometryCollection(self):
print "beginGeometryCollection"
def beginLinearRing(self):
print "beginLinearRing"
def beginLineString(self):
print "beginLineString"
def beginMultiLineString(self):
print "beginMultiLineString"
def beginMultiPoint(self):
print "beginMultiPoint"
def beginMultiPolygon(self):
print "beginMultiPolygon"
def beginPoint(self):
print "beginPoint"
def beginPolygon(self):
print "beginPolygon"
def beginUnit(self, srid):
print "beginUnit"
## def beginUnit(java.lang.String[] words, int[] values):
## print ""
def beginWork(self):
print "beginWork"
def endGeometryCollection(self):
print "endGeometryCollection"
def endLinearRing(self):
print "endLinearRing"
def endLineString(self):
print "endLineString"
def endMultiLineString(self):
print "endMultiLineString"
def endMultiPoint(self):
print "endMultiPoint"
def endMultiPolygon(self):
print "endMultiPolygon"
def endPoint(self):
print "endPoint"
def endPolygon(self):
print "endPolygon"
def endUnit(self):
print "endUnit"
def endWork(self):
print "endWork"
def reset(self):
print "reset"
#fac = OGTestFactory()
##par = _WKTParser.WKTParser(fac)
##par.parseGeometry('SRID=128;GEOMETRYCOLLECTION(POINT(2 3 9),LINESTRING(2 3 4,3 4 5))')
##par.parseGeometry('SRID=128;MULTILINESTRING((0.5 0 0,1 1 0,1 2 1),(2 3 1,3 2 1,5 4 1))')
##par.parseGeometry('SRID=128;MULTILINESTRING(((0.5 0 0),(1 1 0),(1 2 1)),((2 3 1),(3 2 1),(5 4 1)))')
##par.parseGeometry('SRID=128;MULTIPOINT(0 0 0,1 2 1)')
##par.parseGeometry('SRID=128;MULTIPOINT((0 0 0),(1 2 1))')
##par.parseGeometry('SRID=128;POLYGON((0 0 0,4 0 0,4 4 0,0 4 0,0 0 10.5665),(1 1 0,2 1 0,2 2 0,1 2 0,55 66))')
##par.parseGeometry('SRID=128;MULTIPOLYGON(((1 2 3,4 5 6,7 8 9,10 11 12,13 14 15),(1 1 0,2 1 0,2 2 0,1 2 0,1 1 0)),((-1 -1 0,-1 -2 0,-2 -2 0,-2 -1 0,-1 -1 0)))')
fac = _OGGeoTypeFactory.OGGeoTypeFactory()
par = _WKTParser.WKTParser(fac)
par.parseGeometry('SRID=129;POINT(1 2 3)')
print fac.getGeometry()
| lgpl-2.1 | 1,693,860,445,491,580,700 | 24.77305 | 161 | 0.590259 | false |
mrachinskiy/blender-addon-booltron | object_utils.py | 1 | 1558 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# Booltron super add-on for super fast booleans.
# Copyright (C) 2014-2019 Mikhail Rachinskiy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# ##### END GPL LICENSE BLOCK #####
import random
import bpy
class ObjectUtils:
def object_add(self, name):
me = bpy.data.meshes.new(name)
ob = bpy.data.objects.new(name, me)
bpy.context.collection.objects.link(ob)
return ob
def object_remove(self, ob):
me = ob.data
bpy.data.objects.remove(ob)
bpy.data.meshes.remove(me)
def object_pos_correct(self, obs):
for ob in obs:
x = random.uniform(-self.pos_offset, self.pos_offset)
y = random.uniform(-self.pos_offset, self.pos_offset)
z = random.uniform(-self.pos_offset, self.pos_offset)
ob.matrix_world[0][3] += x
ob.matrix_world[1][3] += y
ob.matrix_world[2][3] += z
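# Added illustration (a hypothetical sketch, not part of the add-on):
# ObjectUtils is meant to be mixed into a Booltron operator that provides
# a ``pos_offset`` attribute.
#
# class OBJECT_OT_example_union(ObjectUtils, bpy.types.Operator):
#     bl_idname = "object.example_union"
#     bl_label = "Example Union"
#     pos_offset = 0.005
#
#     def execute(self, context):
#         obs = list(context.selected_objects)
#         self.object_pos_correct(obs)  # jitter to avoid coplanar overlaps
#         return {"FINISHED"}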
| mit | 7,013,193,063,922,342,000 | 31.458333 | 73 | 0.654044 | false |
fabricematrat/py-macaroon-bakery | macaroonbakery/tests/test_client.py | 1 | 17944 | # Copyright 2017 Canonical Ltd.
# Licensed under the LGPLv3, see LICENCE file for details.
import base64
import datetime
import json
from unittest import TestCase
try:
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
except ImportError:
from http.server import HTTPServer, BaseHTTPRequestHandler
import threading
import pymacaroons
from httmock import (
HTTMock,
urlmatch
)
import pytz
import requests
from six.moves.urllib.parse import parse_qs
from six.moves.urllib.request import Request
import macaroonbakery as bakery
import macaroonbakery.httpbakery as httpbakery
import macaroonbakery.checkers as checkers
from macaroonbakery import utils
AGES = pytz.UTC.localize(datetime.datetime.utcnow() + datetime.timedelta(days=1))
TEST_OP = bakery.Op(entity='test', action='test')
class TestClient(TestCase):
def test_single_service_first_party(self):
b = new_bakery('loc', None, None)
def handler(*args):
GetHandler(b, None, None, None, None, AGES, *args)
try:
httpd = HTTPServer(('', 0), handler)
thread = threading.Thread(target=httpd.serve_forever)
thread.start()
srv_macaroon = b.oven.macaroon(
version=bakery.LATEST_VERSION, expiry=AGES,
caveats=None, ops=[TEST_OP])
self.assertEquals(srv_macaroon.macaroon.location, 'loc')
client = httpbakery.Client()
client.cookies.set_cookie(requests.cookies.create_cookie(
'macaroon-test', base64.b64encode(json.dumps([
srv_macaroon.to_dict().get('m')
]).encode('utf-8')).decode('utf-8')
))
resp = requests.get(
url='http://' + httpd.server_address[0] + ':' +
str(httpd.server_address[1]),
cookies=client.cookies, auth=client.auth())
resp.raise_for_status()
self.assertEquals(resp.text, 'done')
finally:
httpd.shutdown()
def test_single_party_with_header(self):
b = new_bakery('loc', None, None)
def handler(*args):
GetHandler(b, None, None, None, None, AGES, *args)
try:
httpd = HTTPServer(('', 0), handler)
thread = threading.Thread(target=httpd.serve_forever)
thread.start()
srv_macaroon = b.oven.macaroon(
version=bakery.LATEST_VERSION,
expiry=AGES, caveats=None, ops=[TEST_OP])
self.assertEquals(srv_macaroon.macaroon.location, 'loc')
headers = {
'Macaroons': base64.b64encode(json.dumps([
srv_macaroon.to_dict().get('m')
]).encode('utf-8'))
}
resp = requests.get(
url='http://' + httpd.server_address[0] + ':' +
str(httpd.server_address[1]),
headers=headers)
resp.raise_for_status()
self.assertEquals(resp.text, 'done')
finally:
httpd.shutdown()
def test_expiry_cookie_is_set(self):
class _DischargerLocator(bakery.ThirdPartyLocator):
def __init__(self):
self.key = bakery.generate_key()
def third_party_info(self, loc):
if loc == 'http://1.2.3.4':
return bakery.ThirdPartyInfo(
public_key=self.key.public_key,
version=bakery.LATEST_VERSION,
)
d = _DischargerLocator()
b = new_bakery('loc', d, None)
@urlmatch(path='.*/discharge')
def discharge(url, request):
qs = parse_qs(request.body)
content = {q: qs[q][0] for q in qs}
m = httpbakery.discharge(checkers.AuthContext(), content, d.key, d,
alwaysOK3rd)
return {
'status_code': 200,
'content': {
'Macaroon': m.to_dict()
}
}
ages = pytz.UTC.localize(
datetime.datetime.utcnow() + datetime.timedelta(days=1))
def handler(*args):
GetHandler(b, 'http://1.2.3.4', None, None, None, ages, *args)
try:
httpd = HTTPServer(('', 0), handler)
thread = threading.Thread(target=httpd.serve_forever)
thread.start()
client = httpbakery.Client()
with HTTMock(discharge):
resp = requests.get(
url='http://' + httpd.server_address[0] + ':' +
str(httpd.server_address[1]),
cookies=client.cookies,
auth=client.auth())
resp.raise_for_status()
m = bakery.Macaroon.from_dict(json.loads(
base64.b64decode(client.cookies.get('macaroon-test')))[0])
t = bakery.checkers.macaroons_expiry_time(
bakery.checkers.Namespace(), [m.macaroon])
self.assertEquals(ages, t)
self.assertEquals(resp.text, 'done')
finally:
httpd.shutdown()
def test_expiry_cookie_set_in_past(self):
class _DischargerLocator(bakery.ThirdPartyLocator):
def __init__(self):
self.key = bakery.generate_key()
def third_party_info(self, loc):
if loc == 'http://1.2.3.4':
return bakery.ThirdPartyInfo(
public_key=self.key.public_key,
version=bakery.LATEST_VERSION,
)
d = _DischargerLocator()
b = new_bakery('loc', d, None)
@urlmatch(path='.*/discharge')
def discharge(url, request):
qs = parse_qs(request.body)
content = {q: qs[q][0] for q in qs}
m = httpbakery.discharge(checkers.AuthContext(), content, d.key, d,
alwaysOK3rd)
return {
'status_code': 200,
'content': {
'Macaroon': m.to_dict()
}
}
ages = pytz.UTC.localize(
datetime.datetime.utcnow() - datetime.timedelta(days=1))
def handler(*args):
GetHandler(b, 'http://1.2.3.4', None, None, None, ages, *args)
try:
httpd = HTTPServer(('', 0), handler)
thread = threading.Thread(target=httpd.serve_forever)
thread.start()
client = httpbakery.Client()
with HTTMock(discharge):
with self.assertRaises(httpbakery.BakeryException) as ctx:
requests.get(
url='http://' + httpd.server_address[0] + ':' +
str(httpd.server_address[1]),
cookies=client.cookies,
auth=client.auth())
self.assertEqual(ctx.exception.args[0],
'too many (3) discharge requests')
finally:
httpd.shutdown()
def test_too_many_discharge(self):
class _DischargerLocator(bakery.ThirdPartyLocator):
def __init__(self):
self.key = bakery.generate_key()
def third_party_info(self, loc):
if loc == 'http://1.2.3.4':
return bakery.ThirdPartyInfo(
public_key=self.key.public_key,
version=bakery.LATEST_VERSION,
)
d = _DischargerLocator()
b = new_bakery('loc', d, None)
@urlmatch(path='.*/discharge')
def discharge(url, request):
wrong_macaroon = bakery.Macaroon(
root_key=b'some key', id=b'xxx',
location='some other location',
version=bakery.VERSION_0)
return {
'status_code': 200,
'content': {
'Macaroon': wrong_macaroon.to_dict()
}
}
def handler(*args):
GetHandler(b, 'http://1.2.3.4', None, None, None, AGES, *args)
try:
httpd = HTTPServer(('', 0), handler)
thread = threading.Thread(target=httpd.serve_forever)
thread.start()
client = httpbakery.Client()
with HTTMock(discharge):
with self.assertRaises(httpbakery.BakeryException) as ctx:
requests.get(
url='http://' + httpd.server_address[0] + ':' +
str(httpd.server_address[1]),
cookies=client.cookies,
auth=client.auth())
self.assertEqual(ctx.exception.args[0],
'too many (3) discharge requests')
finally:
httpd.shutdown()
def test_third_party_discharge_refused(self):
class _DischargerLocator(bakery.ThirdPartyLocator):
def __init__(self):
self.key = bakery.generate_key()
def third_party_info(self, loc):
if loc == 'http://1.2.3.4':
return bakery.ThirdPartyInfo(
public_key=self.key.public_key,
version=bakery.LATEST_VERSION,
)
def check(cond, arg):
raise bakery.ThirdPartyCaveatCheckFailed('boo! cond' + cond)
d = _DischargerLocator()
b = new_bakery('loc', d, None)
@urlmatch(path='.*/discharge')
def discharge(url, request):
qs = parse_qs(request.body)
content = {q: qs[q][0] for q in qs}
httpbakery.discharge(checkers.AuthContext(), content, d.key, d,
ThirdPartyCaveatCheckerF(check))
def handler(*args):
GetHandler(b, 'http://1.2.3.4', None, None, None, AGES, *args)
try:
httpd = HTTPServer(('', 0), handler)
thread = threading.Thread(target=httpd.serve_forever)
thread.start()
client = httpbakery.Client()
with HTTMock(discharge):
with self.assertRaises(bakery.ThirdPartyCaveatCheckFailed):
requests.get(
url='http://' + httpd.server_address[0] + ':' +
str(httpd.server_address[1]),
cookies=client.cookies,
auth=client.auth())
finally:
httpd.shutdown()
def test_discharge_with_interaction_required_error(self):
class _DischargerLocator(bakery.ThirdPartyLocator):
def __init__(self):
self.key = bakery.generate_key()
def third_party_info(self, loc):
if loc == 'http://1.2.3.4':
return bakery.ThirdPartyInfo(
public_key=self.key.public_key,
version=bakery.LATEST_VERSION,
)
d = _DischargerLocator()
b = new_bakery('loc', d, None)
@urlmatch(path='.*/discharge')
def discharge(url, request):
return {
'status_code': 401,
'content': {
'Code': httpbakery.ERR_INTERACTION_REQUIRED,
'Message': 'interaction required',
'Info': {
'WaitURL': 'http://0.1.2.3/',
'VisitURL': 'http://0.1.2.3/',
},
}
}
def handler(*args):
GetHandler(b, 'http://1.2.3.4', None, None, None, AGES, *args)
try:
httpd = HTTPServer(('', 0), handler)
thread = threading.Thread(target=httpd.serve_forever)
thread.start()
class MyInteractor(httpbakery.LegacyInteractor):
def legacy_interact(self, ctx, location, visit_url):
raise httpbakery.InteractionError('cannot visit')
def interact(self, ctx, location, interaction_required_err):
pass
def kind(self):
return httpbakery.WEB_BROWSER_INTERACTION_KIND
client = httpbakery.Client(interaction_methods=[MyInteractor()])
with HTTMock(discharge):
with self.assertRaises(httpbakery.InteractionError):
requests.get(
'http://' + httpd.server_address[0] + ':' + str(
httpd.server_address[1]),
cookies=client.cookies,
auth=client.auth())
finally:
httpd.shutdown()
def test_extract_macaroons_from_request(self):
def encode_macaroon(m):
macaroons = '[' + utils.macaroon_to_json_string(m) + ']'
return base64.urlsafe_b64encode(utils.to_bytes(macaroons)).decode('ascii')
req = Request('http://example.com')
m1 = pymacaroons.Macaroon(version=pymacaroons.MACAROON_V2, identifier='one')
req.add_header('Macaroons', encode_macaroon(m1))
m2 = pymacaroons.Macaroon(version=pymacaroons.MACAROON_V2, identifier='two')
jar = requests.cookies.RequestsCookieJar()
jar.set_cookie(utils.cookie(
name='macaroon-auth',
value=encode_macaroon(m2),
url='http://example.com',
))
jar.add_cookie_header(req)
macaroons = httpbakery.extract_macaroons(req)
self.assertEquals(len(macaroons), 2)
macaroons.sort(key=lambda ms: ms[0].identifier)
self.assertEquals(macaroons[0][0].identifier, m1.identifier)
self.assertEquals(macaroons[1][0].identifier, m2.identifier)
class GetHandler(BaseHTTPRequestHandler):
'''A mock HTTP server that serves a GET request'''
def __init__(self, bakery, auth_location, mutate_error,
caveats, version, expiry, *args):
'''
@param bakery used to check incoming requests and macaroons
for discharge-required errors.
@param auth_location holds the location of any 3rd party
authorizer. If this is not None, a 3rd party caveat will be
added addressed to this location.
@param mutate_error if non None, will be called with any
discharge-required error before responding to the client.
@param caveats called to get caveats to add to the returned
macaroon.
@param version holds the version of the bakery that the
server will purport to serve.
@param expiry holds the expiry for the macaroon that will be created
in _write_discharge_error
'''
self._bakery = bakery
self._auth_location = auth_location
self._mutate_error = mutate_error
self._caveats = caveats
self._server_version = version
self._expiry = expiry
BaseHTTPRequestHandler.__init__(self, *args)
def do_GET(self):
'''do_GET implements a handler for the HTTP GET method'''
ctx = checkers.AuthContext()
auth_checker = self._bakery.checker.auth(
httpbakery.extract_macaroons(self.headers))
try:
auth_checker.allow(ctx, [TEST_OP])
except (bakery.PermissionDenied,
bakery.VerificationError) as exc:
return self._write_discharge_error(exc)
self.send_response(200)
self.end_headers()
content_len = int(self.headers.get('content-length', 0))
content = 'done'
if self.path != '/no-body'and content_len > 0:
body = self.rfile.read(content_len)
content = content + ' ' + body
self.wfile.write(content.encode('utf-8'))
return
def _write_discharge_error(self, exc):
version = httpbakery.request_version(self.headers)
if version < bakery.LATEST_VERSION:
self._server_version = version
caveats = []
if self._auth_location != '':
caveats = [
checkers.Caveat(location=self._auth_location,
condition='is-ok')
]
if self._caveats is not None:
caveats.extend(self._caveats)
m = self._bakery.oven.macaroon(
version=bakery.LATEST_VERSION, expiry=self._expiry,
caveats=caveats, ops=[TEST_OP])
content, headers = httpbakery.discharge_required_response(
m, '/', 'test', exc.args[0])
self.send_response(401)
for h in headers:
self.send_header(h, headers[h])
self.send_header('Connection', 'close')
self.end_headers()
self.wfile.write(content)
def new_bakery(location, locator, checker):
'''Return a new bakery instance.
@param location Location of the bakery {str}.
@param locator Locator for third parties {ThirdPartyLocator or None}
@param checker Caveat checker {FirstPartyCaveatChecker or None}
@return {Bakery}
'''
if checker is None:
c = checkers.Checker()
c.namespace().register('testns', '')
c.register('is', 'testns', check_is_something)
checker = c
key = bakery.generate_key()
return bakery.Bakery(
location=location,
locator=locator,
key=key,
checker=checker,
)
def is_something_caveat():
return checkers.Caveat(condition='is something', namespace='testns')
def check_is_something(ctx, cond, arg):
if arg != 'something':
return '{} doesn\'t match "something"'.format(arg)
return None
class ThirdPartyCaveatCheckerF(bakery.ThirdPartyCaveatChecker):
def __init__(self, check):
self._check = check
def check_third_party_caveat(self, ctx, info):
cond, arg = checkers.parse_caveat(info.condition)
return self._check(cond, arg)
alwaysOK3rd = ThirdPartyCaveatCheckerF(lambda cond, arg: [])
| lgpl-3.0 | -2,682,470,697,162,732,000 | 36.539749 | 86 | 0.539958 | false |
gangchill/nip-convnet | scripts/train_cnn.py | 1 | 8422 | import tensorflow as tf
import numpy as np
import os
import scripts.from_github.cifar10_input as cifar10_input
CIFAR_LOCATION = 'cifar10_data/cifar-10-batches-bin'
def train_cnn(sess, cnn, data, x, y, keep_prob, dropout_k_p, batch_size, init_iteration, max_iterations, chk_iterations, writer, fine_tuning_only, save_prefix = None, best_accuracy_so_far = 0, num_test_images = 1024, test_batch_size = 1024, evaluate_using_test_set = False, final_test_evaluation = True, best_model_for_test = True):
print("Training CNN for {} iterations with batchsize {}".format(max_iterations, batch_size))
sess.run(cnn.set_global_step_op, feed_dict = {cnn.global_step_setter_input: init_iteration})
print('Set gobal step to {}'.format(init_iteration))
if evaluate_using_test_set and final_test_evaluation:
print('Attention: we are currently using the test set during training time.')
print(' Therefore, the last test iteration is not needed and will not be executed.')
print('Consider using the train / validation set to track progress during training and evaluate the accuracy using the test set in the end.')
final_test_evaluation = False
if data == 'cifar_10':
coord = tf.train.Coordinator()
image_batch, label_batch = cifar10_input.distorted_inputs(CIFAR_LOCATION, batch_size)
iteration_evaluation_image_node, iteration_evaluation_label_node = cifar10_input.inputs(evaluate_using_test_set, CIFAR_LOCATION, test_batch_size)
if final_test_evaluation:
test_image_node, test_label_node = cifar10_input.inputs(True, CIFAR_LOCATION, test_batch_size)
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
# determine test set size (cifar10 test set is 10000)
if num_test_images <= 0:
total_test_images = 10000
else:
total_test_images = min(num_test_images, 10000)
if evaluate_using_test_set:
iteration_evaluation_name = 'test'
else:
iteration_evaluation_name = 'train'
else:
# choose dataset used for testing every chk_iterations-th iteration
if evaluate_using_test_set:
iteration_evaluation_set = data.test
iteration_evaluation_name = 'test'
else:
iteration_evaluation_set = data.validation
iteration_evaluation_name = 'validation'
if final_test_evaluation:
test_set = data.test
max_test_images = iteration_evaluation_set.images.shape[0]
if num_test_images <= 0:
total_test_images = max_test_images
else:
total_test_images = min(num_test_images, max_test_images)
current_top_accuracy = best_accuracy_so_far
# create two different savers (always store the model from the last 5 check iterations and the current model with the best accuracy)
chk_it_saver = tf.train.Saver(cnn.all_variables_dict, max_to_keep = 1)
best_it_saver = tf.train.Saver(cnn.all_variables_dict, max_to_keep = 1)
#
total_test_set_accuracy = tf.Variable(0, '{}_set_accuracy'.format(iteration_evaluation_name))
for i in range(init_iteration, max_iterations):
if chk_iterations > 100 and i % 100 == 0:
print('...iteration {}'.format(i))
if data == 'cifar_10':
batch_xs, batch_ys = sess.run([image_batch, label_batch])
else:
batch_xs, batch_ys = data.train.next_batch(batch_size)
if i % chk_iterations == 0:
# batch the test data (prevent memory overflow)
last_batch_size = total_test_images % test_batch_size
num_batches = total_test_images / test_batch_size + int(last_batch_size > 0)
if last_batch_size == 0:
last_batch_size = test_batch_size
print('---> Test Iteration')
if fine_tuning_only:
print('BE AWARE: we are currently only optimizing the dense layer weights, convolution weights and biases stay unchanged')
print('Current performance is evaluated using the {}-set'.format(iteration_evaluation_name))
print('Test batch size is {}'.format(test_batch_size))
print('We want to average over {} test images in total'.format(total_test_images))
print('This gives us {} batches, the last one having only {} images'.format(num_batches, last_batch_size))
total_accuracy = 0
for batch_indx in range(num_batches):
print('...treating batch {}'.format(batch_indx))
if batch_indx == num_batches - 1:
current_batch_size = last_batch_size
else:
current_batch_size = test_batch_size
if data == 'cifar_10':
test_images, test_labels = sess.run([iteration_evaluation_image_node, iteration_evaluation_label_node])
else:
test_images, test_labels = iteration_evaluation_set.next_batch(current_batch_size)
avg_accuracy, summary = sess.run([cnn.accuracy, cnn.merged], feed_dict={x: test_images, y: test_labels, keep_prob: 1.0})
total_accuracy += avg_accuracy * current_batch_size
total_accuracy = total_accuracy / total_test_images
print('it {} accuracy {}'.format(i, total_accuracy))
# always keep the models from the last 5 iterations stored
if save_prefix is not None:
file_path = os.path.join(save_prefix, 'CNN-acc-{}'.format(total_accuracy))
print('...save current iteration weights to file ')
cnn.store_model_to_file(sess, file_path, i, saver=chk_it_saver)
if total_accuracy > current_top_accuracy:
print('...new top accuracy found')
current_top_accuracy = total_accuracy
if save_prefix is not None:
file_path = os.path.join(save_prefix, 'best', 'CNN-acc-{}'.format(current_top_accuracy))
print('...save new found best weights to file ')
cnn.store_model_to_file(sess, file_path, i, saver=best_it_saver)
with tf.name_scope('CNN'):
total_batch_acc_summary = tf.Summary()
total_batch_acc_summary.value.add(tag='acc_over_all_ {}_batches'.format(iteration_evaluation_name), simple_value=total_accuracy)
writer.add_summary(total_batch_acc_summary, i)
writer.add_summary(summary, i)
# perform one training step
if fine_tuning_only:
sess.run([cnn.optimize_dense_layers, cnn.increment_global_step_op], feed_dict={x: batch_xs, y: batch_ys, keep_prob: dropout_k_p})
else:
sess.run([cnn.optimize, cnn.increment_global_step_op], feed_dict={x: batch_xs, y: batch_ys, keep_prob: dropout_k_p})
print('...finished training')
if final_test_evaluation:
print('The network was trained without presence of the test set.')
print('...Performing test set evaluation')
print('loading best model')
best_model_folder = os.path.join(save_prefix, 'best')
print('looking for best weights in {}'.format(best_model_folder))
latest_checkpoint = tf.train.latest_checkpoint(best_model_folder)
best_it_saver.restore(sess, latest_checkpoint)
if data == 'cifar_10':
total_test_images = 10000
print('Treating cifar10: the total test set size is {} images'.format(num_test_images))
else:
total_test_images = test_set.images.shape[0]
print('Test set size is {}'.format(total_test_images))
# batch the test data (prevent memory overflow)
last_batch_size = total_test_images % test_batch_size
num_batches = total_test_images / test_batch_size + int(last_batch_size > 0)
if last_batch_size == 0:
last_batch_size = test_batch_size
print('-------------------------------------')
print('---> FINAL TEST SET EVALUATION <-----')
print('-------------------------------------')
print('Test batch size is {}'.format(test_batch_size))
print('We want to average over {} test images in total'.format(num_test_images))
print('This gives us {} batches, the last one having only {} images'.format(num_batches, last_batch_size))
total_accuracy = 0
for batch_indx in range(num_batches):
print('...treating batch {}'.format(batch_indx))
if batch_indx == num_batches - 1:
current_batch_size = last_batch_size
else:
current_batch_size = test_batch_size
if data == 'cifar_10':
test_images, test_labels = sess.run([test_image_node, test_label_node])
else:
test_images, test_labels = test_set.next_batch(current_batch_size)
avg_accuracy, summary = sess.run([cnn.accuracy, cnn.merged], feed_dict={x: test_images, y: test_labels, keep_prob: 1.0})
total_accuracy += avg_accuracy * current_batch_size
total_accuracy = total_accuracy / total_test_images
with tf.name_scope('CNN'):
total_batch_acc_summary = tf.Summary()
total_batch_acc_summary.value.add(tag='final_test_set_accuracy', simple_value=total_accuracy)
writer.add_summary(total_batch_acc_summary, max_iterations)
if data == 'cifar_10':
coord.request_stop()
coord.join(threads)
| apache-2.0 | 221,510,190,195,742,100 | 35.781659 | 332 | 0.696272 | false |
jnishi/chainer | chainer/functions/connection/depthwise_convolution_2d.py | 1 | 2945 | import chainer
def depthwise_convolution_2d(x, W, b=None, stride=1, pad=0):
"""Two-dimensional depthwise convolution function.
This is an implementation of two-dimensional depthwise convolution.
It takes two or three variables: the input image ``x``, the filter weight
``W``, and optionally, the bias vector ``b``.
Notation: here is a notation for dimensionalities.
- :math:`n` is the batch size.
- :math:`c_I` is the number of the input.
- :math:`c_M` is the channel multiplier.
- :math:`h` and :math:`w` are the height and width of the input image,
respectively.
- :math:`h_O` and :math:`w_O` are the height and width of the output image,
respectively.
- :math:`k_H` and :math:`k_W` are the height and width of the filters,
respectively.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable of shape :math:`(n, c_I, h, w)`.
W (:class:`~chainer.Variable` or :ref:`ndarray`):
Weight variable of shape :math:`(c_M, c_I, k_H, k_W)`.
b (:class:`~chainer.Variable` or :ref:`ndarray`):
Bias variable of length :math:`c_M * c_I` (optional).
stride (int or pair of ints): Stride of filter applications.
``stride=s`` and ``stride=(s, s)`` are equivalent.
pad (int or pair of ints): Spatial padding width for input arrays.
``pad=p`` and ``pad=(p, p)`` are equivalent.
Returns:
~chainer.Variable:
Output variable. Its shape is :math:`(n, c_I * c_M, h_O, w_O)`.
Like ``Convolution2D``, ``DepthwiseConvolution2D`` function computes
correlations between filters and patches of size :math:`(k_H, k_W)` in
``x``.
But unlike ``Convolution2D``, ``DepthwiseConvolution2D`` does not add up
input channels of filters but concatenates them.
For that reason, the shape of outputs of depthwise convolution are
:math:`(n, c_I * c_M, h_O, w_O)`, :math:`c_M` is called channel_multiplier.
:math:`(h_O, w_O)` is determined by the equivalent equation of
``Convolution2D``.
If the bias vector is given, then it is added to all spatial locations of
the output of convolution.
See: `L. Sifre. Rigid-motion scattering for image classification\
<https://www.di.ens.fr/data/publications/papers/phd_sifre.pdf>`_
.. seealso:: :class:`~chainer.links.DepthwiseConvolution2D`
.. admonition:: Example
>>> x = np.random.uniform(0, 1, (2, 3, 4, 7))
>>> W = np.random.uniform(0, 1, (2, 3, 3, 3))
>>> b = np.random.uniform(0, 1, (6,))
>>> y = F.depthwise_convolution_2d(x, W, b)
>>> y.shape
(2, 6, 2, 5)
"""
multiplier, in_channels, kh, kw = W.shape
F = chainer.functions
W = F.transpose(W, (1, 0, 2, 3))
W = F.reshape(W, (multiplier * in_channels, 1, kh, kw))
return F.convolution_2d(x, W, b, stride, pad, groups=in_channels)
| mit | 1,446,797,509,145,635,600 | 39.342466 | 79 | 0.611885 | false |
zengchunyun/s12 | day10/homework/rabbitmq_management/modules/rabbitmq_client.py | 1 | 5848 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: zengchunyun
"""
import pika
import subprocess
import threading
import time
import sys
class RabbitMQClient(object):
def __init__(self, host="localhost", port=5672, timeout=15, host_id=None, binding_keys=None):
"""
:param host: rabbitmq服务器IP
:param port: 服务器端口
:param timeout: 任务最大超时时间
:param host_id: 唯一主机ID,用于标识处理任务的主机
:param binding_keys: 绑定不同的路由key,用于接收不同的事件任务
:return:
"""
self.host = host
self.port = port
self.response = None
self.queue_name = None
self.exchange = "topic_os" # 设置交换器名称
self.exchange_type = "topic" # 设置交换器类型
self.binding_keys = binding_keys
self.id = self.get_id() # 设置客户端的唯一ID,一般以客户端IP为唯一ID
if host_id:
self.id = host_id # 如果配置文件设置了ID属性,则以配置文件为优先
self.connection = self.connect_server()
self.channel = self.connection.channel()
self.create_exchange() # 创建交换器
self.create_queue() # 创建队列
self.bind()
self.timeout = timeout # 设置一个任务最长执行的时间,超过这个设置时间则返回超时提示
def connect_server(self):
"""
连接到rabbitmq服务器
:return:
"""
return pika.BlockingConnection(pika.ConnectionParameters(
host=self.host,
port=self.port,
))
def get_id(self):
"""
通过获取系统的IP来定义一个客户端的ID
:return: 返回最终确定的IP
"""
import re
self.exec_call("ip addr 2> /dev/null ||ifconfig")
get_ip = self.response
result = re.findall("(\d+\.\d+\.\d+\.\d+)", str(get_ip, "utf-8"))
for ip in result:
if ip != "127.0.0.1" and not (ip.endswith("255") or ip.startswith("255")):
return ip
def create_queue(self):
"""
创建队列
:return:
"""
self.queue_name = self.channel.queue_declare(exclusive=True).method.queue
def create_exchange(self):
"""
创建交换器,避免客户端先启动,而连接的交换器不存在异常
:return:
"""
self.channel.exchange_declare(exchange=self.exchange, type=self.exchange_type)
def bind(self):
"""
绑定路由key,方便接收不同类型的任务
:return:
"""
print("Routing key {}".format(self.binding_keys))
for binding_key in self.binding_keys:
self.channel.queue_bind(queue=self.queue_name,
exchange=self.exchange,
routing_key=binding_key)
def exec_call(self, command):
"""
执行命令,并把错误或正确结果赋值给self.response
:param command: 从rabbitmq服务器获取任务命令
:return:
"""
if type(command) == bytes:
command = str(command, "utf-8")
result = subprocess.Popen(args=command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
self.response = (result.stdout.read() or result.stderr.read())
def callback(self, ch, method, properties, body):
"""
回调方法,用于执行任务并返回结果到rabbitmq服务器
:param ch: 相当于self.channel
:param method:
:param properties:接收到任务带的额外属性
:param body:任务消息
:return:
"""
before = time.monotonic() # 纪录代码执行到这所花费时间
exec_cmd = threading.Thread(target=self.exec_call, args=(body,))
exec_cmd.start()
exec_cmd.join(self.timeout)
after = time.monotonic() # 代码执行完到这所花费时间,用于计算执行过程是否超时
if (after - before) > self.timeout: # 当执行任务大于设定的默认超时时间,则说明任务已经超时了,将返回超时信息给服务器
self.response = bytes("command exec timeout", "utf8")
print(" [*] Got a task {}".format(str(body, "utf8)")))
message = {"host": self.id, "data": self.response}
ch.basic_publish(exchange="",
routing_key=properties.reply_to,
properties=pika.BasicProperties(
correlation_id=properties.correlation_id,),
body=bytes(str(message), "utf-8"))
ch.basic_ack(delivery_tag=method.delivery_tag)
def start(self):
"""
启动客户端,进入等待接收任务状态,且每次只接收一个任务
:return:
"""
self.channel.basic_qos(prefetch_count=1)
self.channel.basic_consume(self.callback,
queue=self.queue_name)
print(" [x] Awaiting RPC request")
self.channel.start_consuming()
def main():
"""
新建客户端实例,用于处理任务请求,并返回处理结果给rabbitMQ服务器
:return:
"""
try:
from config.settings import server, port, timeout, host_id, binding_keys
except ImportError:
server = "localhost"
port = 5672
timeout = 15
host_id = None
binding_keys = ["remote.call"]
binding_list = sys.argv[1:] # 路由KEY支持接收控制台输入,优先级最高
if binding_list:
binding_keys = binding_list
client = RabbitMQClient(host=server, port=port, timeout=timeout, host_id=host_id, binding_keys=binding_keys)
client.start()
if __name__ == "__main__":
main()
| gpl-2.0 | -1,399,667,292,976,872,700 | 30.974522 | 112 | 0.565538 | false |
hvnsweeting/GitPython | git/test/lib/helper.py | 1 | 10757 | # helper.py
# Copyright (C) 2008, 2009 Michael Trier ([email protected]) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
from __future__ import print_function
import os
import sys
from unittest import TestCase
import time
import tempfile
import shutil
import io
from git import Repo, Remote, GitCommandError, Git
from git.compat import string_types
GIT_REPO = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
__all__ = (
'fixture_path', 'fixture', 'absolute_project_path', 'StringProcessAdapter',
'with_rw_repo', 'with_rw_and_rw_remote_repo', 'TestBase', 'TestCase', 'GIT_REPO'
)
#{ Routines
def fixture_path(name):
test_dir = os.path.dirname(os.path.dirname(__file__))
return os.path.join(test_dir, "fixtures", name)
def fixture(name):
return open(fixture_path(name), 'rb').read()
def absolute_project_path():
return os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
#} END routines
#{ Adapters
class StringProcessAdapter(object):
"""Allows to use strings as Process object as returned by SubProcess.Popen.
Its tailored to work with the test system only"""
def __init__(self, input_string):
self.stdout = io.BytesIO(input_string)
self.stderr = io.BytesIO()
def wait(self):
return 0
poll = wait
#} END adapters
#{ Decorators
def _mktemp(*args):
"""Wrapper around default tempfile.mktemp to fix an osx issue
:note: the OSX special case was removed as it was unclear why that was needed in the first place. It seems
to be just fine without it. However, if we leave this special case, and if TMPDIR is set to something custom,
prefixing /private/ will lead to incorrect paths on OSX."""
tdir = tempfile.mktemp(*args)
# See :note: above to learn why this is comented out.
# if sys.platform == 'darwin':
# tdir = '/private' + tdir
return tdir
def _rmtree_onerror(osremove, fullpath, exec_info):
"""
Handle the case on windows that read-only files cannot be deleted by
os.remove by setting it to mode 777, then retry deletion.
"""
if os.name != 'nt' or osremove is not os.remove:
raise
os.chmod(fullpath, 0o777)
os.remove(fullpath)
def with_rw_repo(working_tree_ref, bare=False):
"""
Same as with_bare_repo, but clones the rorepo as non-bare repository, checking
out the working tree at the given working_tree_ref.
This repository type is more costly due to the working copy checkout.
To make working with relative paths easier, the cwd will be set to the working
dir of the repository.
"""
assert isinstance(working_tree_ref, string_types), "Decorator requires ref name for working tree checkout"
def argument_passer(func):
def repo_creator(self):
prefix = 'non_'
if bare:
prefix = ''
# END handle prefix
repo_dir = _mktemp("%sbare_%s" % (prefix, func.__name__))
rw_repo = self.rorepo.clone(repo_dir, shared=True, bare=bare, n=True)
rw_repo.head.commit = rw_repo.commit(working_tree_ref)
if not bare:
rw_repo.head.reference.checkout()
# END handle checkout
prev_cwd = os.getcwd()
os.chdir(rw_repo.working_dir)
try:
try:
return func(self, rw_repo)
except:
print("Keeping repo after failure: %s" % repo_dir, file=sys.stderr)
repo_dir = None
raise
finally:
os.chdir(prev_cwd)
rw_repo.git.clear_cache()
if repo_dir is not None:
shutil.rmtree(repo_dir, onerror=_rmtree_onerror)
# END rm test repo if possible
# END cleanup
# END rw repo creator
repo_creator.__name__ = func.__name__
return repo_creator
# END argument passer
return argument_passer
def with_rw_and_rw_remote_repo(working_tree_ref):
"""
Same as with_rw_repo, but also provides a writable remote repository from which the
rw_repo has been forked as well as a handle for a git-daemon that may be started to
run the remote_repo.
The remote repository was cloned as bare repository from the rorepo, wheras
the rw repo has a working tree and was cloned from the remote repository.
remote_repo has two remotes: origin and daemon_origin. One uses a local url,
the other uses a server url. The daemon setup must be done on system level
and should be an inetd service that serves tempdir.gettempdir() and all
directories in it.
The following scetch demonstrates this::
rorepo ---<bare clone>---> rw_remote_repo ---<clone>---> rw_repo
The test case needs to support the following signature::
def case(self, rw_repo, rw_remote_repo)
This setup allows you to test push and pull scenarios and hooks nicely.
See working dir info in with_rw_repo
:note: We attempt to launch our own invocation of git-daemon, which will be shutdown at the end of the test.
"""
assert isinstance(working_tree_ref, string_types), "Decorator requires ref name for working tree checkout"
def argument_passer(func):
def remote_repo_creator(self):
remote_repo_dir = _mktemp("remote_repo_%s" % func.__name__)
repo_dir = _mktemp("remote_clone_non_bare_repo")
rw_remote_repo = self.rorepo.clone(remote_repo_dir, shared=True, bare=True)
# recursive alternates info ?
rw_repo = rw_remote_repo.clone(repo_dir, shared=True, bare=False, n=True)
rw_repo.head.commit = working_tree_ref
rw_repo.head.reference.checkout()
# prepare for git-daemon
rw_remote_repo.daemon_export = True
# this thing is just annoying !
crw = rw_remote_repo.config_writer()
section = "daemon"
try:
crw.add_section(section)
except Exception:
pass
crw.set(section, "receivepack", True)
# release lock
crw.release()
del(crw)
# initialize the remote - first do it as local remote and pull, then
# we change the url to point to the daemon. The daemon should be started
# by the user, not by us
d_remote = Remote.create(rw_repo, "daemon_origin", remote_repo_dir)
d_remote.fetch()
remote_repo_url = "git://localhost%s" % remote_repo_dir
d_remote.config_writer.set('url', remote_repo_url)
temp_dir = os.path.dirname(_mktemp())
# On windows, this will fail ... we deal with failures anyway and default to telling the user to do it
try:
gd = Git().daemon(temp_dir, enable='receive-pack', as_process=True)
# yes, I know ... fortunately, this is always going to work if sleep time is just large enough
time.sleep(0.5)
except Exception:
gd = None
# end
# try to list remotes to diagnoes whether the server is up
try:
rw_repo.git.ls_remote(d_remote)
except GitCommandError as e:
# We assume in good faith that we didn't start the daemon - but make sure we kill it anyway
# Of course we expect it to work here already, but maybe there are timing constraints
# on some platforms ?
if gd is not None:
os.kill(gd.proc.pid, 15)
print(str(e))
if os.name == 'nt':
msg = "git-daemon needs to run this test, but windows does not have one. "
msg += 'Otherwise, run: git-daemon "%s"' % temp_dir
raise AssertionError(msg)
else:
msg = 'Please start a git-daemon to run this test, execute: git daemon --enable=receive-pack "%s"'
msg %= temp_dir
raise AssertionError(msg)
# END make assertion
# END catch ls remote error
# adjust working dir
prev_cwd = os.getcwd()
os.chdir(rw_repo.working_dir)
try:
return func(self, rw_repo, rw_remote_repo)
finally:
# gd.proc.kill() ... no idea why that doesn't work
if gd is not None:
os.kill(gd.proc.pid, 15)
os.chdir(prev_cwd)
rw_repo.git.clear_cache()
rw_remote_repo.git.clear_cache()
shutil.rmtree(repo_dir, onerror=_rmtree_onerror)
shutil.rmtree(remote_repo_dir, onerror=_rmtree_onerror)
if gd is not None:
gd.proc.wait()
# END cleanup
# END bare repo creator
remote_repo_creator.__name__ = func.__name__
return remote_repo_creator
# END remote repo creator
# END argument parsser
return argument_passer
#} END decorators
class TestBase(TestCase):
"""
Base Class providing default functionality to all tests such as:
- Utility functions provided by the TestCase base of the unittest method such as::
self.fail("todo")
self.failUnlessRaises(...)
- Class level repository which is considered read-only as it is shared among
all test cases in your type.
Access it using::
self.rorepo # 'ro' stands for read-only
The rorepo is in fact your current project's git repo. If you refer to specific
shas for your objects, be sure you choose some that are part of the immutable portion
of the project history ( to assure tests don't fail for others ).
"""
@classmethod
def setUpClass(cls):
"""
Dynamically add a read-only repository to our actual type. This way
each test type has its own repository
"""
cls.rorepo = Repo(GIT_REPO)
@classmethod
def tearDownClass(cls):
cls.rorepo.git.clear_cache()
cls.rorepo.git = None
def _make_file(self, rela_path, data, repo=None):
"""
Create a file at the given path relative to our repository, filled
with the given data. Returns absolute path to created file.
"""
repo = repo or self.rorepo
abs_path = os.path.join(repo.working_tree_dir, rela_path)
fp = open(abs_path, "w")
fp.write(data)
fp.close()
return abs_path
| bsd-3-clause | 7,220,957,404,321,188,000 | 34.737542 | 118 | 0.601934 | false |
aam-at/tensorflow | tensorflow/python/keras/layers/preprocessing/category_encoding_test.py | 1 | 29699 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras text category_encoding preprocessing layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.keras import backend
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.layers.preprocessing import category_encoding
from tensorflow.python.keras.layers.preprocessing import category_encoding_v1
from tensorflow.python.keras.layers.preprocessing import preprocessing_test_utils
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test
def get_layer_class():
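  """Returns the eager (v2) layer class when executing eagerly, else the v1 class."""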
if context.executing_eagerly():
return category_encoding.CategoryEncoding
else:
return category_encoding_v1.CategoryEncoding
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class CategoryEncodingInputTest(keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest
):
def test_dense_input_sparse_output(self):
input_array = constant_op.constant([[1, 2, 3], [3, 3, 0]])
    # The expected output should be (X for missing value):
    # [[X, 1, 1, 1]
    #  [1, X, X, 2]]
expected_indices = [[0, 1], [0, 2], [0, 3], [1, 0], [1, 3]]
expected_values = [1, 1, 1, 1, 2]
max_tokens = 6
input_data = keras.Input(shape=(None,), dtype=dtypes.int32)
layer = get_layer_class()(
max_tokens=max_tokens, output_mode=category_encoding.COUNT, sparse=True)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
sp_output_dataset = model.predict(input_array, steps=1)
self.assertAllEqual(expected_values, sp_output_dataset.values)
self.assertAllEqual(expected_indices, sp_output_dataset.indices)
# Assert sparse output is same as dense output.
layer = get_layer_class()(
max_tokens=max_tokens,
output_mode=category_encoding.COUNT,
sparse=False)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array, steps=1)
self.assertAllEqual(
sparse_ops.sparse_tensor_to_dense(sp_output_dataset, default_value=0),
output_dataset)
def test_sparse_input(self):
input_array = np.array([[1, 2, 3, 0], [0, 3, 1, 0]], dtype=np.int64)
sparse_tensor_data = sparse_ops.from_dense(input_array)
# pyformat: disable
expected_output = [[0, 1, 1, 1, 0, 0],
[0, 1, 0, 1, 0, 0]]
# pyformat: enable
max_tokens = 6
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.int64, sparse=True)
layer = get_layer_class()(
max_tokens=max_tokens, output_mode=category_encoding.BINARY)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(sparse_tensor_data, steps=1)
self.assertAllEqual(expected_output, output_dataset)
def test_sparse_input_with_weights(self):
input_array = np.array([[1, 2, 3, 4], [4, 3, 1, 4]], dtype=np.int64)
weights_array = np.array([[.1, .2, .3, .4], [.2, .1, .4, .3]])
sparse_tensor_data = sparse_ops.from_dense(input_array)
sparse_weight_data = sparse_ops.from_dense(weights_array)
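    # Each output column sums the weights of that token within the row, e.g.
    # token 4 appears twice in row 1 with weights .2 and .3, giving .5.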
# pyformat: disable
expected_output = [[0, .1, .2, .3, .4, 0],
[0, .4, 0, .1, .5, 0]]
# pyformat: enable
max_tokens = 6
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.int64, sparse=True)
weight_data = keras.Input(shape=(None,), dtype=dtypes.float32, sparse=True)
layer = get_layer_class()(
max_tokens=max_tokens, output_mode=category_encoding.COUNT)
int_data = layer(input_data, count_weights=weight_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=[input_data, weight_data], outputs=int_data)
output_dataset = model.predict([sparse_tensor_data, sparse_weight_data],
steps=1)
self.assertAllClose(expected_output, output_dataset)
def test_sparse_input_sparse_output(self):
sp_inp = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 1], [2, 0], [2, 1], [3, 1]],
values=[0, 2, 1, 1, 0],
dense_shape=[4, 2])
input_data = keras.Input(shape=(None,), dtype=dtypes.int64, sparse=True)
# The expected output should be (X for missing value):
# [[1, X, X, X]
# [X, X, 1, X]
# [X, 2, X, X]
# [1, X, X, X]]
expected_indices = [[0, 0], [1, 2], [2, 1], [3, 0]]
expected_values = [1, 1, 2, 1]
max_tokens = 6
layer = get_layer_class()(
max_tokens=max_tokens, output_mode=category_encoding.COUNT, sparse=True)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
sp_output_dataset = model.predict(sp_inp, steps=1)
self.assertAllEqual(expected_values, sp_output_dataset.values)
self.assertAllEqual(expected_indices, sp_output_dataset.indices)
# Assert sparse output is same as dense output.
layer = get_layer_class()(
max_tokens=max_tokens,
output_mode=category_encoding.COUNT,
sparse=False)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(sp_inp, steps=1)
self.assertAllEqual(
sparse_ops.sparse_tensor_to_dense(sp_output_dataset, default_value=0),
output_dataset)
def test_sparse_input_sparse_output_with_weights(self):
indices = [[0, 0], [1, 1], [2, 0], [2, 1], [3, 1]]
sp_inp = sparse_tensor.SparseTensor(
indices=indices, values=[0, 2, 1, 1, 0], dense_shape=[4, 2])
input_data = keras.Input(shape=(None,), dtype=dtypes.int64, sparse=True)
sp_weight = sparse_tensor.SparseTensor(
indices=indices, values=[.1, .2, .4, .3, .2], dense_shape=[4, 2])
weight_data = keras.Input(shape=(None,), dtype=dtypes.float32, sparse=True)
    # The expected output should be (X for missing value):
    # [[.1, X, X, X]
    #  [X, X, .2, X]
    #  [X, .7, X, X]
    #  [.2, X, X, X]]
expected_indices = [[0, 0], [1, 2], [2, 1], [3, 0]]
expected_values = [.1, .2, .7, .2]
max_tokens = 6
layer = get_layer_class()(
max_tokens=max_tokens, output_mode=category_encoding.COUNT, sparse=True)
int_data = layer(input_data, count_weights=weight_data)
model = keras.Model(inputs=[input_data, weight_data], outputs=int_data)
sp_output_dataset = model.predict([sp_inp, sp_weight], steps=1)
self.assertAllClose(expected_values, sp_output_dataset.values)
self.assertAllEqual(expected_indices, sp_output_dataset.indices)
def test_ragged_input(self):
input_array = ragged_factory_ops.constant([[1, 2, 3], [3, 1]])
# pyformat: disable
expected_output = [[0, 1, 1, 1, 0, 0],
[0, 1, 0, 1, 0, 0]]
# pyformat: enable
max_tokens = 6
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.int32, ragged=True)
layer = get_layer_class()(
max_tokens=max_tokens, output_mode=category_encoding.BINARY)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array, steps=1)
self.assertAllEqual(expected_output, output_dataset)
def test_ragged_input_sparse_output(self):
input_array = ragged_factory_ops.constant([[1, 2, 3], [3, 3]])
# The expected output should be (X for missing value):
# [[X, 1, 1, 1]
# [X, X, X, 2]]
expected_indices = [[0, 1], [0, 2], [0, 3], [1, 3]]
expected_values = [1, 1, 1, 2]
max_tokens = 6
input_data = keras.Input(shape=(None,), dtype=dtypes.int32, ragged=True)
layer = get_layer_class()(
max_tokens=max_tokens, output_mode=category_encoding.COUNT, sparse=True)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
sp_output_dataset = model.predict(input_array, steps=1)
self.assertAllEqual(expected_values, sp_output_dataset.values)
self.assertAllEqual(expected_indices, sp_output_dataset.indices)
# Assert sparse output is same as dense output.
layer = get_layer_class()(
max_tokens=max_tokens,
output_mode=category_encoding.COUNT,
sparse=False)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array, steps=1)
self.assertAllEqual(
sparse_ops.sparse_tensor_to_dense(sp_output_dataset, default_value=0),
output_dataset)
def test_sparse_output_and_dense_layer(self):
input_array = constant_op.constant([[1, 2, 3], [3, 3, 0]])
max_tokens = 4
input_data = keras.Input(shape=(None,), dtype=dtypes.int32)
encoding_layer = get_layer_class()(
max_tokens=max_tokens, output_mode=category_encoding.COUNT,
sparse=True)
int_data = encoding_layer(input_data)
dense_layer = keras.layers.Dense(units=1)
output_data = dense_layer(int_data)
model = keras.Model(inputs=input_data, outputs=output_data)
_ = model.predict(input_array, steps=1)
def test_dense_oov_input(self):
input_array = constant_op.constant([[1, 2, 3], [4, 3, 4]])
max_tokens = 3
expected_output_shape = [None, max_tokens]
encoder_layer = get_layer_class()(max_tokens)
input_data = keras.Input(shape=(3,), dtype=dtypes.int32)
int_data = encoder_layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
with self.assertRaisesRegex(
errors.InvalidArgumentError,
".*must be in the range 0 <= values < max_tokens.*"):
_ = model.predict(input_array, steps=1)
def test_dense_negative(self):
input_array = constant_op.constant([[1, 2, 0], [2, 2, -1]])
max_tokens = 3
expected_output_shape = [None, max_tokens]
encoder_layer = get_layer_class()(max_tokens)
input_data = keras.Input(shape=(3,), dtype=dtypes.int32)
int_data = encoder_layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
with self.assertRaisesRegex(
errors.InvalidArgumentError,
".*must be in the range 0 <= values < max_tokens.*"):
_ = model.predict(input_array, steps=1)
@keras_parameterized.run_all_keras_modes
class CategoryEncodingAdaptTest(keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest
):
def test_sparse_adapt(self):
vocab_data = sparse_ops.from_dense(
np.array([[1, 1, 0, 1, 1, 2, 2, 0, 2, 3, 3, 0, 4]], dtype=np.int64))
vocab_dataset = dataset_ops.Dataset.from_tensors(vocab_data)
input_array = sparse_ops.from_dense(
np.array([[1, 2, 3, 0], [0, 3, 1, 0]], dtype=np.int64))
# pyformat: disable
expected_output = [[0, 1, 1, 1, 0],
[0, 1, 0, 1, 0]]
# pyformat: enable
max_tokens = 5
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.int64, sparse=True)
layer = get_layer_class()(
max_tokens=None, output_mode=category_encoding.BINARY)
layer.adapt(vocab_dataset)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array, steps=1)
self.assertAllEqual(expected_output, output_dataset)
def test_ragged_adapt(self):
vocab_data = ragged_factory_ops.constant(
np.array([[1, 1, 0, 1, 1], [2, 2], [0, 2, 3], [0, 4]]))
vocab_dataset = dataset_ops.Dataset.from_tensors(vocab_data)
input_array = ragged_factory_ops.constant([[1, 2, 3], [3, 1]])
# pyformat: disable
expected_output = [[0, 1, 1, 1, 0],
[0, 1, 0, 1, 0]]
# pyformat: enable
max_tokens = 5
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.int32, ragged=True)
layer = get_layer_class()(
max_tokens=None, output_mode=category_encoding.BINARY)
layer.adapt(vocab_dataset)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array, steps=1)
self.assertAllEqual(expected_output, output_dataset)
def test_hard_maximum_set_state_variables_after_build(self):
state_variables = {category_encoding._NUM_ELEMENTS_NAME: 5}
input_array = np.array([[1, 2, 3, 1], [0, 3, 1, 0]])
# pyformat: disable
expected_output = [[0, 1, 1, 1, 0],
[1, 1, 0, 1, 0]]
# pyformat: enable
max_tokens = 5
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.int32)
layer = get_layer_class()(
max_tokens=max_tokens, output_mode=category_encoding.BINARY)
int_data = layer(input_data)
layer._set_state_variables(state_variables)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_soft_maximum_set_state_after_build(self):
input_array = np.array([[1, 2, 3, 1], [0, 3, 1, 0]])
# pyformat: disable
expected_output = [[0, 1, 1, 1, 0],
[1, 1, 0, 1, 0]]
# pyformat: enable
max_tokens = 5
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.int32)
layer = get_layer_class()(
max_tokens=None, output_mode=category_encoding.BINARY)
layer.build(input_data.shape)
layer.set_num_elements(max_tokens)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_set_weights_fails_on_wrong_size_weights(self):
tfidf_data = [.05, .5, .25, .2, .125]
layer = get_layer_class()(max_tokens=6, output_mode=category_encoding.TFIDF)
with self.assertRaisesRegex(ValueError, ".*Layer weight shape.*"):
layer.set_weights([np.array(tfidf_data)])
def test_set_num_elements_after_call_fails(self):
input_data = keras.Input(shape=(None,), dtype=dtypes.int32)
layer = get_layer_class()(
max_tokens=None, output_mode=category_encoding.BINARY)
layer.adapt([1, 2])
_ = layer(input_data)
with self.assertRaisesRegex(
RuntimeError, ".*'max_tokens' arg must be set to None."):
layer.set_num_elements(5)
def test_set_state_variables_after_call_fails(self):
state_variables = {category_encoding._NUM_ELEMENTS_NAME: 5}
input_data = keras.Input(shape=(None,), dtype=dtypes.int32)
layer = get_layer_class()(
max_tokens=None, output_mode=category_encoding.BINARY)
layer.adapt([1, 2])
_ = layer(input_data)
with self.assertRaisesRegex(RuntimeError, "Cannot update states.*"):
layer._set_state_variables(state_variables)
@keras_parameterized.run_all_keras_modes
class CategoryEncodingOutputTest(keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest
):
def test_binary_output_hard_maximum(self):
input_array = np.array([[1, 2, 3, 1], [0, 3, 1, 0]])
# pyformat: disable
expected_output = [[0, 1, 1, 1, 0, 0],
[1, 1, 0, 1, 0, 0]]
# pyformat: enable
max_tokens = 6
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.int32)
layer = get_layer_class()(
max_tokens=max_tokens, output_mode=category_encoding.BINARY)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_binary_output_soft_maximum(self):
input_array = np.array([[1, 2, 3, 1], [0, 3, 1, 0]])
# pyformat: disable
expected_output = [[0, 1, 1, 1, 0],
[1, 1, 0, 1, 0]]
# pyformat: enable
max_tokens = 5
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.int32)
layer = get_layer_class()(
max_tokens=None, output_mode=category_encoding.BINARY)
layer.set_num_elements(max_tokens)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_count_output_hard_maximum(self):
input_array = np.array([[1, 2, 3, 1], [0, 3, 1, 0]])
# pyformat: disable
expected_output = [[0, 2, 1, 1, 0, 0],
[2, 1, 0, 1, 0, 0]]
# pyformat: enable
max_tokens = 6
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.int32)
layer = get_layer_class()(max_tokens=6, output_mode=category_encoding.COUNT)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_count_output_soft_maximum(self):
input_array = np.array([[1, 2, 3, 1], [0, 3, 1, 0]])
# pyformat: disable
expected_output = [[0, 2, 1, 1, 0],
[2, 1, 0, 1, 0]]
# pyformat: enable
max_tokens = 5
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.int32)
layer = get_layer_class()(
max_tokens=None, output_mode=category_encoding.COUNT)
layer.set_num_elements(max_tokens)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_tfidf_output_hard_maximum(self):
tfidf_data = [.05, .5, .25, .2, .125]
input_array = np.array([[1, 2, 3, 1], [0, 4, 1, 0]])
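    # Each output column is the per-row count of that token multiplied by its
    # tf-idf weight, e.g. token 1 appears twice in row 0, so 2 * .5 = 1.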
# pyformat: disable
# pylint: disable=bad-whitespace
expected_output = [[ 0, 1, .25, .2, 0, 0],
[.1, .5, 0, 0, .125, 0]]
# pylint: enable=bad-whitespace
# pyformat: enable
max_tokens = 6
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.int32)
layer = get_layer_class()(max_tokens=6, output_mode=category_encoding.TFIDF)
layer.set_tfidf_data(tfidf_data)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllClose(expected_output, output_dataset)
def test_tfidf_output_soft_maximum(self):
tfidf_data = [.05, .5, .25, .2, .125]
input_array = np.array([[1, 2, 3, 1], [0, 4, 1, 0]])
# pyformat: disable
# pylint: disable=bad-whitespace
expected_output = [[ 0, 1, .25, .2, 0],
[.1, .5, 0, 0, .125]]
# pylint: enable=bad-whitespace
# pyformat: enable
max_tokens = 5
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.int32)
layer = get_layer_class()(
max_tokens=None, output_mode=category_encoding.TFIDF)
layer.set_num_elements(max_tokens)
layer.set_tfidf_data(tfidf_data)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllClose(expected_output, output_dataset)
@keras_parameterized.run_all_keras_modes
class CategoryEncodingModelBuildingTest(
keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest):
@parameterized.named_parameters(
{
"testcase_name": "count_hard_max",
"max_tokens": 5,
"output_mode": category_encoding.COUNT
}, {
"testcase_name": "count_soft_max",
"max_tokens": None,
"output_mode": category_encoding.COUNT
}, {
"testcase_name": "binary_hard_max",
"max_tokens": 5,
"output_mode": category_encoding.BINARY
}, {
"testcase_name": "binary_soft_max",
"max_tokens": None,
"output_mode": category_encoding.BINARY
}, {
"testcase_name": "tfidf_hard_max",
"max_tokens": 5,
"output_mode": category_encoding.TFIDF
}, {
"testcase_name": "tfidf_soft_max",
"max_tokens": None,
"output_mode": category_encoding.TFIDF
})
def test_end_to_end_bagged_modeling(self, output_mode, max_tokens):
tfidf_data = np.array([.03, .5, .25, .2, .125])
input_array = np.array([[1, 2, 3, 1], [0, 3, 1, 0]])
input_data = keras.Input(shape=(None,), dtype=dtypes.int32)
layer = get_layer_class()(max_tokens=max_tokens, output_mode=output_mode)
weights = []
if max_tokens is None:
layer.set_num_elements(5)
if output_mode == category_encoding.TFIDF:
weights.append(tfidf_data)
layer.set_weights(weights)
int_data = layer(input_data)
float_data = backend.cast(int_data, dtype="float32")
output_data = core.Dense(64)(float_data)
model = keras.Model(inputs=input_data, outputs=output_data)
_ = model.predict(input_array)
@keras_parameterized.run_all_keras_modes
class CategoryEncodingCombinerTest(
keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest):
def compare_idf_accumulators(self, a, b, msg=None):
    if a is None or b is None:
      self.assertAllEqual(a, b, msg=msg)
      return
    self.assertAllEqual(a.data, b.data, msg=msg)
if a.per_doc_count_dict is not None:
def per_doc_counts(accumulator):
count_values = [
count_dict["count"]
for count_dict in accumulator.per_doc_count_dict.values()
]
return dict(zip(accumulator.per_doc_count_dict.keys(), count_values))
self.assertAllEqual(per_doc_counts(a), per_doc_counts(b), msg=msg)
compare_accumulators = compare_idf_accumulators
def update_accumulator(self, accumulator, data):
accumulator.data[1] = data["num_documents"]
accumulator.data[0] = data["max_element"]
if "document_counts" in data:
create_dict = lambda x: {"count": x, "last_doc_id": -1}
idf_dict = {}
for i, count in enumerate(data["document_counts"]):
if count > 0:
idf_dict[i] = create_dict(count)
accumulator.per_doc_count_dict.update(idf_dict)
return accumulator
def test_combiner_api_compatibility_int_mode(self):
data = np.array([[1, 2, 3, 4], [1, 2, 3, 0]])
combiner = category_encoding._CategoryEncodingCombiner(compute_idf=False)
expected_accumulator_output = {
"max_element": np.array(4),
"num_documents": np.array(2),
}
expected_extract_output = {
"num_elements": np.array(5),
}
expected_accumulator = combiner._create_accumulator()
expected_accumulator = self.update_accumulator(expected_accumulator,
expected_accumulator_output)
self.validate_accumulator_serialize_and_deserialize(combiner, data,
expected_accumulator)
self.validate_accumulator_uniqueness(combiner, data)
self.validate_accumulator_extract(combiner, data, expected_extract_output)
def test_combiner_api_compatibility_tfidf_mode(self):
data = np.array([[1, 2, 3, 4], [1, 2, 3, 0]])
combiner = category_encoding._CategoryEncodingCombiner(compute_idf=True)
expected_accumulator_output = {
"max_element": np.array(4),
"document_counts": np.array([1, 2, 2, 2, 1]),
"num_documents": np.array(2),
}
expected_extract_output = {
"num_elements": np.array(5),
"idf": np.array([0.693147, 0.510826, 0.510826, 0.510826, 0.693147]),
}
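    # The idf values above match log(1 + num_documents / (1 + document_count)):
    # e.g. a token seen in 1 of 2 documents gives log(2) ~= 0.693147, and one seen
    # in 2 of 2 documents gives log(5/3) ~= 0.510826. (Derived from the expected
    # data in these tests, not from any documented API guarantee.)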
expected_accumulator = combiner._create_accumulator()
expected_accumulator = self.update_accumulator(expected_accumulator,
expected_accumulator_output)
self.validate_accumulator_serialize_and_deserialize(combiner, data,
expected_accumulator)
self.validate_accumulator_uniqueness(combiner, data)
self.validate_accumulator_extract(combiner, data, expected_extract_output)
# TODO(askerryryan): Add tests confirming equivalence to behavior of
# existing tf.keras.preprocessing.text.Tokenizer.
@parameterized.named_parameters(
{
"testcase_name": "no_top_k",
"data": np.array([[1, 2], [4, 2], [3], [4, 2]]),
"expected_accumulator_output": {
"max_element": np.array(4),
"document_counts": np.array([0, 1, 3, 1, 2]),
"num_documents": np.array(4),
},
"expected_extract_output": {
"num_elements":
np.array(5),
"idf":
np.array([1.609438, 1.098612, 0.693147, 1.098612, 0.847298]),
},
}, {
"testcase_name": "single_element_per_row",
"data": np.array([[1], [2], [4], [2], [3]]),
"expected_accumulator_output": {
"max_element": np.array(4),
"document_counts": np.array([0, 1, 2, 1, 1]),
"num_documents": np.array(5),
},
"expected_extract_output": {
"num_elements":
np.array(5),
"idf":
np.array([1.791759, 1.252763, 0.980829, 1.252763, 1.252763]),
},
})
def test_combiner_computation(self,
data,
expected_accumulator_output,
expected_extract_output,
compute_idf=True):
combiner = category_encoding._CategoryEncodingCombiner(
compute_idf=compute_idf)
expected_accumulator = combiner._create_accumulator()
expected_accumulator = self.update_accumulator(expected_accumulator,
expected_accumulator_output)
self.validate_accumulator_computation(combiner, data, expected_accumulator)
self.validate_accumulator_extract(combiner, data, expected_extract_output)
def test_1d_data(self):
data = [1, 2, 3]
cls = get_layer_class()
layer = cls()
layer.adapt(data)
output = layer(data)
self.assertListEqual(output.shape.as_list(), [3, 4])
def test_no_adapt_exception(self):
cls = get_layer_class()
layer = cls()
with self.assertRaisesRegex(
RuntimeError, r".*you need to call.*"):
_ = layer([1, 2, 3])
def test_saving_loading(self):
encoder = category_encoding.CategoryEncoding()
encoder.adapt([1, 2, 3])
model = keras.Sequential([encoder])
model.save("/tmp/model", save_format="tf")
loaded_model = keras.models.load_model("/tmp/model")
self.assertAllClose(model.predict([[1]]), loaded_model.predict([[1]]))
def test_serialize(self):
encoder = category_encoding.CategoryEncoding()
encoder.adapt([1, 2, 3])
model = keras.Sequential([encoder])
_ = keras.models.clone_model(model)
if __name__ == "__main__":
test.main()
| apache-2.0 | 1,046,602,601,785,108,400 | 37.520104 | 81 | 0.634095 | false |
kunz07/fyp2017 | Ubidots-Finaltest.py | 1 | 4091 | # FYP2017
# Program to establish ZigBee communication between the Raspberry Pi and the Arduino
# Complete control of HVAC elements based on commands sent from the Pi
# Author: Kunal Jagadeesh
# License: Public Domain
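# Serial protocol (as assumed from getSensorData below): the Pi writes a single
# 's' character and the Arduino replies with one line such as "45.2027.50",
# where the first five characters are the relative humidity (45.20 %) and the
# remaining characters are the temperature (27.50 C).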
import time
import serial
from ubidots import ApiClient
one = 1
zero = 0
f = open('Ubidots_APIkey.txt', 'r')
apikey = f.readline().strip()
f.close()
api = ApiClient(token = apikey)
try:
roomtemp = api.get_variable("58d763b8762542260a851bd1")
roomhumidity = api.get_variable("58d763c57625422609b8d088")
cooler = api.get_variable("58d768e0762542260a855c7a")
heater = api.get_variable("58d768eb7625422609b91152")
humidifier = api.get_variable("58d768f8762542260cf3b292")
exhaust = api.get_variable("58d76907762542260dfad769")
except ValueError:
print('Unable to obtain variable')
cooler.save_value({'value': 0})
heater.save_value({'value': 0})
humidifier.save_value({'value': 0})
exhaust.save_value({'value': 0})
hour = 3600
PORT = '/dev/ttyUSB0'
BAUD_RATE = 9600
# Open serial port
ser = serial.Serial(PORT, BAUD_RATE)
def getSensorData():
if ser.isOpen():
ser.close()
ser.open()
ser.isOpen()
ser.write('s'.encode())
time.sleep(2)
response = ser.readline().strip().decode()
hum = float(response[:5])
temp = float(response[5:])
try:
roomtemp.save_value({'value': temp})
roomhumidity.save_value({'value': hum})
print('Value',temp,'and',hum, 'sent')
time.sleep(2)
except:
print('Value not sent')
return (hum, temp)
def level_1():
h, t = getSensorData()
if (t > 35):
cooler.save_value({'value': one})
time.sleep(2)
if (t < 15):
heater.save_value({'value': one})
time.sleep(2)
if (h < 25):
humidifier.save_value({'value': one})
time.sleep(2)
if (h > 80):
exhaust.save_value({'value': one})
time.sleep(2)
time.sleep(10)
cooler.save_value({'value': 0})
heater.save_value({'value': 0})
humidifier.save_value({'value': 0})
exhaust.save_value({'value': 0})
def level_2():
h, t = getSensorData()
if (t > 32):
cooler.save_value({'value': one})
time.sleep(2)
if (t < 18):
heater.save_value({'value': one})
time.sleep(2)
if (h < 30):
humidifier.save_value({'value': one})
time.sleep(2)
if (h > 70):
exhaust.save_value({'value': one})
time.sleep(2)
time.sleep(10)
cooler.save_value({'value': 0})
heater.save_value({'value': 0})
humidifier.save_value({'value': 0})
exhaust.save_value({'value': 0})
def level_3():
h, t = getSensorData()
if (t > 30):
cooler.save_value({'value': one})
time.sleep(2)
if (t < 20):
heater.save_value({'value': one})
time.sleep(2)
if (h < 40):
humidifier.save_value({'value': one})
time.sleep(2)
if (h > 60):
exhaust.save_value({'value': one})
time.sleep(2)
time.sleep(10)
cooler.save_value({'value': 0})
heater.save_value({'value': 0})
humidifier.save_value({'value': 0})
exhaust.save_value({'value': 0})
def level_4():
h, t = getSensorData()
if (t > 27):
cooler.save_value({'value': one})
time.sleep(2)
if (t < 22):
heater.save_value({'value': one})
time.sleep(2)
if (h < 25):
humidifier.save_value({'value': one})
time.sleep(2)
if (h > 30):
exhaust.save_value({'value': one})
time.sleep(2)
time.sleep(10)
cooler.save_value({'value': 0})
heater.save_value({'value': 0})
humidifier.save_value({'value': 0})
exhaust.save_value({'value': 0})
def getLevel():
    # Stub: the control level is currently hard-coded.
    return 4
if __name__ == "__main__":
level = getLevel()
while True:
if (level == 1):
level_1()
elif (level == 2):
level_2()
elif (level == 3):
level_3()
elif (level == 4):
level_4()
else:
ser.write('x'.encode())
break
| mit | -7,591,887,865,722,326,000 | 24.72956 | 76 | 0.565143 | false |
poeschlr/kicad-3d-models-in-freecad | cadquery/FCAD_script_generator/Buzzer_Beeper/cq_parameters_StarMicronics_HMB_06_HMB_12.py | 1 | 10133 | # -*- coding: utf-8 -*-
#!/usr/bin/python
#
# This is derived from a cadquery script for generating PDIP models in X3D format
#
# from https://bitbucket.org/hyOzd/freecad-macros
# author hyOzd
# Dimensions are from Microchip's Packaging Specification document
# DS00000049BY. Body drawing is the same as the QFP generator.
## requirements
## cadquery FreeCAD plugin
## https://github.com/jmwright/cadquery-freecad-module
## to run the script just do: freecad main_generator.py modelName
## e.g. c:\freecad\bin\freecad main_generator.py DIP8
## the script will generate STEP and VRML parametric models
## to be used with kicad StepUp script
#* These are a FreeCAD & cadquery tools *
#* to export generated models in STEP & VRML format. *
#* *
#* cadquery script for generating QFP/SOIC/SSOP/TSSOP models in STEP AP214 *
#* Copyright (c) 2015 *
#* Maurice https://launchpad.net/~easyw *
#* All trademarks within this guide belong to their legitimate owners. *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., *
#* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA *
#* *
#****************************************************************************
import cq_parameters # modules parameters
from cq_parameters import *
class cq_parameters_StarMicronics_HMB_06_HMB_12():
def __init__(self):
x = 0
def get_dest_3D_dir(self):
return 'Buzzer_Beeper.3dshapes'
def model_exist(self, modelName):
for n in self.all_params:
if n == modelName:
return True
return False
def get_list_all(self):
list = []
for n in self.all_params:
list.append(n)
return list
def make_3D_model(self, modelName):
destination_dir = self.get_dest_3D_dir()
case = self.make_case(self.all_params[modelName])
pins = self.make_pins(self.all_params[modelName])
show(case)
show(pins)
doc = FreeCAD.ActiveDocument
objs=GetListOfObjects(FreeCAD, doc)
body_color_key = self.all_params[modelName].body_color_key
pin_color_key = self.all_params[modelName].pin_color_key
body_color = shaderColors.named_colors[body_color_key].getDiffuseFloat()
pin_color = shaderColors.named_colors[pin_color_key].getDiffuseFloat()
Color_Objects(Gui,objs[0],body_color)
Color_Objects(Gui,objs[1],pin_color)
col_body=Gui.ActiveDocument.getObject(objs[0].Name).DiffuseColor[0]
col_pin=Gui.ActiveDocument.getObject(objs[1].Name).DiffuseColor[0]
material_substitutions={
col_body[:-1]:body_color_key,
col_pin[:-1]:pin_color_key,
}
expVRML.say(material_substitutions)
while len(objs) > 1:
FuseObjs_wColors(FreeCAD, FreeCADGui, doc.Name, objs[0].Name, objs[1].Name)
del objs
objs = GetListOfObjects(FreeCAD, doc)
return material_substitutions
def make_case(self, params):
D = params.D # package length
E = params.E # body overall width
H = params.H # body overall height
A1 = params.A1 # package height
pin = params.pin # Pins
npthhole = params.npthhole # NPTH hole diameter
rotation = params.rotation # Rotation if required
center = params.center # Body center
#
#
#
x = center[0]
y = center[1]
case = cq.Workplane("XY").workplane(offset=A1).moveTo(x, y).circle(D / 2.0, False).extrude(H)
case = case.faces("<Z").edges(">Y").fillet(D / 40.0)
case1 = cq.Workplane("XY").workplane(offset=A1 + H - 2.5).moveTo(x, y).circle(D / 2.0 + 2.0, False).extrude(4.0)
case2 = cq.Workplane("XY").workplane(offset=A1 + H - 2.5).moveTo(x, y).circle(7.0, False).extrude(4.0)
case1 = case1.cut(case2)
case = case.cut(case1)
#
# hole in top
#
case1 = cq.Workplane("XY").workplane(offset=A1 + (H - 2.0)).moveTo(x, y).circle(npthhole / 2.0, False).extrude(4.0)
case = case.cut(case1)
#
# Cut bottom
#
case1 = cq.Workplane("XY").workplane(offset=A1).moveTo(0, 0).rect(4.0, 30.0).extrude(0.5)
case1 = case1.rotate((0,0,0), (0,0,1), 60)
case1 = case1.translate((x, y))
case = case.cut(case1)
#
case1 = cq.Workplane("XY").workplane(offset=A1).moveTo(0, 0).rect(4.0, 30.0).extrude(0.5)
case1 = case1.rotate((0,0,0), (0,0,1), 120)
case1 = case1.translate((x, y))
case = case.cut(case1)
#
case1 = cq.Workplane("XY").workplane(offset=A1).moveTo(0, 0).rect(4.0, 30.0).extrude(0.5)
case1 = case1.rotate((0,0,0), (0,0,1), 180)
case1 = case1.translate((x, y))
case = case.cut(case1)
#
case = case.faces(">Z").edges(">Y").fillet(D / 40.0)
if (rotation != 0):
case = case.rotate((0,0,0), (0,0,1), rotation)
return (case)
def make_pins(self, params):
D = params.D # package length
H = params.H # body overall height
A1 = params.A1 # Body seperation height
b = params.b # pin diameter or pad size
ph = params.ph # pin length
rotation = params.rotation # rotation if required
pin = params.pin # pin/pad cordinates
center = params.center # Body center
p = pin[0]
pins = cq.Workplane("XY").workplane(offset=A1 + 1.0).moveTo(p[0], -p[1]).circle(b / 2.0, False).extrude(0 - (ph + A1 + 1.0))
pins = pins.faces("<Z").fillet(b / 5.0)
for i in range(1, len(pin)):
p = pin[i]
pint = cq.Workplane("XY").workplane(offset=A1 + 1.0).moveTo(p[0], -p[1]).circle(b / 2.0, False).extrude(0 - (ph + A1 + 1.0))
pint = pint.faces("<Z").fillet(b / 5.0)
pins = pins.union(pint)
if (rotation != 0):
pins = pins.rotate((0,0,0), (0,0,1), rotation)
return (pins)
##enabling optional/default values to None
def namedtuple_with_defaults(typename, field_names, default_values=()):
T = collections.namedtuple(typename, field_names)
T.__new__.__defaults__ = (None,) * len(T._fields)
if isinstance(default_values, collections.Mapping):
prototype = T(**default_values)
else:
prototype = T(*default_values)
T.__new__.__defaults__ = tuple(prototype)
return T
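# Usage sketch (hypothetical fields): P = namedtuple_with_defaults('P', ['w', 'h'], {'h': 0.5})
# then P(w=1.0) yields P(w=1.0, h=0.5); fields without an explicit default fall back to None.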
Params = namedtuple_with_defaults("Params", [
'modelName', # modelName
'D', # Body width/diameter
'E', # Body length
'H', # Body height
'A1', # Body PCB seperation
'b', # pin width
'center', # Body center
'npthhole', # NPTH hole diameter
'ph', # Pin length
'pin', # Pins
'body_color_key', # Body colour
'pin_color_key', # Pin color
'rotation', # Rotation if required
'dest_dir_prefix' # Destination directory
])
all_params = {
'MagneticBuzzer_StarMicronics_HMB-06_HMB-12': Params(
#
# Valve
        # This model has been auto generated based on the foot print file
# A number of parameters have been fixed or guessed, such as A2
#
# The foot print that uses this 3D model is MagneticBuzzer_StarMicronics_HMB-06_HMB-12.kicad_mod
#
modelName = 'MagneticBuzzer_StarMicronics_HMB-06_HMB-12', # modelName
D = 16.00, # Body width/diameter
H = 14.00, # Body height
A1 = 0.03, # Body-board separation
b = 0.70, # Pin diameter
center = (3.81, 0.00), # Body center
npthhole = 3.2, # NPTH hole diameter
ph = 6.0, # Pin length
pin = [(0.0, 0.0), (7.61, 0.00)], # Pins
body_color_key = 'black body', # Body color
pin_color_key = 'metal grey pins', # Pin color
rotation = 0, # Rotation if required
dest_dir_prefix = '../Buzzer_Beeper.3dshapes', # destination directory
),
}
| gpl-2.0 | -1,566,509,898,174,940,400 | 38.585938 | 136 | 0.509425 | false |
lafranceinsoumise/api-django | agir/events/migrations/0012_event_coordinates_type.py | 1 | 1120 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-09-28 14:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("events", "0011_notifications_enabled_fields")]
operations = [
migrations.AddField(
model_name="event",
name="coordinates_type",
field=models.PositiveSmallIntegerField(
choices=[
(0, "Coordonnées manuelles"),
(10, "Coordonnées automatiques précises"),
(20, "Coordonnées automatiques approximatives (niveau rue)"),
(30, "Coordonnées automatiques approximatives (ville)"),
(50, "Coordonnées automatiques (qualité inconnue)"),
(255, "Coordonnées introuvables"),
],
editable=False,
help_text="Comment les coordonnées ci-dessus ont-elle été acquéries",
null=True,
verbose_name="type de coordonnées",
),
)
]
| agpl-3.0 | 6,383,591,920,982,683,000 | 34.709677 | 85 | 0.551942 | false |
zimeon/iiif | tests/test_error.py | 1 | 2530 | """Test code for iiif.error."""
import unittest
import re
from iiif.error import IIIFError
class TestAll(unittest.TestCase):
"""Tests."""
def test01_str(self):
"""Test str method."""
e = IIIFError()
self.assertEqual(str(e), 'UNKNOWN_ERROR')
e = IIIFError(text='aa', parameter='bb', code=404)
self.assertEqual(str(e), 'aa, parameter=bb')
def test02_xml(self):
"""Test xml output used in Image API 1.0."""
# Just do the trivial XML test
e = IIIFError()
# Encoding value should be capital UTF-8 per
# http://www.w3.org/TR/2006/REC-xml11-20060816/#NT-EncName
# but in python3 it comes out at utf-8
xml = re.sub(r'utf-8', 'UTF-8', e.as_xml())
self.assertEqual(xml,
'<?xml version=\'1.0\' encoding=\'UTF-8\'?>\n'
'<error xmlns="http://library.stanford.edu/iiif/image-api/ns/">\n'
'<parameter>unknown</parameter>\n</error>')
e.code = '501'
e.parameter = 'size'
e.text = 'Negative size not implemented'
xml = re.sub(r'utf-8', 'UTF-8', e.as_xml())
self.assertEqual(xml,
'<?xml version=\'1.0\' encoding=\'UTF-8\'?>\n'
'<error xmlns="http://library.stanford.edu/iiif/image-api/ns/">\n'
'<parameter>size</parameter>\n'
'<text>Negative size not implemented</text>\n</error>')
def test03_txt(self):
"""Test txt output."""
e = IIIFError()
msg = 'IIIF Image Server Error\n\nUNKNOWN_ERROR\n\nparameter=unknown\ncode=500\n\n'
self.assertEqual(e.as_txt(), msg)
e = IIIFError(headers={'cc': 'dd', 'a': 'b'})
self.assertEqual(e.as_txt(), msg + 'header a=b\nheader cc=dd\n')
def test04_image_server_response(self):
"""Test image_server_response."""
e = IIIFError(headers={'x': 'y'})
(response, status, headers) = e.image_server_response(
api_version='1.0')
self.assertTrue(re.match(r'''<\?xml version''', response))
self.assertEqual(status, 500)
self.assertEqual(headers, {'x': 'y', 'Content-Type': 'text/xml'})
(response, status, headers) = e.image_server_response(
api_version='2.0')
self.assertTrue(re.match(r'''IIIF Image Server Error\n''', response))
self.assertEqual(status, 500)
self.assertEqual(headers, {'x': 'y', 'Content-Type': 'text/plain'})
| gpl-3.0 | 181,528,450,185,453,440 | 41.166667 | 91 | 0.551779 | false |
MTG/essentia | test/src/unittests/highlevel/test_sbic.py | 1 | 8806 | #!/usr/bin/env python
# Copyright (C) 2006-2021 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from numpy import array
from essentia_test import *
class TestSBic(TestCase):
def atestNotEnoughFrames(self):
self.assertComputeFails( SBic(), array([[]]) )
self.assertComputeFails( SBic(), array([[1]]) )
self.assertComputeFails( SBic(), array([[1], [1], [1]]) )
def atestOneFeature(self):
features = array([[0, 1, 2, 3, 4]])
self.assertEqualVector(SBic(minLength=1)(features), [0, len(features[0])-1])
    # The following test is commented out, as it fails due to rounding errors.
    # A quick solution would be to use a threshold higher
    # than 1e-10 (i.e. log10 = diag_cov > 1e-6 ? logd : -6).
def atestConstantFeature(self):
features = array([ [10]*1000 ])
self.assertEqualVector(SBic()(features), [0, len(features[0])-1])
def atestTwoSegments(self):
# Half 1s and half 0s
# [ [ 1, ..., 1, 0, ..., 0],
# [ 1, ..., 1, 0, ..., 0] ]
features = array( [ [1]*200 + [0]*200 ] +
[ [1]*200 + [0]*200 ])
segments = SBic()(features)
self.assertAlmostEqualVector(segments, [0, 199, 399], .2)
    # The following test is commented out because, for some reason, reducing the
    # increment parameters creates a lot of false positives (incorrect
# segmentation points). This is probably due to the fact that the BIC is
# trying to overfit the given data.
def atestSmallIncs(self):
# Half 1s and half 0s
# [ [ 1, ..., 1, 0, ..., 0],
# [ 1, ..., 1, 0, ..., 0] ]
# This test causes duplicates in the segmentation array, and these
# duplicates caused a crash due to empty subarrays being created
# (because from one segment to the next is zero length, because they
# are the same position (sorry if that didn't make any sense)).
features = array( [ [1]*200 + [0]*200 ] +
[ [1]*200 + [0]*200 ])
segments = SBic(inc1=4, inc2=2)(features)
self.assertAlmostEqualVector(segments, [0, 199, 399], .1)
def atestSmallMinLength(self):
features = array( [ [1]*200 + [0]*200 ] +
[ [1]*200 + [0]*200 ])
segments = SBic(minLength=1)(features)
self.assertAlmostEqualVector(segments, [0, 199, 399], .2)
def atestLargeMinLength(self):
loader = MonoLoader(filename = join(testdata.audio_dir, 'recorded',
'Vivaldi_Sonata_5_II_Allegro.wav'),
                            downmix='left', sampleRate=44100)
if sys.platform == 'win32' and getattr(loader, 'hasDoubleCompute', False):
print('WARNING: skipping this test as Windows seems to do weird things with memory...')
return
audio = loader()
w = Windowing(type='blackmanharris62', size=2048)
s = Spectrum(size=2048)
m = MFCC(highFrequencyBound=8000)
features = []
for frame in FrameGenerator(audio, frameSize=2048, hopSize=1024):
if isSilent(frame):
continue
(_,mfcc) = m(s(w(frame)))
features.append(mfcc)
# compute transpose of features array
features_transpose = []
for i in range(len(features[0])):
featureVals = []
for f in features:
featureVals.append(f[i])
features_transpose.append(featureVals)
features_transpose = array(features_transpose)
nFrames = len(features)
segments = SBic(minLength=nFrames*2, cpw=1.5, size1=1000,
inc1=300, size2=600, inc2=50)(features_transpose)
# since the minLength is so high, the entire audio signal should be
# considered as one segment
expected = [0, nFrames-1]
self.assertEqualVector(segments, expected)
def testRegression(self):
audio = MonoLoader(filename = join(testdata.audio_dir, 'recorded',\
'Vivaldi_Sonata_5_II_Allegro.wav'),
downmix='left', sampleRate=44100)()
w = Windowing(type='blackmanharris62', size=2048)
s = Spectrum(size=2048)
m = MFCC(highFrequencyBound=8000)
features = []
for frame in FrameGenerator(audio, frameSize=2048, hopSize=1024):
(_,mfcc) = m(s(w(frame)))
features.append(mfcc)
# compute transpose of features array
features_transpose = []
for i in range(len(features[0])):
featureVals = []
for f in features:
featureVals.append(f[i])
features_transpose.append(featureVals)
features_transpose = array(features_transpose)
segments = SBic(cpw=1.5, size1=1000, inc1=300, size2=600, inc2=50)(features_transpose)
# The expected values were recomputed from commit
# 68548001e93c094537b7364c99e63c5402fdf744
expected = [0., 49., 997., 1746., 2895., 3344., 3943., 4196.]
self.assertEqualVector(segments, expected)
def atestMinLengthEqualToAudioFrames(self):
audio = MonoLoader(filename = join(testdata.audio_dir, 'recorded',\
'britney.wav'),
                           downmix='left', sampleRate=44100)()
w = Windowing(type='blackmanharris62', size=2048)
s = Spectrum(size=2048)
m = MFCC(highFrequencyBound=8000)
features = []
for frame in FrameGenerator(audio, frameSize=2048, hopSize=1024):
if isSilent(frame):
continue
(_,mfcc) = m(s(w(frame)))
features.append(mfcc)
# compute transpose of features array
features_transpose = []
for i in range(len(features[0])):
featureVals = []
for f in features:
featureVals.append(f[i])
features_transpose.append(featureVals)
bands, nFrames = numpy.shape(features_transpose)
features_transpose = array(features_transpose)
sbic = SBic(cpw=1.5, size1=1000, inc1=300, size2=600, inc2=50, minLength=nFrames)
segments = sbic(features_transpose)
expected = [0., nFrames-1]
self.assertEqualVector(segments, expected)
def atestMinLengthLargerThanAudioFrames(self):
audio = MonoLoader(filename = join(testdata.audio_dir, 'recorded',\
'britney.wav'),
                           downmix='left', sampleRate=44100)()
w = Windowing(type='blackmanharris62', size=2048)
s = Spectrum(size=2048)
m = MFCC(highFrequencyBound=8000)
features = []
for frame in FrameGenerator(audio, frameSize=2048, hopSize=1024):
if isSilent(frame):
continue
(_,mfcc) = m(s(w(frame)))
features.append(mfcc)
# compute transpose of features array
features_transpose = []
for i in range(len(features[0])):
featureVals = []
for f in features:
featureVals.append(f[i])
features_transpose.append(featureVals)
bands, nFrames = numpy.shape(features_transpose)
features_transpose = array(features_transpose)
sbic = SBic(cpw=1.5, size1=1000, inc1=300, size2=600, inc2=50, minLength=nFrames+2)
segments = sbic(features_transpose)
expected = [0., nFrames-1]
self.assertEqualVector(segments, expected)
def atestSize2LargerThanSize1(self):
# Half 1s and half 0s
# [ [ 1, ..., 1, 0, ..., 0],
# [ 1, ..., 1, 0, ..., 0] ]
from numpy.random import normal
features = zeros([2, 400])
for i in range(200):
features[0][i] = normal()
features[1][i] = normal()
segments = SBic(size1=25, size2=50)(features)
self.assertAlmostEqualVector(segments, [0, 199, 399], .15)
suite = allTests(TestSBic)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
| agpl-3.0 | -3,824,229,601,442,909,700 | 35.845188 | 99 | 0.590166 | false |
Bismarrck/tensorflow | tensorflow/python/keras/optimizer_v2/adadelta.py | 1 | 5855 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adadelta for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.optimizers.Adadelta', v1=[])
class Adadelta(optimizer_v2.OptimizerV2):
r"""Optimizer that implements the Adadelta algorithm.
Adadelta optimization is a stochastic gradient descent method that is based on
adaptive learning rate per dimension to address two drawbacks:
1) the continual decay of learning rates throughout training
2) the need for a manually selected global learning rate
Two accumulation steps are required:
1) the accumulation of gradients squared,
2) the accumulation of updates squared.
Initialization:
$$accum_g_0 := 0 \text{(Initialize gradient 2nd order moment vector)}$$
$$accum_x_0 := 0 \text{(Initialize variable update 2nd order moment vector)}$$
$$t := t + 1$$
$$accum_g_t := rho * accum_g_{t-1} + (1 - rho) * g * g$$
  $$delta_t := -\sqrt{accum_x_{t-1} + \epsilon} / \sqrt{accum_g_t + \epsilon} * g$$
  $$accum_x_t := rho * accum_x_{t-1} + (1 - rho) * delta_t * delta_t$$
  $$x_t := x_{t-1} + learning_rate * delta_t$$
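  A minimal usage sketch (standard Keras workflow; the model and data are
  assumed to be defined elsewhere):
  ```python
  opt = tf.keras.optimizers.Adadelta(learning_rate=1.0, rho=0.95)
  model.compile(optimizer=opt, loss="mse")
  model.fit(x_train, y_train, epochs=5)
  ```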
References
See [M. D. Zeiler](http://arxiv.org/abs/1212.5701)
([pdf](http://arxiv.org/pdf/1212.5701v1.pdf))
"""
def __init__(self,
learning_rate=0.001,
rho=0.95,
epsilon=1e-7,
name='Adadelta',
**kwargs):
"""Construct a new Adadelta optimizer.
Adadelta is a more robust extension of Adagrad that adapts learning rates
based on a moving window of gradient updates, instead of accumulating all
past gradients. This way, Adadelta continues learning even when many updates
have been done. Compared to Adagrad, in the original version of Adadelta you
don't have to set an initial learning rate. In this version, initial
learning rate can be set, as in most other Keras optimizers.
Args:
learning_rate: A `Tensor` or a floating point value. The learning rate.
To match the exact form in the original paper use 1.0.
rho: A `Tensor` or a floating point value. The decay rate.
epsilon: A `Tensor` or a floating point value. A constant epsilon used
        to better condition the grad update.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "Adadelta".
**kwargs: keyword arguments. Allowed to be {`decay`}
@compatibility(eager)
When eager execution is enabled, `learning_rate`, `rho`, and `epsilon` can
each be a callable that takes no arguments and returns the actual value to
use. This can be useful for changing these values across different
invocations of optimizer functions.
@end_compatibility
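    For example (a sketch; `schedule` is a hypothetical callable defined by the
    caller): `Adadelta(learning_rate=lambda: 0.001 * schedule())`.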
"""
super(Adadelta, self).__init__(name, **kwargs)
self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
self._set_hyper('decay', self._initial_decay)
self._set_hyper('rho', rho)
self._set_hyper('epsilon', epsilon)
def _create_slots(self, var_list):
# Separate for-loops to respect the ordering of slot variables from v1.
for v in var_list:
self.add_slot(v, 'accum_grad')
for v in var_list:
self.add_slot(v, 'accum_var')
def set_weights(self, weights):
params = self.weights
# Override set_weights for backward compatibility of Keras V1 optimizer
# since it does not include iteration at head of the weight list. Set
# iteration to 0.
if len(params) == len(weights) + 1:
weights = [np.array(0)] + weights
super(Adadelta, self).set_weights(weights)
def _resource_apply_dense(self, grad, var):
var_dtype = var.dtype.base_dtype
lr_t = self._decayed_lr(var_dtype)
accum_grad = self.get_slot(var, 'accum_grad')
accum_var = self.get_slot(var, 'accum_var')
return training_ops.resource_apply_adadelta(
var.handle,
accum_grad.handle,
accum_var.handle,
lr_t,
self._get_hyper('rho', var_dtype),
self._get_hyper('epsilon', var_dtype),
grad,
use_locking=self._use_locking)
def _resource_apply_sparse(self, grad, var, indices):
var_dtype = var.dtype.base_dtype
lr_t = self._decayed_lr(var_dtype)
accum_grad = self.get_slot(var, 'accum_grad')
accum_var = self.get_slot(var, 'accum_var')
return training_ops.resource_sparse_apply_adadelta(
var.handle,
accum_grad.handle,
accum_var.handle,
lr_t,
self._get_hyper('rho', var_dtype),
self._get_hyper('epsilon', var_dtype),
grad,
indices,
use_locking=self._use_locking)
def get_config(self):
config = super(Adadelta, self).get_config()
config.update({
'learning_rate': self._serialize_hyperparameter('learning_rate'),
'decay': self._serialize_hyperparameter('decay'),
'rho': self._serialize_hyperparameter('rho'),
'epsilon': self._serialize_hyperparameter('epsilon'),
})
return config
| apache-2.0 | 1,039,043,600,235,876,000 | 38.033333 | 80 | 0.663877 | false |
noironetworks/group-based-policy | gbpservice/neutron/services/grouppolicy/common/constants.py | 1 | 1627 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.plugins import constants
constants.GROUP_POLICY = "GROUP_POLICY"
constants.SERVICECHAIN = "SERVICECHAIN"
GBP_PREFIXES = {
constants.GROUP_POLICY: "/grouppolicy",
constants.SERVICECHAIN: "/servicechain",
}
GP_ACTION_ALLOW = 'allow'
GP_ACTION_REDIRECT = 'redirect'
GP_DIRECTION_IN = 'in'
GP_DIRECTION_OUT = 'out'
GP_DIRECTION_BI = 'bi'
GP_NETWORK_SVC_PARAM_TYPE = 'type'
GP_NETWORK_SVC_PARAM_NAME = 'name'
GP_NETWORK_SVC_PARAM_VALUE = 'value'
GP_NETWORK_SVC_PARAM_TYPE_IP_SINGLE = 'ip_single'
GP_NETWORK_SVC_PARAM_TYPE_IP_POOL = 'ip_pool'
GP_NETWORK_SVC_PARAM_TYPE_QOS_MAX = 'qos_maxrate'
GP_NETWORK_SVC_PARAM_TYPE_QOS_BURST = 'qos_burstrate'
GP_NETWORK_SVC_PARAM_TYPE_STRING = 'string'
GP_NETWORK_SVC_PARAM_VALUE_SELF_SUBNET = 'self_subnet'
GP_NETWORK_SVC_PARAM_VALUE_NAT_POOL = 'nat_pool'
STATUS_ACTIVE = 'ACTIVE'
STATUS_BUILD = 'BUILD'
STATUS_ERROR = 'ERROR'
PRE_COMMIT = 'pre_commit'
POST_COMMIT = 'post_commit'
STATUS_STATES = [STATUS_ACTIVE, STATUS_BUILD, STATUS_ERROR]
PRECOMMIT_POLICY_DRIVERS = ['aim_mapping']
| apache-2.0 | 628,923,062,867,018,500 | 29.698113 | 78 | 0.72649 | false |
vmahuli/contrail-controller | src/config/schema-transformer/test/test_service.py | 1 | 69059 | #
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import sys
import gevent
sys.path.append("../common/tests")
from testtools.matchers import Equals, Contains, Not
from test_utils import *
import test_common
import test_case
from vnc_api.vnc_api import *
try:
import to_bgp
except ImportError:
from schema_transformer import to_bgp
from gevent import sleep
def retry_exc_handler(tries_remaining, exception, delay):
print >> sys.stderr, "Caught '%s', %d tries remaining, sleeping for %s seconds" % (exception, tries_remaining, delay)
def retries(max_tries, delay=1, backoff=1, exceptions=(Exception,), hook=None):
def dec(func):
def f2(*args, **kwargs):
mydelay = delay
tries = range(max_tries)
tries.reverse()
for tries_remaining in tries:
try:
return func(*args, **kwargs)
except exceptions as e:
if tries_remaining > 0:
if hook is not None:
hook(tries_remaining, e, mydelay)
sleep(mydelay)
mydelay = mydelay * backoff
else:
raise
else:
break
return f2
return dec
class TestPolicy(test_case.STTestCase):
@retries(5, hook=retry_exc_handler)
def check_ri_asn(self, fq_name, rt_target):
ri = self._vnc_lib.routing_instance_read(fq_name)
rt_refs = ri.get_route_target_refs()
if not rt_refs:
print "retrying ... ", test_common.lineno()
raise Exception('ri_refs is None for %s' % fq_name)
for rt_ref in rt_refs:
if rt_ref['to'][0] == rt_target:
return
raise Exception('rt_target %s not found in ri %s' % (rt_target, fq_name))
@retries(5, hook=retry_exc_handler)
def check_bgp_asn(self, fq_name, asn):
router = self._vnc_lib.bgp_router_read(fq_name)
params = router.get_bgp_router_parameters()
if not params:
print "retrying ... ", test_common.lineno()
raise Exception('bgp params is None for %s' % fq_name)
self.assertEqual(params.get_autonomous_system(), asn)
@retries(5, hook=retry_exc_handler)
def check_lr_asn(self, fq_name, rt_target):
router = self._vnc_lib.logical_router_read(fq_name)
rt_refs = router.get_route_target_refs()
if not rt_refs:
print "retrying ... ", test_common.lineno()
raise Exception('ri_refs is None for %s' % fq_name)
self.assertEqual(rt_refs[0]['to'][0], rt_target)
@retries(5, hook=retry_exc_handler)
def check_service_chain_prefix_match(self, fq_name, prefix):
ri = self._vnc_lib.routing_instance_read(fq_name)
sci = ri.get_service_chain_information()
if sci is None:
print "retrying ... ", test_common.lineno()
raise Exception('Service chain info not found for %s' % fq_name)
self.assertEqual(sci.prefix[0], prefix)
@retries(5, hook=retry_exc_handler)
def check_service_chain_info(self, fq_name, ri_fq, si, src_ri):
ri = self._vnc_lib.routing_instance_read(fq_name)
sci = ri.get_service_chain_information()
if sci is None:
print "retrying ... ", test_common.lineno()
raise Exception('Service chain info not found for %s' % fq_name)
self.assertEqual(sci.routing_instance, ri_fq)
self.assertEqual(sci.source_routing_instance, src_ri)
self.assertEqual(sci.service_instance, si)
@retries(5, hook=retry_exc_handler)
def check_service_chain_pbf_rules(self, service_fq_name, vmi_fq_name, macs):
vmi = self._vnc_lib.virtual_machine_interface_read(vmi_fq_name)
ri_refs = vmi.get_routing_instance_refs()
for ri_ref in ri_refs:
sc_name = ri_ref['to']
if sc_name == service_fq_name:
pbf_rule = ri_ref['attr']
self.assertTrue(pbf_rule.service_chain_address != None)
self.assertTrue(pbf_rule.vlan_tag != None)
self.assertTrue(pbf_rule.direction == 'both')
self.assertTrue(pbf_rule.src_mac == macs[0])
self.assertTrue(pbf_rule.dst_mac == macs[1])
return
raise Exception('Service chain pbf rules not found for %s' % service_fq_name)
@retries(5, hook=retry_exc_handler)
def check_service_chain_ip(self, sc_name):
_SC_IP_CF = 'service_chain_ip_address_table'
cf = CassandraCFs.get_cf(_SC_IP_CF)
ip = cf.get(sc_name)['ip_address']
@retries(5, hook=retry_exc_handler)
def check_ri_rt_state_vn_policy(self, fq_name, to_fq_name, expect_to_find):
ri = self._vnc_lib.routing_instance_read(fq_name)
rt_refs = ri.get_route_target_refs()
if not rt_refs:
print "retrying ... ", test_common.lineno()
raise Exception('ri_refs is None for %s' % fq_name)
found = False
for rt_ref in rt_refs:
rt_obj = self._vnc_lib.route_target_read(id=rt_ref['uuid'])
ri_refs = rt_obj.get_routing_instance_back_refs()
for ri_ref in ri_refs:
if ri_ref['to'] == to_fq_name:
found = True
break
if found == True:
break
self.assertTrue(found == expect_to_find)
@retries(5, hook=retry_exc_handler)
def check_ri_state_vn_policy(self, fq_name, to_fq_name):
ri = self._vnc_lib.routing_instance_read(fq_name)
ri_refs = ri.get_routing_instance_refs()
if not ri_refs:
print "retrying ... ", test_common.lineno()
raise Exception('ri_refs is None for %s' % fq_name)
found = False
for ri_ref in ri_refs:
if ri_ref['to'] == to_fq_name:
found = True
break
self.assertTrue(found)
@retries(5, hook=retry_exc_handler)
def check_ri_refs_are_deleted(self, fq_name):
ri = self._vnc_lib.routing_instance_read(fq_name)
ri_refs = ri.get_routing_instance_refs()
if ri_refs:
print "retrying ... ", test_common.lineno()
raise Exception('ri_refs still exist for %s' % fq_name)
@retries(5, hook=retry_exc_handler)
def delete_vn(self, fq_name):
try:
self._vnc_lib.virtual_network_delete(fq_name=fq_name)
print 'vn deleted'
except RefsExistError:
print "retrying ... ", test_common.lineno()
raise Exception('virtual network %s still exists' % str(fq_name))
@retries(5, hook=retry_exc_handler)
def check_vn_is_deleted(self, uuid):
try:
self._vnc_lib.virtual_network_read(id=uuid)
print "retrying ... ", test_common.lineno()
raise Exception('virtual network %s still exists' % uuid)
except NoIdError:
print 'vn deleted'
@retries(5, hook=retry_exc_handler)
def check_ri_is_deleted(self, fq_name):
try:
self._vnc_lib.routing_instance_read(fq_name)
print "retrying ... ", test_common.lineno()
raise Exception('routing instance %s still exists' % fq_name)
except NoIdError:
print 'ri deleted'
@retries(5, hook=retry_exc_handler)
def check_ri_is_present(self, fq_name):
self._vnc_lib.routing_instance_read(fq_name)
@retries(5, hook=retry_exc_handler)
def check_link_in_ifmap_graph(self, fq_name_str, links):
        self._vnc_lib.routing_instance_read(fq_name_str=fq_name_str)
@retries(5, hook=retry_exc_handler)
def wait_to_get_sc(self):
sc = [x for x in to_bgp.ServiceChain]
if len(sc) == 0:
print "retrying ... ", test_common.lineno()
raise Exception('Service chain not found')
return sc
@retries(5, hook=retry_exc_handler)
def wait_to_get_link(self, ident_name, link_fq_name):
self.assertThat(str(FakeIfmapClient._graph[ident_name]['links']), Contains(link_fq_name))
@retries(5, hook=retry_exc_handler)
def wait_to_remove_link(self, ident_name, link_fq_name):
self.assertThat(str(FakeIfmapClient._graph[ident_name]['links']), Not(Contains(link_fq_name)))
@retries(5, hook=retry_exc_handler)
def wait_to_get_sg_id(self, sg_fq_name):
sg_obj = self._vnc_lib.security_group_read(sg_fq_name)
if sg_obj.get_security_group_id() is None:
raise Exception('Security Group Id is none %s' % str(sg_fq_name))
@retries(5, hook=retry_exc_handler)
def check_acl_match_dst_cidr(self, fq_name, ip_prefix, ip_len):
acl = self._vnc_lib.access_control_list_read(fq_name)
for rule in acl.access_control_list_entries.acl_rule:
if (rule.match_condition.dst_address.subnet is not None and
rule.match_condition.dst_address.subnet.ip_prefix == ip_prefix and
rule.match_condition.dst_address.subnet.ip_prefix_len == ip_len):
return
raise Exception('prefix %s/%d not found in ACL rules for %s' %
(ip_prefix, ip_len, fq_name))
@retries(5, hook=retry_exc_handler)
def check_acl_match_nets(self, fq_name, vn1_fq_name, vn2_fq_name):
acl = self._vnc_lib.access_control_list_read(fq_name)
for rule in acl.access_control_list_entries.acl_rule:
if (rule.match_condition.src_address.virtual_network == vn1_fq_name and
rule.match_condition.dst_address.virtual_network == vn2_fq_name):
return
raise Exception('nets %s/%s not found in ACL rules for %s' %
(vn1_fq_name, vn2_fq_name, fq_name))
@retries(5, hook=retry_exc_handler)
def check_acl_match_sg(self, fq_name, acl_name, sg_id, is_all_rules = False):
sg_obj = self._vnc_lib.security_group_read(fq_name)
acls = sg_obj.get_access_control_lists()
acl = None
for acl_to in acls or []:
if (acl_to['to'][-1] == acl_name):
acl = self._vnc_lib.access_control_list_read(id=acl_to['uuid'])
break
self.assertTrue(acl != None)
match = False
for rule in acl.access_control_list_entries.acl_rule:
if acl_name == 'egress-access-control-list':
if rule.match_condition.dst_address.security_group != sg_id:
if is_all_rules:
raise Exception('sg %s/%s not found in %s - for some rule' %
(str(fq_name), str(sg_id), acl_name))
else:
match = True
break
if acl_name == 'ingress-access-control-list':
if rule.match_condition.src_address.security_group != sg_id:
if is_all_rules:
raise Exception('sg %s/%s not found in %s - for some rule' %
(str(fq_name), str(sg_id), acl_name))
else:
match = True
break
if match == False:
raise Exception('sg %s/%s not found in %s' %
(str(fq_name), str(sg_id), acl_name))
return
@retries(5, hook=retry_exc_handler)
def check_no_policies_for_sg(self, fq_name):
try:
sg_obj = self._vnc_lib.security_group_read(fq_name)
sg_entries = sg_obj.get_security_group_entries()
if sg_entries.get_policy_rule():
raise Exception('sg %s found policies' % (str(fq_name)))
except NoIdError:
pass
@retries(5, hook=retry_exc_handler)
def check_acl_not_match_sg(self, fq_name, acl_name, sg_id):
try:
sg_obj = self._vnc_lib.security_group_read(fq_name)
acls = sg_obj.get_access_control_lists()
acl = None
for acl_to in acls or []:
if (acl_to['to'][-1] != acl_name):
continue
acl = self._vnc_lib.access_control_list_read(id=acl_to['uuid'])
if acl == None:
return
for rule in acl.access_control_list_entries.acl_rule:
if acl_name == 'egress-access-control-list':
if rule.match_condition.dst_address.security_group == sg_id:
raise Exception('sg %s/%s found in %s - for some rule' %
(str(fq_name), str(sg_id), acl_name))
if acl_name == 'ingress-access-control-list':
if rule.match_condition.src_address.security_group == sg_id:
raise Exception('sg %s/%s found in %s - for some rule' %
(str(fq_name), str(sg_id), acl_name))
except NoIdError:
pass
@retries(5, hook=retry_exc_handler)
def check_acl_not_match_nets(self, fq_name, vn1_fq_name, vn2_fq_name):
acl = None
try:
acl = self._vnc_lib.access_control_list_read(fq_name)
except NoIdError:
return
found = False
for rule in acl.access_control_list_entries.acl_rule:
if (rule.match_condition.src_address.virtual_network == vn1_fq_name and
rule.match_condition.dst_address.virtual_network == vn2_fq_name):
found = True
if found == True:
raise Exception('nets %s/%s found in ACL rules for %s' %
(vn1_fq_name, vn2_fq_name, fq_name))
return
@retries(5, hook=retry_exc_handler)
def check_acl_not_match_mirror_to_ip(self, fq_name):
acl = None
try:
acl = self._vnc_lib.access_control_list_read(fq_name)
except NoIdError:
return
for rule in acl.access_control_list_entries.acl_rule:
if (rule.action_list.mirror_to.analyzer_ip_address is not None):
raise Exception('mirror to ip %s found in ACL rules for %s' % (fq_name))
return
@retries(5, hook=retry_exc_handler)
def check_acl_match_mirror_to_ip(self, fq_name):
acl = self._vnc_lib.access_control_list_read(fq_name)
for rule in acl.access_control_list_entries.acl_rule:
if (rule.action_list.mirror_to.analyzer_ip_address is not None):
return
raise Exception('mirror to ip not found in ACL rules for %s' % (fq_name))
@retries(5, hook=retry_exc_handler)
def check_route_target_in_routing_instance(self, ri_name, rt_list):
ri_obj = self._vnc_lib.routing_instance_read(fq_name=ri_name)
ri_rt_refs = set([ref['to'][0] for ref in ri_obj.get_route_target_refs() or []])
self.assertTrue(set(rt_list) <= ri_rt_refs)
def get_ri_name(self, vn, ri_name=None):
return vn.get_fq_name() + [ri_name or vn.name]
def test_basic_policy(self):
vn1_name = self.id() + 'vn1'
vn2_name = self.id() + 'vn2'
vn1_obj = VirtualNetwork(vn1_name)
vn2_obj = VirtualNetwork(vn2_name)
np = self.create_network_policy(vn1_obj, vn2_obj)
seq = SequenceType(1, 1)
vnp = VirtualNetworkPolicyType(seq)
vn1_obj.set_network_policy(np, vnp)
vn2_obj.set_network_policy(np, vnp)
vn1_uuid = self._vnc_lib.virtual_network_create(vn1_obj)
vn2_uuid = self._vnc_lib.virtual_network_create(vn2_obj)
for obj in [vn1_obj, vn2_obj]:
ident_name = self.get_obj_imid(obj)
gevent.sleep(2)
ifmap_ident = self.assertThat(FakeIfmapClient._graph, Contains(ident_name))
self.check_ri_state_vn_policy(self.get_ri_name(vn1_obj),
self.get_ri_name(vn2_obj))
self.check_ri_state_vn_policy(self.get_ri_name(vn2_obj),
self.get_ri_name(vn1_obj))
vn1_obj.del_network_policy(np)
vn2_obj.del_network_policy(np)
self._vnc_lib.virtual_network_update(vn1_obj)
self._vnc_lib.virtual_network_update(vn2_obj)
self.check_ri_refs_are_deleted(fq_name=self.get_ri_name(vn2_obj))
self.delete_network_policy(np)
self._vnc_lib.virtual_network_delete(fq_name=vn1_obj.get_fq_name())
self._vnc_lib.virtual_network_delete(fq_name=vn2_obj.get_fq_name())
self.check_vn_is_deleted(uuid=vn1_obj.uuid)
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn2_obj))
# end test_basic_policy
def test_multiple_policy(self):
vn1_name = self.id() + 'vn1'
vn2_name = self.id() + 'vn2'
vn1_obj = VirtualNetwork(vn1_name)
vn2_obj = VirtualNetwork(vn2_name)
np1 = self.create_network_policy(vn1_obj, vn2_obj)
np2 = self.create_network_policy(vn2_obj, vn1_obj)
seq = SequenceType(1, 1)
vnp = VirtualNetworkPolicyType(seq)
vn1_obj.set_network_policy(np1, vnp)
vn2_obj.set_network_policy(np2, vnp)
vn1_uuid = self._vnc_lib.virtual_network_create(vn1_obj)
vn2_uuid = self._vnc_lib.virtual_network_create(vn2_obj)
self.check_ri_state_vn_policy(self.get_ri_name(vn1_obj),
self.get_ri_name(vn2_obj))
self.check_ri_state_vn_policy(self.get_ri_name(vn2_obj),
self.get_ri_name(vn1_obj))
np1.network_policy_entries.policy_rule[0].action_list.simple_action = 'deny'
np1.set_network_policy_entries(np1.network_policy_entries)
self._vnc_lib.network_policy_update(np1)
expr =("('contrail:connection contrail:routing-instance:%s' in FakeIfmapClient._graph['contrail:routing-instance:%s']['links'])"
% (':'.join(self.get_ri_name(vn2_obj)),
':'.join(self.get_ri_name(vn1_obj))))
self.assertTill(expr)
np1.network_policy_entries.policy_rule[0].action_list.simple_action = 'pass'
np1.set_network_policy_entries(np1.network_policy_entries)
self._vnc_lib.network_policy_update(np1)
np2.network_policy_entries.policy_rule[0].action_list.simple_action = 'deny'
np2.set_network_policy_entries(np2.network_policy_entries)
self._vnc_lib.network_policy_update(np2)
expr = ("('contrail:connection contrail:routing-instance:%s' in FakeIfmapClient._graph['contrail:routing-instance:%s']['links'])"
% (':'.join(self.get_ri_name(vn1_obj)),
':'.join(self.get_ri_name(vn2_obj))))
self.assertTill(expr)
vn1_obj.del_network_policy(np1)
vn2_obj.del_network_policy(np2)
self._vnc_lib.virtual_network_update(vn1_obj)
self._vnc_lib.virtual_network_update(vn2_obj)
self.check_ri_refs_are_deleted(fq_name=self.get_ri_name(vn2_obj))
self.delete_network_policy(np1)
self.delete_network_policy(np2)
self._vnc_lib.virtual_network_delete(fq_name=vn1_obj.get_fq_name())
self._vnc_lib.virtual_network_delete(fq_name=vn2_obj.get_fq_name())
self.check_vn_is_deleted(uuid=vn1_obj.uuid)
# end test_multiple_policy
def test_policy_in_policy(self):
vn1_name = self.id() + 'vn1'
vn2_name = self.id() + 'vn2'
vn3_name = self.id() + 'vn3'
vn1_obj = VirtualNetwork(vn1_name)
vn2_obj = VirtualNetwork(vn2_name)
np1 = self.create_network_policy(vn1_obj, vn2_obj)
np2 = self.create_network_policy(vn2_obj, vn1_obj)
np1.network_policy_entries.policy_rule[0].dst_addresses[0].virtual_network = None
np1.network_policy_entries.policy_rule[0].dst_addresses[0].network_policy = np2.get_fq_name_str()
np1.set_network_policy_entries(np1.network_policy_entries)
self._vnc_lib.network_policy_update(np1)
np2.network_policy_entries.policy_rule[0].src_addresses[0].virtual_network = 'local'
np2.set_network_policy_entries(np1.network_policy_entries)
self._vnc_lib.network_policy_update(np2)
seq = SequenceType(1, 1)
vnp = VirtualNetworkPolicyType(seq)
vn1_obj.set_network_policy(np1, vnp)
vn2_obj.set_network_policy(np2, vnp)
vn1_uuid = self._vnc_lib.virtual_network_create(vn1_obj)
vn2_uuid = self._vnc_lib.virtual_network_create(vn2_obj)
self.check_ri_state_vn_policy(self.get_ri_name(vn1_obj),
self.get_ri_name(vn2_obj))
self.check_ri_state_vn_policy(self.get_ri_name(vn2_obj),
self.get_ri_name(vn1_obj))
vn3_obj = VirtualNetwork(vn3_name)
vn3_obj.set_network_policy(np2, vnp)
vn3_uuid = self._vnc_lib.virtual_network_create(vn3_obj)
self.check_ri_state_vn_policy(self.get_ri_name(vn3_obj),
self.get_ri_name(vn1_obj))
vn3_obj.del_network_policy(np2)
self._vnc_lib.virtual_network_update(vn3_obj)
@retries(5, hook=retry_exc_handler)
def _match_acl_rule():
acl = self._vnc_lib.access_control_list_read(
fq_name=self.get_ri_name(vn1_obj))
for rule in acl.get_access_control_list_entries().get_acl_rule():
if rule.match_condition.dst_address.virtual_network == vn3_obj.get_fq_name_str():
raise Exception("ACL rule still present")
_match_acl_rule()
vn1_obj.del_network_policy(np1)
vn2_obj.del_network_policy(np2)
self._vnc_lib.virtual_network_update(vn1_obj)
self._vnc_lib.virtual_network_update(vn2_obj)
self.delete_network_policy(np1)
self.delete_network_policy(np2)
self._vnc_lib.virtual_network_delete(fq_name=vn1_obj.get_fq_name())
self._vnc_lib.virtual_network_delete(fq_name=vn2_obj.get_fq_name())
self._vnc_lib.virtual_network_delete(fq_name=vn3_obj.get_fq_name())
self.check_vn_is_deleted(uuid=vn1_obj.uuid)
# end test_multiple_policy
def test_service_policy(self):
# create vn1
vn1_name = self.id() + 'vn1'
vn1_obj = self.create_virtual_network(vn1_name, '10.0.0.0/24')
# create vn2
vn2_name = self.id() + 'vn2'
vn2_obj = self.create_virtual_network(vn2_name, '20.0.0.0/24')
service_name = self.id() + 's1'
np = self.create_network_policy(vn1_obj, vn2_obj, [service_name])
seq = SequenceType(1, 1)
vnp = VirtualNetworkPolicyType(seq)
vn1_obj.set_network_policy(np, vnp)
vn2_obj.set_network_policy(np, vnp)
self._vnc_lib.virtual_network_update(vn1_obj)
self._vnc_lib.virtual_network_update(vn2_obj)
sc = self.wait_to_get_sc()
sc_ri_name = 'service-'+sc[0]+'-default-domain_default-project_' + service_name
self.check_ri_state_vn_policy(self.get_ri_name(vn1_obj),
self.get_ri_name(vn1_obj, sc_ri_name))
self.check_ri_state_vn_policy(self.get_ri_name(vn2_obj, sc_ri_name),
self.get_ri_name(vn2_obj))
self.check_service_chain_prefix_match(fq_name=self.get_ri_name(vn2_obj, sc_ri_name),
prefix='10.0.0.0/24')
vn1_obj.del_network_policy(np)
vn2_obj.del_network_policy(np)
self._vnc_lib.virtual_network_update(vn1_obj)
self._vnc_lib.virtual_network_update(vn2_obj)
self.check_ri_refs_are_deleted(fq_name=self.get_ri_name(vn1_obj))
self.delete_network_policy(np)
self._vnc_lib.virtual_network_delete(fq_name=vn1_obj.get_fq_name())
self._vnc_lib.virtual_network_delete(fq_name=vn2_obj.get_fq_name())
self.check_vn_is_deleted(uuid=vn1_obj.uuid)
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn2_obj))
# end test_service_policy
def test_service_policy_no_vm(self):
# create vn1
vn1_name = self.id() + 'vn1'
vn1_obj = self.create_virtual_network(vn1_name, '10.0.0.0/24')
# create vn2
vn2_name = self.id() + 'vn2'
vn2_obj = self.create_virtual_network(vn2_name, '20.0.0.0/24')
service_name = self.id() + 's1'
np = self.create_network_policy(vn1_obj, vn2_obj)
seq = SequenceType(1, 1)
vnp = VirtualNetworkPolicyType(seq)
vn1_obj.set_network_policy(np, vnp)
vn2_obj.set_network_policy(np, vnp)
self._vnc_lib.virtual_network_update(vn1_obj)
self._vnc_lib.virtual_network_update(vn2_obj)
np.network_policy_entries.policy_rule[0].action_list.apply_service = ["default-domain:default-project:"+service_name]
np.set_network_policy_entries(np.network_policy_entries)
self._vnc_lib.network_policy_update(np)
sc = self.wait_to_get_sc()
sc_ri_name = 'service-'+sc[0]+'-default-domain_default-project_' + service_name
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn1_obj, sc_ri_name))
vn1_obj.del_network_policy(np)
vn2_obj.del_network_policy(np)
self._vnc_lib.virtual_network_update(vn1_obj)
self._vnc_lib.virtual_network_update(vn2_obj)
self.check_ri_refs_are_deleted(fq_name=self.get_ri_name(vn1_obj))
np.network_policy_entries.policy_rule[0].action_list.apply_service = []
np.set_network_policy_entries(np.network_policy_entries)
self._vnc_lib.network_policy_update(np)
self.delete_network_policy(np)
self._vnc_lib.virtual_network_delete(fq_name=vn1_obj.get_fq_name())
self._vnc_lib.virtual_network_delete(fq_name=vn2_obj.get_fq_name())
self.check_vn_is_deleted(uuid=vn1_obj.uuid)
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn2_obj))
# end test_service_policy_no_vm
def test_multi_service_in_policy(self):
# create vn1
vn1_name = self.id() + 'vn1'
vn1_obj = self.create_virtual_network(vn1_name, '10.0.0.0/24')
# create vn2
vn2_name = self.id() + 'vn2'
vn2_obj = self.create_virtual_network(vn2_name, '20.0.0.0/24')
service_names = [self.id() + 's1', self.id() + 's2', self.id() + 's3']
np = self.create_network_policy(vn1_obj, vn2_obj, service_names, "in-network", auto_policy=False)
seq = SequenceType(1, 1)
vnp = VirtualNetworkPolicyType(seq)
vn1_obj.set_network_policy(np, vnp)
vn2_obj.set_network_policy(np, vnp)
vn1_uuid = self._vnc_lib.virtual_network_update(vn1_obj)
vn2_uuid = self._vnc_lib.virtual_network_update(vn2_obj)
for obj in [vn1_obj, vn2_obj]:
ident_name = self.get_obj_imid(obj)
gevent.sleep(2)
ifmap_ident = self.assertThat(FakeIfmapClient._graph, Contains(ident_name))
sc = self.wait_to_get_sc()
sc_ri_names = ['service-'+sc[0]+'-default-domain_default-project_' + s for s in service_names]
self.check_ri_state_vn_policy(self.get_ri_name(vn1_obj),
self.get_ri_name(vn1_obj, sc_ri_names[0]))
self.check_ri_state_vn_policy(self.get_ri_name(vn2_obj, sc_ri_names[2]),
self.get_ri_name(vn2_obj))
self.check_service_chain_prefix_match(fq_name=self.get_ri_name(vn1_obj, sc_ri_names[2]),
prefix='20.0.0.0/24')
si_name = 'default-domain:default-project:test.test_service.TestPolicy.test_multi_service_in_policys3'
self.check_service_chain_info(self.get_ri_name(vn1_obj, sc_ri_names[2]),
':'.join(self.get_ri_name(vn2_obj)), si_name, ':'.join(self.get_ri_name(vn1_obj)))
self.check_service_chain_info(self.get_ri_name(vn2_obj, sc_ri_names[2]),
':'.join(self.get_ri_name(vn1_obj)), si_name, ':'.join(self.get_ri_name(vn2_obj)))
vn1_obj.del_network_policy(np)
vn2_obj.del_network_policy(np)
self._vnc_lib.virtual_network_update(vn1_obj)
self._vnc_lib.virtual_network_update(vn2_obj)
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn1_obj, sc_ri_names[0]))
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn2_obj, sc_ri_names[0]))
self.check_ri_refs_are_deleted(fq_name=self.get_ri_name(vn1_obj))
self.delete_network_policy(np)
self.delete_vn(fq_name=vn1_obj.get_fq_name())
self.delete_vn(fq_name=vn2_obj.get_fq_name())
self.check_vn_is_deleted(uuid=vn1_obj.uuid)
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn2_obj))
# end test_multi_service_in_policy
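    # Chain of three transparent-mode services: additionally verify the allocated
    # service-chain IPs and the PBF rules programmed on each service interface pair.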
def test_multi_service_policy(self):
# create vn1
vn1_name = self.id() + 'vn1'
vn1_obj = self.create_virtual_network(vn1_name, '10.0.0.0/24')
# create vn2
vn2_name = self.id() + 'vn2'
vn2_obj = self.create_virtual_network(vn2_name, '20.0.0.0/24')
service_names = [self.id() + 's1', self.id() + 's2', self.id() + 's3']
np = self.create_network_policy(vn1_obj, vn2_obj, service_names)
seq = SequenceType(1, 1)
vnp = VirtualNetworkPolicyType(seq)
vn1_obj.set_network_policy(np, vnp)
vn2_obj.set_network_policy(np, vnp)
self._vnc_lib.virtual_network_update(vn1_obj)
self._vnc_lib.virtual_network_update(vn2_obj)
sc = self.wait_to_get_sc()
sc_ri_names = ['service-'+sc[0]+'-default-domain_default-project_' + s for s in service_names]
self.check_ri_state_vn_policy(self.get_ri_name(vn1_obj),
self.get_ri_name(vn1_obj, sc_ri_names[0]))
self.check_ri_state_vn_policy(self.get_ri_name(vn2_obj, sc_ri_names[-1]),
self.get_ri_name(vn2_obj))
self.check_service_chain_prefix_match(fq_name=self.get_ri_name(vn2_obj, sc_ri_names[0]),
prefix='10.0.0.0/24')
self.check_service_chain_ip(sc_ri_names[0])
self.check_service_chain_ip(sc_ri_names[1])
self.check_service_chain_ip(sc_ri_names[2])
sc_fq_names = [
self.get_ri_name(vn1_obj, sc_ri_names[0]),
self.get_ri_name(vn2_obj, sc_ri_names[0]),
self.get_ri_name(vn1_obj, sc_ri_names[1]),
self.get_ri_name(vn2_obj, sc_ri_names[1]),
self.get_ri_name(vn1_obj, sc_ri_names[2]),
self.get_ri_name(vn2_obj, sc_ri_names[2])
]
vmi_fq_names = [
['default-domain', 'default-project',
'default-domain__default-project__test.test_service.TestPolicy.test_multi_service_policys1__1__left__1'],
['default-domain', 'default-project',
'default-domain__default-project__test.test_service.TestPolicy.test_multi_service_policys1__1__right__2'],
['default-domain', 'default-project',
'default-domain__default-project__test.test_service.TestPolicy.test_multi_service_policys2__1__left__1'],
['default-domain', 'default-project',
'default-domain__default-project__test.test_service.TestPolicy.test_multi_service_policys2__1__right__2'],
['default-domain', 'default-project',
'default-domain__default-project__test.test_service.TestPolicy.test_multi_service_policys3__1__left__1'],
['default-domain', 'default-project',
'default-domain__default-project__test.test_service.TestPolicy.test_multi_service_policys3__1__right__2']
]
mac1 = '02:00:00:00:00:01'
mac2 = '02:00:00:00:00:02'
self.check_service_chain_pbf_rules(sc_fq_names[0], vmi_fq_names[0], [mac1, mac2])
self.check_service_chain_pbf_rules(sc_fq_names[1], vmi_fq_names[1], [mac2, mac1])
self.check_service_chain_pbf_rules(sc_fq_names[2], vmi_fq_names[2], [mac1, mac2])
self.check_service_chain_pbf_rules(sc_fq_names[3], vmi_fq_names[3], [mac2, mac1])
self.check_service_chain_pbf_rules(sc_fq_names[4], vmi_fq_names[4], [mac1, mac2])
self.check_service_chain_pbf_rules(sc_fq_names[5], vmi_fq_names[5], [mac2, mac1])
vn2_obj.del_network_policy(np)
self._vnc_lib.virtual_network_update(vn2_obj)
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn1_obj, sc_ri_names[0]))
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn1_obj, sc_ri_names[1]))
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn1_obj, sc_ri_names[2]))
vn1_obj.del_network_policy(np)
self._vnc_lib.virtual_network_update(vn1_obj)
self.check_ri_refs_are_deleted(fq_name=self.get_ri_name(vn1_obj))
self.delete_network_policy(np)
self._vnc_lib.virtual_network_delete(fq_name=vn1_obj.get_fq_name())
self._vnc_lib.virtual_network_delete(fq_name=vn2_obj.get_fq_name())
self.check_vn_is_deleted(uuid=vn1_obj.uuid)
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn2_obj))
# end test_service_policy
# end class TestPolicy
#class TestRouteTable(test_case.STTestCase):
def test_add_delete_route(self):
lvn_name = self.id() + 'lvn'
rvn_name = self.id() + 'rvn'
lvn = self.create_virtual_network(lvn_name, "10.0.0.0/24")
rvn = self.create_virtual_network(rvn_name, "20.0.0.0/24")
service_name = self.id() + 's1'
np = self.create_network_policy(lvn, rvn, [service_name], "in-network")
vn_name = self.id() + 'vn100'
vn = self.create_virtual_network(vn_name, "1.0.0.0/24")
rtgt_list = RouteTargetList(route_target=['target:1:1'])
vn.set_route_target_list(rtgt_list)
self._vnc_lib.virtual_network_update(vn)
rt = RouteTable("rt1")
self._vnc_lib.route_table_create(rt)
vn.add_route_table(rt)
self._vnc_lib.virtual_network_update(vn)
routes = RouteTableType()
route = RouteType(prefix="0.0.0.0/0",
next_hop="default-domain:default-project:"+service_name)
routes.add_route(route)
rt.set_routes(routes)
self._vnc_lib.route_table_update(rt)
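        # Retried check: the left VN's service-chain RI must hold the static route to the
        # service instance, tagged with all route targets configured on vn100.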
@retries(5, hook=retry_exc_handler)
def _match_route_table(rtgt_list):
sc = [x for x in to_bgp.ServiceChain]
if len(sc) == 0:
raise Exception("sc has 0 len")
sc_ri_name = ('service-'+sc[0] +
'-default-domain_default-project_' + service_name)
lri = self._vnc_lib.routing_instance_read(
fq_name=self.get_ri_name(lvn, sc_ri_name))
sr = lri.get_static_route_entries()
if sr is None:
raise Exception("sr is None")
route = sr.route[0]
self.assertEqual(route.prefix, "0.0.0.0/0")
self.assertEqual(route.next_hop, "10.0.0.252")
for rtgt in rtgt_list:
self.assertIn(rtgt, route.route_target)
ri100 = self._vnc_lib.routing_instance_read(
fq_name=self.get_ri_name(vn))
rt100 = ri100.get_route_target_refs()[0]['to']
for rt_ref in lri.get_route_target_refs() or []:
if rt100 == rt_ref['to']:
return sc_ri_name, rt100
raise Exception("rt100 route-target ref not found")
sc_ri_name, rt100 = _match_route_table(rtgt_list.get_route_target())
rtgt_list.add_route_target('target:1:2')
vn.set_route_target_list(rtgt_list)
self._vnc_lib.virtual_network_update(vn)
_match_route_table(rtgt_list.get_route_target())
rtgt_list.delete_route_target('target:1:1')
vn.set_route_target_list(rtgt_list)
self._vnc_lib.virtual_network_update(vn)
_match_route_table(rtgt_list.get_route_target())
routes.set_route([])
rt.set_routes(routes)
self._vnc_lib.route_table_update(rt)
@retries(5, hook=retry_exc_handler)
def _match_route_table_cleanup(sc_ri_name, rt100):
lri = self._vnc_lib.routing_instance_read(
fq_name=self.get_ri_name(lvn, sc_ri_name))
sr = lri.get_static_route_entries()
if sr and sr.route:
raise Exception("sr has route")
ri = self._vnc_lib.routing_instance_read(
fq_name=self.get_ri_name(lvn))
rt_refs = ri.get_route_target_refs()
for rt_ref in ri.get_route_target_refs() or []:
if rt100 == rt_ref['to']:
raise Exception("rt100 route-target ref found")
_match_route_table_cleanup(sc_ri_name, rt100)
# add the route again, then delete the network without deleting the
# link to route table
routes.add_route(route)
rt.set_routes(routes)
self._vnc_lib.route_table_update(rt)
_match_route_table(rtgt_list.get_route_target())
self._vnc_lib.virtual_network_delete(fq_name=vn.get_fq_name())
_match_route_table_cleanup(sc_ri_name, rt100)
self._vnc_lib.route_table_delete(fq_name=rt.get_fq_name())
self.delete_network_policy(np, auto_policy=True)
gevent.sleep(2)
self._vnc_lib.virtual_network_delete(fq_name=lvn.get_fq_name())
self._vnc_lib.virtual_network_delete(fq_name=rvn.get_fq_name())
# test_add_delete_route
def test_vn_delete(self):
vn_name = self.id() + 'vn'
vn = self.create_virtual_network(vn_name, "10.1.1.0/24")
gevent.sleep(2)
for obj in [vn]:
ident_name = self.get_obj_imid(obj)
ifmap_ident = self.assertThat(FakeIfmapClient._graph, Contains(ident_name))
self.check_vn_ri_state(fq_name=self.get_ri_name(vn))
# stop st
self._st_greenlet.kill()
gevent.sleep(5)
# delete vn in api server
self._vnc_lib.virtual_network_delete(fq_name=vn.get_fq_name())
# start st on a free port
self._st_greenlet = gevent.spawn(test_common.launch_schema_transformer,
self._api_server_ip, self._api_server_port)
gevent.sleep(2)
# check if vn is deleted
self.check_vn_is_deleted(uuid=vn.uuid)
# check if ri is deleted
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn))
# test_vn_delete
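    # Helper: read the VN's routing instance, retrying until it exists.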
@retries(5, hook=retry_exc_handler)
def check_vn_ri_state(self, fq_name):
ri = self._vnc_lib.routing_instance_read(fq_name)
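    # Policy rules whose destination is a bare CIDR instead of a VN: the resulting ACL
    # entries on vn1 must match on the subnet prefixes.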
def test_policy_with_cidr(self):
vn1_name = self.id() + 'vn1'
vn2_name = self.id() + 'vn2'
vn1 = self.create_virtual_network(vn1_name, "10.1.1.0/24")
vn2 = self.create_virtual_network(vn2_name, "10.2.1.0/24")
rules = []
rule1 = { "protocol": "icmp",
"direction": "<>",
"src-port": "any",
"src": {"type": "vn", "value": vn1},
"dst": {"type": "cidr", "value": "10.2.1.1/32"},
"dst-port": "any",
"action": "deny"
}
rule2 = { "protocol": "icmp",
"direction": "<>",
"src-port": "any",
"src": {"type": "vn", "value": vn1},
"dst": {"type": "cidr", "value": "10.2.1.2/32"},
"dst-port": "any",
"action": "deny"
}
rules.append(rule1)
rules.append(rule2)
np = self.create_network_policy_with_multiple_rules(rules)
seq = SequenceType(1, 1)
vnp = VirtualNetworkPolicyType(seq)
vn1.set_network_policy(np, vnp)
self._vnc_lib.virtual_network_update(vn1)
for obj in [vn1]:
ident_name = self.get_obj_imid(obj)
gevent.sleep(2)
ifmap_ident = self.assertThat(FakeIfmapClient._graph, Contains(ident_name))
self.check_vn_ri_state(fq_name=self.get_ri_name(vn1))
self.check_acl_match_dst_cidr(fq_name=self.get_ri_name(vn1),
ip_prefix="10.2.1.1", ip_len=32)
self.check_acl_match_dst_cidr(fq_name=self.get_ri_name(vn1),
ip_prefix="10.2.1.2", ip_len=32)
#cleanup
self.delete_network_policy(np, auto_policy=True)
self._vnc_lib.virtual_network_delete(fq_name=vn1.get_fq_name())
self._vnc_lib.virtual_network_delete(fq_name=vn2.get_fq_name())
# check if vn is deleted
self.check_vn_is_deleted(uuid=vn1.uuid)
# test st restart while service chain is configured
def test_st_restart_service_chain_delete(self):
# create vn1
vn1_name = self.id() + 'vn1'
vn1_obj = self.create_virtual_network(vn1_name, '10.0.0.0/24')
# create vn2
vn1_name = self.id() + 'vn2'
vn2_obj = self.create_virtual_network(vn1_name, '20.0.0.0/24')
service_name = self.id() + 's1'
np = self.create_network_policy(vn1_obj, vn2_obj, [service_name])
seq = SequenceType(1, 1)
vnp = VirtualNetworkPolicyType(seq)
vn1_obj.clear_pending_updates()
vn2_obj.clear_pending_updates()
vn1_obj.set_network_policy(np, vnp)
vn2_obj.set_network_policy(np, vnp)
self._vnc_lib.virtual_network_update(vn1_obj)
self._vnc_lib.virtual_network_update(vn2_obj)
sc = self.wait_to_get_sc()
sc_ri_name = ('service-' + sc[0] + '-default-domain_default-project_'
+ service_name)
self.check_ri_state_vn_policy(self.get_ri_name(vn1_obj),
self.get_ri_name(vn1_obj, sc_ri_name))
self.check_ri_state_vn_policy(self.get_ri_name(vn2_obj, sc_ri_name),
self.get_ri_name(vn2_obj))
# stop st
test_common.kill_schema_transformer(self._st_greenlet)
vn1_obj.del_network_policy(np)
vn2_obj.del_network_policy(np)
self._vnc_lib.virtual_network_update(vn1_obj)
self._vnc_lib.virtual_network_update(vn2_obj)
gevent.sleep(3)
self.delete_network_policy(np)
self._vnc_lib.virtual_network_delete(fq_name=vn1_obj.get_fq_name())
self._vnc_lib.virtual_network_delete(fq_name=vn2_obj.get_fq_name())
self.check_vn_is_deleted(uuid=vn1_obj.uuid)
# start st on a free port
self._st_greenlet = gevent.spawn(test_common.launch_schema_transformer,
self._api_server_ip, self._api_server_port)
#check if all ri's are deleted
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn1_obj))
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn2_obj))
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn1_obj, sc_ri_name))
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn2_obj, sc_ri_name))
#end
# test service chain configuration while st is restarted
def test_st_restart_service_chain(self):
# create vn1
vn1_name = self.id() + 'vn1'
vn1_obj = self.create_virtual_network(vn1_name, '10.0.0.0/24')
# create vn2
vn2_name = self.id() + 'vn2'
vn2_obj = self.create_virtual_network(vn2_name, '20.0.0.0/24')
service_name = self.id() + 's1'
np = self.create_network_policy(vn1_obj, vn2_obj, [service_name])
seq = SequenceType(1, 1)
vnp = VirtualNetworkPolicyType(seq)
vn1_obj.set_network_policy(np, vnp)
vn2_obj.set_network_policy(np, vnp)
self._vnc_lib.virtual_network_update(vn1_obj)
self._vnc_lib.virtual_network_update(vn2_obj)
# stop st and wait for sometime
test_common.kill_schema_transformer(self._st_greenlet)
gevent.sleep(5)
# start st on a free port
self._st_greenlet = gevent.spawn(test_common.launch_schema_transformer,
self._api_server_ip, self._api_server_port)
#check service chain state
sc = self.wait_to_get_sc()
sc_ri_name = ('service-' + sc[0] + '-default-domain_default-project_'
+ service_name)
self.check_ri_state_vn_policy(self.get_ri_name(vn1_obj),
self.get_ri_name(vn1_obj, sc_ri_name))
self.check_ri_state_vn_policy(self.get_ri_name(vn2_obj, sc_ri_name),
self.get_ri_name(vn2_obj))
#cleanup
vn1_obj.del_network_policy(np)
vn2_obj.del_network_policy(np)
self._vnc_lib.virtual_network_update(vn1_obj)
self._vnc_lib.virtual_network_update(vn2_obj)
self.check_ri_refs_are_deleted(fq_name=self.get_ri_name(vn1_obj))
self.delete_network_policy(np)
self._vnc_lib.virtual_network_delete(fq_name=vn1_obj.get_fq_name())
self._vnc_lib.virtual_network_delete(fq_name=vn2_obj.get_fq_name())
self.check_vn_is_deleted(uuid=vn1_obj.uuid)
#check if all ri's are deleted
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn1_obj))
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn2_obj))
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn1_obj, sc_ri_name))
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn2_obj, sc_ri_name))
#end
# test logical router functionality
def test_logical_router(self):
# create vn1
vn1_name = self.id() + 'vn1'
vn1_obj = self.create_virtual_network(vn1_name, '10.0.0.0/24')
# create virtual machine interface
vmi_name = self.id() + 'vmi1'
vmi = VirtualMachineInterface(vmi_name, parent_type='project', fq_name=['default-domain', 'default-project', vmi_name])
vmi.add_virtual_network(vn1_obj)
self._vnc_lib.virtual_machine_interface_create(vmi)
# create logical router
lr_name = self.id() + 'lr1'
lr = LogicalRouter(lr_name)
rtgt_list = RouteTargetList(route_target=['target:1:1'])
lr.set_configured_route_target_list(rtgt_list)
lr.add_virtual_machine_interface(vmi)
self._vnc_lib.logical_router_create(lr)
ri_name = self.get_ri_name(vn1_obj)
self.check_route_target_in_routing_instance(ri_name, rtgt_list.get_route_target())
rtgt_list.add_route_target('target:1:2')
lr.set_configured_route_target_list(rtgt_list)
self._vnc_lib.logical_router_update(lr)
self.check_route_target_in_routing_instance(ri_name, rtgt_list.get_route_target())
rtgt_list.delete_route_target('target:1:1')
lr.set_configured_route_target_list(rtgt_list)
self._vnc_lib.logical_router_update(lr)
self.check_route_target_in_routing_instance(ri_name, rtgt_list.get_route_target())
lr.del_virtual_machine_interface(vmi)
self._vnc_lib.logical_router_update(lr)
self._vnc_lib.virtual_machine_interface_delete(id=vmi.uuid)
self._vnc_lib.virtual_network_delete(id=vn1_obj.uuid)
self.check_vn_is_deleted(uuid=vn1_obj.uuid)
self._vnc_lib.logical_router_delete(id=lr.uuid)
@retries(5, hook=retry_exc_handler)
def check_bgp_peering(self, router1, router2, length):
r1 = self._vnc_lib.bgp_router_read(fq_name=router1.get_fq_name())
ref_names = [ref['to'] for ref in r1.get_bgp_router_refs() or []]
self.assertEqual(len(ref_names), length)
self.assertThat(ref_names, Contains(router2.get_fq_name()))
def create_bgp_router(self, name, vendor, asn=None):
ip_fabric_ri = self._vnc_lib.routing_instance_read(
fq_name=['default-domain', 'default-project', 'ip-fabric', '__default__'])
router = BgpRouter(name, parent_obj=ip_fabric_ri)
params = BgpRouterParams()
params.vendor = 'contrail'
params.autonomous_system = asn
router.set_bgp_router_parameters(params)
self._vnc_lib.bgp_router_create(router)
return router
def test_ibgp_auto_mesh(self):
# create router1
r1_name = self.id() + 'router1'
router1 = self.create_bgp_router(r1_name, 'contrail')
# create router2
r2_name = self.id() + 'router2'
router2 = self.create_bgp_router(r2_name, 'contrail')
self.check_bgp_peering(router1, router2, 1)
r3_name = self.id() + 'router3'
router3 = self.create_bgp_router(r3_name, 'juniper', 1)
self.check_bgp_peering(router1, router2, 1)
params = router3.get_bgp_router_parameters()
params.autonomous_system = 64512
router3.set_bgp_router_parameters(params)
self._vnc_lib.bgp_router_update(router3)
self.check_bgp_peering(router1, router3, 2)
r4_name = self.id() + 'router4'
router4 = self.create_bgp_router(r4_name, 'juniper', 1)
gsc = self._vnc_lib.global_system_config_read(
fq_name=['default-global-system-config'])
gsc.set_autonomous_system(1)
self.check_bgp_peering(router1, router4, 3)
self._vnc_lib.bgp_router_delete(id=router1.uuid)
self._vnc_lib.bgp_router_delete(id=router2.uuid)
self._vnc_lib.bgp_router_delete(id=router3.uuid)
self._vnc_lib.bgp_router_delete(id=router4.uuid)
gevent.sleep(1)
@retries(10, hook=retry_exc_handler)
def check_vrf_assign_table(self, vmi_fq_name, floating_ip, is_present = True):
vmi = self._vnc_lib.virtual_machine_interface_read(vmi_fq_name)
if is_present:
self.assertEqual(vmi.get_vrf_assign_table().vrf_assign_rule[1].match_condition.src_address.subnet.ip_prefix, floating_ip)
        else:
            try:
                self.assertEqual(vmi.get_vrf_assign_table().vrf_assign_rule[1].match_condition.src_address.subnet.ip_prefix, floating_ip)
            except (AssertionError, AttributeError, IndexError):
                # the floating ip is gone from the vrf assign table, as expected
                return
            raise Exception('floating ip is still present: ' + floating_ip)
def test_analyzer(self):
# create vn1
vn1_name = self.id() + 'vn1'
vn1_obj = self.create_virtual_network(vn1_name, '10.0.0.0/24')
# create vn2
vn2_name = self.id() + 'vn2'
vn2_obj = self.create_virtual_network(vn2_name, '20.0.0.0/24')
service_name = self.id() + 's1'
np = self.create_network_policy(vn1_obj, vn2_obj, [service_name], 'transparent', 'analyzer', action_type = 'mirror-to')
seq = SequenceType(1, 1)
vnp = VirtualNetworkPolicyType(seq)
vn1_obj.set_network_policy(np, vnp)
vn2_obj.set_network_policy(np, vnp)
vn1_uuid = self._vnc_lib.virtual_network_update(vn1_obj)
vn2_uuid = self._vnc_lib.virtual_network_update(vn2_obj)
for obj in [vn1_obj, vn2_obj]:
ident_name = self.get_obj_imid(obj)
gevent.sleep(2)
ifmap_ident = self.assertThat(FakeIfmapClient._graph, Contains(ident_name))
svc_ri_fq_name = 'default-domain:default-project:svc-vn-left:svc-vn-left'.split(':')
self.check_ri_state_vn_policy(svc_ri_fq_name, self.get_ri_name(vn1_obj))
self.check_ri_state_vn_policy(svc_ri_fq_name, self.get_ri_name(vn2_obj))
self.check_acl_match_mirror_to_ip(self.get_ri_name(vn1_obj))
self.check_acl_match_nets(self.get_ri_name(vn1_obj), ':'.join(vn1_obj.get_fq_name()), ':'.join(vn2_obj.get_fq_name()))
self.check_acl_match_nets(self.get_ri_name(vn2_obj), ':'.join(vn2_obj.get_fq_name()), ':'.join(vn1_obj.get_fq_name()))
vn1_obj.del_network_policy(np)
vn2_obj.del_network_policy(np)
self._vnc_lib.virtual_network_update(vn1_obj)
self._vnc_lib.virtual_network_update(vn2_obj)
self.check_acl_not_match_mirror_to_ip(self.get_ri_name(vn1_obj))
self.check_acl_not_match_nets(self.get_ri_name(vn1_obj), ':'.join(vn1_obj.get_fq_name()), ':'.join(vn2_obj.get_fq_name()))
self.check_acl_not_match_nets(self.get_ri_name(vn2_obj), ':'.join(vn2_obj.get_fq_name()), ':'.join(vn1_obj.get_fq_name()))
@retries(5, hook=retry_exc_handler)
def check_security_group_id(self, sg_fq_name, verify_sg_id = None):
sg = self._vnc_lib.security_group_read(sg_fq_name)
sg_id = sg.get_security_group_id()
if sg_id is None:
raise Exception('sg id is not present for %s' % sg_fq_name)
if verify_sg_id is not None and str(sg_id) != str(verify_sg_id):
raise Exception('sg id is not same as passed value (%s, %s)' % (str(sg_id), str(verify_sg_id)))
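    # Helper: build a PolicyRuleType for a security group from a plain rule dict
    # (protocol, port range, direction, and either an IP prefix or a remote SG).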
def _security_group_rule_build(self, rule_info, sg_uuid):
protocol = rule_info['protocol']
port_min = rule_info['port_min'] or 0
port_max = rule_info['port_max'] or 65535
direction = rule_info['direction'] or 'ingress'
ip_prefix = rule_info['ip_prefix']
ether_type = rule_info['ether_type']
sg_id = rule_info['sg_id']
if ip_prefix:
cidr = ip_prefix.split('/')
pfx = cidr[0]
pfx_len = int(cidr[1])
endpt = [AddressType(subnet=SubnetType(pfx, pfx_len))]
elif sg_id:
try:
sg_obj = self._vnc_lib.security_group_read(id=sg_uuid)
except NoIdError:
raise Exception('SecurityGroupNotFound %s' % sg_uuid)
endpt = [AddressType(security_group=sg_obj.get_fq_name_str())]
local = None
remote = None
if direction == 'ingress':
dir = '>'
local = endpt
remote = [AddressType(security_group='local')]
else:
dir = '>'
remote = endpt
local = [AddressType(security_group='local')]
if not protocol:
protocol = 'any'
if protocol.isdigit():
protocol = int(protocol)
if protocol < 0 or protocol > 255:
raise Exception('SecurityGroupRuleInvalidProtocol-%s' % protocol)
else:
if protocol not in ['any', 'tcp', 'udp', 'icmp']:
raise Exception('SecurityGroupRuleInvalidProtocol-%s' % protocol)
if not ip_prefix and not sg_id:
if not ether_type:
ether_type = 'IPv4'
sgr_uuid = str(uuid.uuid4())
rule = PolicyRuleType(rule_uuid=sgr_uuid, direction=dir,
protocol=protocol,
src_addresses=local,
src_ports=[PortType(0, 65535)],
dst_addresses=remote,
dst_ports=[PortType(port_min, port_max)],
ethertype=ether_type)
return rule
#end _security_group_rule_build
def _security_group_rule_append(self, sg_obj, sg_rule):
rules = sg_obj.get_security_group_entries()
if rules is None:
rules = PolicyEntriesType([sg_rule])
else:
for sgr in rules.get_policy_rule() or []:
sgr_copy = copy.copy(sgr)
sgr_copy.rule_uuid = sg_rule.rule_uuid
if sg_rule == sgr_copy:
raise Exception('SecurityGroupRuleExists %s' % sgr.rule_uuid)
rules.add_policy_rule(sg_rule)
sg_obj.set_security_group_entries(rules)
#end _security_group_rule_append
def _security_group_rule_remove(self, sg_obj, sg_rule):
rules = sg_obj.get_security_group_entries()
if rules is None:
            raise Exception('SecurityGroupRuleNotExists %s' % sg_rule.rule_uuid)
else:
for sgr in rules.get_policy_rule() or []:
if sgr.rule_uuid == sg_rule.rule_uuid:
rules.delete_policy_rule(sgr)
sg_obj.set_security_group_entries(rules)
return
raise Exception('SecurityGroupRuleNotExists %s' % sg_rule.rule_uuid)
    #end _security_group_rule_remove
def security_group_create(self, sg_name, project_fq_name):
project_obj = self._vnc_lib.project_read(project_fq_name)
sg_obj = SecurityGroup(name=sg_name, parent_obj=project_obj)
self._vnc_lib.security_group_create(sg_obj)
return sg_obj
#end security_group_create
def test_sg(self):
#create sg and associate egress rule and check acls
sg1_obj = self.security_group_create('sg-1', [u'default-domain', u'default-project'])
self.wait_to_get_sg_id(sg1_obj.get_fq_name())
sg1_obj = self._vnc_lib.security_group_read(sg1_obj.get_fq_name())
rule1 = {}
rule1['port_min'] = 0
rule1['port_max'] = 65535
rule1['direction'] = 'egress'
rule1['ip_prefix'] = None
rule1['protocol'] = 'any'
rule1['ether_type'] = 'IPv4'
rule1['sg_id'] = sg1_obj.get_security_group_id()
sg_rule1 = self._security_group_rule_build(rule1, sg1_obj.get_uuid())
self._security_group_rule_append(sg1_obj, sg_rule1)
self._vnc_lib.security_group_update(sg1_obj)
self.check_security_group_id(sg1_obj.get_fq_name())
sg1_obj = self._vnc_lib.security_group_read(sg1_obj.get_fq_name())
self.check_acl_match_sg(sg1_obj.get_fq_name(), 'egress-access-control-list',
sg1_obj.get_security_group_id())
sg1_obj.set_configured_security_group_id(100)
self._vnc_lib.security_group_update(sg1_obj)
self.check_security_group_id(sg1_obj.get_fq_name(), 100)
sg1_obj = self._vnc_lib.security_group_read(sg1_obj.get_fq_name())
self.check_acl_match_sg(sg1_obj.get_fq_name(), 'egress-access-control-list',
sg1_obj.get_security_group_id())
#create another sg and associate ingress rule and check acls
sg2_obj = self.security_group_create('sg-2', [u'default-domain', u'default-project'])
self.wait_to_get_sg_id(sg2_obj.get_fq_name())
sg2_obj = self._vnc_lib.security_group_read(sg2_obj.get_fq_name())
rule2 = {}
rule2['port_min'] = 0
rule2['port_max'] = 65535
rule2['direction'] = 'ingress'
rule2['ip_prefix'] = None
rule2['protocol'] = 'any'
rule2['ether_type'] = 'IPv4'
rule2['sg_id'] = sg2_obj.get_security_group_id()
sg_rule2 = self._security_group_rule_build(rule2, sg2_obj.get_uuid())
self._security_group_rule_append(sg2_obj, sg_rule2)
self._vnc_lib.security_group_update(sg2_obj)
self.check_security_group_id(sg2_obj.get_fq_name())
self.check_acl_match_sg(sg2_obj.get_fq_name(), 'ingress-access-control-list',
sg2_obj.get_security_group_id())
#add ingress and egress rules to same sg and check for both
rule1['sg_id'] = sg2_obj.get_security_group_id()
sg_rule3 = self._security_group_rule_build(rule1, sg2_obj.get_uuid())
self._security_group_rule_append(sg2_obj, sg_rule3)
self._vnc_lib.security_group_update(sg2_obj)
self.check_security_group_id(sg2_obj.get_fq_name())
self.check_acl_match_sg(sg2_obj.get_fq_name(), 'egress-access-control-list',
sg2_obj.get_security_group_id())
self.check_acl_match_sg(sg2_obj.get_fq_name(), 'ingress-access-control-list',
sg2_obj.get_security_group_id())
#add one more ingress and egress
rule1['direction'] = 'ingress'
rule1['port_min'] = 1
rule1['port_max'] = 100
self._security_group_rule_append(sg2_obj, self._security_group_rule_build(rule1, sg2_obj.get_uuid()))
rule1['direction'] = 'egress'
rule1['port_min'] = 101
rule1['port_max'] = 200
self._security_group_rule_append(sg2_obj, self._security_group_rule_build(rule1, sg2_obj.get_uuid()))
self._vnc_lib.security_group_update(sg2_obj)
self.check_acl_match_sg(sg2_obj.get_fq_name(), 'egress-access-control-list',
sg2_obj.get_security_group_id(), True)
self.check_acl_match_sg(sg2_obj.get_fq_name(), 'ingress-access-control-list',
sg2_obj.get_security_group_id(), True)
# duplicate security group id configured, vnc api allows
# isn't this a problem?
sg2_obj.set_configured_security_group_id(100)
self._vnc_lib.security_group_update(sg2_obj)
self.check_security_group_id(sg2_obj.get_fq_name(), 100)
#sg id '0' is not allowed, should not get modified
sg1_obj.set_configured_security_group_id(0)
self._vnc_lib.security_group_update(sg1_obj)
self.check_security_group_id(sg1_obj.get_fq_name(), 8000001)
# -ve security group id not allowed, should not get modified
sg1_obj.set_configured_security_group_id(-100)
self._vnc_lib.security_group_update(sg1_obj)
self.check_security_group_id(sg1_obj.get_fq_name(), -100)
#end test_sg
def test_delete_sg(self):
#create sg and associate egress rule and check acls
sg1_obj = self.security_group_create('sg-1', [u'default-domain', u'default-project'])
self.wait_to_get_sg_id(sg1_obj.get_fq_name())
sg1_obj = self._vnc_lib.security_group_read(sg1_obj.get_fq_name())
rule1 = {}
rule1['ip_prefix'] = None
rule1['protocol'] = 'any'
rule1['ether_type'] = 'IPv4'
rule1['sg_id'] = sg1_obj.get_security_group_id()
rule1['direction'] = 'ingress'
rule1['port_min'] = 1
rule1['port_max'] = 100
rule_in_obj = self._security_group_rule_build(rule1, sg1_obj.get_uuid())
rule1['direction'] = 'egress'
rule1['port_min'] = 101
rule1['port_max'] = 200
rule_eg_obj = self._security_group_rule_build(rule1, sg1_obj.get_uuid())
self._security_group_rule_append(sg1_obj, rule_in_obj)
self._security_group_rule_append(sg1_obj, rule_eg_obj)
self._vnc_lib.security_group_update(sg1_obj)
self.check_acl_match_sg(sg1_obj.get_fq_name(), 'egress-access-control-list',
sg1_obj.get_security_group_id())
self.check_acl_match_sg(sg1_obj.get_fq_name(), 'ingress-access-control-list',
sg1_obj.get_security_group_id())
self._security_group_rule_remove(sg1_obj, rule_in_obj)
self._vnc_lib.security_group_update(sg1_obj)
self.check_acl_not_match_sg(sg1_obj.get_fq_name(), 'ingress-access-control-list',
sg1_obj.get_security_group_id())
self.check_acl_match_sg(sg1_obj.get_fq_name(), 'egress-access-control-list',
sg1_obj.get_security_group_id())
self._security_group_rule_append(sg1_obj, rule_in_obj)
self._security_group_rule_remove(sg1_obj, rule_eg_obj)
self._vnc_lib.security_group_update(sg1_obj)
self.check_acl_match_sg(sg1_obj.get_fq_name(), 'ingress-access-control-list',
sg1_obj.get_security_group_id())
self.check_acl_not_match_sg(sg1_obj.get_fq_name(), 'egress-access-control-list',
sg1_obj.get_security_group_id())
self._security_group_rule_remove(sg1_obj, rule_in_obj)
self._vnc_lib.security_group_update(sg1_obj)
self.check_acl_not_match_sg(sg1_obj.get_fq_name(), 'ingress-access-control-list',
sg1_obj.get_security_group_id())
self.check_acl_not_match_sg(sg1_obj.get_fq_name(), 'egress-access-control-list',
sg1_obj.get_security_group_id())
self.check_no_policies_for_sg(sg1_obj.get_fq_name())
self._vnc_lib.security_group_delete(fq_name=sg1_obj.get_fq_name())
    #end test_delete_sg
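    # Route targets embed the global ASN: updating the autonomous system in the global
    # system config must regenerate the VN and logical-router route targets.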
def test_asn(self):
# create vn1
vn1_name = self.id() + 'vn1'
vn1_obj = self.create_virtual_network(vn1_name, '10.0.0.0/24')
for obj in [vn1_obj]:
ident_name = self.get_obj_imid(obj)
gevent.sleep(2)
ifmap_ident = self.assertThat(FakeIfmapClient._graph, Contains(ident_name))
self.check_ri_asn(self.get_ri_name(vn1_obj), 'target:64512:8000001')
# create router1
r1_name = self.id() + 'router1'
router1 = self.create_bgp_router(r1_name, 'contrail')
self.check_bgp_asn(router1.get_fq_name(), 64512)
# create virtual machine interface
vmi_name = self.id() + 'vmi1'
vmi = VirtualMachineInterface(vmi_name, parent_type='project', fq_name=['default-domain', 'default-project', vmi_name])
vmi.add_virtual_network(vn1_obj)
self._vnc_lib.virtual_machine_interface_create(vmi)
# create logical router
lr_name = self.id() + 'lr1'
lr = LogicalRouter(lr_name)
lr.add_virtual_machine_interface(vmi)
self._vnc_lib.logical_router_create(lr)
self.check_lr_asn(lr.get_fq_name(), 'target:64512:8000002')
#update global system config but dont change asn value for equality path
gs = self._vnc_lib.global_system_config_read(fq_name=[u'default-global-system-config'])
gs.set_autonomous_system(64512)
self._vnc_lib.global_system_config_update(gs)
# check route targets
self.check_ri_asn(self.get_ri_name(vn1_obj), 'target:64512:8000001')
self.check_bgp_asn(router1.get_fq_name(), 64512)
self.check_lr_asn(lr.get_fq_name(), 'target:64512:8000002')
#update ASN value
gs = self._vnc_lib.global_system_config_read(fq_name=[u'default-global-system-config'])
gs.set_autonomous_system(50000)
self._vnc_lib.global_system_config_update(gs)
# check new route targets
self.check_ri_asn(self.get_ri_name(vn1_obj), 'target:50000:8000001')
self.check_bgp_asn(router1.get_fq_name(), 50000)
self.check_lr_asn(lr.get_fq_name(), 'target:50000:8000002')
#end test_asn
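    # A floating IP bound to the left service interface must show up in (and later
    # disappear from) the VMI's VRF assign table.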
def test_fip(self):
# create vn1
vn1_name = self.id() + 'vn1'
vn1_obj = self.create_virtual_network(vn1_name, '10.0.0.0/24')
# create vn2
vn2_name = self.id() + 'vn2'
vn2_obj = self.create_virtual_network(vn2_name, '20.0.0.0/24')
service_name = self.id() + 's1'
np = self.create_network_policy(vn1_obj, vn2_obj, [service_name], 'in-network')
sc = self.wait_to_get_sc()
sc_ri_name = 'service-'+sc[0]+'-default-domain_default-project_' + service_name
self.check_ri_state_vn_policy(self.get_ri_name(vn1_obj),
self.get_ri_name(vn1_obj, sc_ri_name))
self.check_ri_state_vn_policy(self.get_ri_name(vn2_obj, sc_ri_name),
self.get_ri_name(vn2_obj))
vmi_fq_name = 'default-domain:default-project:default-domain__default-project__test.test_service.TestPolicy.test_fips1__1__left__1'
vmi = self._vnc_lib.virtual_machine_interface_read(vmi_fq_name.split(':'))
vn3_name = 'vn-public'
vn3_obj = VirtualNetwork(vn3_name)
vn3_obj.set_router_external(True)
ipam3_obj = NetworkIpam('ipam3')
self._vnc_lib.network_ipam_create(ipam3_obj)
vn3_obj.add_network_ipam(ipam3_obj, VnSubnetsType(
[IpamSubnetType(SubnetType("192.168.7.0", 24))]))
vn3_uuid = self._vnc_lib.virtual_network_create(vn3_obj)
fip_pool_name = 'vn_public_fip_pool'
fip_pool = FloatingIpPool(fip_pool_name, vn3_obj)
self._vnc_lib.floating_ip_pool_create(fip_pool)
fip_obj = FloatingIp("fip1", fip_pool)
default_project = self._vnc_lib.project_read(fq_name=[u'default-domain', u'default-project'])
fip_obj.set_project(default_project)
fip_uuid = self._vnc_lib.floating_ip_create(fip_obj)
fip_obj.set_virtual_machine_interface(vmi)
self._vnc_lib.floating_ip_update(fip_obj)
fip_obj = self._vnc_lib.floating_ip_read(fip_obj.get_fq_name())
for obj in [fip_obj]:
ident_name = self.get_obj_imid(obj)
ifmap_ident = self.assertThat(FakeIfmapClient._graph, Contains(ident_name))
self.wait_to_get_link(ident_name, vmi_fq_name)
fip = fip_obj.get_floating_ip_address()
self.check_vrf_assign_table(vmi.get_fq_name(), fip, True)
fip_fq_name = fip_obj.get_fq_name()
self._vnc_lib.floating_ip_delete(fip_fq_name)
self.wait_to_remove_link(self.get_obj_imid(vmi), fip_fq_name)
self.check_vrf_assign_table(vmi.get_fq_name(), fip, False)
# end class TestRouteTable
| apache-2.0 | 1,834,423,797,736,134,400 | 43.872645 | 139 | 0.583993 | false |
yanikou19/pymatgen | pymatgen/io/gwwrapper/GWworkflows.py | 1 | 15024 | # coding: utf-8
from __future__ import unicode_literals, division, print_function
"""
Workflows for GW calculations:
VaspGWFWWorkFlow: a FireWorks workflow for VASP
SingleAbinitGWWorkFlow: a workflow for ABINIT
Under construction:
a general GW workflow that should manage all the code-independent logic
"""
__author__ = "Michiel van Setten"
__copyright__ = " "
__version__ = "0.9"
__maintainer__ = "Michiel van Setten"
__email__ = "[email protected]"
__date__ = "May 2014"
import os
import os.path
import copy
from pymatgen.io.abinitio.abiobjects import asabistructure
from pymatgen.io.abinitio.calculations import g0w0_extended
from pymatgen.io.abinitio.flows import AbinitFlow
from pymatgen.io.abinitio.tasks import TaskManager
from pymatgen.io.abinitio.pseudos import PseudoTable
from pymatgen.io.gwwrapper.GWtasks import *
from pymatgen.io.gwwrapper.helpers import now, s_name, expand, read_grid_from_file, is_converged
from pymatgen.io.gwwrapper.helpers import read_extra_abivars
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
logger = logging.getLogger(__name__)
class GWWorkflow(object):
"""
UNDER CONSTRUCTION
Base class for GW workflows. the code specific implementations should extend this one.
the base class should contain the convergence calculations structure
"""
@property
def grid(self):
return self._grid
@property
def all_done(self):
return self._all_done
@property
def workdir(self):
return self._workdir
def set_status(self, structure):
self._grid = 0
self._all_done = False
self._workdir = None
self._converged = is_converged(False, structure)
try:
self._grid = read_grid_from_file(s_name(structure)+".full_res")['grid']
self._all_done = read_grid_from_file(s_name(structure)+".full_res")['all_done']
self._workdir = os.path.join(s_name(structure), 'work_'+str(self.grid))
except (IOError, OSError):
pass
class VaspGWFWWorkFlow():
"""
Object containing a VASP GW workflow for a single structure
"""
def __init__(self):
self.work_list = []
self.connections = {}
self.fw_id = 1
self.prep_id = 1
self.wf = []
def add_work(self, parameters):
from fireworks.core.firework import FireWork
tasks = []
job = parameters['job']
        print('adding job ' + job + ' to the work list as ', self.fw_id)
if job == 'prep':
launch_spec = {'task_type': 'Preparation job', '_category': 'cluster', '_queueadapter': 'qadapterdict'}
task = VaspGWInputTask(parameters)
tasks.append(task)
task = VaspGWExecuteTask(parameters)
tasks.append(task)
task = VaspGWToDiagTask(parameters)
tasks.append(task)
task = VaspGWExecuteTask(parameters)
tasks.append(task)
fw = FireWork(tasks, spec=launch_spec, name=job, created_on=now(), fw_id=self.fw_id)
self.connections[self.fw_id] = []
self.prep_id = self.fw_id
self.fw_id += 1
print(self.connections)
elif job in ['G0W0', 'GW0', 'scGW0']:
launch_spec = {'task_type': 'GW job', '_category': 'cluster', '_queueadapter': 'qadapterdict'}
task = VaspGWInputTask(parameters)
tasks.append(task)
task = VaspGWGetPrepResTask(parameters)
tasks.append(task)
task = VaspGWExecuteTask(parameters)
tasks.append(task)
if parameters['spec']['converge']:
task = VaspGWWriteConDatTask(parameters)
tasks.append(task)
task = VaspGWTestConTask(parameters)
tasks.append(task)
fw = FireWork(tasks, spec=launch_spec, name=job, created_on=now(), fw_id=self.fw_id)
self.connections[self.fw_id] = []
self.connections[self.prep_id].append(self.fw_id)
self.fw_id += 1
else:
fw = []
print('unspecified job, this should have been captured before !!')
exit()
self.work_list.append(fw)
def create(self):
from fireworks.core.firework import Workflow
self.wf = Workflow(self.work_list, self.connections, name='VaspGWFWWorkFlow', created_on=now())
print('creating workflow')
def add_to_db(self):
from fireworks.core.launchpad import LaunchPad
launchpad_file = os.path.join(os.environ['FW_CONFIG_DIR'], 'my_launchpad.yaml')
lp = LaunchPad.from_file(launchpad_file)
lp.add_wf(self.wf)
class SingleAbinitGWWorkFlow():
"""
GW workflow for Abinit
"""
RESPONSE_MODELS = ["cd", "godby", "hybersten", "linden", "farid"]
TESTS = {'ecuteps': {'test_range': (10, 14), 'method': 'direct', 'control': "gap", 'level': "sigma"},
'nscf_nbands': {'test_range': (30, 40), 'method': 'set_bands', 'control': "gap", 'level': "nscf"},
'response_model': {'test_range': RESPONSE_MODELS, 'method': 'direct', 'control': 'gap', 'level': 'screening'}}
# scf level test are run independently, the last value will be used in the nscf and sigma tests
#'test': {'test_range': (1, 2, 3), 'method': 'direct', 'control': "e_ks_max", 'level': "scf"},
CONVS = {'ecut': {'test_range': (28, 32, 36, 40, 44), 'method': 'direct', 'control': "e_ks_max", 'level': "scf"},
'ecuteps': {'test_range': (4, 8, 12, 16, 20), 'method': 'direct', 'control': "gap", 'level': "sigma"},
'nscf_nbands': {'test_range': (5, 10, 20, 30, 40), 'method': 'set_bands', 'control': "gap", 'level': "nscf"}}
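    # structure: pymatgen structure to compute; spec: user workflow settings (mode,
    # converge/test flags, k-point density, ...); option: previously converged parameter
    # values, or None on a first run.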
def __init__(self, structure, spec, option=None):
self.structure = structure
self.spec = spec
self.option = option
self.tests = self.__class__.get_defaults_tests()
self.convs = self.__class__.get_defaults_convs()
self.response_models = self.__class__.get_response_models()
if self.option is None:
self.all_converged = False
elif len(self.option) == len(self.convs):
self.all_converged = True
else:
self.all_converged = False
path_add = '.conv' if self.all_converged else ''
self.work_dir = s_name(self.structure)+path_add
abi_pseudo = os.environ['ABINIT_PS_EXT']
abi_pseudo_dir = os.environ['ABINIT_PS']
pseudos = []
for element in self.structure.composition.element_composition:
pseudo = os.path.join(abi_pseudo_dir, str(element) + abi_pseudo)
pseudos.append(pseudo)
self.pseudo_table = PseudoTable(pseudos)
@classmethod
def get_defaults_tests(cls):
return copy.deepcopy(cls.TESTS)
@classmethod
def get_defaults_convs(cls):
return copy.deepcopy(cls.CONVS)
@classmethod
def get_response_models(cls):
return copy.deepcopy(cls.RESPONSE_MODELS)
def get_electrons(self, structure):
"""
Method for retrieving the number of valence electrons
"""
electrons = 0
for element in structure.species:
entries = self.pseudo_table.pseudos_with_symbol(element.symbol)
assert len(entries) == 1
pseudo = entries[0]
electrons += pseudo.Z_val
return electrons
def get_bands(self, structure):
"""
Method for retrieving the standard number of bands
"""
bands = self.get_electrons(structure) / 2 + len(structure)
return int(bands)
def get_work_dir(self):
name = s_name(self.structure)
if not self.all_converged:
return str(name)+'_'+str(self.option['test'])+'_'+str(self.option['value'])
else:
return str(name)
def create(self):
"""
create single abinit G0W0 flow
"""
manager = 'slurm' if 'ceci' in self.spec['mode'] else 'shell'
# an AbiStructure object has an overwritten version of get_sorted_structure that sorts according to Z
# this could also be pulled into the constructor of Abistructure
abi_structure = asabistructure(self.structure).get_sorted_structure()
manager = TaskManager.from_user_config()
# Initialize the flow.
flow = AbinitFlow(self.work_dir, manager, pickle_protocol=0)
# flow = AbinitFlow(self.work_dir, manager)
# kpoint grid defined over density 40 > ~ 3 3 3
if self.spec['converge'] and not self.all_converged:
# (2x2x2) gamma centered mesh for the convergence test on nbands and ecuteps
# if kp_in is present in the specs a kp_in X kp_in x kp_in mesh is used for the convergence studie
if 'kp_in' in self.spec.keys():
if self.spec['kp_in'] > 9:
print('WARNING:\nkp_in should be < 10 to generate an n x n x n mesh\nfor larger values a grid with '
'density kp_in will be generated')
scf_kppa = self.spec['kp_in']
else:
scf_kppa = 2
else:
# use the specified density for the final calculation with the converged nbands and ecuteps of other
# stand alone calculations
scf_kppa = self.spec['kp_grid_dens']
gamma = True
# 'standard' parameters for stand alone calculation
nb = self.get_bands(self.structure)
nscf_nband = [10 * nb]
ecuteps = [8]
ecutsigx = 44
extra_abivars = dict(
paral_kgb=1,
inclvkb=2,
ecut=44,
pawecutdg=88,
gwmem='10',
getden=-1,
istwfk="*1",
timopt=-1,
nbdbuf=8
)
# read user defined extra abivars from file 'extra_abivars' should be dictionary
extra_abivars.update(read_extra_abivars())
response_models = ['godby']
if 'ppmodel' in extra_abivars.keys():
response_models = [extra_abivars.pop('ppmodel')]
if self.option is not None:
for k in self.option.keys():
if k in ['ecuteps', 'nscf_nbands']:
pass
else:
extra_abivars.update({k: self.option[k]})
if k == 'ecut':
extra_abivars.update({'pawecutdg': self.option[k]*2})
try:
grid = read_grid_from_file(s_name(self.structure)+".full_res")['grid']
all_done = read_grid_from_file(s_name(self.structure)+".full_res")['all_done']
workdir = os.path.join(s_name(self.structure), 'w'+str(grid))
except (IOError, OSError):
grid = 0
all_done = False
workdir = None
if not all_done:
if (self.spec['test'] or self.spec['converge']) and not self.all_converged:
if self.spec['test']:
print('| setting test calculation')
tests = SingleAbinitGWWorkFlow(self.structure, self.spec).tests
response_models = []
else:
if grid == 0:
print('| setting convergence calculations for grid 0')
tests = SingleAbinitGWWorkFlow(self.structure, self.spec).convs
else:
print('| extending grid')
tests = expand(SingleAbinitGWWorkFlow(self.structure, self.spec).convs, grid)
ecuteps = []
nscf_nband = []
for test in tests:
if tests[test]['level'] == 'scf':
if self.option is None:
extra_abivars.update({test + '_s': tests[test]['test_range']})
elif test in self.option:
extra_abivars.update({test: self.option[test]})
else:
extra_abivars.update({test + '_s': tests[test]['test_range']})
else:
for value in tests[test]['test_range']:
if test == 'nscf_nbands':
nscf_nband.append(value * self.get_bands(self.structure))
#scr_nband takes nscf_nbands if not specified
#sigma_nband takes scr_nbands if not specified
if test == 'ecuteps':
ecuteps.append(value)
if test == 'response_model':
response_models.append(value)
elif self.all_converged:
print('| setting up for testing the converged values at the high kp grid ')
# in this case a convergence study has already been performed.
# The resulting parameters are passed as option
ecuteps = [self.option['ecuteps'], self.option['ecuteps'] + self.convs['ecuteps']['test_range'][1] -
self.convs['ecuteps']['test_range'][0]]
nscf_nband = [self.option['nscf_nbands'], self.option['nscf_nbands'] + self.convs['nscf_nbands'][
'test_range'][1] - self.convs['nscf_nbands']['test_range'][0]]
# for option in self.option:
# if option not in ['ecuteps', 'nscf_nband']:
# extra_abivars.update({option + '_s': self.option[option]})
else:
print('| all is done for this material')
return
        logger.info('ecuteps : %s', ecuteps)
        logger.info('extra : %s', extra_abivars)
        logger.info('nscf_nb : %s', nscf_nband)
work = g0w0_extended(abi_structure, self.pseudo_table, scf_kppa, nscf_nband, ecuteps, ecutsigx,
accuracy="normal", spin_mode="unpolarized", smearing=None, response_models=response_models,
charge=0.0, sigma_nband=None, scr_nband=None, gamma=gamma,
**extra_abivars)
flow.register_work(work, workdir=workdir)
return flow.allocate()
def create_job_file(self, serial=True):
"""
Create the jobfile for starting all schedulers manually
serial = True creates a list that can be submitted as job that runs all schedulers a a batch job
(the job header needs to be added)
serial = False creates a list that can be used to start all schedulers on the frontend in the background
"""
job_file = open("job_collection", mode='a')
if serial:
job_file.write('abirun.py ' + self.work_dir + ' scheduler > ' + self.work_dir + '.log\n')
else:
job_file.write('nohup abirun.py ' + self.work_dir + ' scheduler > ' + self.work_dir + '.log & \n')
job_file.write('sleep 2\n')
job_file.close()
| mit | -4,241,482,951,457,405,000 | 40.617729 | 123 | 0.563632 | false |
vane/django_tornado | django_tornado.py | 1 | 1266 | #!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = '[email protected]'
import os
import logging
import tornado.httpserver
import tornado.ioloop
import tornado.web
import tornado.wsgi
import sockjs.tornado
from django.core.handlers import wsgi
import pusher
import constraint
import logging as _
SETTINGS_PATH="django_tornado.settings"
_H = _.StreamHandler()
_F = _.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logging = _.getLogger('')
logging.setLevel(_.DEBUG)
logging.addHandler(_H)
_H.setFormatter(_F)
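# Serve SockJS connections on /stream and static files directly from Tornado, and fall
# back to the wrapped Django WSGI application for every other request.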
def main():
wsgi_app = tornado.wsgi.WSGIContainer(wsgi.WSGIHandler())
Router = sockjs.tornado.SockJSRouter(pusher.PushClient, '/stream')
Router.urls.append((r'/static/(.*)$', tornado.web.StaticFileHandler, {'path': './static'}))
Router.urls.append(('.*', tornado.web.FallbackHandler, dict(fallback=wsgi_app)))
logging.debug("start")
ping = pusher.Pinger()
ping.start()
tornado_app = tornado.web.Application(Router.urls)
server = tornado.httpserver.HTTPServer(tornado_app)
server.listen(address=constraint.HOST, port=constraint.PORT)
tornado.ioloop.IOLoop.instance().start()
if __name__ == '__main__':
os.environ.setdefault("DJANGO_SETTINGS_MODULE", SETTINGS_PATH)
main()
| bsd-3-clause | -9,177,310,851,977,519,000 | 23.823529 | 95 | 0.699842 | false |
stormi/tsunami | src/secondaires/magie/editeurs/spedit/supprimer.py | 1 | 2985 | # -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""File containing the 'Supprimer' (delete) editor context"""
from primaires.interpreteur.editeur.supprimer import Supprimer
class NSupprimer(Supprimer):
    """Class defining the 'supprimer' (delete) editor context.
    This context is specifically used to delete a spell prototype.
"""
    def interpreter(self, msg):
        """Interpretation of the context"""
msg = msg.lower()
sort = self.objet
if msg == "oui":
objet = type(self).importeur
for nom in self.action.split("."):
objet = getattr(objet, nom)
nb_persos = 0
for perso in type(self).importeur.connex.joueurs:
if sort.cle in perso.sorts and perso.sorts[sort.cle] > 0:
nb_persos += 1
if nb_persos > 0:
s = nb_persos > 1 and "s" or ""
i = nb_persos > 1 and "issen" or "î"
self.pere << "|err|{} personnage{s} conna{i}t ce sort. " \
"Opération annulée.|ff|".format(nb_persos, s=s, i=i)
self.migrer_contexte(self.opts.rci_ctx_prec)
else:
objet(sort.cle)
self.fermer()
self.pere << self.confirme
elif msg == "non":
self.migrer_contexte(self.opts.rci_ctx_prec)
else:
self.pere << "|err|Choix invalide.|ff|"
| bsd-3-clause | 6,382,476,464,064,139,000 | 42.779412 | 79 | 0.659053 | false |
alepulver/my-thesis | results-tables/aggregators/events.py | 1 | 1601 | from .order import Order
class Events:
def __init__(self, stage):
self.stage = stage
self.elements = stage.stage_elements()
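    # Count the stage's drawing events of the given type, keyed by the element they refer to.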
def count_by_type(self, event_type):
stage = self.stage
counts = {}
for e in self.elements:
counts[e] = 0
events = stage._data['results']['drawing']['events']
for e in events:
if e['type'] == event_type:
counts[e['arg']] += 1
return counts
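    # Sum, per element, the time during which that element was the one being added or selected.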
def time_spent(self):
times = {}
for e in self.elements:
times[e] = 0
current_element = None
last_timestamp = 0
events = self.stage._data['results']['drawing']['events']
for e in events:
if 'time' in e.keys():
timestamp = e['time']
elif 'time' in e['data'].keys():
timestamp = e['data']['time']
if current_element is not None:
times[current_element] += timestamp - last_timestamp
if e['type'] in ['add', 'select']:
current_element = e['arg']
last_timestamp = timestamp
return times
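    # Elements in the order they were picked during the stage's 'choose' phase.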
def selection_order(self):
result = []
events = self.stage._data['results']['choose']['events']
for e in events:
if e['type'] == 'choose':
result.append(e['arg'])
return result
def order_matching(self):
shown = self.stage._data['results']['choose']['show_order']
selected = self.selection_order()
return Order.matching_score(shown, selected)
| mit | -5,279,545,875,092,008,000 | 27.087719 | 68 | 0.511555 | false |
androguard/androguard | tools/test_androguard_apk_collection.py | 1 | 6221 | from androguard.core.bytecodes.apk import APK
from androguard.core.bytecodes.dvm import DalvikVMFormat
from androguard.core.analysis.analysis import Analysis
from androguard.decompiler.dad.decompile import DvMethod
import logging
import traceback
import sys
import os
"""
This is a script to call several functions on APK and DEX files
and run the decompiler on all methods inside the DEX files.
You just need some folder where you store APK files.
You can also adjust the function samples() to your needs.
This script will create a logfile (see LOG_FILENAME) and log all
errors regarding those files in there.
The list of functions to call can be adjusted as well.
Note that you can currently only call functions that do not
take an argument.
This script is not intended to be used on CI platforms,
as it can take ages to run!
A single APK already takes several minutes.
"""
# Adjust those two variables to your own needs:
COLLECTION_PATH = r"G:\testset"
LOG_FILENAME = 'G:\ANDROGUARD_TESTS_BUGS.txt'
logging.basicConfig(filename=LOG_FILENAME, level=logging.INFO)
def samples():
for root, _, files in os.walk(COLLECTION_PATH):
for f in files:
yield os.path.join(root, f)
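# Walk every sample APK, exercising the APK/DEX accessors and the DAD decompiler;
# failures are logged to LOG_FILENAME and the run continues with the next file.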
def main():
for path in samples():
print(path)
        logging.error("Processing " + path)
tests_apk = ["is_valid_APK", "get_filename", "get_app_name", "get_app_icon",
"get_package", "get_androidversion_code", "get_androidversion_name",
"get_files", "get_files_types", "get_files_crc32", "get_files_information",
"get_raw", "get_dex", "get_all_dex", "get_main_activity",
"get_activities", "get_services", "get_receivers", "get_providers",
"get_permissions", "get_details_permissions", "get_requested_aosp_permissions",
"get_requested_aosp_permissions_details", "get_requested_third_party_permissions",
"get_declared_permissions", "get_declared_permissions_details", "get_max_sdk_version",
"get_min_sdk_version", "get_target_sdk_version", "get_libraries", "get_android_manifest_axml",
"get_android_manifest_xml", "get_android_resources", "get_signature_name", "get_signature_names",
"get_signature", "get_signatures"]
tests_dex = ["get_api_version", "get_classes_def_item", "get_methods_id_item", "get_fields_id_item",
"get_codes_item", "get_string_data_item",
"get_debug_info_item", "get_header_item", "get_class_manager", "show",
# "save", # FIXME broken
"get_classes_names", "get_classes",
"get_all_fields", "get_fields", "get_methods", "get_len_methods",
"get_strings", "get_format_type", "create_python_export",
"get_BRANCH_DVM_OPCODES", "get_determineNext",
"get_determineException", "print_classes_hierarchy",
"list_classes_hierarchy", "get_format"]
try:
# Testing APK
a = APK(path)
for t in tests_apk:
print(t)
x = getattr(a, t)
try:
x()
except Exception as aaa:
print(aaa)
traceback.print_exc()
print(path, aaa, file=sys.stderr)
logging.exception("{} .. {}".format(path, t))
# Testing DEX
dx = Analysis()
for dex in a.get_all_dex():
d = DalvikVMFormat(dex)
dx.add(d)
# Test decompilation
for c in d.get_classes():
for m in c.get_methods():
mx = dx.get_method(m)
ms = DvMethod(mx)
try:
ms.process(doAST=True)
except Exception as aaa:
print(aaa)
traceback.print_exc()
print(path, aaa, file=sys.stderr)
logging.exception("{} .. {} .. {}".format(path, c.get_name(), m.get_name()))
ms2 = DvMethod(mx)
try:
ms2.process(doAST=False)
except Exception as aaa:
print(aaa)
traceback.print_exc()
print(path, aaa, file=sys.stderr)
logging.exception("{} .. {} .. {}".format(path, c.get_name(), m.get_name()))
# DEX tests
for t in tests_dex:
print(t)
x = getattr(d, t)
try:
x()
except Exception as aaa:
print(aaa)
traceback.print_exc()
print(path, aaa, file=sys.stderr)
logging.exception("{} .. {}".format(path, t))
# Analysis Tests
try:
dx.create_xref()
except Exception as aaa:
print(aaa)
traceback.print_exc()
print(path, aaa, file=sys.stderr)
logging.exception("{} .. {} at Analysis".format(path, t))
# MethodAnalysis tests
for m in dx.methods.values():
for bb in m.get_basic_blocks():
try:
list(bb.get_instructions())
except Exception as aaa:
print(aaa)
traceback.print_exc()
print(path, aaa, file=sys.stderr)
logging.exception("{} .. {} at BasicBlock {}".format(path, t, m))
except KeyboardInterrupt:
raise
except FileNotFoundError:
pass
except Exception as e:
print(e)
traceback.print_exc()
print(path, e, file=sys.stderr)
logging.exception(path)
if __name__ == "__main__":
main()
| apache-2.0 | -7,387,598,208,298,263,000 | 38.373418 | 114 | 0.509082 | false |
brclark-usgs/flopy | examples/Tutorials/Tutorial01/tutorial01.py | 1 | 2026 |
import numpy as np
import flopy
# Assign name and create modflow model object
modelname = 'tutorial1'
mf = flopy.modflow.Modflow(modelname, exe_name='mf2005')
# Model domain and grid definition
Lx = 1000.
Ly = 1000.
ztop = 0.
zbot = -50.
nlay = 1
nrow = 10
ncol = 10
delr = Lx/ncol
delc = Ly/nrow
delv = (ztop - zbot) / nlay
botm = np.linspace(ztop, zbot, nlay + 1)
# Create the discretization object
dis = flopy.modflow.ModflowDis(mf, nlay, nrow, ncol, delr=delr, delc=delc,
top=ztop, botm=botm[1:])
# Variables for the BAS package
ibound = np.ones((nlay, nrow, ncol), dtype=np.int32)
ibound[:, :, 0] = -1
ibound[:, :, -1] = -1
strt = np.ones((nlay, nrow, ncol), dtype=np.float32)
strt[:, :, 0] = 10.
strt[:, :, -1] = 0.
bas = flopy.modflow.ModflowBas(mf, ibound=ibound, strt=strt)
# Add LPF package to the MODFLOW model
lpf = flopy.modflow.ModflowLpf(mf, hk=10., vka=10., ipakcb=53)
# Add OC package to the MODFLOW model
spd = {(0, 0): ['print head', 'print budget', 'save head', 'save budget']}
oc = flopy.modflow.ModflowOc(mf, stress_period_data=spd, compact=True)
# Add PCG package to the MODFLOW model
pcg = flopy.modflow.ModflowPcg(mf)
# Write the MODFLOW model input files
mf.write_input()
# Run the MODFLOW model
success, buff = mf.run_model()
# Post process the results
import matplotlib.pyplot as plt
import flopy.utils.binaryfile as bf
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(1, 1, 1, aspect='equal')
hds = bf.HeadFile(modelname+'.hds')
times = hds.get_times()
head = hds.get_data(totim=times[-1])
levels = np.linspace(0, 10, 11)
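# Read the cell-by-cell budget file to get the face flows used for the discharge vectors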
cbb = bf.CellBudgetFile(modelname+'.cbc')
kstpkper_list = cbb.get_kstpkper()
frf = cbb.get_data(text='FLOW RIGHT FACE', totim=times[-1])[0]
fff = cbb.get_data(text='FLOW FRONT FACE', totim=times[-1])[0]
modelmap = flopy.plot.ModelMap(model=mf, layer=0)
qm = modelmap.plot_ibound()
lc = modelmap.plot_grid()
cs = modelmap.contour_array(head, levels=levels)
quiver = modelmap.plot_discharge(frf, fff, head=head)
plt.show()
| bsd-3-clause | 3,929,625,849,699,004,000 | 26.013333 | 74 | 0.6846 | false |
zetaops/ulakbus | ulakbus/views/ogrenci/kayit_silme.py | 1 | 10710 | # -*- coding: utf-8 -*-
# Copyright (C) 2015 ZetaOps Inc.
#
# This file is licensed under the GNU General Public License v3
# (GPLv3). See LICENSE.txt for details.
#
from zengine.forms import JsonForm
from ulakbus.models import OgrenciProgram, Ogrenci, Role, User, AbstractRole
from zengine.forms import fields
from zengine.views.crud import CrudView
from zengine.lib.translation import gettext as _
from ulakbus.lib.role import AbsRole
from ulakbus.lib.ogrenci import kaydi_silinmis_abs_role
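# Abstract roles of currently enrolled (active or registration-frozen) students, and the
# corresponding "record deleted" roles used to detect an already-deleted registration.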
ABSTRACT_ROLE_LIST = [
AbsRole.LISANS_OGRENCISI_AKTIF.name,
AbsRole.LISANS_OGRENCISI_KAYIT_DONDURMUS.name,
AbsRole.ON_LISANS_OGRENCISI_AKTIF.name,
AbsRole.ON_LISANS_OGRENCISI_KAYIT_DONDURMUS.name,
AbsRole.YUKSEK_LISANS_OGRENCISI_AKTIF.name,
AbsRole.YUKSEK_LISANS_OGRENCISI_KAYIT_DONDURMUS.name,
AbsRole.DOKTORA_OGRENCISI_AKTIF.name,
AbsRole.DOKTORA_OGRENCISI_KAYIT_DONDURMUS.name
]
ABSTRACT_ROLE_LIST_SILINMIS = [
AbsRole.LISANS_OGRENCISI_KAYIT_SILINMIS.name,
AbsRole.ON_LISANS_OGRENCISI_KAYIT_SILINMIS.name,
AbsRole.YUKSEK_LISANS_OGRENCISI_KAYIT_SILINMIS.name,
AbsRole.DOKTORA_OGRENCISI_KAYIT_SILINMIS.name
]
class KayitSil(CrudView):
""" Kayıt Silme İş Akışı
Kayıt silme iş akışı 8 adımdan oluşmaktadır.
* Kaydı Kontrol Et
* Kaydı Silinen Öğrenci
* Kayıt Silme İşlemini Onayla
* Kayıt Silme İşleminden Vazgeç
* Fakülte Karar No
* Ayrılma nedenini seç
* Öğrenci programı seç
* Bilgi ver
Kayıt silme iş akışında öğrencinin kayıtlı olduğu öğrenci programları silinmez,
öğrencinin kayıtlı olduğu öğrenci programlarının ayrılma nedeni ve öğrencilik
statüsü field'larına değerler atanır.
Bu iş akışında kullanılan metotlar şu şekildedir.
Kaydı Kontrol Et:
Öğrencinin kaydının silinip silinmediğini kontrol eder.
Kaydı Silinen Öğrenci:
Öğrencinin kaydı silinmişse kaydın silindiğine dair bilgi mesajı ekrana basılır.
Kayıt Silme İşlemini Onayla:
Personel kayıt silme işlemine devam etmek isteyip istemediği sorulur.
Kayıt Silme İşleminden Vazgeç:
Personelin kayıt silme işleminden vazgeçmesi durumunda ekrana silme
işlemin iptal edildiğine dair bilgi mesajı basılır.
Fakülte Karar No:
Fakülte Yönetim Kurulu tarafından belirlenen karar no girilir.
Ayrılma nedeni seç:
Öğrencinin ayrılma nedeni seçilir.
Öğrenci programı seç:
Öğrencinin kayıtlı olduğu öğrenci programlarının ayrılık nedeni ve öğrencilik statüsü
field'larına değerler atanır.
Bilgi ver:
Danışmana ve öğrenciye kayıt silme işlemi ile ilgili bilgi verilir.
Kayıt silme iş akışının son adımıdır. Bu adımdan sonra iş akışı sona erer.
Bu sınıf ``CrudView`` extend edilerek hazırlanmıştır. Temel model ``OgrenciProgram``
modelidir. Meta.model bu amaçla kullanılmıştır.
Adımlar arası geçiş manuel yürütülmektedir.
"""
class Meta:
model = 'OgrenciProgram'
def kontrol(self):
"""
        Checks whether the student's record has already been deleted.
"""
self.current.task_data['command'] = 'kaydi_silinen_ogrenci'
self.current.task_data['ogrenci_id'] = self.current.input['id']
ogrenci = Ogrenci.objects.get(self.current.task_data['ogrenci_id'])
programlar = OgrenciProgram.objects.filter(ogrenci=ogrenci)
self.current.task_data['roles'] = []
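        # If any of the student's roles is still an active (non-deleted) student
        # role, route the workflow to the confirmation step instead of the
        # "already deleted" message.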
for program in programlar:
roles = Role.objects.filter(user=ogrenci.user, unit=program.program.birim)
for role in roles:
self.current.task_data['roles'].append(role.abstract_role.name)
name = role.abstract_role.key
if name not in ABSTRACT_ROLE_LIST_SILINMIS and name in ABSTRACT_ROLE_LIST:
self.current.task_data['command'] = 'kayit_silme_islemini_onayla'
break
def kaydi_silinen_ogrenci(self):
"""
        If the student's record has been deleted, an informational message stating
        that the record was deleted is shown on screen.
"""
ogrenci = Ogrenci.objects.get(self.current.task_data['ogrenci_id'])
self.current.output['msgbox'] = {
'type': 'warning', "title": _(u'Kayıt Silme Başarılı'),
"msg": _(u' %s adlı öğrencinin kaydı daha önceden silinmiştir.') % ogrenci
}
def kayit_silme_islemini_onayla(self):
"""
        The staff member is asked whether they want to continue with the deregistration.
"""
ogrenci = Ogrenci.objects.get(self.current.task_data['ogrenci_id'])
_form = JsonForm(current=self.current,
title=_(u'Kayıt Silme İşlemini Onaylayınız.'))
_form.help_text = _(u'%s adlı öğrencinin %s rollerini silmek üzerisiniz. Emin misiniz?') % (
ogrenci, '-'.join(
name for name in self.current.task_data['roles']))
_form.kaydet = fields.Button('Onayla', flow='fakulte_yonetim_karari')
_form.vazgecme = fields.Button('Vazgeç', flow='kayit_silme_isleminden_vazgec')
self.form_out(_form)
def kayit_silme_isleminden_vazgec(self):
"""
        If the staff member cancels the deregistration, an informational message
        stating that the deletion was cancelled is shown on screen.
"""
self.current.output['msgbox'] = {
'type': 'warning', "title": _(u'Kayıt Silme İşlemi'),
"msg": _(u'Kayıt silme işlemi iptal edilmiştir.')
}
def fakulte_yonetim_karari(self):
"""
        The decision number determined by the Faculty Executive Board is entered.
"""
        # TODO: The Faculty Executive Board decision will be logged.
_form = JsonForm(current=self.current,
title=_(u'Fakülte Yönetim Kurulunun Karar Numarasını Giriniz.'))
_form.karar = fields.String(_(u'Karar No'), index=True)
_form.kaydet = fields.Button(_(u'Kaydet'))
self.form_out(_form)
def ayrilma_nedeni_sec(self):
"""
        The reasons for leaving are listed in a form. One of the listed reasons is
        selected by the user.
"""
self.current.task_data['karar_no'] = self.input['form']['karar']
_form = JsonForm(current=self.current, title=_(u'Öğrencinin Ayrılma Nedenini Seçiniz'))
_form.ayrilma_nedeni = fields.Integer(choices=self.object.get_choices_for('ayrilma_nedeni'))
_form.aciklama = fields.Text(_(u"Açıklama Yazınız"), required=True)
_form.sec = fields.Button(_(u"Seç"))
self.form_out(_form)
def ogrenci_program_sec(self):
"""
        The reason for leaving chosen in the previous step is assigned to the
        "reason for leaving" field of every student program the student is
        enrolled in.
        The student status of those programs is set to ``Kaydı silinmiştir``
        (record deleted).
        The student's role is changed according to the type of the unit they are
        enrolled in (program, undergraduate program, doctoral program).
        If the student holds another role at the school (e.g. library staff,
        sports hall staff), an informational message is sent to the admin.
"""
meta = {'user': self.current.user_id,
'role': self.current.role_id,
'wf_name': self.current.workflow_name,
'task_name': self.current.task_name,
'reason': 'FAKÜLTE_KARAR_NO_%s' % self.current.task_data['karar_no']}
index_fields = [('user', 'bin'), ('role', 'bin'), ('wf_name', 'bin'), ('reason', 'bin')]
ogrenci = Ogrenci.objects.get(self.current.task_data['ogrenci_id'])
programlar = OgrenciProgram.objects.filter(ogrenci_id=self.current.task_data['ogrenci_id'])
for program in programlar:
program.ayrilma_nedeni = self.current.input['form']['ayrilma_nedeni']
            # TODO: find a better way than hard-coding this status value
program.ogrencilik_statusu = 21
program.save(meta=meta, index_fields=index_fields)
roles = Role.objects.filter(user=ogrenci.user, unit=program.program.birim)
for role in roles:
if role.abstract_role.key in ABSTRACT_ROLE_LIST:
abstract_role = kaydi_silinmis_abs_role(role)
role.abstract_role = abstract_role
role.save(meta=meta, index_fields=index_fields)
ogrenci_rolleri = Role.objects.filter(user=ogrenci.user)
for role in ogrenci_rolleri:
if role.abstract_role.key not in ABSTRACT_ROLE_LIST_SILINMIS:
title = _(u'Kayıt Silme')
msg = _(u"""%s adlı öğrencinin kaydı silinmiştir.
Öğrenci farklı rollere sahiptir.""") % ogrenci
                    # TODO: notify the system administrator.
abstract_role = AbstractRole.objects.get("BASEABSROLE")
role = Role.objects.get(abstract_role=abstract_role)
role.send_notification(message=msg, title=title, sender=self.current.user)
def bilgi_ver(self):
"""
        After the deregistration workflow is completed, the advisor and the student
        are informed, and a message confirming the deregistration is shown on screen.
"""
ogrenci = Ogrenci.objects.get(self.current.task_data['ogrenci_id'])
ogrenci_program = OgrenciProgram.objects.filter(ogrenci=ogrenci)
self.current.output['msgbox'] = {
'type': 'warning', "title": _(u'Kayıt Silme'),
"msg": _(u'Öğrencinin kaydı %s nedeniyle silinmiştir.') % self.current.input['form'][
'aciklama']
}
title = _(u'Kayıt Silme')
msg = _(u'%s adlı öğrencinin kaydı %s nedeniyle silinmiştir.') % (
ogrenci, self.current.input['form']['aciklama'])
for program in ogrenci_program:
abstract_role = AbstractRole.objects.get("DANISMAN")
for role in program.danisman.user.role_set:
if role.role.abstract_role == abstract_role:
role.role.send_notification(title=title, message=msg, sender=self.current.user)
for role in ogrenci.user.role_set:
abstract_role = kaydi_silinmis_abs_role(role.role)
if abstract_role.key in ABSTRACT_ROLE_LIST_SILINMIS:
role.role.send_notification(title=title, message=msg, sender=self.current.user)
| gpl-3.0 | -5,817,750,396,697,596,000 | 39.135659 | 100 | 0.653501 | false |
quequino/Revolution | script.tvguidedixie/resetChannels.py | 1 | 1281 | #
# Copyright (C) 2014 Richard Dean
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with XBMC; see the file COPYING. If not, write to
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# http://www.gnu.org/copyleft/gpl.html
#
import xbmc
import xbmcgui
import xbmcaddon
import shutil
import os
def resetChannels():
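    # Delete the cached channels folder so the guide rebuilds it on next start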
path = xbmc.translatePath('special://profile/addon_data/script.tvguidedixie/')
chan = os.path.join(path, 'channels')
if os.path.exists(chan):
shutil.rmtree(chan)
d = xbmcgui.Dialog()
d.ok('On-Tapp.TV', 'On-Tapp.TV Channels successfully reset.', 'They will be re-created next time', 'you start the guide')
else:
pass
if __name__ == '__main__':
resetChannels()
| gpl-2.0 | -2,896,224,920,902,057,000 | 29.5 | 129 | 0.704137 | false |
Nexedi/neoppod | neo/storage/checker.py | 1 | 9034 | #
# Copyright (C) 2012-2019 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from collections import deque
from neo.lib import logging
from neo.lib.connection import ClientConnection, ConnectionClosed
from neo.lib.protocol import NodeTypes, Packets, ZERO_OID
from neo.lib.util import add64, dump
from .handlers.storage import StorageOperationHandler
# TODO: Use a dynamic value such that each chunk takes a few seconds to compute,
#       because too small a value wastes network bandwidth. However, too big a
#       value prevents the storage from replying quickly to other requests, so
#       checkRange() must also be changed to process a chunk in several steps,
# with a total time that must not cause timeouts.
CHECK_COUNT = 40000
class Checker(object):
def __init__(self, app):
self.app = app
self.queue = deque()
self.conn_dict = {}
def __call__(self, partition, source, min_tid, max_tid):
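        # Queue a (partition, source, tid range) check request and start it
        # immediately if no check is currently in progress.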
self.queue.append((partition, source, min_tid, max_tid))
if not self.conn_dict:
self._nextPartition()
def _nextPartition(self):
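        # Pop the next queued partition, connect to every readable cell of that
        # partition (plus the optional external source) and ask each peer, as
        # well as the local database, for the first chunk of TIDs to check.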
app = self.app
def connect(node, uuid=app.uuid, name=app.name):
if node.getUUID() == app.uuid:
return
if node.isConnected(connecting=True):
conn = node.getConnection()
conn.asClient()
else:
conn = ClientConnection(app, StorageOperationHandler(app), node)
conn.ask(Packets.RequestIdentification(NodeTypes.STORAGE,
uuid, app.server, name, app.id_timestamp, {}))
self.conn_dict[conn] = node.isIdentified()
conn_set = set(self.conn_dict)
conn_set.discard(None)
try:
self.conn_dict.clear()
while True:
try:
partition, (name, source), min_tid, max_tid = \
self.queue.popleft()
except IndexError:
return
cell = app.pt.getCell(partition, app.uuid)
if cell is None or cell.isOutOfDate():
msg = "discarded or out-of-date"
else:
try:
for cell in app.pt.getCellList(partition):
# XXX: Ignore corrupted cells for the moment
# because we're still unable to fix them
# (see also AdministrationHandler of master)
if cell.isReadable(): #if not cell.isOutOfDate():
connect(cell.getNode())
if source:
node = app.nm.getByAddress(source)
if name:
source = app.nm.createStorage(address=source) \
if node is None else node
connect(source, None, name)
elif (node.getUUID() == app.uuid or
node.isConnected(connecting=True) and
node.getConnection() in self.conn_dict):
source = node
else:
msg = "unavailable source"
if self.conn_dict:
break
msg = "no replica"
except ConnectionClosed:
msg = "connection closed"
finally:
conn_set.update(self.conn_dict)
self.conn_dict.clear()
logging.error("Failed to start checking partition %u (%s)",
partition, msg)
conn_set.difference_update(self.conn_dict)
finally:
for conn in conn_set:
app.closeClient(conn)
logging.debug("start checking partition %u from %s to %s",
partition, dump(min_tid), dump(max_tid))
self.min_tid = self.next_tid = min_tid
self.max_tid = max_tid
self.next_oid = None
self.partition = partition
self.source = source
def start():
if app.tm.isLockedTid(max_tid):
app.tm.read_queue.queueEvent(start)
return
args = partition, CHECK_COUNT, min_tid, max_tid
p = Packets.AskCheckTIDRange(*args)
for conn, identified in self.conn_dict.items():
self.conn_dict[conn] = conn.ask(p) if identified else None
self.conn_dict[None] = app.dm.checkTIDRange(*args)
start()
def connected(self, node):
conn = node.getConnection()
if self.conn_dict.get(conn, self) is None:
self.conn_dict[conn] = conn.ask(Packets.AskCheckTIDRange(
self.partition, CHECK_COUNT, self.next_tid, self.max_tid))
def connectionLost(self, conn):
try:
del self.conn_dict[conn]
except KeyError:
return
if self.source is not None and self.source.getConnection() is conn:
del self.source
elif len(self.conn_dict) > 1:
logging.warning("node lost but keep up checking partition %u",
self.partition)
return
logging.warning("check of partition %u aborted", self.partition)
self._nextPartition()
def _nextRange(self):
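        # Ask every peer and the local database for the next chunk: TID ranges
        # first, then OID/serial ranges once all TIDs have been covered.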
if self.next_oid:
args = self.partition, CHECK_COUNT, self.next_tid, self.max_tid, \
self.next_oid
p = Packets.AskCheckSerialRange(*args)
check = self.app.dm.checkSerialRange
else:
args = self.partition, CHECK_COUNT, self.next_tid, self.max_tid
p = Packets.AskCheckTIDRange(*args)
check = self.app.dm.checkTIDRange
for conn in self.conn_dict.keys():
self.conn_dict[conn] = check(*args) if conn is None else conn.ask(p)
def checkRange(self, conn, *args):
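        # Store one answer per peer; once every peer has answered for the current
        # chunk, compare the answers, report mismatching cells to the master as
        # corrupted, then move on to the next chunk or the next partition.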
if self.conn_dict.get(conn, self) != conn.getPeerId():
# Ignore answers to old requests,
# because we did nothing to cancel them.
logging.info("ignored AnswerCheck*Range%r", args)
return
self.conn_dict[conn] = args
answer_set = set(self.conn_dict.itervalues())
if len(answer_set) > 1:
for answer in answer_set:
if type(answer) is not tuple:
return
# TODO: Automatically tell corrupted cells to fix their data
# if we know a good source.
# For the moment, tell master to put them in CORRUPTED state
# and keep up checking if useful.
uuid = self.app.uuid
args = None if self.source is None else self.conn_dict[
None if self.source.getUUID() == uuid
else self.source.getConnection()]
uuid_list = []
for conn, answer in self.conn_dict.items():
if answer != args:
del self.conn_dict[conn]
if conn is None:
uuid_list.append(uuid)
else:
uuid_list.append(conn.getUUID())
self.app.closeClient(conn)
p = Packets.NotifyPartitionCorrupted(self.partition, uuid_list)
self.app.master_conn.send(p)
if len(self.conn_dict) <= 1:
logging.warning("check of partition %u aborted", self.partition)
self.queue.clear()
self._nextPartition()
return
try:
count, _, max_tid = args
except ValueError: # AnswerCheckSerialRange
count, _, self.next_tid, _, max_oid = args
if count < CHECK_COUNT:
logging.debug("partition %u checked from %s to %s",
self.partition, dump(self.min_tid), dump(self.max_tid))
self._nextPartition()
return
self.next_oid = add64(max_oid, 1)
else: # AnswerCheckTIDRange
if count < CHECK_COUNT:
self.next_tid = self.min_tid
self.next_oid = ZERO_OID
else:
self.next_tid = add64(max_tid, 1)
self._nextRange()
| gpl-2.0 | -8,196,660,110,818,209,000 | 42.854369 | 80 | 0.538189 | false |