repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated |
---|---|---|---|---|---|---|---|---|---|---|
ijzer/cwbot-ndy | kol/request/UseSkillRequest.py | 1 | 1026 | from GenericRequest import GenericRequest
from kol.database import SkillDatabase
from kol.manager import PatternManager
class UseSkillRequest(GenericRequest):
def __init__(self, session, skillId, numTimes=1, targetPlayer=None):
super(UseSkillRequest, self).__init__(session)
self.get = True
self.url = session.serverURL + "runskillz.php"
self.requestData["pwd"] = session.pwd
self.requestData["action"] = "Skillz"
self.requestData["whichskill"] = skillId
self.requestData["ajax"] = 1
self.requestData["quantity"] = numTimes
if targetPlayer != None:
self.requestData["targetplayer"] = targetPlayer
else:
self.requestData["targetplayer"] = session.userId
def parseResponse(self):
resultsPattern = PatternManager.getOrCompilePattern('results')
match = resultsPattern.search(self.responseText)
if match:
results = match.group(1)
self.responseData["results"] = results
| bsd-3-clause | 4,853,984,224,395,065,000 | 40.04 | 72 | 0.662768 | false |
simokivimaki/gtk | gtk/compose-parse.py | 1 | 34024 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# compose-parse.py, version 1.3
#
# multifunction script that helps manage the compose sequence table in GTK+ (gtk/gtkimcontextsimple.c)
# the script produces statistics and information about the whole process, run with --help for more.
#
# You may need to switch your python installation to utf-8, if you get 'ascii' codec errors.
#
# Complain to Simos Xenitellis ([email protected], http://simos.info/blog) for this craft.
from re import findall, match, split, sub
from string import atoi
from unicodedata import normalize
from urllib import urlretrieve
from os.path import isfile, getsize
from copy import copy
import sys
import getopt
# We grab files off the web, left and right.
URL_COMPOSE = 'http://gitweb.freedesktop.org/?p=xorg/lib/libX11.git;a=blob_plain;f=nls/en_US.UTF-8/Compose.pre'
URL_KEYSYMSTXT = "http://www.cl.cam.ac.uk/~mgk25/ucs/keysyms.txt"
URL_GDKKEYSYMSH = "http://git.gnome.org/browse/gtk%2B/plain/gdk/gdkkeysyms.h"
URL_UNICODEDATATXT = 'http://www.unicode.org/Public/5.2.0/ucd/UnicodeData.txt'
FILENAME_COMPOSE_SUPPLEMENTARY = 'gtk-compose-lookaside.txt'
# We currently support keysyms of size 2; once upstream xorg gets sorted,
# we might produce some tables with size 2 and some with size 4.
SIZEOFINT = 2
# Current max compose sequence length; in case it gets increased.
WIDTHOFCOMPOSETABLE = 5
keysymdatabase = {}
keysymunicodedatabase = {}
unicodedatabase = {}
headerfile_start = """/* GTK - The GIMP Tool Kit
* Copyright (C) 2007, 2008 GNOME Foundation
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
/*
* File auto-generated from script found at http://bugzilla.gnome.org/show_bug.cgi?id=321896
* using the input files
* Input : http://gitweb.freedesktop.org/?p=xorg/lib/libX11.git;a=blob_plain;f=nls/en_US.UTF-8/Compose.pre
* Input : http://www.cl.cam.ac.uk/~mgk25/ucs/keysyms.txt
* Input : http://www.unicode.org/Public/UNIDATA/UnicodeData.txt
*
* This table is optimised for space and requires special handling to access the content.
* This table is used solely by http://svn.gnome.org/viewcvs/gtk%2B/trunk/gtk/gtkimcontextsimple.c
*
* The resulting file is placed at http://svn.gnome.org/viewcvs/gtk%2B/trunk/gtk/gtkimcontextsimpleseqs.h
* This file is described in bug report http://bugzilla.gnome.org/show_bug.cgi?id=321896
*/
/*
* Modified by the GTK+ Team and others 2007, 2008. See the AUTHORS
* file for a list of people on the GTK+ Team. See the ChangeLog
* files for a list of changes. These files are distributed with
* GTK+ at ftp://ftp.gtk.org/pub/gtk/.
*/
#ifndef __GTK_IM_CONTEXT_SIMPLE_SEQS_H__
#define __GTK_IM_CONTEXT_SIMPLE_SEQS_H__
/* === These are the original comments of the file; we keep for historical purposes ===
*
* The following table was generated from the X compose tables include with
* XFree86 4.0 using a set of Perl scripts. Contact Owen Taylor <[email protected]>
* to obtain the relevant perl scripts.
*
* The following compose letter letter sequences confliced
* Dstroke/dstroke and ETH/eth; resolved to Dstroke (Croation, Vietnamese, Lappish), over
* ETH (Icelandic, Faroese, old English, IPA) [ D- -D d- -d ]
* Amacron/amacron and ordfeminine; resolved to ordfeminine [ _A A_ a_ _a ]
* Amacron/amacron and Atilde/atilde; resolved to atilde [ -A A- a- -a ]
* Omacron/Omacron and masculine; resolved to masculine [ _O O_ o_ _o ]
* Omacron/omacron and Otilde/atilde; resolved to otilde [ -O O- o- -o ]
*
* [ Amacron and Omacron are in Latin-4 (Baltic). ordfeminine and masculine are used for
* spanish. atilde and otilde are used at least for Portuguese ]
*
* at and Aring; resolved to Aring [ AA ]
* guillemotleft and caron; resolved to guillemotleft [ << ]
* ogonek and cedilla; resolved to cedilla [ ,, ]
*
* This probably should be resolved by first checking an additional set of compose tables
* that depend on the locale or selected input method.
*/
static const guint16 gtk_compose_seqs_compact[] = {"""
headerfile_end = """};
#endif /* __GTK_IM_CONTEXT_SIMPLE_SEQS_H__ */
"""
def stringtohex(str): return atoi(str, 16)
def factorial(n):
if n <= 1:
return 1
else:
return n * factorial(n-1)
def uniq(*args) :
""" Performs a uniq operation on a list or lists """
theInputList = []
for theList in args:
theInputList += theList
theFinalList = []
for elem in theInputList:
if elem not in theFinalList:
theFinalList.append(elem)
return theFinalList
def all_permutations(seq):
""" Borrowed from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/252178 """
""" Produces all permutations of the items of a list """
if len(seq) <=1:
yield seq
else:
for perm in all_permutations(seq[1:]):
for i in range(len(perm)+1):
#nb str[0:1] works in both string and list contexts
yield perm[:i] + seq[0:1] + perm[i:]
def usage():
print """compose-parse available parameters:
-h, --help this craft
-s, --statistics show overall statistics (both algorithmic, non-algorithmic)
-a, --algorithmic show sequences saved with algorithmic optimisation
-g, --gtk show entries that go to GTK+
-u, --unicodedatatxt show compose sequences derived from UnicodeData.txt (from unicode.org)
-v, --verbose show verbose output
-p, --plane1 show plane1 compose sequences
-n, --numeric when used with --gtk, create file with numeric values only
-e, --gtk-expanded when used with --gtk, create file that repeats first column; not usable in GTK+
Default is to show statistics.
"""
try:
opts, args = getopt.getopt(sys.argv[1:], "pvgashune", ["help", "algorithmic", "statistics", "unicodedatatxt",
"stats", "gtk", "verbose", "plane1", "numeric", "gtk-expanded"])
except:
usage()
sys.exit(2)
opt_statistics = False
opt_algorithmic = False
opt_gtk = False
opt_unicodedatatxt = False
opt_verbose = False
opt_plane1 = False
opt_numeric = False
opt_gtkexpanded = False
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
if o in ("-s", "--statistics"):
opt_statistics = True
if o in ("-a", "--algorithmic"):
opt_algorithmic = True
if o in ("-g", "--gtk"):
opt_gtk = True
if o in ("-u", "--unicodedatatxt"):
opt_unicodedatatxt = True
if o in ("-v", "--verbose"):
opt_verbose = True
if o in ("-p", "--plane1"):
opt_plane1 = True
if o in ("-n", "--numeric"):
opt_numeric = True
if o in ("-e", "--gtk-expanded"):
opt_gtkexpanded = True
if not opt_algorithmic and not opt_gtk and not opt_unicodedatatxt:
opt_statistics = True
def download_hook(blocks_transferred, block_size, file_size):
""" A download hook to provide some feedback when downloading """
if blocks_transferred == 0:
if file_size > 0:
if opt_verbose:
print "Downloading", file_size, "bytes: ",
else:
if opt_verbose:
print "Downloading: ",
sys.stdout.write('#')
sys.stdout.flush()
def download_file(url):
""" Downloads a file provided a URL. Returns the filename. """
""" Borks on failure """
localfilename = url.split('/')[-1]
if not isfile(localfilename) or getsize(localfilename) <= 0:
if opt_verbose:
print "Downloading ", url, "..."
try:
urlretrieve(url, localfilename, download_hook)
except IOError, (errno, strerror):
print "I/O error(%s): %s" % (errno, strerror)
sys.exit(-1)
except:
print "Unexpected error: ", sys.exc_info()[0]
sys.exit(-1)
print " done."
else:
if opt_verbose:
print "Using cached file for ", url
return localfilename
def process_gdkkeysymsh():
""" Opens the gdkkeysyms.h file from GTK+/gdk/gdkkeysyms.h """
""" Fills up keysymdb with contents """
filename_gdkkeysymsh = download_file(URL_GDKKEYSYMSH)
try:
gdkkeysymsh = open(filename_gdkkeysymsh, 'r')
except IOError, (errno, strerror):
print "I/O error(%s): %s" % (errno, strerror)
sys.exit(-1)
except:
print "Unexpected error: ", sys.exc_info()[0]
sys.exit(-1)
""" Parse the gdkkeysyms.h file and place contents in keysymdb """
linenum_gdkkeysymsh = 0
keysymdb = {}
for line in gdkkeysymsh.readlines():
linenum_gdkkeysymsh += 1
line = line.strip()
if line == "" or not match('^#define GDK_KEY_', line):
continue
components = split('\s+', line)
if len(components) < 3:
print "Invalid line %(linenum)d in %(filename)s: %(line)s"\
% {'linenum': linenum_gdkkeysymsh, 'filename': filename_gdkkeysymsh, 'line': line}
print "Was expecting 3 items in the line"
sys.exit(-1)
if not match('^GDK_KEY_', components[1]):
print "Invalid line %(linenum)d in %(filename)s: %(line)s"\
% {'linenum': linenum_gdkkeysymsh, 'filename': filename_gdkkeysymsh, 'line': line}
print "Was expecting a keysym starting with GDK_KEY_"
sys.exit(-1)
if match('^0x[0-9a-fA-F]+$', components[2]):
unival = long(components[2][2:], 16)
if unival == 0:
continue
keysymdb[components[1][4:]] = unival
else:
print "Invalid line %(linenum)d in %(filename)s: %(line)s"\
% {'linenum': linenum_gdkkeysymsh, 'filename': filename_gdkkeysymsh, 'line': line}
print "Was expecting a hexadecimal number at the end of the line"
sys.exit(-1)
gdkkeysymsh.close()
""" Patch up the keysymdb with some of our own stuff """
""" This is for a missing keysym from the currently upstream file """
keysymdb['dead_stroke'] = 0x338
""" This is for a missing keysym from the currently upstream file """
###keysymdb['dead_belowring'] = 0x323
###keysymdb['dead_belowmacron'] = 0x331
###keysymdb['dead_belowcircumflex'] = 0x32d
###keysymdb['dead_belowtilde'] = 0x330
###keysymdb['dead_belowbreve'] = 0x32e
###keysymdb['dead_belowdiaeresis'] = 0x324
""" This is^Wwas preferential treatment for Greek """
# keysymdb['dead_tilde'] = 0x342
""" This is^was preferential treatment for Greek """
#keysymdb['combining_tilde'] = 0x342
""" Fixing VoidSymbol """
keysymdb['VoidSymbol'] = 0xFFFF
return keysymdb
def process_keysymstxt():
""" Grabs and opens the keysyms.txt file that Markus Kuhn maintains """
""" This file keeps a record between keysyms <-> unicode chars """
filename_keysymstxt = download_file(URL_KEYSYMSTXT)
try:
keysymstxt = open(filename_keysymstxt, 'r')
except IOError, (errno, strerror):
print "I/O error(%s): %s" % (errno, strerror)
sys.exit(-1)
except:
print "Unexpected error: ", sys.exc_info()[0]
sys.exit(-1)
""" Parse the keysyms.txt file and place content in keysymdb """
linenum_keysymstxt = 0
keysymdb = {}
for line in keysymstxt.readlines():
linenum_keysymstxt += 1
line = line.strip()
if line == "" or match('^#', line):
continue
components = split('\s+', line)
if len(components) < 5:
print "Invalid line %(linenum)d in %(filename)s: %(line)s'"\
% {'linenum': linenum_keysymstxt, 'filename': filename_keysymstxt, 'line': line}
print "Was expecting 5 items in the line"
sys.exit(-1)
if match('^U[0-9a-fA-F]+$', components[1]):
unival = long(components[1][1:], 16)
if unival == 0:
continue
keysymdb[components[4]] = unival
keysymstxt.close()
""" Patch up the keysymdb with some of our own stuff """
""" This is for a missing keysym from the currently upstream file """
###keysymdb['dead_belowring'] = 0x323
###keysymdb['dead_belowmacron'] = 0x331
###keysymdb['dead_belowcircumflex'] = 0x32d
###keysymdb['dead_belowtilde'] = 0x330
###keysymdb['dead_belowbreve'] = 0x32e
###keysymdb['dead_belowdiaeresis'] = 0x324
""" This is preferential treatment for Greek """
""" => we get more savings if used for Greek """
# keysymdb['dead_tilde'] = 0x342
""" This is preferential treatment for Greek """
# keysymdb['combining_tilde'] = 0x342
""" This is for a missing keysym from Markus Kuhn's db """
keysymdb['dead_stroke'] = 0x338
""" This is for a missing keysym from Markus Kuhn's db """
keysymdb['Oslash'] = 0x0d8
""" This is for a missing (recently added) keysym """
keysymdb['dead_psili'] = 0x313
""" This is for a missing (recently added) keysym """
keysymdb['dead_dasia'] = 0x314
""" Allows to import Multi_key sequences """
keysymdb['Multi_key'] = 0xff20
keysymdb['zerosubscript'] = 0x2080
keysymdb['onesubscript'] = 0x2081
keysymdb['twosubscript'] = 0x2082
keysymdb['threesubscript'] = 0x2083
keysymdb['foursubscript'] = 0x2084
keysymdb['fivesubscript'] = 0x2085
keysymdb['sixsubscript'] = 0x2086
keysymdb['sevensubscript'] = 0x2087
keysymdb['eightsubscript'] = 0x2088
keysymdb['ninesubscript'] = 0x2089
keysymdb['dead_doublegrave'] = 0x030F
keysymdb['dead_invertedbreve'] = 0x0311
return keysymdb
def keysymvalue(keysym, file = "n/a", linenum = 0):
""" Extracts a value from the keysym """
""" Find the value of keysym, using the data from keysyms """
""" Use file and linenum to when reporting errors """
if keysym == "":
return 0
if keysymdatabase.has_key(keysym):
return keysymdatabase[keysym]
elif keysym[0] == 'U' and match('[0-9a-fA-F]+$', keysym[1:]):
return atoi(keysym[1:], 16)
elif keysym[:2] == '0x' and match('[0-9a-fA-F]+$', keysym[2:]):
return atoi(keysym[2:], 16)
else:
print 'keysymvalue: UNKNOWN{%(keysym)s}' % { "keysym": keysym }
#return -1
sys.exit(-1)
def keysymunicodevalue(keysym, file = "n/a", linenum = 0):
""" Extracts a value from the keysym """
""" Find the value of keysym, using the data from keysyms """
""" Use file and linenum to when reporting errors """
if keysym == "":
return 0
if keysymunicodedatabase.has_key(keysym):
return keysymunicodedatabase[keysym]
elif keysym[0] == 'U' and match('[0-9a-fA-F]+$', keysym[1:]):
return atoi(keysym[1:], 16)
elif keysym[:2] == '0x' and match('[0-9a-fA-F]+$', keysym[2:]):
return atoi(keysym[2:], 16)
else:
print 'keysymunicodevalue: UNKNOWN{%(keysym)s}' % { "keysym": keysym }
sys.exit(-1)
def rename_combining(seq):
filtered_sequence = []
for ks in seq:
if findall('^combining_', ks):
ks = sub('^combining_', 'dead_', ks)
if ks == 'dead_double_grave':
ks = 'dead_doublegrave'
if ks == 'dead_inverted_breve':
ks = 'dead_invertedbreve'
filtered_sequence.append(ks)
return filtered_sequence
keysymunicodedatabase = process_keysymstxt()
keysymdatabase = process_gdkkeysymsh()
""" Grab and open the compose file from upstream """
filename_compose = download_file(URL_COMPOSE)
try:
composefile = open(filename_compose, 'r')
except IOError, (errno, strerror):
print "I/O error(%s): %s" % (errno, strerror)
sys.exit(-1)
except:
print "Unexpected error: ", sys.exc_info()[0]
sys.exit(-1)
""" Look if there is a lookaside (supplementary) compose file in the current
directory, and if so, open, then merge with upstream Compose file.
"""
xorg_compose_sequences_raw = []
for seq in composefile.readlines():
xorg_compose_sequences_raw.append(seq)
try:
composefile_lookaside = open(FILENAME_COMPOSE_SUPPLEMENTARY, 'r')
for seq in composefile_lookaside.readlines():
xorg_compose_sequences_raw.append(seq)
except IOError, (errno, strerror):
if opt_verbose:
print "I/O error(%s): %s" % (errno, strerror)
print "Did not find lookaside compose file. Continuing..."
except:
print "Unexpected error: ", sys.exc_info()[0]
sys.exit(-1)
""" Parse the compose file in xorg_compose_sequences"""
xorg_compose_sequences = []
xorg_compose_sequences_algorithmic = []
linenum_compose = 0
comment_nest_depth = 0
for line in xorg_compose_sequences_raw:
linenum_compose += 1
line = line.strip()
if match("^XCOMM", line) or match("^#", line):
continue
line = sub(r"\/\*([^\*]*|[\*][^/])\*\/", "", line)
comment_start = line.find("/*")
if comment_start >= 0:
if comment_nest_depth == 0:
line = line[:comment_start]
else:
line = ""
comment_nest_depth += 1
else:
comment_end = line.find("*/")
if comment_end >= 0:
comment_nest_depth -= 1
if comment_nest_depth < 0:
print "Invalid comment %(linenum_compose)d in %(filename)s: \
Closing '*/' without opening '/*'" % { "linenum_compose": linenum_compose, "filename": filename_compose }
exit(-1)
if comment_nest_depth > 0:
line = ""
else:
line = line[comment_end + 2:]
if line == "":
continue
#line = line[:-1]
components = split(':', line)
if len(components) != 2:
print "Invalid line %(linenum_compose)d in %(filename)s: No sequence\
/value pair found" % { "linenum_compose": linenum_compose, "filename": filename_compose }
exit(-1)
(seq, val ) = split(':', line)
seq = seq.strip()
val = val.strip()
raw_sequence = findall('\w+', seq)
values = split('\s+', val)
unichar_temp = split('"', values[0])
unichar = unichar_temp[1]
if len(values) == 1:
continue
codepointstr = values[1]
if values[1] == '#':
# No codepoints that are >1 characters yet.
continue
if raw_sequence[0][0] == 'U' and match('[0-9a-fA-F]+$', raw_sequence[0][1:]):
raw_sequence[0] = '0x' + raw_sequence[0][1:]
if match('^U[0-9a-fA-F]+$', codepointstr):
codepoint = long(codepointstr[1:], 16)
elif keysymunicodedatabase.has_key(codepointstr):
#if keysymdatabase[codepointstr] != keysymunicodedatabase[codepointstr]:
#print "DIFFERENCE: 0x%(a)X 0x%(b)X" % { "a": keysymdatabase[codepointstr], "b": keysymunicodedatabase[codepointstr]},
#print raw_sequence, codepointstr
codepoint = keysymunicodedatabase[codepointstr]
else:
print
print "Invalid codepoint at line %(linenum_compose)d in %(filename)s:\
%(line)s" % { "linenum_compose": linenum_compose, "filename": filename_compose, "line": line }
exit(-1)
sequence = rename_combining(raw_sequence)
reject_this = False
for i in sequence:
if keysymvalue(i) > 0xFFFF:
reject_this = True
if opt_plane1:
print sequence
break
if keysymvalue(i) < 0:
reject_this = True
break
if reject_this:
continue
if "U0342" in sequence or \
"U0313" in sequence or \
"U0314" in sequence or \
"0x0313" in sequence or \
"0x0342" in sequence or \
"0x0314" in sequence:
continue
if "dead_belowring" in sequence or\
"dead_currency" in sequence or\
"dead_belowcomma" in sequence or\
"dead_belowmacron" in sequence or\
"dead_belowtilde" in sequence or\
"dead_belowbreve" in sequence or\
"dead_belowdiaeresis" in sequence or\
"dead_belowcircumflex" in sequence:
continue
#for i in range(len(sequence)):
# if sequence[i] == "0x0342":
# sequence[i] = "dead_tilde"
if "Multi_key" not in sequence:
""" Ignore for now >0xFFFF keysyms """
if codepoint < 0xFFFF:
original_sequence = copy(sequence)
stats_sequence = copy(sequence)
base = sequence.pop()
basechar = keysymvalue(base, filename_compose, linenum_compose)
if basechar < 0xFFFF:
counter = 1
unisequence = []
not_normalised = True
skipping_this = False
for i in range(0, len(sequence)):
""" If the sequence has dead_tilde and is for Greek, we don't do algorithmically
because of lack of dead_perispomeni (i.e. conflict)
"""
bc = basechar
"""if sequence[-1] == "dead_tilde" and (bc >= 0x370 and bc <= 0x3ff) or (bc >= 0x1f00 and bc <= 0x1fff):
skipping_this = True
break
if sequence[-1] == "dead_horn" and (bc >= 0x370 and bc <= 0x3ff) or (bc >= 0x1f00 and bc <= 0x1fff):
skipping_this = True
break
if sequence[-1] == "dead_ogonek" and (bc >= 0x370 and bc <= 0x3ff) or (bc >= 0x1f00 and bc <= 0x1fff):
skipping_this = True
break
if sequence[-1] == "dead_psili":
sequence[i] = "dead_horn"
if sequence[-1] == "dead_dasia":
sequence[-1] = "dead_ogonek"
"""
unisequence.append(unichr(keysymunicodevalue(sequence.pop(), filename_compose, linenum_compose)))
if skipping_this:
unisequence = []
for perm in all_permutations(unisequence):
# print counter, original_sequence, unichr(basechar) + "".join(perm)
# print counter, map(unichr, perm)
normalized = normalize('NFC', unichr(basechar) + "".join(perm))
if len(normalized) == 1:
# print 'Base: %(base)s [%(basechar)s], produces [%(unichar)s] (0x%(codepoint)04X)' \
# % { "base": base, "basechar": unichr(basechar), "unichar": unichar, "codepoint": codepoint },
# print "Normalized: [%(normalized)s] SUCCESS %(c)d" % { "normalized": normalized, "c": counter }
stats_sequence_data = map(keysymunicodevalue, stats_sequence)
stats_sequence_data.append(normalized)
xorg_compose_sequences_algorithmic.append(stats_sequence_data)
not_normalised = False
break;
counter += 1
if not_normalised:
original_sequence.append(codepoint)
xorg_compose_sequences.append(original_sequence)
""" print xorg_compose_sequences[-1] """
else:
print "Error in base char !?!"
exit(-2)
else:
print "OVER", sequence
exit(-1)
else:
sequence.append(codepoint)
xorg_compose_sequences.append(sequence)
""" print xorg_compose_sequences[-1] """
def sequence_cmp(x, y):
if keysymvalue(x[0]) > keysymvalue(y[0]):
return 1
elif keysymvalue(x[0]) < keysymvalue(y[0]):
return -1
elif len(x) > len(y):
return 1
elif len(x) < len(y):
return -1
elif keysymvalue(x[1]) > keysymvalue(y[1]):
return 1
elif keysymvalue(x[1]) < keysymvalue(y[1]):
return -1
elif len(x) < 4:
return 0
elif keysymvalue(x[2]) > keysymvalue(y[2]):
return 1
elif keysymvalue(x[2]) < keysymvalue(y[2]):
return -1
elif len(x) < 5:
return 0
elif keysymvalue(x[3]) > keysymvalue(y[3]):
return 1
elif keysymvalue(x[3]) < keysymvalue(y[3]):
return -1
elif len(x) < 6:
return 0
elif keysymvalue(x[4]) > keysymvalue(y[4]):
return 1
elif keysymvalue(x[4]) < keysymvalue(y[4]):
return -1
else:
return 0
def sequence_unicode_cmp(x, y):
if keysymunicodevalue(x[0]) > keysymunicodevalue(y[0]):
return 1
elif keysymunicodevalue(x[0]) < keysymunicodevalue(y[0]):
return -1
elif len(x) > len(y):
return 1
elif len(x) < len(y):
return -1
elif keysymunicodevalue(x[1]) > keysymunicodevalue(y[1]):
return 1
elif keysymunicodevalue(x[1]) < keysymunicodevalue(y[1]):
return -1
elif len(x) < 4:
return 0
elif keysymunicodevalue(x[2]) > keysymunicodevalue(y[2]):
return 1
elif keysymunicodevalue(x[2]) < keysymunicodevalue(y[2]):
return -1
elif len(x) < 5:
return 0
elif keysymunicodevalue(x[3]) > keysymunicodevalue(y[3]):
return 1
elif keysymunicodevalue(x[3]) < keysymunicodevalue(y[3]):
return -1
elif len(x) < 6:
return 0
elif keysymunicodevalue(x[4]) > keysymunicodevalue(y[4]):
return 1
elif keysymunicodevalue(x[4]) < keysymunicodevalue(y[4]):
return -1
else:
return 0
def sequence_algorithmic_cmp(x, y):
if len(x) < len(y):
return -1
elif len(x) > len(y):
return 1
else:
for i in range(len(x)):
if x[i] < y[i]:
return -1
elif x[i] > y[i]:
return 1
return 0
xorg_compose_sequences.sort(sequence_cmp)
xorg_compose_sequences_uniqued = []
first_time = True
item = None
for next_item in xorg_compose_sequences:
if first_time:
first_time = False
item = next_item
if sequence_unicode_cmp(item, next_item) != 0:
xorg_compose_sequences_uniqued.append(item)
item = next_item
xorg_compose_sequences = copy(xorg_compose_sequences_uniqued)
counter_multikey = 0
for item in xorg_compose_sequences:
if findall('Multi_key', "".join(item[:-1])) != []:
counter_multikey += 1
xorg_compose_sequences_algorithmic.sort(sequence_algorithmic_cmp)
xorg_compose_sequences_algorithmic_uniqued = uniq(xorg_compose_sequences_algorithmic)
firstitem = ""
num_first_keysyms = 0
zeroes = 0
num_entries = 0
num_algorithmic_greek = 0
for sequence in xorg_compose_sequences:
if keysymvalue(firstitem) != keysymvalue(sequence[0]):
firstitem = sequence[0]
num_first_keysyms += 1
zeroes += 6 - len(sequence) + 1
num_entries += 1
for sequence in xorg_compose_sequences_algorithmic_uniqued:
ch = ord(sequence[-1:][0])
if ch >= 0x370 and ch <= 0x3ff or ch >= 0x1f00 and ch <= 0x1fff:
num_algorithmic_greek += 1
if opt_algorithmic:
for sequence in xorg_compose_sequences_algorithmic_uniqued:
letter = "".join(sequence[-1:])
print '0x%(cp)04X, %(uni)c, seq: [ <0x%(base)04X>,' % { 'cp': ord(unicode(letter)), 'uni': letter, 'base': sequence[-2] },
for elem in sequence[:-2]:
print "<0x%(keysym)04X>," % { 'keysym': elem },
""" Yeah, verified... We just want to keep the output similar to -u, so we can compare/sort easily """
print "], recomposed as", letter, "verified"
def num_of_keysyms(seq):
return len(seq) - 1
def convert_UnotationToHex(arg):
if isinstance(arg, str):
if match('^U[0-9A-F][0-9A-F][0-9A-F][0-9A-F]$', arg):
return sub('^U', '0x', arg)
return arg
def addprefix_GDK(arg):
if match('^0x', arg):
return '%(arg)s, ' % { 'arg': arg }
else:
return 'GDK_KEY_%(arg)s, ' % { 'arg': arg }
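# Layout of the compact table emitted below: a header of num_first_keysyms
# rows, each holding [first_keysym, offset_len2, ..., offset_len6], where
# offset_lenN points into the data area that follows. Each data entry stores
# the remaining keysyms of a sequence (the first keysym is implied by the
# header row; the --gtk-expanded variant repeats it) followed by the resulting
# Unicode codepoint.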
if opt_gtk:
first_keysym = ""
sequence = []
compose_table = []
ct_second_part = []
ct_sequence_width = 2
start_offset = num_first_keysyms * (WIDTHOFCOMPOSETABLE+1)
we_finished = False
counter = 0
sequence_iterator = iter(xorg_compose_sequences)
sequence = sequence_iterator.next()
while True:
first_keysym = sequence[0] # Set the first keysym
compose_table.append([first_keysym, 0, 0, 0, 0, 0])
while sequence[0] == first_keysym:
compose_table[counter][num_of_keysyms(sequence)-1] += 1
try:
sequence = sequence_iterator.next()
except StopIteration:
we_finished = True
break
if we_finished:
break
counter += 1
ct_index = start_offset
for line_num in range(len(compose_table)):
for i in range(WIDTHOFCOMPOSETABLE):
occurences = compose_table[line_num][i+1]
compose_table[line_num][i+1] = ct_index
ct_index += occurences * (i+2)
for sequence in xorg_compose_sequences:
ct_second_part.append(map(convert_UnotationToHex, sequence))
print headerfile_start
for i in compose_table:
if opt_gtkexpanded:
print "0x%(ks)04X," % { "ks": keysymvalue(i[0]) },
print '%(str)s' % { 'str': "".join(map(lambda x : str(x) + ", ", i[1:])) }
elif not match('^0x', i[0]):
print 'GDK_KEY_%(str)s' % { 'str': "".join(map(lambda x : str(x) + ", ", i)) }
else:
print '%(str)s' % { 'str': "".join(map(lambda x : str(x) + ", ", i)) }
for i in ct_second_part:
if opt_numeric:
for ks in i[1:][:-1]:
print '0x%(seq)04X, ' % { 'seq': keysymvalue(ks) },
print '0x%(cp)04X, ' % { 'cp':i[-1] }
"""
for ks in i[:-1]:
print '0x%(seq)04X, ' % { 'seq': keysymvalue(ks) },
print '0x%(cp)04X, ' % { 'cp':i[-1] }
"""
elif opt_gtkexpanded:
print '%(seq)s0x%(cp)04X, ' % { 'seq': "".join(map(addprefix_GDK, i[:-1])), 'cp':i[-1] }
else:
print '%(seq)s0x%(cp)04X, ' % { 'seq': "".join(map(addprefix_GDK, i[:-1][1:])), 'cp':i[-1] }
print headerfile_end
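# redecompose() recursively expands a codepoint into its canonical
# decomposition using the data loaded from UnicodeData.txt; for compatibility
# decompositions (those starting with a <...> tag) the tag is dropped and the
# remaining fields are decomposed.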
def redecompose(codepoint):
(name, decomposition, combiningclass) = unicodedatabase[codepoint]
if decomposition[0] == '' or decomposition[0] == '0':
return [codepoint]
if match('<\w+>', decomposition[0]):
numdecomposition = map(stringtohex, decomposition[1:])
return map(redecompose, numdecomposition)
numdecomposition = map(stringtohex, decomposition)
return map(redecompose, numdecomposition)
def process_unicodedata_file(verbose = False):
""" Grab from wget http://www.unicode.org/Public/UNIDATA/UnicodeData.txt """
filename_unicodedatatxt = download_file(URL_UNICODEDATATXT)
try:
unicodedatatxt = open(filename_unicodedatatxt, 'r')
except IOError, (errno, strerror):
print "I/O error(%s): %s" % (errno, strerror)
sys.exit(-1)
except:
print "Unexpected error: ", sys.exc_info()[0]
sys.exit(-1)
for line in unicodedatatxt.readlines():
if line[0] == "" or line[0] == '#':
continue
line = line[:-1]
uniproperties = split(';', line)
codepoint = stringtohex(uniproperties[0])
""" We don't do Plane 1 or CJK blocks. The latter require reading additional files. """
if codepoint > 0xFFFF or (codepoint >= 0x4E00 and codepoint <= 0x9FFF) or (codepoint >= 0xF900 and codepoint <= 0xFAFF):
continue
name = uniproperties[1]
category = uniproperties[2]
combiningclass = uniproperties[3]
decomposition = uniproperties[5]
unicodedatabase[codepoint] = [name, split('\s+', decomposition), combiningclass]
counter_combinations = 0
counter_combinations_greek = 0
counter_entries = 0
counter_entries_greek = 0
for item in unicodedatabase.keys():
(name, decomposition, combiningclass) = unicodedatabase[item]
if decomposition[0] == '':
continue
print name, "is empty"
elif match('<\w+>', decomposition[0]):
continue
print name, "has weird", decomposition[0]
else:
sequence = map(stringtohex, decomposition)
chrsequence = map(unichr, sequence)
normalized = normalize('NFC', "".join(chrsequence))
""" print name, sequence, "Combining: ", "".join(chrsequence), normalized, len(normalized), """
decomposedsequence = []
for subseq in map(redecompose, sequence):
for seqitem in subseq:
if isinstance(seqitem, list):
for i in seqitem:
if isinstance(i, list):
for j in i:
decomposedsequence.append(j)
else:
decomposedsequence.append(i)
else:
decomposedsequence.append(seqitem)
recomposedchar = normalize('NFC', "".join(map(unichr, decomposedsequence)))
if len(recomposedchar) == 1 and len(decomposedsequence) > 1:
counter_entries += 1
counter_combinations += factorial(len(decomposedsequence)-1)
ch = item
if ch >= 0x370 and ch <= 0x3ff or ch >= 0x1f00 and ch <= 0x1fff:
counter_entries_greek += 1
counter_combinations_greek += factorial(len(decomposedsequence)-1)
if verbose:
print "0x%(cp)04X, %(uni)c, seq:" % { 'cp':item, 'uni':unichr(item) },
print "[",
for elem in decomposedsequence:
print '<0x%(hex)04X>,' % { 'hex': elem },
print "], recomposed as", recomposedchar,
if unichr(item) == recomposedchar:
print "verified"
if verbose == False:
print "Unicode statistics from UnicodeData.txt"
print "Number of entries that can be algorithmically produced :", counter_entries
print " of which are for Greek :", counter_entries_greek
print "Number of compose sequence combinations requiring :", counter_combinations
print " of which are for Greek :", counter_combinations_greek
print "Note: We do not include partial compositions, "
print "thus the slight discrepancy in the figures"
print
if opt_unicodedatatxt:
process_unicodedata_file(True)
if opt_statistics:
print
print "Total number of compose sequences (from file) :", len(xorg_compose_sequences) + len(xorg_compose_sequences_algorithmic)
print " of which can be expressed algorithmically :", len(xorg_compose_sequences_algorithmic)
print " of which cannot be expressed algorithmically :", len(xorg_compose_sequences)
print " of which have Multi_key :", counter_multikey
print
print "Algorithmic (stats for Xorg Compose file)"
print "Number of sequences off due to algo from file (len(array)) :", len(xorg_compose_sequences_algorithmic)
print "Number of sequences off due to algo (uniq(sort(array))) :", len(xorg_compose_sequences_algorithmic_uniqued)
print " of which are for Greek :", num_algorithmic_greek
print
process_unicodedata_file()
print "Not algorithmic (stats from Xorg Compose file)"
print "Number of sequences :", len(xorg_compose_sequences)
print "Flat array looks like :", len(xorg_compose_sequences), "rows of 6 integers (2 bytes per int, or 12 bytes per row)"
print "Flat array would have taken up (in bytes) :", num_entries * 2 * 6, "bytes from the GTK+ library"
print "Number of items in flat array :", len(xorg_compose_sequences) * 6
print " of which are zeroes :", zeroes, "or ", (100 * zeroes) / (len(xorg_compose_sequences) * 6), " per cent"
print "Number of different first items :", num_first_keysyms
print "Number of max bytes (if using flat array) :", num_entries * 2 * 6
print "Number of savings :", zeroes * 2 - num_first_keysyms * 2 * 5
print
print "Memory needs if both algorithmic+optimised table in latest Xorg compose file"
print " :", num_entries * 2 * 6 - zeroes * 2 + num_first_keysyms * 2 * 5
print
print "Existing (old) implementation in GTK+"
print "Number of sequences in old gtkimcontextsimple.c :", 691
print "The existing (old) implementation in GTK+ takes up :", 691 * 2 * 12, "bytes"
| lgpl-2.1 | 2,910,440,404,792,069,000 | 34.258031 | 159 | 0.649688 | false |
tommyip/zulip | zerver/tests/test_outgoing_webhook_interfaces.py | 1 | 6782 | # -*- coding: utf-8 -*-
from typing import cast, Any, Dict
import mock
import json
import requests
from zerver.lib.outgoing_webhook import (
get_service_interface_class,
process_success_response,
)
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.topic import TOPIC_NAME
from zerver.models import get_realm, get_user, SLACK_INTERFACE
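# These tests exercise the outgoing-webhook service interfaces: building the
# outbound payload for a bot (build_bot_request) and turning the remote
# service's response back into a Zulip message (process_success /
# process_success_response), for both the generic and the Slack-compatible
# formats.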
class TestGenericOutgoingWebhookService(ZulipTestCase):
def setUp(self) -> None:
self.event = {
u'command': '@**test**',
u'message': {
'content': '@**test**',
},
u'trigger': 'mention',
}
self.bot_user = get_user("[email protected]", get_realm("zulip"))
service_class = get_service_interface_class('whatever') # GenericOutgoingWebhookService
self.handler = service_class(service_name='test-service',
token='abcdef',
user_profile=self.bot_user)
def test_process_success_response(self) -> None:
class Stub:
def __init__(self, text: str) -> None:
self.text = text
def make_response(text: str) -> requests.Response:
return cast(requests.Response, Stub(text=text))
event = dict(
user_profile_id=99,
message=dict(type='private')
)
service_handler = self.handler
response = make_response(text=json.dumps(dict(content='whatever')))
with mock.patch('zerver.lib.outgoing_webhook.send_response_message') as m:
process_success_response(
event=event,
service_handler=service_handler,
response=response,
)
self.assertTrue(m.called)
response = make_response(text='unparsable text')
with mock.patch('zerver.lib.outgoing_webhook.fail_with_message') as m:
process_success_response(
event=event,
service_handler=service_handler,
response=response
)
self.assertTrue(m.called)
def test_build_bot_request(self) -> None:
request_data = self.handler.build_bot_request(self.event)
request_data = json.loads(request_data)
self.assertEqual(request_data['data'], "@**test**")
self.assertEqual(request_data['token'], "abcdef")
self.assertEqual(request_data['message'], self.event['message'])
def test_process_success(self) -> None:
response = dict(response_not_required=True) # type: Dict[str, Any]
success_response = self.handler.process_success(response, self.event)
self.assertEqual(success_response, None)
response = dict(response_string='test_content')
success_response = self.handler.process_success(response, self.event)
self.assertEqual(success_response, dict(content='test_content'))
response = dict(
content='test_content',
widget_content='test_widget_content',
red_herring='whatever',
)
success_response = self.handler.process_success(response, self.event)
expected_response = dict(
content='test_content',
widget_content='test_widget_content',
)
self.assertEqual(success_response, expected_response)
response = dict()
success_response = self.handler.process_success(response, self.event)
self.assertEqual(success_response, None)
class TestSlackOutgoingWebhookService(ZulipTestCase):
def setUp(self) -> None:
self.stream_message_event = {
u'command': '@**test**',
u'user_profile_id': 12,
u'service_name': 'test-service',
u'trigger': 'mention',
u'message': {
'content': 'test_content',
'type': 'stream',
'sender_realm_str': 'zulip',
'sender_email': '[email protected]',
'stream_id': '123',
'display_recipient': 'integrations',
'timestamp': 123456,
'sender_id': 21,
'sender_full_name': 'Sample User',
}
}
self.private_message_event = {
u'user_profile_id': 24,
u'service_name': 'test-service',
u'command': 'test content',
u'trigger': 'private_message',
u'message': {
'sender_id': 3,
'sender_realm_str': 'zulip',
'timestamp': 1529821610,
'sender_email': '[email protected]',
'type': 'private',
'sender_realm_id': 1,
'id': 219,
TOPIC_NAME: 'test',
'content': 'test content',
}
}
service_class = get_service_interface_class(SLACK_INTERFACE)
self.handler = service_class(token="abcdef",
user_profile=None,
service_name='test-service')
def test_build_bot_request_stream_message(self) -> None:
request_data = self.handler.build_bot_request(self.stream_message_event)
self.assertEqual(request_data[0][1], "abcdef") # token
self.assertEqual(request_data[1][1], "zulip") # team_id
self.assertEqual(request_data[2][1], "zulip.com") # team_domain
self.assertEqual(request_data[3][1], "123") # channel_id
self.assertEqual(request_data[4][1], "integrations") # channel_name
self.assertEqual(request_data[5][1], 123456) # timestamp
self.assertEqual(request_data[6][1], 21) # user_id
self.assertEqual(request_data[7][1], "Sample User") # user_name
self.assertEqual(request_data[8][1], "@**test**") # text
self.assertEqual(request_data[9][1], "mention") # trigger_word
self.assertEqual(request_data[10][1], 12) # user_profile_id
@mock.patch('zerver.lib.outgoing_webhook.fail_with_message')
def test_build_bot_request_private_message(self, mock_fail_with_message: mock.Mock) -> None:
request_data = self.handler.build_bot_request(self.private_message_event)
self.assertIsNone(request_data)
self.assertTrue(mock_fail_with_message.called)
def test_process_success(self) -> None:
response = dict(response_not_required=True) # type: Dict[str, Any]
success_response = self.handler.process_success(response, self.stream_message_event)
self.assertEqual(success_response, None)
response = dict(text='test_content')
success_response = self.handler.process_success(response, self.stream_message_event)
self.assertEqual(success_response, dict(content='test_content'))
| apache-2.0 | 8,002,084,487,423,589,000 | 38.660819 | 96 | 0.583899 | false |
wujuguang/motor | test/tornado_tests/test_motor_transaction.py | 1 | 18586 | # Copyright 2018-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import collections
import os
import re
from bson import json_util
from bson.json_util import JSONOptions
from pymongo.read_concern import ReadConcern
from pymongo.results import (BulkWriteResult,
InsertManyResult,
InsertOneResult,
UpdateResult, DeleteResult)
from motor.motor_tornado import (MotorCommandCursor,
MotorCursor,
MotorLatentCommandCursor)
from test.utils import TestListener
from test.version import Version
"""Test Motor, an asynchronous driver for MongoDB and Tornado."""
import unittest
from pymongo import (client_session,
operations,
read_preferences,
ReadPreference,
WriteConcern)
from tornado import gen
from pymongo.errors import OperationFailure, PyMongoError
from tornado.testing import gen_test
from test import SkipTest
from test.test_environment import env
from test.tornado_tests import MotorTest
# Location of JSON test specifications.
_TEST_PATH = os.path.join(
os.path.dirname(os.path.realpath(__file__)), '../json/transactions')
_TXN_TESTS_DEBUG = os.environ.get('TRANSACTION_TESTS_DEBUG')
def camel_to_snake(camel):
# Regex to convert CamelCase to snake_case.
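# e.g. "insertedIds" -> "inserted_ids", "errorCodeName" -> "error_code_name".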
snake = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camel)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', snake).lower()
def camel_to_upper_camel(camel):
return camel[0].upper() + camel[1:]
def camel_to_snake_args(arguments):
for arg_name in list(arguments):
c2s = camel_to_snake(arg_name)
arguments[c2s] = arguments.pop(arg_name)
return arguments
def parse_read_preference(pref):
# Make first letter lowercase to match read_pref's modes.
mode_string = pref.get('mode', 'primary')
mode_string = mode_string[:1].lower() + mode_string[1:]
mode = read_preferences.read_pref_mode_from_name(mode_string)
max_staleness = pref.get('maxStalenessSeconds', -1)
tag_sets = pref.get('tag_sets')
return read_preferences.make_read_preference(
mode, tag_sets=tag_sets, max_staleness=max_staleness)
def parse_opts(opts):
parsed = {}
if 'readPreference' in opts:
parsed['read_preference'] = parse_read_preference(
opts.pop('readPreference'))
if 'writeConcern' in opts:
parsed['write_concern'] = WriteConcern(**opts.pop('writeConcern'))
if 'readConcern' in opts:
parsed['read_concern'] = ReadConcern(**opts.pop('readConcern'))
return parsed
def parse_args(args, sessions):
parsed = parse_opts(args)
if 'session' in args:
assert sessions is not None
parsed['session'] = sessions[args.pop('session')]
return parsed
class MotorTransactionTest(MotorTest):
@classmethod
def setUpClass(cls):
super(MotorTransactionTest, cls).setUpClass()
if not env.sessions_enabled:
raise SkipTest("Sessions not supported")
if not env.is_replica_set:
raise SkipTest("Requires a replica set")
if env.version < Version(3, 7):
raise SkipTest("Requires MongoDB 3.7+")
def transaction_test_debug(self, msg):
if _TXN_TESTS_DEBUG:
print(msg)
def check_result(self, expected_result, result):
write_results = (BulkWriteResult,
DeleteResult,
InsertOneResult,
InsertManyResult,
UpdateResult)
if isinstance(result, write_results):
for res in expected_result:
prop = camel_to_snake(res)
# SPEC-869: Only BulkWriteResult has upserted_count.
if (prop == "upserted_count"
and not isinstance(result, BulkWriteResult)):
if result.upserted_id is not None:
upserted_count = 1
else:
upserted_count = 0
self.assertEqual(upserted_count, expected_result[res], prop)
elif prop == "inserted_ids":
# BulkWriteResult does not have inserted_ids.
if isinstance(result, BulkWriteResult):
self.assertEqual(len(expected_result[res]),
result.inserted_count)
else:
# InsertManyResult may be compared to [id1] from the
# crud spec or {"0": id1} from the retryable write spec.
ids = expected_result[res]
if isinstance(ids, dict):
ids = [ids[str(i)] for i in range(len(ids))]
self.assertEqual(ids, result.inserted_ids, prop)
elif prop == "upserted_ids":
# Convert indexes from strings to integers.
ids = expected_result[res]
expected_ids = {}
for str_index in ids:
expected_ids[int(str_index)] = ids[str_index]
self.assertEqual(expected_ids, result.upserted_ids, prop)
else:
self.assertEqual(
getattr(result, prop), expected_result[res], prop)
return True
elif isinstance(result, dict):
for k, v in expected_result.items():
self.assertEqual(v, result[k])
else:
self.assertEqual(expected_result, result)
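# run_operation() maps one JSON-described operation from the transaction spec
# onto the corresponding Motor call: camelCase argument names are converted,
# bulk-write requests are turned into pymongo operation objects, awaitable
# results are resolved, and cursors are drained into lists.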
@gen.coroutine
def run_operation(self, sessions, collection, operation):
name = camel_to_snake(operation['name'])
if name == 'run_command':
name = 'command'
self.transaction_test_debug(name)
collection_opts = operation.get('collectionOptions')
if collection_opts:
collection = collection.with_options(**parse_opts(collection_opts))
obj = {
'collection': collection,
'database': collection.database,
'session0': sessions['session0'],
'session1': sessions['session1'],
}[operation['object']]
# Combine arguments with options and handle special cases.
arguments = operation['arguments']
arguments.update(arguments.pop("options", {}))
kwargs = parse_args(arguments, sessions)
for arg_name, arg_value in arguments.items():
c2s = camel_to_snake(arg_name)
if arg_name == "sort":
assert len(arg_value) == 1, 'test can only have 1 sort key'
kwargs[arg_name] = list(arg_value.items())
# Named "key" instead not fieldName.
elif arg_name == "fieldName":
kwargs["key"] = arg_value
# Aggregate uses "batchSize", while find uses batch_size.
elif arg_name == "batchSize" and name == "aggregate":
kwargs["batchSize"] = arg_value
# Requires boolean returnDocument.
elif arg_name == "returnDocument":
kwargs[c2s] = (arg_value == "After")
elif c2s == "requests":
# Parse each request into a bulk write model.
requests = []
for request in arg_value:
bulk_model = camel_to_upper_camel(request["name"])
bulk_class = getattr(operations, bulk_model)
bulk_arguments = camel_to_snake_args(request["arguments"])
requests.append(bulk_class(**bulk_arguments))
kwargs["requests"] = requests
else:
kwargs[c2s] = arg_value
cmd = getattr(obj, name)
result = cmd(**kwargs)
try:
result = gen.convert_yielded(result)
except gen.BadYieldError:
# Not an async method.
pass
else:
result = yield result
cursor_types = MotorCursor, MotorCommandCursor, MotorLatentCommandCursor
if isinstance(result, cursor_types):
result = yield result.to_list(length=None)
raise gen.Return(result)
def check_events(self, test, listener, session_ids):
res = listener.results
if not len(test['expectations']):
return
self.assertEqual(len(res['started']), len(test['expectations']))
for i, expectation in enumerate(test['expectations']):
event_type = next(iter(expectation))
event = res['started'][i]
# The tests substitute 42 for any number other than 0.
if (event.command_name == 'getMore'
and event.command['getMore']):
event.command['getMore'] = 42
elif event.command_name == 'killCursors':
event.command['cursors'] = [42]
# Replace afterClusterTime: 42 with actual afterClusterTime.
expected_cmd = expectation[event_type]['command']
expected_read_concern = expected_cmd.get('readConcern')
if expected_read_concern is not None:
time = expected_read_concern.get('afterClusterTime')
if time == 42:
actual_time = event.command.get(
'readConcern', {}).get('afterClusterTime')
if actual_time is not None:
expected_read_concern['afterClusterTime'] = actual_time
# Replace lsid with a name like "session0" to match test.
if 'lsid' in event.command:
for name, lsid in session_ids.items():
if event.command['lsid'] == lsid:
event.command['lsid'] = name
break
for attr, expected in expectation[event_type].items():
actual = getattr(event, attr)
if isinstance(expected, dict):
for key, val in expected.items():
if val is None:
if key in actual:
self.fail("Unexpected key [%s] in %r" % (
key, actual))
elif key not in actual:
self.fail("Expected key [%s] in %r" % (
key, actual))
else:
self.assertEqual(val, actual[key],
"Key [%s] in %s" % (key, actual))
else:
self.assertEqual(actual, expected)
def expect_error(expected_result):
if isinstance(expected_result, dict):
return set(expected_result.keys()).intersection((
'errorContains', 'errorCodeName', 'errorLabelsContain',
'errorLabelsOmit'))
return False
def end_sessions(sessions):
for s in sessions.values():
# Aborts the transaction if it's open.
s.end_session()
def create_test(scenario_def, test):
@gen_test
def run_scenario(self):
listener = TestListener()
# New client, to avoid interference from pooled sessions.
client = self.motor_rsc(event_listeners=[listener],
**test['clientOptions'])
try:
yield client.admin.command('killAllSessions', [])
except OperationFailure:
# "operation was interrupted" by killing the command's own session.
pass
if test['failPoint']:
yield client.admin.command(test['failPoint'])
database_name = scenario_def['database_name']
collection_name = scenario_def['collection_name']
write_concern_db = client.get_database(
database_name, write_concern=WriteConcern(w='majority'))
write_concern_coll = write_concern_db[collection_name]
yield write_concern_coll.drop()
yield write_concern_db.create_collection(collection_name)
if scenario_def['data']:
# Load data.
yield write_concern_coll.insert_many(scenario_def['data'])
# Create session0 and session1.
sessions = {}
session_ids = {}
for i in range(2):
session_name = 'session%d' % i
opts = camel_to_snake_args(test['sessionOptions'][session_name])
if 'default_transaction_options' in opts:
txn_opts = opts['default_transaction_options']
if 'readConcern' in txn_opts:
read_concern = ReadConcern(**txn_opts['readConcern'])
else:
read_concern = None
if 'writeConcern' in txn_opts:
write_concern = WriteConcern(**txn_opts['writeConcern'])
else:
write_concern = None
if 'readPreference' in txn_opts:
read_pref = parse_read_preference(
txn_opts['readPreference'])
else:
read_pref = None
txn_opts = client_session.TransactionOptions(
read_concern=read_concern,
write_concern=write_concern,
read_preference=read_pref,
)
opts['default_transaction_options'] = txn_opts
s = yield client.start_session(**opts)
sessions[session_name] = s
# Store lsid so we can access it after end_session, in check_events.
session_ids[session_name] = s.session_id
self.addCleanup(end_sessions, sessions)
listener.results.clear()
collection = client[database_name][collection_name]
for op in test['operations']:
expected_result = op.get('result')
if expect_error(expected_result):
with self.assertRaises(PyMongoError,
msg=op.get('name')) as context:
yield self.run_operation(sessions, collection, op.copy())
err = context.exception
if expected_result['errorContains']:
self.assertIn(expected_result['errorContains'].lower(),
str(err).lower())
if expected_result['errorCodeName']:
self.assertEqual(expected_result['errorCodeName'],
err.details.get('codeName'))
for label in expected_result.get('errorLabelsContain', []):
self.assertTrue(
err.has_error_label(label),
"%r should have errorLabel %s" % (err, label))
for label in expected_result.get('errorLabelsOmit', []):
self.assertFalse(
err.has_error_label(label),
"%r should NOT have errorLabel %s" % (err, label))
else:
result = yield self.run_operation(
sessions, collection, op.copy())
if 'result' in op:
self.check_result(expected_result, result)
for s in sessions.values():
yield s.end_session()
self.check_events(test, listener, session_ids)
# Assert final state is expected.
expected = test['outcome'].get('collection')
if expected is not None:
# Read from the primary to ensure causal consistency.
primary_coll = collection.with_options(
read_preference=ReadPreference.PRIMARY)
docs = yield primary_coll.find().to_list(length=None)
self.assertEqual(expected['data'], docs)
return run_scenario
class ScenarioDict(collections.OrderedDict):
"""Dict that returns {} for any unknown key, recursively."""
def __getitem__(self, item):
try:
return dict.__getitem__(self, item)
except KeyError:
# Unlike a defaultdict, don't set the key, just return a dict.
return ScenarioDict({})
def copy(self):
return ScenarioDict(self)
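# create_tests() walks the JSON transaction specs under _TEST_PATH and attaches
# one generated test method per scenario to MotorTransactionTest, honouring any
# skipReason and environment requirements declared in the spec.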
def create_tests():
assert os.path.isdir(_TEST_PATH)
for dirpath, _, filenames in os.walk(_TEST_PATH):
dirname = os.path.split(dirpath)[-1]
for filename in filenames:
test_type, ext = os.path.splitext(filename)
if ext != '.json':
continue
with open(os.path.join(dirpath, filename)) as scenario_stream:
opts = JSONOptions(document_class=ScenarioDict)
scenario_def = json_util.loads(
scenario_stream.read(), json_options=opts)
# Construct test from scenario.
for test in scenario_def['tests']:
test_name = 'test_%s_%s_%s' % (
dirname,
test_type.replace("-", "_"),
str(test['description'].replace(" ", "_")))
new_test = create_test(scenario_def, test)
new_test = env.require(
lambda: not test.get('skipReason'),
test.get('skipReason'),
new_test)
if test_type == 'reads' and test['description'] == 'count':
new_test = env.require(
lambda: False,
"Motor has removed the 'count' helper",
new_test)
if 'secondary' in test_name:
new_test = env.require(
lambda: env.secondaries,
'No secondaries',
new_test)
# In Python 2, case test_name from unicode to str.
new_test.__name__ = str(test_name)
setattr(MotorTransactionTest, new_test.__name__, new_test)
create_tests()
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 9,086,862,932,157,388,000 | 36.930612 | 80 | 0.546648 | false |
SEL-Columbia/commcare-hq | corehq/apps/cleanup/management/commands/check_case_integrity.py | 1 | 5062 | from collections import defaultdict
from optparse import make_option
from django.core.management.base import BaseCommand
from casexml.apps.case.cleanup import rebuild_case
from casexml.apps.case.models import CommCareCase
from corehq.elastic import stream_es_query, ES_URLS, ADD_TO_ES_FILTER
import dateutil.parser as dparser
import csv
import logging
from dimagi.utils.chunked import chunked
logger = logging.getLogger(__name__)
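# forms_with_cases() streams form documents from ElasticSearch that touched at
# least one case; iter_forms_with_cases() then cross-checks each touched case
# against the case/by_xform_id couch view to flag cases that never recorded an
# action for the form.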
def forms_with_cases(domain=None, since=None, chunksize=500):
q = {"filter": {"and": [{"bool": {
"must_not": {
"missing": {
"field": "__retrieved_case_ids",
"existence": True,
"null_value": True}}}}]}}
q["sort"] = [{"domain.exact" : {"order": "asc"}}]
params={"domain.exact": domain} if domain else {}
if since:
q["filter"]["and"][0]["bool"]["must"] = {
"range": {
"received_on": {"from": since.strftime("%Y-%m-%d")}}}
q["filter"]["and"].extend(ADD_TO_ES_FILTER["forms"][:])
return stream_es_query(params=params, q=q, es_url=ES_URLS["forms"],
fields=["__retrieved_case_ids", "domain", "received_on"], chunksize=chunksize)
def case_ids_by_xform_id(xform_ids):
ret = defaultdict(list)
for res in CommCareCase.get_db().view('case/by_xform_id', keys=xform_ids, reduce=False):
ret[res["key"]].append(res["id"])
return dict(ret)
def iter_forms_with_cases(domain, since, chunksize=500):
for form_list in chunked(forms_with_cases(domain, since), chunksize):
case_id_mapping = case_ids_by_xform_id([f["_id"] for f in form_list])
for form in form_list:
form_id, f_case_ids, f_domain = form["_id"], form["fields"]["__retrieved_case_ids"], form["fields"]["domain"]
received_on = form["fields"]["received_on"]
for case_id in f_case_ids:
yield form_id, received_on, case_id, case_id in case_id_mapping.get(form_id, []), f_domain
def handle_problematic_data(datalist_tup, csv_writer, verbose=False, rebuild=False):
case_data = CommCareCase.get_db().view('_all_docs', keys=[d[1] for d in datalist_tup])
cases = set([c["id"] for c in case_data if 'id' in c])
for domain, case_id, form_id, received_on in datalist_tup:
error = "action_missing" if case_id in cases else "nonexistent_case"
csv_writer.writerow([domain, case_id, form_id, received_on, error])
if verbose and error == "nonexistent_case":
logger.info("Case (%s) from form (%s) does not exist" % (case_id, form_id))
elif verbose and error == "action_missing":
logger.info("Case (%s) missing action for form (%s)" % (case_id, form_id))
if rebuild:
if verbose:
logger.info("rebuilding case (%s) from scratch" % case_id)
try:
rebuild_case(case_id)
except Exception as e:
logger.info("Case Rebuild Failure: %s" % e)
class Command(BaseCommand):
args = '<domain>'
help = ('Checks all forms in a domain to make sure their cases were properly updated.')
option_list = BaseCommand.option_list + (
make_option('-s', '--since',
help="Begin check at this date."),
make_option('-f', '--filename',
help="Save output to this file."),
make_option('-r', '--rebuild', action="store_true",
help="Rebuild cases that were found to be corrupt"),
make_option('-c', '--chunk',
help="Set the chunk size"),
make_option('--verbose', action="store_true",
help="Verbose"),
)
def handle(self, *args, **options):
domain = args[0] if len(args) == 1 else None
since = dparser.parse(options["since"], fuzzy=True) if options.get("since") else None
filename = options.get("filename") or ("case_integrity" + ("_%s" % domain if domain else ""))
chunksize = options.get("chunk") or 500
if not filename.endswith(".csv"):
filename = "%s.csv" % filename
rebuild, verbose = options.get("rebuild"), options.get("verbose")
logger.info("writing to file: %s" % filename)
with open(filename, 'wb+') as csvfile:
csv_writer = csv.writer(csvfile, delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
csv_writer.writerow(['Domain', 'Case ID', 'Form ID', 'Form Recieved On', 'Error'])
problematic = []
for form_id, received_on, case_id, action_exists, f_domain in iter_forms_with_cases(domain, since, chunksize):
if not action_exists:
problematic.append((f_domain, case_id, form_id, received_on))
if len(problematic) > chunksize:
handle_problematic_data(problematic, csv_writer, verbose=verbose, rebuild=rebuild)
problematic = []
handle_problematic_data(problematic, csv_writer, verbose=verbose, rebuild=rebuild)
| bsd-3-clause | -8,207,043,014,911,200,000 | 47.209524 | 122 | 0.590676 | false |
toruta39/blender-datablock-translator | translate_datablock_names.py | 1 | 7692 | bl_info = {
"name": "Translate Datablock Names",
"author": "Joshua Zhang",
"version": (1, 0),
"blender": (2, 69, 0),
"location": "Search > (rename)",
"description": "A blender addon/plugin that helps to translate datablock \
names to English.",
"wiki_url": "",
"tracker_url": "",
"category": "Object"
}
import urllib.request
import urllib.parse
import json
import time
import re
import xml.etree.ElementTree as ET
import bpy
class MSTranslator():
"""A Class to communicate with Microsoft Translator API"""
def __init__(self):
self.access_token = ""
self.access_token_expires_at = time.time()
self.get_access_token()
def get_access_token(self):
"""Get access token from Azure Marketplace.
        If there is no existing access token, or it has expired, request a new one.
Returns: string
"""
if (
not bool(self.access_token) or
time.time() > self.access_token_expires_at
):
self.access_token = self.req_access_token()
return self.access_token
def req_access_token(self):
"""Request a new access token from Azure Marketplace
Returns: string
"""
url = "https://datamarket.accesscontrol.windows.net/v2/OAuth2-13"
data = {
"client_id": "blender-assets-translator",
"client_secret": "5TITh8SzOtQIefUJ/vKW10yk4/oNbGbgI+GquUdtgHo=",
"scope": "http://api.microsofttranslator.com",
"grant_type": "client_credentials"
}
data = urllib.parse.urlencode(data)
data = bytes(data, "utf-8")
req = urllib.request.Request(url=url, data=data)
result = urllib.request.urlopen(req).read()
result = str(result, "utf-8")
result = json.loads(result)
self.access_token_expires_at = time.time() + int(result["expires_in"])
return result["access_token"]
def translate(self, text, to_lang="en", from_lang=""):
"""Translate text to the target language
Keyword arguments:
text -- text to translate
to_lang -- optional, the target language code
from_lang -- optional, the source language code
Returns: string
"""
url = "http://api.microsofttranslator.com/v2/Http.svc/Translate"
data = {
"text": text,
"to": to_lang,
"from": from_lang
}
data = urllib.parse.urlencode(data)
url += "?" + data
req = urllib.request.Request(url=url, method="GET")
req.add_header("Authorization", "Bearer " + self.get_access_token())
result = urllib.request.urlopen(req).read()
result = str(result, "utf-8")
result = ET.fromstring(result)
result = result.text
return result
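# Minimal usage sketch of MSTranslator outside the operator below (illustrative only:
# it assumes network access and that the hard-coded Azure Marketplace credentials above
# are still valid; the translated output shown is indicative, not guaranteed):
#
#   translator = MSTranslator()
#   translator.translate("立方体")          # -> e.g. "Cube"
#   translator.translate("bonjour", "en")  # -> e.g. "hello"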
class TranslateDatablockNames(bpy.types.Operator):
"""Translate Datablock Names"""
bl_idname = "object.translate_datablock_names"
bl_label = "Translate Datablock Names"
bl_options = {'REGISTER', 'UNDO'}
is_object_to_translate = bpy.props.BoolProperty(
name='Object',
default=True,
description='Translate Object Names')
is_material_to_translate = bpy.props.BoolProperty(
name='Material',
default=True,
description='Translate Material Names')
is_animation_to_translate = bpy.props.BoolProperty(
name='Animation',
default=True,
description='Translate Animation Names')
is_armature_to_translate = bpy.props.BoolProperty(
name='Armature',
default=True,
description='Translate Armature Names')
is_shapekey_to_translate = bpy.props.BoolProperty(
name='Shape Key',
default=True,
description='Translate Shape Key Names')
dialog_width = 200
def draw(self, context):
layout = self.layout
row = layout.row()
row.prop(self.properties, 'is_object_to_translate')
row.prop(self.properties, 'is_material_to_translate')
row = layout.row()
row.prop(self.properties, 'is_animation_to_translate')
row.prop(self.properties, 'is_armature_to_translate')
row = layout.row()
row.prop(self.properties, 'is_shapekey_to_translate')
def execute(self, context):
translate_datablock_name(
is_object_to_translate=self.is_object_to_translate,
is_material_to_translate=self.is_material_to_translate,
is_animation_to_translate=self.is_animation_to_translate,
is_armature_to_translate=self.is_armature_to_translate,
is_shapekey_to_translate=self.is_shapekey_to_translate
)
return {'FINISHED'}
def invoke(self, context, event):
wm = context.window_manager
wm.invoke_props_dialog(self, self.dialog_width)
return {'RUNNING_MODAL'}
def menu_func(self, context):
self.layout.operator(TranslateDatablockNames.bl_idname)
def translate_datablock_name(
is_object_to_translate=False,
is_material_to_translate=False,
is_animation_to_translate=False,
is_armature_to_translate=False,
is_shapekey_to_translate=False
):
if is_object_to_translate:
for obj in bpy.data.objects:
if has_irregular_char(obj.name):
obj.name = hyphenize(ms_translator.translate(obj.name))
for mesh in bpy.data.meshes:
if has_irregular_char(mesh.name):
mesh.name = hyphenize(ms_translator.translate(mesh.name))
for group in bpy.data.groups:
if has_irregular_char(group.name):
group.name = hyphenize(ms_translator.translate(group.name))
if is_material_to_translate:
for material in bpy.data.materials:
if has_irregular_char(material.name):
material.name = hyphenize(
ms_translator.translate(material.name)
)
if is_animation_to_translate:
for action in bpy.data.actions:
if has_irregular_char(action.name):
action.name = hyphenize(ms_translator.translate(action.name))
if is_armature_to_translate:
for armature in bpy.data.armatures:
if has_irregular_char(armature.name):
armature.name = hyphenize(
ms_translator.translate(armature.name)
)
            for bone in armature.bones:
                # skip names that are already plain ASCII, consistent with the other datablock types
                if has_irregular_char(bone.name):
                    bone.name = hyphenize(
                        ms_translator.translate(bone.name)
                    )
if is_shapekey_to_translate:
for shapekey in bpy.data.shape_keys:
if has_irregular_char(shapekey.name):
shapekey.name = hyphenize(
ms_translator.translate(shapekey.name)
)
for keyblock in shapekey.key_blocks:
if has_irregular_char(keyblock.name):
keyblock.name = hyphenize(
ms_translator.translate(keyblock.name)
)
def hyphenize(string):
return '-'.join(string.split())
def has_irregular_char(string):
match = re.search(r"[^\x00-\x7F]", string)
if match:
return True
else:
return False
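# Quick reference for the two helpers above (expected results, shown as comments only):
#   hyphenize("hello world test")   -> "hello-world-test"
#   has_irregular_char("Cube.001")  -> False  (pure ASCII)
#   has_irregular_char("立方体")     -> True   (contains non-ASCII characters)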
def register():
global ms_translator
ms_translator = MSTranslator()
bpy.utils.register_class(TranslateDatablockNames)
bpy.types.OUTLINER_MT_search.append(menu_func)
def unregister():
global ms_translator
ms_translator = None
bpy.utils.unregister_class(TranslateDatablockNames)
bpy.types.OUTLINER_MT_search.remove(menu_func)
if __name__ == "__main__":
ms_translator = None
register()
| mit | 5,995,320,613,882,916,000 | 29.046875 | 78 | 0.604134 | false |
miloszz/DIRAC | RequestManagementSystem/DB/RequestDB.py | 1 | 33451 | ########################################################################
# $HeadURL $
# File: RequestDB.py
# Date: 2012/12/04 08:06:30
########################################################################
""" :mod: RequestDB
=======================
.. module: RequestDB
:synopsis: db holding Requests
db holding Request, Operation and File
"""
__RCSID__ = "$Id $"
from types import ListType
import random
import socket
import datetime
# # from DIRAC
from DIRAC import S_OK, S_ERROR, gConfig, gLogger
from DIRAC.Core.Base.DB import DB
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.Client.Operation import Operation
from DIRAC.RequestManagementSystem.Client.File import File
from DIRAC.ConfigurationSystem.Client.PathFinder import getDatabaseSection
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.orm import relationship, backref, sessionmaker, joinedload_all, mapper
from sqlalchemy.sql import update
from sqlalchemy import create_engine, func, Table, Column, MetaData, ForeignKey,\
Integer, String, DateTime, Enum, BLOB, BigInteger, distinct
# Metadata instance that is used to bind the engine, Object and tables
metadata = MetaData()
# Description of the file table
fileTable = Table( 'File', metadata,
Column( 'FileID', Integer, primary_key = True ),
Column( 'OperationID', Integer,
ForeignKey( 'Operation.OperationID', ondelete = 'CASCADE' ),
nullable = False ),
Column( 'Status', Enum( 'Waiting', 'Done', 'Failed', 'Scheduled' ), server_default = 'Waiting' ),
Column( 'LFN', String( 255 ), index = True ),
Column( 'PFN', String( 255 ) ),
Column( 'ChecksumType', Enum( 'ADLER32', 'MD5', 'SHA1', '' ), server_default = '' ),
Column( 'Checksum', String( 255 ) ),
Column( 'GUID', String( 36 ) ),
Column( 'Size', BigInteger ),
Column( 'Attempt', Integer ),
Column( 'Error', String( 255 ) ),
mysql_engine = 'InnoDB'
)
# Map the File object to the fileTable, with a few special attributes
mapper( File, fileTable, properties = {
'_Status': fileTable.c.Status,
'_LFN': fileTable.c.LFN,
'_ChecksumType' : fileTable.c.ChecksumType,
'_GUID' : fileTable.c.GUID,
} )
# Description of the Operation table
operationTable = Table( 'Operation', metadata,
Column( 'TargetSE', String( 255 ) ),
Column( 'CreationTime', DateTime ),
Column( 'SourceSE', String( 255 ) ),
Column( 'Arguments', BLOB ),
Column( 'Error', String( 255 ) ),
Column( 'Type', String( 64 ), nullable = False ),
Column( 'Order', Integer, nullable = False ),
Column( 'Status', Enum( 'Waiting', 'Assigned', 'Queued', 'Done', 'Failed', 'Canceled', 'Scheduled' ), server_default = 'Queued' ),
Column( 'LastUpdate', DateTime ),
Column( 'SubmitTime', DateTime ),
Column( 'Catalog', String( 255 ) ),
Column( 'OperationID', Integer, primary_key = True ),
Column( 'RequestID', Integer,
ForeignKey( 'Request.RequestID', ondelete = 'CASCADE' ),
nullable = False ),
mysql_engine = 'InnoDB'
)
# Map the Operation object to the operationTable, with a few special attributes
mapper(Operation, operationTable, properties={
'_CreationTime': operationTable.c.CreationTime,
'_Order': operationTable.c.Order,
'_Status': operationTable.c.Status,
'_LastUpdate': operationTable.c.LastUpdate,
'_SubmitTime': operationTable.c.SubmitTime,
'_Catalog': operationTable.c.Catalog,
'__files__':relationship( File,
backref = backref( '_parent', lazy = 'immediate' ),
lazy = 'immediate',
passive_deletes = True,
cascade = "all, delete-orphan" )
})
# Description of the Request Table
requestTable = Table( 'Request', metadata,
Column( 'DIRACSetup', String( 32 ) ),
Column( 'CreationTime', DateTime ),
Column( 'JobID', Integer, server_default = '0' ),
Column( 'OwnerDN', String( 255 ) ),
Column( 'RequestName', String( 255 ), nullable = False ),
Column( 'Error', String( 255 ) ),
Column( 'Status', Enum( 'Waiting', 'Assigned', 'Done', 'Failed', 'Canceled', 'Scheduled' ), server_default = 'Waiting' ),
Column( 'LastUpdate', DateTime ),
Column( 'OwnerGroup', String( 32 ) ),
Column( 'SubmitTime', DateTime ),
Column( 'RequestID', Integer, primary_key = True ),
Column( 'SourceComponent', BLOB ),
Column( 'NotBefore', DateTime ),
mysql_engine = 'InnoDB'
)
# Map the Request object to the requestTable, with a few special attributes
mapper( Request, requestTable, properties = {
'_CreationTime': requestTable.c.CreationTime,
'_Status': requestTable.c.Status,
'_LastUpdate': requestTable.c.LastUpdate,
'_SubmitTime': requestTable.c.SubmitTime,
'_NotBefore': requestTable.c.NotBefore,
'__operations__' : relationship( Operation,
backref = backref( '_parent', lazy = 'immediate' ),
order_by = operationTable.c.Order,
lazy = 'immediate',
passive_deletes = True,
cascade = "all, delete-orphan"
)
} )
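# Note on the three mappings above (illustrative): the relationships are declared with
# lazy='immediate', so loading a Request also pulls in its Operations and their Files in
# the same session, e.g. (names as defined in this module, engine/session assumed configured):
#
#   session = sessionmaker(bind=engine)()
#   req = session.query(Request).filter(Request.RequestID == 123).one()
#   operations = req.__operations__        # already loaded, no further queries needed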
########################################################################
class RequestDB( object ):
"""
.. class:: RequestDB
db holding requests
"""
def __getDBConnectionInfo( self, fullname ):
""" Collect from the CS all the info needed to connect to the DB.
This should be in a base class eventually
"""
self.fullname = fullname
self.cs_path = getDatabaseSection( self.fullname )
self.dbHost = ''
result = gConfig.getOption( self.cs_path + '/Host' )
if not result['OK']:
raise RuntimeError( 'Failed to get the configuration parameters: Host' )
self.dbHost = result['Value']
# Check if the host is the local one and then set it to 'localhost' to use
# a socket connection
if self.dbHost != 'localhost':
localHostName = socket.getfqdn()
if localHostName == self.dbHost:
self.dbHost = 'localhost'
self.dbPort = 3306
result = gConfig.getOption( self.cs_path + '/Port' )
if not result['OK']:
# No individual port number found, try at the common place
result = gConfig.getOption( '/Systems/Databases/Port' )
if result['OK']:
self.dbPort = int( result['Value'] )
else:
self.dbPort = int( result['Value'] )
self.dbUser = ''
result = gConfig.getOption( self.cs_path + '/User' )
if not result['OK']:
# No individual user name found, try at the common place
result = gConfig.getOption( '/Systems/Databases/User' )
if not result['OK']:
raise RuntimeError( 'Failed to get the configuration parameters: User' )
self.dbUser = result['Value']
self.dbPass = ''
result = gConfig.getOption( self.cs_path + '/Password' )
if not result['OK']:
# No individual password found, try at the common place
result = gConfig.getOption( '/Systems/Databases/Password' )
if not result['OK']:
raise RuntimeError( 'Failed to get the configuration parameters: Password' )
self.dbPass = result['Value']
self.dbName = ''
result = gConfig.getOption( self.cs_path + '/DBName' )
if not result['OK']:
raise RuntimeError( 'Failed to get the configuration parameters: DBName' )
self.dbName = result['Value']
def __init__( self, systemInstance = 'Default', maxQueueSize = 10 ):
"""c'tor
:param self: self reference
"""
self.log = gLogger.getSubLogger( 'RequestDB' )
# Initialize the connection info
self.__getDBConnectionInfo( 'RequestManagement/ReqDB' )
runDebug = ( gLogger.getLevel() == 'DEBUG' )
self.engine = create_engine( 'mysql://%s:%s@%s/%s' % ( self.dbUser, self.dbPass, self.dbHost, self.dbName ),
echo = runDebug )
metadata.bind = self.engine
self.DBSession = sessionmaker( bind = self.engine )
def createTables( self, toCreate = None, force = False ):
""" create tables """
try:
metadata.create_all( self.engine )
except Exception, e:
return S_ERROR( e )
return S_OK()
@staticmethod
def getTableMeta():
""" get db schema in a dict format """
return dict( [ ( classDef.__name__, None )
for classDef in ( Request, Operation, File ) ] )
def getTables(self):
""" Return the table names """
return S_OK( metadata.tables.keys() )
def cancelRequest( self, requestID ):
session = self.DBSession()
try:
updateRet = session.execute( update( Request )\
.where( Request.RequestID == requestID )\
.values( {Request._Status : 'Canceled',
Request._LastUpdate : datetime.datetime.utcnow()\
.strftime( Request._datetimeFormat )
}
)
)
session.commit()
# No row was changed
if not updateRet.rowcount:
return S_ERROR( "No such request %s" % requestID )
return S_OK()
except Exception, e:
session.rollback()
self.log.exception( "cancelRequest: unexpected exception", lException = e )
return S_ERROR( "cancelRequest: unexpected exception %s" % e )
finally:
session.close()
def putRequest( self, request ):
""" update or insert request into db
:param Request request: Request instance
"""
session = self.DBSession( expire_on_commit = False )
try:
try:
if hasattr( request, 'RequestID' ):
status = session.query( Request._Status )\
.filter( Request.RequestID == request.RequestID )\
.one()
if status[0] == 'Canceled':
self.log.info( "Request %s(%s) was canceled, don't put it back" % ( request.RequestID, request.RequestName ) )
return S_OK( request.RequestID )
except NoResultFound, e:
pass
# Since the object request is not attached to the session, we merge it to have an update
# instead of an insert with duplicate primary key
request = session.merge( request )
session.add( request )
session.commit()
session.expunge_all()
return S_OK( request.RequestID )
except Exception, e:
session.rollback()
self.log.exception( "putRequest: unexpected exception", lException = e )
return S_ERROR( "putRequest: unexpected exception %s" % e )
finally:
session.close()
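  # Illustrative put/get cycle (a sketch, not executed here; it assumes the CS section
  # read in __init__ resolves to a reachable MySQL instance):
  #
  #   reqDB = RequestDB()
  #   res = reqDB.putRequest( request )   # S_OK( RequestID ) or S_ERROR( ... )
  #   if res['OK']:
  #     picked = reqDB.getRequest()       # selects a 'Waiting' request and marks it 'Assigned'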
def getScheduledRequest( self, operationID ):
session = self.DBSession()
try:
requestID = session.query( Request.RequestID )\
.join( Request.__operations__ )\
.filter( Operation.OperationID == operationID )\
.one()
return self.getRequest( requestID[0] )
except NoResultFound, e:
return S_OK()
finally:
session.close()
#
# def getRequestName( self, requestID ):
# """ get Request.RequestName for a given Request.RequestID """
#
# session = self.DBSession()
# try:
# requestName = session.query( Request.RequestName )\
# .filter( Request.RequestID == requestID )\
# .one()
# return S_OK( requestName[0] )
# except NoResultFound, e:
# return S_ERROR( "getRequestName: no request found for RequestID=%s" % requestID )
# finally:
# session.close()
def getRequest( self, reqID = 0, assigned = True ):
""" read request for execution
:param reqID: request's ID (default 0) If 0, take a pseudo random one
"""
# expire_on_commit is set to False so that we can still use the object after we close the session
session = self.DBSession( expire_on_commit = False )
log = self.log.getSubLogger( 'getRequest' if assigned else 'peekRequest' )
requestID = None
try:
if reqID:
requestID = reqID
log.verbose( "selecting request '%s'%s" % ( reqID, ' (Assigned)' if assigned else '' ) )
status = None
try:
status = session.query( Request._Status )\
.filter( Request.RequestID == reqID )\
.one()
except NoResultFound, e:
return S_ERROR( "getRequest: request '%s' not exists" % reqID )
        # .one() returns a one-column row, so compare its first element
        if status and status[0] == "Assigned" and assigned:
return S_ERROR( "getRequest: status of request '%s' is 'Assigned', request cannot be selected" % reqID )
else:
now = datetime.datetime.utcnow().replace( microsecond = 0 )
reqIDs = set()
try:
reqAscIDs = session.query( Request.RequestID )\
.filter( Request._Status == 'Waiting' )\
.filter( Request._NotBefore < now )\
.order_by( Request._LastUpdate )\
.limit( 100 )\
.all()
reqIDs = set( [reqID[0] for reqID in reqAscIDs] )
reqDescIDs = session.query( Request.RequestID )\
.filter( Request._Status == 'Waiting' )\
.filter( Request._NotBefore < now )\
.order_by( Request._LastUpdate.desc() )\
.limit( 50 )\
.all()
reqIDs |= set( [reqID[0] for reqID in reqDescIDs] )
# No Waiting requests
except NoResultFound, e:
return S_OK()
if not reqIDs:
return S_OK()
reqIDs = list( reqIDs )
random.shuffle( reqIDs )
requestID = reqIDs[0]
# If we are here, the request MUST exist, so no try catch
# the joinedload_all is to force the non-lazy loading of all the attributes, especially _parent
request = session.query( Request )\
.options( joinedload_all( '__operations__.__files__' ) )\
.filter( Request.RequestID == requestID )\
.one()
if not reqID:
log.verbose( "selected request %s('%s')%s" % ( request.RequestID, request.RequestName, ' (Assigned)' if assigned else '' ) )
if assigned:
session.execute( update( Request )\
.where( Request.RequestID == requestID )\
.values( {Request._Status : 'Assigned',
Request._LastUpdate : datetime.datetime.utcnow()\
.strftime( Request._datetimeFormat )} )
)
session.commit()
session.expunge_all()
return S_OK( request )
except Exception, e:
session.rollback()
log.exception( "getRequest: unexpected exception", lException = e )
return S_ERROR( "getRequest: unexpected exception : %s" % e )
finally:
session.close()
def getBulkRequests( self, numberOfRequest = 10, assigned = True ):
""" read as many requests as requested for execution
:param int numberOfRequest: Number of Request we want (default 10)
:param bool assigned: if True, the status of the selected requests are set to assign
:returns a dictionary of Request objects indexed on the RequestID
"""
# expire_on_commit is set to False so that we can still use the object after we close the session
session = self.DBSession( expire_on_commit = False )
log = self.log.getSubLogger( 'getBulkRequest' if assigned else 'peekBulkRequest' )
requestDict = {}
try:
# If we are here, the request MUST exist, so no try catch
# the joinedload_all is to force the non-lazy loading of all the attributes, especially _parent
try:
requests = session.query( Request )\
.options( joinedload_all( '__operations__.__files__' ) )\
.filter( Request._Status == 'Waiting' )\
.order_by( Request._LastUpdate )\
.limit( numberOfRequest )\
.all()
requestDict = dict((req.RequestID, req) for req in requests)
# No Waiting requests
except NoResultFound, e:
pass
if assigned and requestDict:
session.execute( update( Request )\
.where( Request.RequestID.in_( requestDict.keys() ) )\
.values( {Request._Status : 'Assigned'} )
)
session.commit()
session.expunge_all()
except Exception, e:
session.rollback()
log.exception( "unexpected exception", lException = e )
return S_ERROR( "getBulkRequest: unexpected exception : %s" % e )
finally:
session.close()
return S_OK( requestDict )
def peekRequest( self, requestID ):
""" get request (ro), no update on states
:param requestID: Request.RequestID
"""
return self.getRequest( requestID, False )
def getRequestIDsList( self, statusList = None, limit = None, since = None, until = None ):
""" select requests with status in :statusList: """
statusList = statusList if statusList else list( Request.FINAL_STATES )
limit = limit if limit else 100
session = self.DBSession()
requestIDs = []
try:
reqQuery = session.query( Request.RequestID, Request._Status, Request._LastUpdate )\
.filter( Request._Status.in_( statusList ) )
if since:
reqQuery = reqQuery.filter( Request._LastUpdate > since )
if until:
reqQuery = reqQuery.filter( Request._LastUpdate < until )
reqQuery = reqQuery.order_by( Request._LastUpdate )\
.limit( limit )
requestIDs = [reqIDTuple[0] for reqIDTuple in reqQuery.all()]
except Exception, e:
session.rollback()
self.log.exception( "getRequestIDsList: unexpected exception", lException = e )
return S_ERROR( "getRequestIDsList: unexpected exception : %s" % e )
finally:
session.close()
return S_OK( requestIDs )
def deleteRequest( self, requestID ):
""" delete request given its ID
:param str requestID: request.RequestID
"""
session = self.DBSession()
try:
session.query( Request ).filter( Request.RequestID == requestID ).delete()
session.commit()
except Exception, e:
session.rollback()
self.log.exception( "deleteRequest: unexpected exception", lException = e )
return S_ERROR( "deleteRequest: unexpected exception : %s" % e )
finally:
session.close()
return S_OK()
def getDBSummary( self ):
""" get db summary """
# # this will be returned
retDict = { "Request" : {}, "Operation" : {}, "File" : {} }
session = self.DBSession()
try:
requestQuery = session.query( Request._Status, func.count( Request.RequestID ) )\
.group_by( Request._Status )\
.all()
for status, count in requestQuery:
retDict["Request"][status] = count
operationQuery = session.query(Operation.Type, Operation._Status, func.count(Operation.OperationID))\
.group_by( Operation.Type, Operation._Status )\
.all()
for oType, status, count in operationQuery:
retDict['Operation'].setdefault( oType, {} )[status] = count
fileQuery = session.query( File._Status, func.count( File.FileID ) )\
.group_by( File._Status )\
.all()
for status, count in fileQuery:
retDict["File"][status] = count
except Exception, e:
self.log.exception( "getDBSummary: unexpected exception", lException = e )
return S_ERROR( "getDBSummary: unexpected exception : %s" % e )
finally:
session.close()
return S_OK( retDict )
def getRequestSummaryWeb( self, selectDict, sortList, startItem, maxItems ):
""" Returns a list of Request for the web portal
:param dict selectDict: parameter on which to restrain the query {key : Value}
key can be any of the Request columns, 'Type' (interpreted as Operation.Type)
and 'FromData' and 'ToData' are matched against the LastUpdate field
:param list sortList: [sorting column, ASC/DESC]
:param int startItem: start item (for pagination)
:param int maxItems: max items (for pagination)
"""
parameterList = [ 'RequestID', 'RequestName', 'JobID', 'OwnerDN', 'OwnerGroup',
'Status', "Error", "CreationTime", "LastUpdate"]
resultDict = {}
session = self.DBSession()
try:
summaryQuery = session.query( Request.RequestID, Request.RequestName,
Request.JobID, Request.OwnerDN, Request.OwnerGroup,
Request._Status, Request.Error,
Request._CreationTime, Request._LastUpdate )
for key, value in selectDict.items():
if key == 'ToDate':
summaryQuery = summaryQuery.filter( Request._LastUpdate < value )
elif key == 'FromDate':
summaryQuery = summaryQuery.filter( Request._LastUpdate > value )
else:
tableName = 'Request'
if key == 'Type':
summaryQuery = summaryQuery.join( Request.__operations__ )\
.group_by( Request.RequestID )
tableName = 'Operation'
elif key == 'Status':
key = '_Status'
if type( value ) == ListType:
summaryQuery = summaryQuery.filter( eval( '%s.%s.in_(%s)' % ( tableName, key, value ) ) )
else:
summaryQuery = summaryQuery.filter( eval( '%s.%s' % ( tableName, key ) ) == value )
if sortList:
summaryQuery = summaryQuery.order_by( eval( 'Request.%s.%s()' % ( sortList[0][0], sortList[0][1].lower() ) ) )
try:
requestLists = summaryQuery.all()
except NoResultFound, e:
resultDict['ParameterNames'] = parameterList
resultDict['Records'] = []
return S_OK( resultDict )
except Exception, e:
return S_ERROR( 'Error getting the webSummary %s' % e )
nRequests = len( requestLists )
if startItem <= len( requestLists ):
firstIndex = startItem
else:
return S_ERROR( 'getRequestSummaryWeb: Requested index out of range' )
if ( startItem + maxItems ) <= len( requestLists ):
secondIndex = startItem + maxItems
else:
secondIndex = len( requestLists )
records = []
for i in range( firstIndex, secondIndex ):
row = requestLists[i]
records.append( [ str( x ) for x in row] )
resultDict['ParameterNames'] = parameterList
resultDict['Records'] = records
resultDict['TotalRecords'] = nRequests
return S_OK( resultDict )
#
except Exception, e:
self.log.exception( "getRequestSummaryWeb: unexpected exception", lException = e )
return S_ERROR( "getRequestSummaryWeb: unexpected exception : %s" % e )
finally:
session.close()
def getRequestCountersWeb( self, groupingAttribute, selectDict ):
""" For the web portal.
Returns a dictionary {value : counts} for a given key.
The key can be any field from the RequestTable. or "Type",
which will be interpreted as 'Operation.Type'
"""
resultDict = {}
session = self.DBSession()
if groupingAttribute == 'Type':
groupingAttribute = 'Operation.Type'
elif groupingAttribute == 'Status':
groupingAttribute = 'Request._Status'
else:
groupingAttribute = 'Request.%s' % groupingAttribute
try:
summaryQuery = session.query( eval( groupingAttribute ), func.count( Request.RequestID ) )
for key, value in selectDict.items():
if key == 'ToDate':
summaryQuery = summaryQuery.filter( Request._LastUpdate < value )
elif key == 'FromDate':
summaryQuery = summaryQuery.filter( Request._LastUpdate > value )
else:
objectType = 'Request'
if key == 'Type':
summaryQuery = summaryQuery.join( Request.__operations__ )
objectType = 'Operation'
elif key == 'Status':
key = '_Status'
if type( value ) == ListType:
summaryQuery = summaryQuery.filter( eval( '%s.%s.in_(%s)' % ( objectType, key, value ) ) )
else:
summaryQuery = summaryQuery.filter( eval( '%s.%s' % ( objectType, key ) ) == value )
summaryQuery = summaryQuery.group_by( groupingAttribute )
try:
requestLists = summaryQuery.all()
resultDict = dict( requestLists )
except NoResultFound, e:
pass
except Exception, e:
return S_ERROR( 'Error getting the webCounters %s' % e )
return S_OK( resultDict )
except Exception, e:
self.log.exception( "getRequestSummaryWeb: unexpected exception", lException = e )
return S_ERROR( "getRequestSummaryWeb: unexpected exception : %s" % e )
finally:
session.close()
def getDistinctValues( self, tableName, columnName ):
""" For a given table and a given field, return the list of of distinct values in the DB"""
session = self.DBSession()
distinctValues = []
try:
result = session.query( distinct( eval ( "%s.%s" % ( tableName, columnName ) ) ) ).all()
distinctValues = [dist[0] for dist in result]
except NoResultFound, e:
pass
except Exception, e:
self.log.exception( "getDistinctValues: unexpected exception", lException = e )
return S_ERROR( "getDistinctValues: unexpected exception : %s" % e )
finally:
session.close()
return S_OK( distinctValues )
def getRequestIDsForJobs( self, jobIDs ):
""" read request ids for jobs given jobIDs
:param list jobIDs: list of jobIDs
"""
self.log.debug( "getRequestIDsForJobs: got %s jobIDs to check" % str( jobIDs ) )
if not jobIDs:
return S_ERROR( "Must provide jobID list as argument." )
if type( jobIDs ) in ( long, int ):
jobIDs = [ jobIDs ]
jobIDs = set( jobIDs )
reqDict = { "Successful": {}, "Failed": {} }
session = self.DBSession()
try:
ret = session.query( Request.JobID, Request.RequestID )\
.filter( Request.JobID.in_( jobIDs ) )\
.all()
reqDict['Successful'] = dict( ( jobId, reqID ) for jobId, reqID in ret )
reqDict['Failed'] = dict( (jobid, 'Request not found') for jobid in jobIDs - set(reqDict['Successful']))
except Exception, e:
self.log.exception( "getRequestIDsForJobs: unexpected exception", lException = e )
return S_ERROR( "getRequestIDsForJobs: unexpected exception : %s" % e )
finally:
session.close()
return S_OK( reqDict )
def readRequestsForJobs( self, jobIDs = None ):
""" read request for jobs
:param list jobIDs: list of JobIDs
:return: S_OK( "Successful" : { jobID1 : Request, jobID2: Request, ... }
"Failed" : { jobID3: "error message", ... } )
"""
self.log.debug( "readRequestForJobs: got %s jobIDs to check" % str( jobIDs ) )
if not jobIDs:
return S_ERROR( "Must provide jobID list as argument." )
if type( jobIDs ) in ( long, int ):
jobIDs = [ jobIDs ]
jobIDs = set( jobIDs )
reqDict = { "Successful": {}, "Failed": {} }
# expire_on_commit is set to False so that we can still use the object after we close the session
session = self.DBSession( expire_on_commit = False )
try:
ret = session.query( Request.JobID, Request )\
.options( joinedload_all( '__operations__.__files__' ) )\
.filter( Request.JobID.in_( jobIDs ) ).all()
reqDict['Successful'] = dict( ( jobId, reqObj ) for jobId, reqObj in ret )
reqDict['Failed'] = dict( ( jobid, 'Request not found' ) for jobid in jobIDs - set( reqDict['Successful'] ) )
session.expunge_all()
except Exception, e:
self.log.exception( "readRequestsForJobs: unexpected exception", lException = e )
return S_ERROR( "readRequestsForJobs: unexpected exception : %s" % e )
finally:
session.close()
return S_OK( reqDict )
def getRequestStatus( self, requestID ):
""" get request status for a given request ID """
self.log.debug( "getRequestStatus: checking status for '%s' request" % requestID )
session = self.DBSession()
try:
status = session.query( Request._Status ).filter( Request.RequestID == requestID ).one()
except NoResultFound, e:
return S_ERROR( "Request %s does not exist" % requestID )
finally:
session.close()
return S_OK( status[0] )
def getRequestFileStatus( self, requestID, lfnList ):
""" get status for files in request given its id
:param str requestID: Request.RequestID
:param list lfnList: list of LFNs
"""
session = self.DBSession()
try:
res = dict.fromkeys( lfnList, "UNKNOWN" )
requestRet = session.query( File._LFN, File._Status )\
.join( Request.__operations__ )\
.join( Operation.__files__ )\
.filter( Request.RequestID == requestID )\
.filter( File._LFN.in_( lfnList ) )\
.all()
for lfn, status in requestRet:
res[lfn] = status
return S_OK( res )
except Exception, e:
self.log.exception( "getRequestFileStatus: unexpected exception", lException = e )
return S_ERROR( "getRequestFileStatus: unexpected exception : %s" % e )
finally:
session.close()
def getRequestInfo( self, requestID ):
""" get request info given Request.RequestID """
session = self.DBSession()
try:
requestInfoQuery = session.query( Request.RequestID, Request._Status, Request.RequestName,
Request.JobID, Request.OwnerDN, Request.OwnerGroup,
Request.DIRACSetup, Request.SourceComponent, Request._CreationTime,
Request._SubmitTime, Request._LastUpdate )\
.filter( Request.RequestID == requestID )
try:
requestInfo = requestInfoQuery.one()
except NoResultFound, e:
return S_ERROR( 'No such request' )
return S_OK( tuple( requestInfo ) )
except Exception, e:
self.log.exception( "getRequestInfo: unexpected exception", lException = e )
return S_ERROR( "getRequestInfo: unexpected exception : %s" % e )
finally:
session.close()
def getDigest( self, requestID ):
""" get digest for request given its id
    :param str requestID: request ID
"""
self.log.debug( "getDigest: will create digest for request '%s'" % requestID )
request = self.getRequest( requestID, False )
if not request["OK"]:
self.log.error( "getDigest: %s" % request["Message"] )
request = request["Value"]
if not isinstance( request, Request ):
self.log.info( "getDigest: request '%s' not found" )
return S_OK()
return request.getDigest()
def getRequestIDForName( self, requestName ):
""" read request id for given name
if the name is not unique, an error is returned
:param requestName : name of the request
"""
session = self.DBSession()
reqID = 0
try:
ret = session.query( Request.RequestID )\
.filter( Request.RequestName == requestName )\
.all()
if not ret:
return S_ERROR( 'No such request %s' % requestName )
elif len( ret ) > 1:
return S_ERROR( 'RequestName %s not unique (%s matches)' % ( requestName, len( ret ) ) )
reqID = ret[0][0]
except NoResultFound, e:
return S_ERROR( 'No such request' )
except Exception, e:
self.log.exception( "getRequestIDsForName: unexpected exception", lException = e )
return S_ERROR( "getRequestIDsForName: unexpected exception : %s" % e )
finally:
session.close()
return S_OK( reqID )
| gpl-3.0 | -7,894,808,296,504,958,000 | 34.100735 | 154 | 0.572868 | false |
cgimenop/Excel2Testlink | ExcelParser/lib/openpyxl/chart/tests/test_surface_chart.py | 1 | 3879 | from __future__ import absolute_import
# Copyright (c) 2010-2015 openpyxl
import pytest
from openpyxl.xml.functions import fromstring, tostring
from openpyxl.tests.helper import compare_xml
@pytest.fixture
def SurfaceChart():
from ..surface_chart import SurfaceChart
return SurfaceChart
class TestSurfaceChart:
def test_ctor(self, SurfaceChart):
chart = SurfaceChart()
xml = tostring(chart.to_tree())
expected = """
<surfaceChart>
<axId val="10"></axId>
<axId val="100"></axId>
<axId val="1000"></axId>
</surfaceChart>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, SurfaceChart):
src = """
<surfaceChart>
<wireframe val="0"/>
<ser>
<idx val="0"/>
<order val="0"/>
</ser>
<ser>
<idx val="1"/>
<order val="1"/>
</ser>
<bandFmts/>
<axId val="2086876920"/>
<axId val="2078923400"/>
<axId val="2079274408"/>
</surfaceChart>
"""
node = fromstring(src)
chart = SurfaceChart.from_tree(node)
assert [a.val for a in chart.axId] == [10, 100, 1000]
@pytest.fixture
def SurfaceChart3D():
from ..surface_chart import SurfaceChart3D
return SurfaceChart3D
class TestSurfaceChart3D:
def test_ctor(self, SurfaceChart3D):
chart = SurfaceChart3D()
xml = tostring(chart.to_tree())
expected = """
<surface3DChart>
<axId val="10"></axId>
<axId val="100"></axId>
<axId val="1000"></axId>
</surface3DChart>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, SurfaceChart3D):
src = """
<surface3DChart>
<wireframe val="0"/>
<ser>
<idx val="0"/>
<order val="0"/>
<val>
<numRef>
<f>Blatt1!$A$1:$A$12</f>
</numRef>
</val>
</ser>
<ser>
<idx val="1"/>
<order val="1"/>
<val>
<numRef>
<f>Blatt1!$B$1:$B$12</f>
</numRef>
</val>
</ser>
<bandFmts/>
<axId val="2082935272"/>
<axId val="2082938248"/>
<axId val="2082941288"/>
</surface3DChart>
"""
node = fromstring(src)
chart = SurfaceChart3D.from_tree(node)
assert len(chart.ser) == 2
assert [a.val for a in chart.axId] == [10, 100, 1000]
@pytest.fixture
def BandFormat():
from ..surface_chart import BandFormat
return BandFormat
class TestBandFormat:
def test_ctor(self, BandFormat):
fmt = BandFormat()
xml = tostring(fmt.to_tree())
expected = """
<bandFmt>
<idx val="0" />
</bandFmt>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, BandFormat):
src = """
<bandFmt>
<idx val="4"></idx>
</bandFmt>
"""
node = fromstring(src)
fmt = BandFormat.from_tree(node)
assert fmt == BandFormat(idx=4)
@pytest.fixture
def BandFormatList():
from ..surface_chart import BandFormatList
return BandFormatList
class TestBandFormatList:
def test_ctor(self, BandFormatList):
fmt = BandFormatList()
xml = tostring(fmt.to_tree())
expected = """
<bandFmts />
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, BandFormatList):
src = """
<bandFmts />
"""
node = fromstring(src)
fmt = BandFormatList.from_tree(node)
assert fmt == BandFormatList()
| mit | 3,734,306,017,059,204,600 | 22.509091 | 61 | 0.523331 | false |
warmlogic/SOCO | config.py | 1 | 6903 | # SOCO configuration
expName = 'SOCO'
# Number of sessions per subject
numSessions = 1
isEEG = True
playAudio = False
#############################
# Exp organization
#############################
# Number of trials/lists per session
numLists = 4
# Number of target stimuli per trial/list
study_listLen = 100
# number of buffer stimuli to add to start and end of lists
study_bufferStart = 2
study_bufferEnd = 2
# Recognition & Source task
test_numLures = 50
# Do a practice trial?
practiceList = True
prac_numLists = 2
pracstudy_listLen = 12
pracstudy_bufferStart = 2
pracstudy_bufferEnd = 2
practest_numLures = 6
#############################
# Display setup
#############################
# Stim setup
PICTURE_SIZE = 0.3
mid_coord = 0.5
top_coord = 0.05
bot_coord = 0.95
fback_coord = 0.25
# 15" PowerBook (1280x800); E013 iMac (1440x900)
#left_coord = 0.31
#right_coord = 0.69
# Wide (1680x1050): 15" MBP, office display
#left_coord = 0.31
#right_coord = 0.69
# Square (1024x768): D458? linux EEG
#left_coord = 0.28
#right_coord = 0.72
# Square (1280x1024): D464 linux EEG
left_coord = 0.25
right_coord = 0.75
#KEYIMAGE_SIZE = (0.96,0.12)
# Word Font size (proportion of vertical screen)
test_wordHeight = .05
instruct_wordHeight = .05
fixation_wordHeight = .05
# Orienting Stimulus text
orientText = '+'
# color frame info
#colors_names = ('red','blue')
#colors_rgb = ((255,0,0,80),(0,0,255,80))
colors_rgb = [(126,30,156,255),(21,176,26,255),(3,67,223,255),(255,129,192,255),(229,0,0,255),(249,115,6,255),(255,255,20,255),(101,55,0,255)]
color_names = ('purple','green','blue','pink','red','orange','yellow','brown')
#(2,147,134,255),(0,255,255,255)
# color_names = ('teal','cyan')
color_frame_side_prop = 0.2 # proportion of image size
color_frame_top_prop = 0.2 # proportion of image size
test_rect_side_prop = 0.15 # proportion of image size
test_rect_top_prop = 0.15 # proportion of image size
#################################
# EEG setup
#################################
# initial resting period duration
restEEGDuration = 20000
# this is the key that the experimenter has to hit to continue after
# impedance check
endImpedanceKey = "G"
# choose a huge number for the impedance countdown
maxImpedanceTime = 100000000
# number of blink breaks
study_blinkBreaksPerList = 3
test_blinkBreaksPerList = 10
# EEG: Pause after a blinking period to subject can finish blinking
pauseAfterBlink = 2000
textAfterBlink = "Get ready..."
# BEHAVIORAL ONLY: amount of time to break between study and test (isEEG == False)
breakTime = 300000 # 300000ms=5min
################################
# Study period parameters
################################
# Pause+Jitter after orienting cross before first study stim; only
# happens once on each list
study_preStimDelay = 2000
study_preStimJitter = 500
# Duration that the study stim is on the screen
study_stimDuration = 1500
study_stimJitter = None
# ISI+Jitter after study stim is cleared from the screen
study_ISI = 500
study_ISIJitter = 250
############################################################
# Test period parameters
############################################################
# keys for the test period
leftKeys_test = ('Z','X')
rightKeys_test = ('.','/')
newText_test = "N"
rememSourceText_test = "RC"
rememOtherText_test = "RO"
knowText_test = "F"
sureText_test = "Sure"
maybeText_test = "Maybe"
# # widescreen
# test_color_left_x = (0.53, 0.47)
# test_color_right_x = (0.47, 0.53)
# test_new_x = (0.43, 0.57)
# test_rs_x = (0.535, 0.465)
# test_ro_x = (0.465, 0.535)
# test_k_x = (0.425, 0.575)
# test_sure_x = (0.445, 0.555)
# test_maybe_x = (0.57, 0.425)
# square screen
test_color_left_x = (0.54, 0.46)
test_color_right_x = (0.46, 0.54)
test_new_x = (0.41, 0.59)
test_rs_x = (0.545, 0.455)
test_ro_x = (0.455, 0.545)
test_k_x = (0.40, 0.595)
test_sure_x = (0.43, 0.57)
test_maybe_x = (0.59, 0.405)
# Pause+Jitter before test period begins; happens once per list
test_preStimDelay = 2000
test_preStimJitter = 500
# amount of time to show fixation cross
test_preview = 750
test_previewJitter = None
#test_preStimDelay = 500
#test_preStimJitter = 150
# amount of time to show test stimulus
test_stimDuration = 750
test_stimJitter = None
# delay after each stim before response period
test_preRespOrientDelay = 1500
test_preRespOrientJitter = None
#test_preRespBlankDelay = 900
# min and max response duration
test_minRespDuration = 100
test_maxRespDuration = 30000
# delay after response period
test_ISI = 500
test_ISIJitter = 250
#############################################
# Other experiment parameters
#############################################
# countdown timer between lists
ILI_timer = True
ILI_dur = 30000
ILI_key = 'SPACE'
# make the stimulus pools
objectStimuli = 'images/object_stims'
#objectStimuliTxt = 'images/object_stims.txt'
objectBuffers = 'images/object_buffers'
#objectBuffersTxt = 'images/object_buffers.txt'
noiseStimuli = 'images/noise_stim'
#noiseStim = 'images/noise/noise1.png'
presentationType = 'image' # image, sound, text
presentationAttribute = 'name' # attribute to use to create the text
# description
# Trial text
# Beh
#trialBeginText = "Press any key for Trial #%d"
#testBeginText = "Press any key to begin test"
# EEG
sesBeginText = "Press any key for Session #%d."
trialBeginText = "Press any key for Trial #%d."
studyBeginText = "Blink now.\nPress space to begin Study #%d."
testBeginText = "Blink now.\nPress space to begin Test #%d."
studyPracBeginText = "Blink now.\nPress space to begin Study Practice."
testPracBeginText = "Blink now.\nPress space to begin Test Practice."
restEEGPrep = "Press any key to record resting data."
restEEGText = "Recording resting data. Please sit still..."
blinkRestText_study = "Blink now.\nPress any key to continue study period."
blinkRestText_test = "Blink now.\nPress any key to continue test period."
#confidenceText = "Sure | Probably | Guess | Guess | Probably | Sure"
# Set up the beeps
hiBeepFreq = 800
hiBeepDur = 500
hiBeepRiseFall = 100
loBeepFreq = 400
loBeepDur = 500
loBeepRiseFall = 100
# Instructions text file
instruct_getready = 'text/instruct_getready.txt'
# BEH
#instruct_intro = 'text/beh/instruct_intro.txt'
#instruct_study_practice = 'text/beh/instruct_study_practice.txt'
#instruct_test_practice = 'text/beh/instruct_test_practice.txt'
#instruct_study = 'text/beh/instruct_study.txt'
#instruct_test = 'text/beh/instruct_test.txt'
#midSessionBreak = 'text/beh/midSessionBreak.txt'
# EEG
instruct_intro = 'text/eeg/instruct_intro_eeg.txt'
instruct_study_practice = 'text/eeg/instruct_study_practice_eeg.txt'
instruct_test_practice = 'text/eeg/instruct_test_practice_eeg.txt'
instruct_study = 'text/eeg/instruct_study_eeg.txt'
instruct_test = 'text/eeg/instruct_test_eeg.txt'
midSessionBreak = 'text/eeg/midSessionBreak.txt'
# Default font
defaultFont = 'fonts/Verdana.ttf'
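# Note (assumption about the runtime, not defined in this file): the *_Jitter values above
# are normally applied by the experiment code as a random offset around the corresponding
# base duration, e.g. an ISI of 500 ms with a 250 ms jitter gives delays near 500-750 ms
# (or centred on 500 ms, depending on the library's convention).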
| gpl-3.0 | 1,999,850,138,623,218,700 | 27.882845 | 142 | 0.675359 | false |
Mr-F/dotmailer | dotmailer/surveys.py | 1 | 2805 | from dotmailer import Base
from dotmailer.connection import connection
class Survey(Base):
"""
"""
end_point = '/v2/surveys'
def __init__(self, **kwargs):
self.required_fields = []
super(Survey, self).__init__(**kwargs)
@classmethod
def get_multiple(cls, assigned_to_address_book_only=True, select=1000,
skip=0):
if assigned_to_address_book_only:
assigned_to_address_book_only = 'true'
else:
assigned_to_address_book_only = 'false'
response = connection.get(
cls.end_point,
query_params={
'AssignedToAddressBookOnly': assigned_to_address_book_only,
'Select': select,
'Skip': skip
}
)
return [cls(**entry) for entry in response]
@classmethod
def get_all(cls, assigned_to_address_book_only=True):
"""
Gets a list of all surveys in the account
        :param assigned_to_address_book_only: A boolean value indicating
            whether we should only retrieve surveys that have been
            assigned to an address book. The default value for this is
            True.
:return:
"""
select = 1000
skip = 0
all_surveys = []
surveys = cls.get_multiple(assigned_to_address_book_only, select, skip)
num_of_entries = len(surveys)
while num_of_entries > 0:
all_surveys.extend(surveys)
# If there weren't enough entries then there are no more to
# load so simply break out of the loop
if num_of_entries < select:
break
skip += select
surveys = cls.get_multiple(assigned_to_address_book_only, select,
skip)
num_of_entries = len(surveys)
return all_surveys
@classmethod
def get_by_id(cls, id):
"""
Get a survey by it's ID value
:param id: The DotMailer unique ID value for the survey
:return:
"""
# Cast the ID parameter to an integer
id = int(id)
# Check that the ID parameter is greater than zero, if not raise
# an exception.
if id < 1:
            raise Exception('Survey ID must be greater than zero, got %s' % id)
response = connection.get(
'{}/{}'.format(cls.end_point, id)
)
return cls(**response)
@classmethod
def get_survey_fields(cls, id):
"""
Gets a list of survey pages, each containing a list of the
fields on that page
:param id:
:return:
"""
response = connection.get(
'{}/{}/fields'.format(cls.end_point, id)
)
return response
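# Minimal usage sketch (illustrative; assumes the package-level `connection` has been
# initialised with valid DotMailer API credentials, and that returned Survey objects
# expose the API's `id` field as an attribute):
#
#   surveys = Survey.get_all(assigned_to_address_book_only=False)
#   if surveys:
#       first = Survey.get_by_id(surveys[0].id)
#       pages = Survey.get_survey_fields(first.id)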
| mit | -2,345,594,443,191,682,600 | 27.05 | 79 | 0.529768 | false |
kernsuite-debian/lofar | SAS/ResourceAssignment/ResourceAssignmentEstimator/resource_estimators/observation.py | 1 | 30414 | # observation.py
#
# Copyright (C) 2016-2017
# ASTRON (Netherlands Institute for Radio Astronomy)
# P.O.Box 2, 7990 AA Dwingeloo, The Netherlands
#
# This file is part of the LOFAR software suite.
# The LOFAR software suite is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# The LOFAR software suite is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>.
#
# $Id$
import logging
import pprint
from math import ceil
from .base_resource_estimator import BaseResourceEstimator
from lofar.stationmodel.antennasets_parser import AntennaSetsParser
logger = logging.getLogger(__name__)
DATAPRODUCTS = "Observation.DataProducts."
COBALT = "Observation.ObservationControl.OnlineControl.Cobalt."
class ObservationResourceEstimator(BaseResourceEstimator):
""" ResourceEstimator for LOFAR Observations
"""
def __init__(self):
logger.info("init ObservationResourceEstimator")
super(ObservationResourceEstimator, self).__init__(name='observation')
self.required_keys = ('Observation.sampleClock',
'Observation.startTime',
'Observation.stopTime',
'Observation.antennaSet',
'Observation.nrBeams',
'Observation.Beam[0].subbandList',
'Observation.nrBitsPerSample',
'Observation.VirtualInstrument.stationList',
COBALT + 'Correlator.nrChannelsPerSubband',
COBALT + 'Correlator.integrationTime',
COBALT + 'BeamFormer.flysEye',
COBALT + 'BeamFormer.CoherentStokes.timeIntegrationFactor',
COBALT + 'BeamFormer.IncoherentStokes.timeIntegrationFactor',
'Observation.VirtualInstrument.stationList',
DATAPRODUCTS + 'Output_Correlated.enabled',
DATAPRODUCTS + 'Output_Correlated.identifications',
DATAPRODUCTS + 'Output_Correlated.storageClusterName',
DATAPRODUCTS + 'Output_CoherentStokes.enabled',
DATAPRODUCTS + 'Output_CoherentStokes.identifications',
DATAPRODUCTS + 'Output_CoherentStokes.storageClusterName',
COBALT + 'BeamFormer.CoherentStokes.which',
DATAPRODUCTS + 'Output_IncoherentStokes.enabled',
DATAPRODUCTS + 'Output_IncoherentStokes.identifications',
DATAPRODUCTS + 'Output_IncoherentStokes.storageClusterName',
COBALT + 'BeamFormer.IncoherentStokes.which'
)
self.asp = AntennaSetsParser()
def _calculate(self, parset, predecessor_estimates=[]):
""" Calculate the resources needed by the different data product types that can be in a single observation.
The predecessor_estimates argument is just to implement the same interface as pipelines. Observations have no predecessor.
The following return value example is for an observation duration of 240.0 s and 3 data product types for 2 clusters.
NOTE: 'nr_of_XX_files' is for that SAP estimate. The total is thus times the 'resource_count'.
'nr_of_cs_parts' is for a full CS TAB (per stokes component) in that SAP; not per estimate, which may still describe one part.
See the calibration pipeline estimator for some explanation on why parts of this format are currently needed. It also has input_files.
{
'errors': [],
'estimates': [{
'resource_types': {'bandwidth': 35791395, 'storage': 1073741824}, # for each uv output data product (thus the total is times the resource_count value)
'resource_count': 20, 'root_resource_group': 'CEP4',
'output_files': {
'uv': [{'sap_nr': 0, 'identification': 'mom.G777955.B2.1.C.SAP000.uv.dps',
'properties': {'uv_file_size': 1073741824, 'nr_of_uv_files': 1, 'start_sb_nr': 0}}]
}
}, {'resource_types': {'bandwidth': 35791395, 'storage': 1073741824}, # idem
'resource_count': 60, 'root_resource_group': 'CEP4',
'output_files': {
'uv': [{'sap_nr': 1, 'identification': 'mom.G777955.B2.1.C.SAP001.uv.dps',
'properties': {'uv_file_size': 1073741824, 'nr_of_uv_files': 1, 'start_sb_nr': 20}}]
}
}, {'resource_types': {'bandwidth': 35791395, 'storage': 1073741824}, # idem
'resource_count': 20, 'root_resource_group': 'CEP4',
'output_files': {
'uv': [{'sap_nr': 2, 'identification': 'mom.G777955.B2.1.C.SAP002.uv.dps',
'properties': {'uv_file_size': 1073741824, 'nr_of_uv_files': 1, 'start_sb_nr': 80}}]
}
}, {'resource_types': {'bandwidth': 71582789, 'storage': 2147483648}, # for each quad (4 stokes) of cs output tab part (thus the total is times the resource_count value)
'resource_count': 34, 'root_resource_group': 'DRAGNET',
'output_files': {
'cs': [{'sap_nr': 0, 'identification': 'mom.G777955.B2.1.C.SAP000.cs.dps',
'properties': {'cs_file_size': 536870912, 'nr_of_cs_files': 4, 'nr_of_cs_stokes': 4,
'nr_of_cs_parts': 2}}] # parts per tab for this sap
}
}, {'resource_types': {'bandwidth': 71582789, 'storage': 2147483648}, # idem
'resource_count': 6, 'root_resource_group': 'DRAGNET',
'output_files': {
'cs': [{'sap_nr': 1, 'identification': 'mom.G777955.B2.1.C.SAP001.cs.dps',
'properties': {'cs_file_size': 536870912, 'nr_of_cs_files': 4, 'nr_of_cs_stokes': 4,
'nr_of_cs_parts': 1, 'is_tab_nr': 0}}] # parts per tab for this sap
}
}, {'resource_types': {'bandwidth': 17895698, 'storage': 536870912}, # for each 'is' output tab part (thus the total is times the resource_count value)
'resource_count': 1, 'root_resource_group': 'DRAGNET',
'output_files': {
'is': [{'sap_nr': 1, 'identification': 'mom.G777955.B2.1.C.SAP001.is.dps',
'properties': {'is_file_size': 536870912, 'nr_of_is_files': 1, 'nr_of_is_stokes': 1,
'is_tab_nr': 0}}] # IS can have >1 parts, but currently max 1 IS TAB per SAP
}
}]
}
"""
logger.info("start estimate '{}'".format(self.name))
logger.info('parset: %s ' % parset)
# NOTE: observation estimates appear quite accurate. Most of the difference comes from Observation.stopTime
# being planned instead of real stop time, because of Cobalt block size not being exactly 1.0 s.
duration = self._getDuration(parset.getString('Observation.startTime'),
parset.getString('Observation.stopTime'))
errors = []
estimates = []
try:
if parset.getBool('Observation.DataProducts.Output_Correlated.enabled'):
estimates.extend(self.correlated(parset, duration))
except ValueError as exc:
logger.error(exc)
errors.append(str(exc))
try:
if parset.getBool('Observation.DataProducts.Output_CoherentStokes.enabled'):
estimates.extend(self.coherentstokes(parset, duration))
except ValueError as exc:
logger.error(exc)
errors.append(str(exc))
try:
if parset.getBool('Observation.DataProducts.Output_IncoherentStokes.enabled'):
estimates.extend(self.incoherentstokes(parset, duration))
except ValueError as exc:
logger.error(exc)
errors.append(str(exc))
if not estimates:
logger.error('no data product estimates in observation resource estimate list!')
errors.append('Produced observation resource estimate list has no data product estimates!')
try:
estimates.extend(self.stations(parset))
except ValueError as exc:
logger.error(exc)
errors.append(str(exc))
logger.debug('Observation resource estimates:\n' + pprint.pformat(estimates))
result = {'errors': errors, 'estimates': estimates}
return result
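    # Illustrative call path (a sketch; the public entry-point name is assumed to come
    # from BaseResourceEstimator and may differ in that base class):
    #
    #   estimator = ObservationResourceEstimator()
    #   result = estimator.verify_and_estimate(parset)  # checks required_keys, then calls _calculate
    #   # result -> {'errors': [...], 'estimates': [...]} in the format documented above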
def correlated(self, parset, duration):
""" Estimate storage size and bandwidth needed for correlated ('uv')
data products. Also add SAP properties needed by the propagator.
The duration argument is a float in (fractional) seconds.
Return list of estimates, max 1 SAP per estimate (easier for assigner),
or raise ValueError on error.
"""
logger.info("calculating correlated data size")
storage_unit = 512 # all sizes in bytes
size_of_header = 512
size_of_overhead = 600000 # COBALT parset in MS HISTORY subtable + misc
size_of_short = 2
size_of_visib = 8 # a visibility is stored as a std::complex<float>
nr_polarizations = 2
channels_per_subband = parset.getInt(COBALT + 'Correlator.nrChannelsPerSubband', 64) # defaults as in COBALT
integration_time = parset.getFloat(COBALT + 'Correlator.integrationTime', 1)
nr_virtual_stations = self._virtual_stations(parset)
# Reflects MeasurementSets produced by the casacore LOFAR storage manager (LofarStMan)
# The sub-expression '+ val-1) / val' computes a rounded (positive) integer division.
integrated_seconds = int(duration / integration_time)
nr_baselines = nr_virtual_stations * (nr_virtual_stations + 1) / 2
data_size = (nr_baselines * channels_per_subband * nr_polarizations * nr_polarizations * \
size_of_visib + storage_unit-1) / storage_unit * storage_unit
n_sample_size = (nr_baselines * channels_per_subband * size_of_short + storage_unit-1) / \
storage_unit * storage_unit
file_size = (data_size + n_sample_size + size_of_header) * integrated_seconds + size_of_overhead # bytes
bandwidth = int(ceil(8 * file_size / duration)) # bits/second
root_resource_group = parset.getString(DATAPRODUCTS + 'Output_Correlated.storageClusterName')
nr_saps = parset.getInt('Observation.nrBeams')
if nr_saps < 1:
raise ValueError("Correlated data output enabled, but nrBeams < 1")
# Estimates may differ per SAP for CS/IS. Decided to always produce a separate estimate per SAP.
# Hence, need to annotate each SAP with the right identifications for pipeline predecessor input filtering.
identifications = parset.getStringVector(DATAPRODUCTS + 'Output_Correlated.identifications')
sap_idents = self._sap_identifications(identifications, nr_saps)
total_files = 0 # sum of all subbands in all digital beams
estimates = []
for sap_nr in range(nr_saps):
subbandList = parset.getStringVector('Observation.Beam[%d].subbandList' % sap_nr)
nr_subbands = len(subbandList)
if nr_subbands == 0:
# Replace here by 'continue' (+ check total_files > 0 at the end) once we support separate subband lists for UV, CS, IS
raise ValueError("Correlated data output enabled, but empty subband list for sap %d" % sap_nr)
est = {'resource_types': {'bandwidth': bandwidth, 'storage': file_size},
'resource_count': nr_subbands,
'root_resource_group': root_resource_group,
'output_files': {'uv': [{'sap_nr': sap_nr, 'identification': sap_idents[sap_nr],
'properties': {'uv_file_size': file_size, 'nr_of_uv_files': 1, # thus total nr_of_uv_files is resource_count times 1
'start_sb_nr': total_files}}]}}
total_files += nr_subbands
estimates.append(est)
logger.debug("Correlated data estimates:\n" + pprint.pformat(estimates))
return estimates
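    # Rough worked example for the sizes above (approximate, for illustration only):
    # with ~50 virtual stations, 64 channels per subband, 2 polarisations and 1 s
    # integration time, data_size + n_sample_size + header comes to roughly 2.8 MB per
    # subband per integrated second, i.e. about 1.7 GB and ~22 Mbit/s per subband for a
    # 10-minute observation.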
def coherentstokes(self, parset, duration):
""" Estimate storage size and bandwidth needed for Coherent Stokes ('cs')
data products. Also add SAP properties needed by the propagator.
The duration argument is a float in (fractional) seconds.
Return list of estimates, max 1 SAP per estimate (easier for assigner),
or raise ValueError on error.
"""
logger.info("calculate coherent stokes data size")
size_of_sample = 4 # single precision float
coherent_type = parset.getString(COBALT + 'BeamFormer.CoherentStokes.which')
subbands_per_file = parset.getInt(COBALT + 'BeamFormer.CoherentStokes.subbandsPerFile', 512)
if subbands_per_file < 0:
raise ValueError('BeamFormer.CoherentStokes.subbandsPerFile may not be negative, but is %d' % subbands_per_file)
if subbands_per_file == 0:
subbands_per_file = 512
samples_per_second = self._samples_per_second(parset)
time_integration_factor = parset.getInt(COBALT + 'BeamFormer.CoherentStokes.timeIntegrationFactor')
# Note that complex voltages (XXYY) cannot be meaningfully integrated (time_integration_factor 1)
size_per_subband = (samples_per_second * size_of_sample * duration) / time_integration_factor
nr_coherent = len(coherent_type) # 'I' or 'IQUV' or 'XXYY'
doFlysEye = parset.getBool(COBALT + 'BeamFormer.flysEye')
root_resource_group = parset.getString(DATAPRODUCTS + 'Output_CoherentStokes.storageClusterName')
nr_saps = parset.getInt('Observation.nrBeams')
if nr_saps < 1:
raise ValueError("Coherent Stokes data output enabled, but nrBeams < 1")
# Estimates may differ per SAP for CS/IS. Decided to always produce a separate estimate per SAP.
# Hence, need to annotate each SAP with the right identifications for pipeline predecessor input filtering.
identifications = parset.getStringVector(DATAPRODUCTS + 'Output_CoherentStokes.identifications')
sap_idents = self._sap_identifications(identifications, nr_saps)
estimates = []
for sap_nr in range(nr_saps):
logger.info("checking SAP {}".format(sap_nr))
subbandList = parset.getStringVector('Observation.Beam[%d].subbandList' % sap_nr)
nr_subbands = len(subbandList)
if nr_subbands == 0:
raise ValueError("Coherent Stokes data output enabled, but empty subband list for sap %d" % sap_nr)
nr_subbands_per_file = min(subbands_per_file, nr_subbands)
nr_coherent_tabs = 0
is_tab_nr = None
nr_tabs = parset.getInt('Observation.Beam[%d].nrTiedArrayBeams' % sap_nr)
for tab_nr in range(nr_tabs):
if not parset.getBool("Observation.Beam[%d].TiedArrayBeam[%d].coherent" % (sap_nr, tab_nr)):
is_tab_nr = tab_nr
logger.info("coherentstokes: skipping incoherent tab")
continue
nr_coherent_tabs += 1
logger.info("added %d coherent tabs before considering tab rings and fly's eye tabs", nr_coherent_tabs)
nr_tab_rings = parset.getInt('Observation.Beam[%d].nrTabRings' % sap_nr)
if nr_tab_rings < 0:
raise ValueError("SAP %d: nr of tab rings is < 0: %d" % (sap_nr, nr_tab_rings))
elif nr_tab_rings > 0:
nr_tabs = (3 * nr_tab_rings * (nr_tab_rings + 1) + 1)
nr_coherent_tabs += nr_tabs
logger.info("added %d tabs from %d tab rings", nr_tabs, nr_tab_rings)
if doFlysEye:
nr_tabs = self._virtual_stations(parset)
nr_coherent_tabs += nr_tabs
logger.info("added %d fly's eye tabs", nr_tabs)
if nr_coherent_tabs == 0:
raise ValueError("Coherent Stokes data output enabled, but no coherent tabs for sap %d" % sap_nr)
# Keep XXYY/IQUV together (>1 parts still possible).
# Else translator to parset filenames cannot know which stokes (nr_of_XX_stokes property too coarse).
# Also for complex voltages (XXYY) only: pipeline needs all 4 XXYY accessible from the same node.
#
# NOTE: If a TAB is split into parts, then the last TAB part may contain fewer subbands.
# Simplify: compute a single (max) file size for all TABs or TAB parts.
file_size = int(nr_subbands_per_file * size_per_subband) # bytes
storage = file_size * nr_coherent # bytes
bandwidth = int(ceil(8 * storage / duration)) # bits/second
nr_parts_per_tab = int(ceil(nr_subbands / float(nr_subbands_per_file))) # thus per tab per stokes
est = {'resource_types': {'storage': storage, 'bandwidth': bandwidth},
'resource_count': nr_coherent_tabs * nr_parts_per_tab,
'root_resource_group': root_resource_group,
'output_files': {'cs': [{'sap_nr': sap_nr, 'identification': sap_idents[sap_nr],
'properties': {'cs_file_size': file_size, 'nr_of_cs_files': nr_coherent,
'nr_of_cs_stokes': nr_coherent, 'nr_of_cs_parts': nr_parts_per_tab}}]}}
if is_tab_nr is not None: # translator to filenames needs to know: it may not have all CS+IS info in one claim
est['output_files']['cs'][0]['properties']['is_tab_nr'] = is_tab_nr
estimates.append(est)
logger.debug("Coherent Stokes data estimates:\n" + pprint.pformat(estimates))
return estimates
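    # Worked example of the TAB bookkeeping above (hypothetical SAP): nr_tab_rings = 2 adds
    # 3*2*(2+1) + 1 = 19 ring TABs, and with 488 subbands at subbands_per_file = 512 each TAB
    # is written as nr_parts_per_tab = ceil(488 / 512.0) = 1 part, so resource_count is
    # nr_coherent_tabs * 1; for 'IQUV' the storage claim per part is 4 * file_size.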
def incoherentstokes(self, parset, duration):
""" Estimate storage size and bandwidth needed for Incoherent Stokes ('is')
data products. Also add SAP properties needed by the propagator.
The duration argument is a float in (fractional) seconds.
Return list of estimates, max 1 SAP per estimate (easier for assigner),
or raise ValueError on error.
"""
logger.info("calculate incoherent stokes data size")
size_of_sample = 4 # single precision float
incoherent_type = parset.getString(COBALT + 'BeamFormer.IncoherentStokes.which')
subbands_per_file = parset.getInt(COBALT + 'BeamFormer.IncoherentStokes.subbandsPerFile', 512)
if subbands_per_file < 0:
raise ValueError('BeamFormer.IncoherentStokes.subbandsPerFile may not be negative, but is %d' % subbands_per_file)
if subbands_per_file == 0:
subbands_per_file = 512
samples_per_second = self._samples_per_second(parset)
time_integration_factor = parset.getInt(COBALT + 'BeamFormer.IncoherentStokes.timeIntegrationFactor')
size_per_subband = (samples_per_second * size_of_sample * duration) / time_integration_factor
nr_incoherent = len(incoherent_type) # 'I' or 'IQUV' ('XXYY' only possible for coherent stokes)
root_resource_group = parset.getString(DATAPRODUCTS + 'Output_IncoherentStokes.storageClusterName')
nr_saps = parset.getInt('Observation.nrBeams')
if nr_saps < 1:
raise ValueError("Incoherent Stokes data output enabled, but nrBeams < 1")
# Estimates may differ per SAP for CS/IS. Decided to always produce a separate estimate per SAP.
# Hence, need to annotate each SAP with the right identifications for pipeline predecessor input filtering.
identifications = parset.getStringVector(DATAPRODUCTS + 'Output_IncoherentStokes.identifications')
sap_idents = self._sap_identifications(identifications, nr_saps)
estimates = []
for sap_nr in range(nr_saps):
logger.info("checking SAP {}".format(sap_nr))
subbandList = parset.getStringVector('Observation.Beam[%d].subbandList' % sap_nr)
nr_subbands = len(subbandList)
if nr_subbands == 0:
raise ValueError("Incoherent Stokes data output enabled, but empty subband list for sap %d" % sap_nr)
nr_subbands_per_file = min(subbands_per_file, nr_subbands)
# Atm can have 1 IS TAB per SAP, because its pointing is equal to the SAP pointing.
# (When we support online coh dedisp and on multiple DMs, we can have >1 IS per SAP.)
nr_incoherent_tabs = 0
nr_tabs = parset.getInt('Observation.Beam[%d].nrTiedArrayBeams' % sap_nr)
for tab_nr in range(nr_tabs):
if parset.getBool("Observation.Beam[%d].TiedArrayBeam[%d].coherent" % (sap_nr, tab_nr)):
continue
if nr_incoherent_tabs > 0:
# Could get here to produce >1 IS TAB copies, maybe for some software test
raise ValueError("SAP %i: >1 incoherent TAB not supported: TAB nrs %i and %i" % (sap_nr, tab_nr, is_tab_nr))
is_tab_nr = tab_nr
nr_incoherent_tabs += 1
logger.info("added %d incoherent tab(s)", nr_incoherent_tabs)
if nr_incoherent_tabs == 0:
raise ValueError("Incoherent Stokes data output enabled, but no incoherent tabs for sap %d" % sap_nr)
# Keep IQUV together (>1 parts still possible).
# Else translator to parset filenames cannot know which stokes (nr_of_XX_stokes property too coarse).
#
# NOTE: If a TAB is split into parts, then the last TAB part may contain fewer subbands.
# Simplify: compute a single (max) file size for all TABs or TAB parts.
file_size = int(nr_subbands_per_file * size_per_subband) # bytes
storage = file_size * nr_incoherent # bytes
bandwidth = int(ceil(8 * storage / duration)) # bits/second
nr_parts_per_tab = int(ceil(nr_subbands / float(nr_subbands_per_file))) # thus per tab per stokes
est = {'resource_types': {'storage': storage, 'bandwidth': bandwidth},
'resource_count': nr_incoherent_tabs * nr_parts_per_tab,
'root_resource_group': root_resource_group,
'output_files': {'is': [{'sap_nr': sap_nr, 'identification': sap_idents[sap_nr],
'properties': {'is_file_size': file_size, 'nr_of_is_files': nr_incoherent,
'nr_of_is_stokes': nr_incoherent, 'is_tab_nr': is_tab_nr}}]}}
estimates.append(est)
logger.debug("Incoherent Stokes data estimates:\n" + pprint.pformat(estimates))
return estimates
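    # Small sanity check (hypothetical values): one incoherent TAB, 'IQUV' and 400 subbands
    # with subbands_per_file = 512 gives nr_parts_per_tab = 1, resource_count = 1, and a
    # storage claim of 4 * file_size bytes for that single part.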
def _samples_per_second(self, parset):
""" set samples per second
"""
samples_160mhz = 155648
samples_200mhz = 196608
sample_clock = parset.getInt('Observation.sampleClock')
samples = samples_160mhz if 160 == sample_clock else samples_200mhz
logger.info("samples per second for {} MHz clock = {}".format(sample_clock, samples))
return samples
def _virtual_stations(self, parset):
""" calculate virtualnumber of stations
"""
stationList = parset.getStringVector('Observation.VirtualInstrument.stationList')
nr_virtual_stations = 0
if parset.getString('Observation.antennaSet') in ('HBA_DUAL', 'HBA_DUAL_INNER'):
for station in stationList:
if 'CS' in station:
nr_virtual_stations += 2
else:
nr_virtual_stations += 1
else:
nr_virtual_stations = len(stationList)
logger.info("number of virtual stations = {}".format(nr_virtual_stations))
return nr_virtual_stations
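    # Example based on the rule above: antennaSet 'HBA_DUAL' with stationList
    # ['CS002', 'CS003', 'RS106'] counts the two core (CS) stations twice, giving
    # nr_virtual_stations = 2 + 2 + 1 = 5; for antenna sets other than HBA_DUAL /
    # HBA_DUAL_INNER the result is simply len(stationList) = 3.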
def _extract_sap_nr(self, identification):
""" Return sap nr as int from identification or None if
no int xxx in '.SAPxxx.' in identification.
"""
for s in identification.split('.'): # Find the SAP number, if present
if 'SAP' not in s:
continue
try:
return int(s[3:])
except:
pass
return None
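    # Doctest-style illustration (identification strings are invented):
    #   _extract_sap_nr('mom.otdb.SAP002.uv.dps') -> 2   (int('SAP002'[3:]) == 2)
    #   _extract_sap_nr('no.sap.in.here')         -> None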
def _sap_identifications(self, identifications, nr_saps):
""" Return list with identifications' identification for sap i at index i,
or '' at index i if no such identification for sap i.
NOTE: identifications should not contain entries for multiple data product types,
otherwise we cannot return a single identification per sap nr.
For output, there must be exactly 1 (non-duplicate) identification string per
data product type (how can you otherwise refer to it unambiguously?),
and per sap (per sap for observations only, but always the case here).
"""
sap_idents = [''] * nr_saps
for ident in identifications:
sap_nr = self._extract_sap_nr(ident)
try:
ident_seen = sap_idents[sap_nr]
except Exception as e: # e.g. sap_nr is None or out of bounds
logger.error("Ignoring observation identification string with no or invalid sap nr: %s", str(e))
continue
if not ident_seen:
sap_idents[sap_nr] = ident
elif ident_seen != ident:
logger.error("Cannot have multiple observation identifications per sap. Dropping %s", ident) # see doc string
return sap_idents
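    # Example (made-up identifications): with nr_saps = 2 and
    # identifications = ['x.SAP000.uv.dps', 'x.SAP001.uv.dps'] the result is
    # ['x.SAP000.uv.dps', 'x.SAP001.uv.dps']; an entry without a valid SAP nr is
    # logged and skipped, leaving '' at that index.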
def stations(self, parset):
""" Estimate required RSPs and RCUs per station.
One or two RSP boards are returned per station depending on antennaset.
        RCUs are encoded as a bitfield, to be able to tell which RCUs are actually needed.
Return list of estimates, or raise ValueError on error.
"""
estimates = []
antennaset = parset.getString('Observation.antennaSet')
stationset = parset.getStringVector('Observation.VirtualInstrument.stationList')
if not stationset:
raise ValueError("Observation.VirtualInstrument.stationList is empty")
rculists = self.asp.get_receiver_units_configuration_per_station(antennaset, stationset)
for station in stationset:
bitfield, count = self._rculist_to_bitfield(rculists[station])
rsps, channelbits = self._required_rsps(station, antennaset, parset)
est = {'resource_types': {'rcu': bitfield},
'resource_count': 1,
'station': station,
'root_resource_group': station}
estimates.append(est)
for rsp in rsps:
root_resource_group = station+rsp
est = {'resource_types': {},
'resource_count': 1,
'station': station,
'root_resource_group': root_resource_group}
est['resource_types']['bandwidth'] = 3000000000
est['resource_types']['rsp'] = channelbits
estimates.append(est)
return estimates
def _rculist_to_bitfield(self, rculist):
"""
Takes list of rcus as returned by Antennasets_parser ['LBL', 'LBH', None, ...] and encodes them as a bitfield.
Each bit represents one rcu, value is 1 if rcu is not None in input list (= is used), 0 otherwise.
Returns String representation of the bitfield and the number of used rcus.
"""
bitfield = ""
count = 0
for rcu in rculist:
if rcu is None:
bitfield = bitfield+"0"
else:
bitfield = bitfield+"1"
count = count + 1
return bitfield, count
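    # Example: _rculist_to_bitfield(['LBL', None, 'LBH', None]) returns ('1010', 2),
    # i.e. one character per RCU, '1' where the RCU is used and '0' where it is not.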
def _required_rsps(self, station, antennaset, parset):
"""
        Takes a station name, the antenna set and the parset.
Returns list with one or both required rsps and number of channelbits,
or raises ValueError on error.
"""
if station.startswith('CS'):
required_rsps = ['RSP0'] # default
if antennaset == 'HBA_ONE':
required_rsps = ['RSP1']
if antennaset in ['HBA_DUAL', 'HBA_DUAL_INNER']:
required_rsps = ['RSP0', 'RSP1']
else:
required_rsps = ['RSP'] # default for non-core stations
nr_saps = parset.getInt('Observation.nrBeams')
if nr_saps < 1:
raise ValueError('Observation.nrBeams must be at least 1, but is %d' % nr_saps)
subBandList = []
for nr in range(nr_saps):
key = 'Observation.Beam['+str(nr)+'].subbandList'
sblist = parset.getStringVector(key)
if not sblist:
raise ValueError("%s is empty" % key)
subBandList.extend(sblist)
nrSubbands = len(subBandList)
nrBitsPerSample = parset.getInt('Observation.nrBitsPerSample')
if nrBitsPerSample != 16 and nrBitsPerSample != 8 and nrBitsPerSample != 4:
raise ValueError('Observation.nrBitsPerSample must be 16, 8, or 4, but is %d' % nrBitsPerSample)
channelbits = nrSubbands * nrBitsPerSample
return required_rsps, channelbits
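    # Example (hypothetical observation): a core station ('CS...') with antennaset
    # 'HBA_DUAL' needs ['RSP0', 'RSP1']; with 244 subbands at 8 bits per sample the
    # returned channelbits is 244 * 8 = 1952.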
| gpl-3.0 | 3,918,938,717,688,254,500 | 53.407871 | 182 | 0.597718 | false |
pnomolos/greatbigcrane | greatbigcrane/project/migrations/0007_auto__add_field_project_project_type__chg_field_project_description.py | 1 | 2050 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Project.project_type'
db.add_column('project_project', 'project_type', self.gf('django.db.models.fields.CharField')(default='buildout', max_length=9), keep_default=False)
# Changing field 'Project.description'
db.alter_column('project_project', 'description', self.gf('django.db.models.fields.TextField')(blank=True))
def backwards(self, orm):
# Deleting field 'Project.project_type'
db.delete_column('project_project', 'project_type')
# Changing field 'Project.description'
db.alter_column('project_project', 'description', self.gf('django.db.models.fields.TextField')())
models = {
'project.project': {
'Meta': {'object_name': 'Project'},
'base_directory': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '512'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'favourite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'git_repo': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project_type': ('django.db.models.fields.CharField', [], {'max_length': '9'}),
'test_status': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
}
}
complete_apps = ['project']
| apache-2.0 | 401,360,477,161,549,800 | 46.674419 | 156 | 0.596585 | false |
jaaimino/dogebook | app.py | 1 | 9782 | from flask import Flask, session, redirect, url_for, escape, request, render_template
from flask.ext.mongoengine import MongoEngine
from pbkdf2 import crypt
from models import *
import logging, datetime, math, os
#SSL for future security maybe?
'''
import ssl
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
context.load_cert_chain('yourserver.crt', 'yourserver.key')
'''
app = Flask(__name__)
app.debug = False
app.config['MONGODB_SETTINGS'] = {
'db': 'dogebook',
'host': '127.0.0.1',
'port': 27017
}
db = MongoEngine(app)
app.logger.setLevel(logging.INFO) # use the native logger of flask
# secret app key. keep this really secret:
app.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT'
#Index route
@app.route('/')
def index():
if 'userid' in session:
return redirect('/posts/0')
return render_template("index.html", data={})
#New post route
@app.route('/newpost', methods=['POST'])
def new_post():
if 'userid' in session and request.method == 'POST':
user = User.objects(id=session['userid']).first()
postContent = request.form['inputText']
post = Post(content=postContent, author=user).save()
return redirect(url_for('index'))
#Add friend by userid route
@app.route('/add_friend/<userid>')
def add_friend(userid=None):
if 'userid' in session:
user = User.objects(id=session['userid']).first()
#print user.friends.__class__.__name__
friend = User.objects(id=userid).first()
if friend not in user.friends:
user.friends.append(friend)
user.save()
return redirect('/profile/'+session["userid"])
return redirect('/profile/'+session["userid"])
#Remove friend by userid route
@app.route('/remove_friend/<userid>')
def remove_friend(userid=None):
if 'userid' in session:
user = User.objects(id=session['userid']).first()
#print user.friends.__class__.__name__
friend = User.objects(id=userid).first()
if friend in user.friends:
user.friends.remove(friend)
user.save()
return redirect(url_for('index'))
return redirect(url_for('index'))
#Friend search route
@app.route('/find_friends', methods=['GET', 'POST'])
def find_friends():
if 'userid' in session:
user = User.objects(id=session['userid']).first()
results = []
if request.method == 'POST':
somename = request.form['inputName']
results = User.objects(name__contains=somename)
return render_template("find_friends.html", data={"user":user, "results":results, "nresults":len(results)})
return redirect(url_for('index'))
#Get a page of posts for your current user
@app.route('/posts/<page>')
def posts_page(page=0):
if 'userid' in session:
page = int(page)
posts_per_page = 10
user = User.objects(id=session['userid']).first()
#print user.friends
#User.objects.get(id='55a51d434c149d1f60daec89') #lookup by id example
#print "Wat?"
current_post = page * posts_per_page
posts_full = Post.objects(db.Q(author__in = user.friends) | db.Q(author = user)).order_by('-datetime')
page_count = int(math.ceil(posts_full.count()/10))
page_count = min(page_count,10)
posts = posts_full.skip(current_post).limit(10)
comment_counts = []
for post in posts:
comment_counts.append(len(post.comments))
next_page = page+1
if next_page > page_count:
next_page = page_count
prev_page = page-1
if prev_page < 0:
prev_page = 0
#print posts
return render_template("feed.html", data={"prev_page": prev_page, "currpage":page, "next_page":next_page, \
"page_count":page_count, "user":user, "posts":posts, "comment_counts":comment_counts})
return redirect(url_for('index'))
#Get a single post by id (And view comments)
@app.route('/post/<postid>')
def post_id(postid=None):
if 'userid' in session:
post = Post.objects(id=postid).first()
user = User.objects(id=session['userid']).first()
comments = post.comments
comments = sorted(comments, key=lambda r: r.datetime, reverse=True)[:15]
return render_template("single_post.html", data={"user":user, "post":post, "comments":comments})
return redirect(url_for('index'))
#Delete a user by id
@app.route('/delete_user/<userid>')
def delete_user_id(userid=None):
if 'userid' in session: #My userid
user = User.objects(id=session['userid']).first()
if user.username == "[email protected]":
user = User.objects(id=session['userid']).first()
targetUser = User.objects(id=userid).first()
posts = Post.objects(author=targetUser)
posts.delete()
targetUser.delete()
return redirect(url_for('index'))
return redirect(url_for('index'))
#Delete a post by id
@app.route('/post/<postid>/delete')
def delete_post_id(postid=None):
if 'userid' in session:
user = User.objects(id=session['userid']).first()
post = Post.objects(id=postid).first()
if(post.author == user): #Actually delete the post here
post.delete()
return redirect(url_for('index')) #Ultimately redirect
return redirect(url_for('index'))
#Add comment to post by id
@app.route('/post/<postid>/add_comment', methods=['POST'])
def comment_post_id(postid=None):
if request.method == 'POST':
if 'userid' in session:
user = User.objects(id=session['userid']).first()
post = Post.objects(id=postid).first()
if user in post.author.friends or post.author == user: #Actually add comment here
print "Adding comment"
comment = Comment(content=request.form['inputText'],author=user).save()
post.comments.append(comment)
post.save()
return redirect('/post/'+str(post.id)) #Ultimately redirect
return redirect(url_for('index'))
#Log in to the app
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
someusername = request.form['inputEmail']
alleged_password = request.form['inputPassword']
user = User.objects(username=someusername).first()
if user != None and user.password == crypt(alleged_password, user.password):
session['userid'] = str(user.id)
return redirect(url_for('index'))
return render_template('login.html', data={"message":"Wrong email or password"})
else:
if 'userid' in session:
return render_template('error.html', data={"error":"You're already logged in..."})
else:
return render_template('login.html', data={})
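# Note on the password check above (informational sketch; the exact hash text depends on the
# pbkdf2 package version): crypt(password) returns a self-describing salted hash such as
# '$p5k2$$salt$...', and crypt(candidate, stored_hash) re-uses the salt embedded in
# stored_hash, so the string comparison only matches when candidate is the original password.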
#Create an account
@app.route('/create_account', methods=['GET', 'POST'])
def create_account():
if request.method == 'POST':
name = request.form['inputName']
someemail = request.form['inputEmail']
pwhash = crypt(request.form['inputPassword'])
count = User.objects(username=someemail).count()
if(count == 0):
user = User(username=someemail, password=pwhash, name=name).save()
session['userid'] = str(user.id)
return redirect(url_for('index'))
else:
return render_template('create_account.html', data={"message":"Sorry, that email is already taken."})
else:
if 'userid' in session:
return render_template('error.html', data={"error":"You're already logged in. Please log out to create a new account."})
else:
return render_template('create_account.html')
#Log out of app
@app.route('/logout')
def logout():
# remove the username from the session if it's there
session.pop('userid', None)
return redirect(url_for('index'))
#Redirect for supporting cool url scheme with convenience wrapped
@app.route('/profile')
def profile():
if 'userid' in session:
return redirect('/profile/'+session['userid'])
return redirect(url_for('index'))
#Go to profile by id
@app.route('/profile/<profileid>')
def profile_id(profileid=None):
if 'userid' in session:
user = User.objects(id=profileid).first()
currentuser = User.objects(id=session["userid"]).first()
userid = str(user.id)
return render_template("profile.html", data={"user":user, "friends":user.friends, "currentuser":currentuser, "userid":userid})
return redirect(url_for('index'))
#Edit profile by id. Only your own :)
@app.route('/profile/<profileid>/edit', methods=['GET', 'POST'])
def edit_profile_id(profileid=None):
if 'userid' in session:
if request.method == 'POST':
if session['userid'] == profileid:
user = User.objects(id=session['userid']).first()
user.update(name=request.form['inputName'], tagline=request.form['inputTagline'],city=request.form['inputCity'],state=request.form['inputState'],bio=request.form['inputBio'])
return redirect('/profile/'+profileid)
else:
print "Hackerrzzz"
else:
user = User.objects(id=session['userid']).first()
return render_template("edit_profile.html", data={"user":user})
else:
return redirect(url_for('index'))
#Handle some errors
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
#Handle some more errors
@app.errorhandler(500)
def internal_server_error(e):
return render_template('500.html'), 404
#Start the app
if (__name__ == '__main__'):
app.run(host=os.getenv('IP', '0.0.0.0'),port=int(os.getenv('PORT', 8080)))
#app.run(host="127.0.0.1", port=8080, ssl_context=context) #No way to do ssl yet | apache-2.0 | -2,140,761,096,780,513,500 | 37.821429 | 190 | 0.627275 | false |
tmorrell/Molframe | inputs/selector.py | 1 | 14370 | import tkSimpleDialog
import tkMessageBox
#import p3d.protein
#import p3d.geo
from pymol.wizard import Wizard
from pymol import cmd, util
from pymol.controlling import mode_dict
class Bond(object):
def __init__(self,bond1,bond2,resid1,resid2):
if bond2 > bond1:
self.bond1=bond1
self.bond2=bond2
self.resid1=resid1
self.resid2=resid2
else:
self.bond1=bond2
self.bond2=bond1
self.resid1=resid2
self.resid2=resid1
self.indexes=[self.bond1,self.bond2]
class selector(Wizard):
def __init__(self,name,chain,resid,resid2,_self=cmd):
Wizard.__init__(self,_self)
self.resid = resid
self.resid2 = resid2
self.name = name
self.chain = chain
self.extend = 1
self.bonds=[]
self.resids=[]
self.indexes=[]
self.load=None
self.lead=0
def get_panel(self):
label = 'No Mutation'
return [
[ 1, 'Select Rotatable Bonds',''],
[ 1, 'for Residue '+ self.resid ,''],
[ 2, 'Pick Bond' , 'cmd.get_wizard().apply()'],
[ 2, 'Rotate View' , 'cmd.get_wizard().rotate()'],
[ 2, 'Show More Bonds' , 'cmd.get_wizard().show()'],
[ 2, 'Pick Rotatable Section' , 'cmd.get_wizard().srot()'],
[ 2, 'Write Bonds' , 'cmd.get_wizard().set_bonds()'],
[ 2, 'Reset Selected Bonds' , 'cmd.get_wizard().reset()'],
[ 2, 'Finished' , 'cmd.get_wizard().clear()'],
]
def srot(self):
cmd.deselect()
#self.pk2_st=None
self.load=1
self.get_prompt()
print "Testing", self.lead
cmd.config_mouse('three_button_editing')
def show(self):
left = str(int(self.resid)-self.extend)
right = str(int(self.resid)+self.extend)
cmd.show('lines','resid '+left+':'+right)
cmd.zoom('resid '+left+':'+right)
self.extend = self.extend+1
#def isbonded(self,bond0,bond1,stems):
# nextres = 0
# for stem in stems:
# if bond0==stem:
# nextres=bond1
# if bond1==stem:
# nextres=bond0
# return nextres
def get_bonds(self,stems,allbonds,rot_bonds=[]):
nextbonds = []
for stem in stems:
print "STEM", stem
for bond in allbonds:
#print bond.index
if stem in bond.index: #save next bond
print bond.index,"matched bond"
for n in bond.index:
if n != stem: #find next atom
if n not in rot_bonds: #if atom is new:
nextbonds.append(n)
#return indexes connected to stem
return nextbonds
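    # Sketch of one expansion step (indexes invented): if allbonds holds bond objects whose
    # .index pairs are [0, 1], [1, 2] and [2, 3], then self.get_bonds([1], allbonds) returns
    # [0, 2], while self.get_bonds([1], allbonds, rot_bonds=[0]) returns [2] because already
    # visited atoms are filtered out.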
def is_in_bonds(self,stem,bonds):
yes = 0
for bond in bonds:
if stem in bond.indexes:
yes = 1
return yes
def is_in_multiple_bonds(self,stem,bonds):
count = 0
for bond in bonds:
if stem in bond.indexes:
count = count + 1
if count == 2:
return True
else:
return False
#def reset_bond(self,known,bonds): #reset bond, if repeated index save repeat
# ret = []
# print "reset_bond"
# print known, "known"
# for rbon in bonds: #for each rot bond
# if known[0] in rbon.indexes:
# if known[1] not in rbon.indexes:
# ret = [known[1]]
# if known[1] in rbon.indexes:
# if known[0] not in rbon.indexes:
# ret = [known[0]]
# return ret
def set_bonds(self):
startingbond=[]
rangev = []
if self.lead==0:
print "Must select rotatable section first"
elif len(self.bonds)==0:
print "Must select at least one rotatable bonds"
else:
mres = min(self.resids)
xres = max(self.resids)
model = cmd.get_model('all') #('resid '+str(self.resid)+':'+str(self.resid2))
allbonds = model.bond
'''
Removed efficiency code to test end residue labeling - will be slow
if mres != xres: #multires case
mind = min(self.indexes)
xind = max(self.indexes)
irange = [mind,xind] #range of indexes we care about for bonding pattern
if self.lead < mind:
irange = [self.lead,xind]
if self.lead > xind:
irange = [mind,self.lead]
limitedset = []
we want to limit allbonds to a limited index range
for efficiency-may be problem if indexes are really screwed up
for b in allbonds:
if b.index[0] in range(irange[0],irange[1]) or \
b.index[1] in range(irange[0],irange[1]):
limitedset.append(b)
allbonds = limitedset
'''
#Remove dummy atom-for bonding only, will still be rotated
dummy = 'ZZ'
reduced = []
for b in allbonds:
d = False
if self.get_atom(b.index[0])[2] == dummy or self.get_atom(b.index[1])[2] == dummy:
d = True
if d == False:
reduced.append(b)
print self.get_atom(b.index[0]),self.get_atom(b.index[1])
#print "DONE"
allbonds = reduced
#start from rotatable selection point and find what atoms are always rotatable
rot_bonds = [self.lead]
print rot_bonds,"LEAD"
print self.bonds
for b in allbonds:
print b.index
stems = self.get_bonds(rot_bonds,allbonds,rot_bonds)
nextstep=[]
while len(stems) != 0: #while a bond remains
next_stem = set() #Internal
for s in stems: #check if at rotation
if self.is_in_bonds(s,self.bonds):
if len(nextstep) == 0:
nextstep.append(s) #don't move beyond rotation
rot_bonds.append(s)
next_stem.add(s)
#No else - We discard any other rotatable bonds - deal with later
else:
rot_bonds.append(s)
next_stem.add(s)
stems = self.get_bonds(next_stem,allbonds,rot_bonds)
outstring = "!Rotation of dye\n"
lenv = len(self.bonds)
outstring = outstring + '!NROT '+str(lenv)+'\n'
outstring = outstring + 'cons fix sele dbackbone .or. .not. '+\
'(resid @res .and. segid @chain) end\n\n'
#now we look along rest of chain
botbonds = []
count = 0
excluded = rot_bonds #We don't want to select rotatable bonds
stems = self.get_bonds(nextstep,allbonds,excluded)
bond=nextstep #This is a rotatable object
while len(stems) != 0:
excluded=excluded+stems#don't go to a stem two times
for stem in stems:
if self.is_in_bonds(stem,self.bonds): #only care about bonds
if len(bond)==0: #we have a new end of a bond
bond.append(stem)
elif stem != bond[0]:#We have second half of new bond
new_bond = stem
bond.append(new_bond)
count = count + 1
#We need to tease out other rotatable atoms from those in stems
for stem in stems:
if self.is_in_bonds(stem,self.bonds) == False:
#Just looking at other stems-none of these
# have rotatable elements
botbonds = botbonds+[stem]
nexts = list(set(self.get_bonds([stem],allbonds,excluded)))
while len(nexts) != 0:
botbonds = botbonds+nexts
excluded = excluded+nexts #don't go to stem two times
nexts = list(set(self.get_bonds(nexts,allbonds,excluded)))
#Now write output for rotation
outstring = outstring + 'label loop'+str(count)+'\n'
outstring = outstring + self.rotate_axis(bond[0],bond[1])
outstring = outstring + self.rotate_sel(120,botbonds)
outstring = outstring + 'incr '+str(count)+' by '+str(count)+'\n'
outstring = outstring + 'goto mini \n \n'
#We check if the new_bond atom is shared
#The old atom is discarded because we don't go backwards
if self.is_in_multiple_bonds(new_bond,self.bonds):
bond = [new_bond]
else:
bond = []
botbonds=botbonds+stems
stems = list(set(self.get_bonds(stems,allbonds,excluded)))
outfile = open('../../inputs/'+self.name+'_rot.str','w')
outfile.write(outstring)
#write .str file
stream = '!The atoms that are the end of the dye\n'
stream = stream + "define dyefix sele .NOT. ( "
for bindex in botbonds:
atom = self.get_atom(bindex)
stream = stream + " chain "+atom[0]+" .and. resi " + atom[1]+" .and. name "+atom[2]+ " .OR. "
stream = stream + ' ) end\n'
outfile = open('../../inputs/'+self.name+'.str','w')
outfile.write(stream)
print "All files written for ",self.name
def get_atom(self,index):
cmd.select("_p","index "+str(index+1))#convert from internal back to
#label numbering
cmd.iterate("_p","setattr(cmd.get_wizard(),'pk_at',""name)")
cmd.iterate("_p","setattr(cmd.get_wizard(),'pk_ac',""chain)")
cmd.iterate("_p","setattr(cmd.get_wizard(),'pk_ar',""resi)")
return [str(self.pk_ac),str(self.pk_ar),str(self.pk_at)]
def rotate_axis(self,index1,index2):#print axis output
atom1=self.get_atom(index1)
atom2=self.get_atom(index2)
return "coor axis sele atom "+atom1[0]+' '+atom1[1]+' '+atom1[2]+\
" end sele atom "+atom2[0]+' '+atom2[1]+' '+atom2[2]+" end \n"
def rotate_sel(self,angle,flexbonds):#print selection output
outstring = 'coor rota axis PHI '+str(angle)+' sele dyefix '
atoms = []
print "rotate_sel", flexbonds
for index in flexbonds:
cmd.select("_p","index "+str(index+1))#convert from internal back
#to label numbering
cmd.iterate("_p","setattr(cmd.get_wizard(),'pk_at',""name)")
cmd.iterate("_p","setattr(cmd.get_wizard(),'pk_ac',""chain)")
cmd.iterate("_p","setattr(cmd.get_wizard(),'pk_ar',""resi)")
atoms.append([str(self.pk_at),str(self.pk_ac),str(self.pk_ar)])
for atom in atoms: #set(atoms): #ensure every atom is only included once
outstring = outstring + ' .or. '
outstring = outstring+'atom '+atom[1]+' '+atom[2]+' '+atom[0]
return outstring+' end \n'
def do_select(self,selection):
cmd.deselect()
def rotate(self):
mode_dict['three_button_viewing'] = [ ('l','none','rota')]
cmd.config_mouse('three_button_viewing')
def reset(self):
#cmd.color("atomic")
#cmd.set_bond("line_color","atomic","all")
#util.cbag("all")
self.bonds=[]
cmd.set_bond("line_color","green","all")
def apply(self):
mode_dict['three_button_viewing'] = [ ('l','none','PkTB')]
cmd.config_mouse('three_button_viewing')
print "Apply"
def clear(self):
cmd.quit()
def get_prompt(self):
if self.load!=None:
return ["Please pick the atom in the direction of the section you want to rotate"]
if self.pk2_st!=None:
return ["You picked the bond between %s and %s"%(self.pk1_st, self.pk2_st)]
else:
return ["Please pick an atom or a bond..."]
def do_pick(self,picked_bond):
cmd.iterate("pk1","setattr(cmd.get_wizard(),'pk1_st',""'%s/%s/%s/%s/%s/%s'%(model,segi,chain,resi,name,index))")
print "Picking Loop"
if picked_bond:
cmd.iterate("pk2","setattr(cmd.get_wizard(),'pk2_st',""'%s/%s/%s/%s/%s/%s'%(model,segi,chain,resi,name,index))")
cmd.set_bond("line_color","orange","pk1","pk2")
print [self.pk1_st,self.pk2_st],'bond'
self.resids.append(int(self.pk1_st.split('/')[3])-1)
self.resids.append(int(self.pk2_st.split('/')[3])-1)
self.indexes.append(int(self.pk1_st.split('/')[5])-1)
self.indexes.append(int(self.pk2_st.split('/')[5])-1)
self.bonds.append(Bond(int(self.pk1_st.split('/')[5])-1,int(self.pk2_st.split('/')[5])-1,int(self.pk1_st.split('/')[3])-1,int(self.pk2_st.split('/')[3])-1))
# -1 converts to 0 start index, which is used for bonds - This will be one off from labels in pymol
cmd.unpick()
else:
# for single atom, also get 3D coordinates (EXAMPLE)
print "Single Atom"
self.load=None
cmd.iterate("pk1","setattr(cmd.get_wizard(),'pk1_r',""index)")
self.lead=self.pk1_r-1 #Converting to 0 start index, which is used for bonds
#This will be one off from labels in pymol
cmd.iterate_state(cmd.get_state(),"pk1","setattr(cmd.get_wizard(),'pk1_xyz',(x,y,z))")
#cmd.unpick()
cmd.refresh_wizard()
| gpl-2.0 | -8,294,040,477,171,383,000 | 40.412104 | 168 | 0.499304 | false |
iLoop2/ResInsight | ThirdParty/Ert/devel/python/python/ert/__init__.py | 1 | 3998 | # Copyright (C) 2011 Statoil ASA, Norway.
#
# The file '__init__.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
"""
ert - Ensemble Reservoir Tool - a package for reservoir modeling.
The ert package itself has no code, but contains several subpackages:
ert.ecl: Package for working with ECLIPSE files. By far the most mature
package in ert.
ert.job_queue:
ert.util:
The ert package is based on wrapping the libraries from the ERT C
code with ctypes; an essential part of the ctypes approach is to load the
shared libraries with the ctypes.CDLL() function. The ctypes.CDLL()
function uses the standard methods of the operating system,
i.e. standard locations configured with ld.so.conf and the environment
variable LD_LIBRARY_PATH.
To avoid conflicts with other applications using the ert libraries the
Python code should be able to locate the shared libraries without
(necessarily) using the LD_LIBRARY_PATH variable. The default
behaviour is to try to load from the directory ../../lib64, but by using
the environment variable ERT_LIBRARY_PATH you can alter how ert looks
for shared libraries. This module will set the ert_lib_path of the
ert.cwrap.clib module; the actual loading will take place in that
module.
1. By default the code will try to load the shared libraries from
'../../lib64' relative to the location of this file.
2. Depending on the value of ERT_LIBRARY_PATH two different
behaviours can be imposed:
Existing path: the package will look in the path pointed to
by ERT_LIBRARY_PATH for shared libraries.
Arbitrary value: the package will use standard load order for
the operating system.
If the fixed path, given by the default ../../lib64 or ERT_LIBRARY_PATH
alternative fails, the loader will try the default load behaviour
before giving up completely.
"""
import os.path
import cwrap.clib
import sys
import warnings
try:
    import ert_site_init
except ImportError:
    pass
required_version_hex = 0x02060000
# 1. Start by initialing the ert_lib_path variable to None
ert_lib_path = None
# 2. Try to load the __ert_lib_path module; this module has been
# configured by cmake during the build configuration process. The
# module should contain the variable lib_path pointing to the
# directory with shared object files.
try:
    import __ert_lib_path
    ert_lib_path = __ert_lib_path.lib_path
except ImportError:
    pass
# 3. Using the environment variable ERT_LIBRARY_PATH it is possible to
# override the default algorithms. If the ERT_LIBRARY_PATH is set
#    to a non-existent directory a warning will go to stderr and the
# setting will be ignored.
env_lib_path = os.getenv("ERT_LIBRARY_PATH")
if env_lib_path:
    if os.path.isdir( env_lib_path ):
        ert_lib_path = os.getenv("ERT_LIBRARY_PATH")
    else:
        sys.stderr.write("Warning: Environment variable ERT_LIBRARY_PATH points to non-existent directory: %s - ignored" % env_lib_path)
# Check that the final ert_lib_path setting corresponds to an existing
# directory.
if ert_lib_path:
    if not os.path.exists( ert_lib_path ):
        ert_lib_path = None
# Set the module variable ert_lib_path of the ert.cwrap.clib module;
# this is where the actual loading will be performed.
cwrap.clib.ert_lib_path = ert_lib_path
if sys.hexversion < required_version_hex:
    raise Exception("ERT Python requires at least version 2.6 of Python")
| gpl-3.0 | -8,916,155,810,471,547,000 | 33.17094 | 134 | 0.731616 | false |
pgmillon/ansible | lib/ansible/modules/cloud/docker/docker_secret.py | 1 | 9011 | #!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: docker_secret
short_description: Manage docker secrets.
version_added: "2.4"
description:
- Create and remove Docker secrets in a Swarm environment. Similar to C(docker secret create) and C(docker secret rm).
- Adds to the metadata of new secrets 'ansible_key', an encrypted hash representation of the data, which is then used
in future runs to test if a secret has changed. If 'ansible_key is not present, then a secret will not be updated
unless the C(force) option is set.
- Updates to secrets are performed by removing the secret and creating it again.
options:
data:
description:
- The value of the secret. Required when state is C(present).
type: str
data_is_b64:
description:
- If set to C(true), the data is assumed to be Base64 encoded and will be
decoded before being used.
- To use binary C(data), it is better to keep it Base64 encoded and let it
be decoded by this option.
type: bool
default: no
version_added: "2.8"
labels:
description:
- "A map of key:value meta data, where both the I(key) and I(value) are expected to be a string."
- If new meta data is provided, or existing meta data is modified, the secret will be updated by removing it and creating it again.
type: dict
force:
description:
- Use with state C(present) to always remove and recreate an existing secret.
- If I(true), an existing secret will be replaced, even if it has not changed.
type: bool
default: no
name:
description:
- The name of the secret.
type: str
required: yes
state:
description:
- Set to C(present), if the secret should exist, and C(absent), if it should not.
type: str
default: present
choices:
- absent
- present
extends_documentation_fragment:
- docker
- docker.docker_py_2_documentation
requirements:
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.1.0"
- "Docker API >= 1.25"
author:
- Chris Houseknecht (@chouseknecht)
'''
EXAMPLES = '''
- name: Create secret foo (from a file on the control machine)
docker_secret:
name: foo
# If the file is JSON or binary, Ansible might modify it (because
# it is first decoded and later re-encoded). Base64-encoding the
# file directly after reading it prevents this to happen.
data: "{{ lookup('file', '/path/to/secret/file') | b64encode }}"
data_is_b64: true
state: present
- name: Change the secret data
docker_secret:
name: foo
data: Goodnight everyone!
labels:
bar: baz
one: '1'
state: present
- name: Add a new label
docker_secret:
name: foo
data: Goodnight everyone!
labels:
bar: baz
one: '1'
# Adding a new label will cause a remove/create of the secret
two: '2'
state: present
- name: No change
docker_secret:
name: foo
data: Goodnight everyone!
labels:
bar: baz
one: '1'
# Even though 'two' is missing, there is no change to the existing secret
state: present
- name: Update an existing label
docker_secret:
name: foo
data: Goodnight everyone!
labels:
bar: monkey # Changing a label will cause a remove/create of the secret
one: '1'
state: present
- name: Force the removal/creation of the secret
docker_secret:
name: foo
data: Goodnight everyone!
force: yes
state: present
- name: Remove secret foo
docker_secret:
name: foo
state: absent
'''
RETURN = '''
secret_id:
description:
- The ID assigned by Docker to the secret object.
returned: success and C(state == "present")
type: str
sample: 'hzehrmyjigmcp2gb6nlhmjqcv'
'''
import base64
import hashlib
import traceback
try:
from docker.errors import DockerException, APIError
except ImportError:
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
pass
from ansible.module_utils.docker.common import AnsibleDockerClient, DockerBaseClass, compare_generic
from ansible.module_utils._text import to_native, to_bytes
class SecretManager(DockerBaseClass):
def __init__(self, client, results):
super(SecretManager, self).__init__()
self.client = client
self.results = results
self.check_mode = self.client.check_mode
parameters = self.client.module.params
self.name = parameters.get('name')
self.state = parameters.get('state')
self.data = parameters.get('data')
if self.data is not None:
if parameters.get('data_is_b64'):
self.data = base64.b64decode(self.data)
else:
self.data = to_bytes(self.data)
self.labels = parameters.get('labels')
self.force = parameters.get('force')
self.data_key = None
def __call__(self):
if self.state == 'present':
self.data_key = hashlib.sha224(self.data).hexdigest()
self.present()
elif self.state == 'absent':
self.absent()
def get_secret(self):
''' Find an existing secret. '''
try:
secrets = self.client.secrets(filters={'name': self.name})
except APIError as exc:
self.client.fail("Error accessing secret %s: %s" % (self.name, to_native(exc)))
for secret in secrets:
if secret['Spec']['Name'] == self.name:
return secret
return None
def create_secret(self):
''' Create a new secret '''
secret_id = None
# We can't see the data after creation, so adding a label we can use for idempotency check
labels = {
'ansible_key': self.data_key
}
if self.labels:
labels.update(self.labels)
try:
if not self.check_mode:
secret_id = self.client.create_secret(self.name, self.data, labels=labels)
except APIError as exc:
self.client.fail("Error creating secret: %s" % to_native(exc))
if isinstance(secret_id, dict):
secret_id = secret_id['ID']
return secret_id
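    # Idempotency sketch (informational): Docker never returns secret payloads, so instead of
    # reading the data back, present() compares the 'ansible_key' label written here, e.g.
    # hashlib.sha224(b'my data').hexdigest(), against the hash of the requested data and only
    # recreates the secret when they differ (or when labels changed / force is set).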
def present(self):
''' Handles state == 'present', creating or updating the secret '''
secret = self.get_secret()
if secret:
self.results['secret_id'] = secret['ID']
data_changed = False
attrs = secret.get('Spec', {})
if attrs.get('Labels', {}).get('ansible_key'):
if attrs['Labels']['ansible_key'] != self.data_key:
data_changed = True
labels_changed = not compare_generic(self.labels, attrs.get('Labels'), 'allow_more_present', 'dict')
if data_changed or labels_changed or self.force:
# if something changed or force, delete and re-create the secret
self.absent()
secret_id = self.create_secret()
self.results['changed'] = True
self.results['secret_id'] = secret_id
else:
self.results['changed'] = True
self.results['secret_id'] = self.create_secret()
def absent(self):
''' Handles state == 'absent', removing the secret '''
secret = self.get_secret()
if secret:
try:
if not self.check_mode:
self.client.remove_secret(secret['ID'])
except APIError as exc:
self.client.fail("Error removing secret %s: %s" % (self.name, to_native(exc)))
self.results['changed'] = True
def main():
argument_spec = dict(
name=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['absent', 'present']),
data=dict(type='str', no_log=True),
data_is_b64=dict(type='bool', default=False),
labels=dict(type='dict'),
force=dict(type='bool', default=False)
)
required_if = [
('state', 'present', ['data'])
]
client = AnsibleDockerClient(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=required_if,
min_docker_version='2.1.0',
min_docker_api_version='1.25',
)
try:
results = dict(
changed=False,
secret_id=''
)
SecretManager(client, results)()
client.module.exit_json(**results)
except DockerException as e:
client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
| gpl-3.0 | 1,511,846,390,148,133,000 | 29.238255 | 137 | 0.609255 | false |
scanny/python-pptx | tests/oxml/test___init__.py | 1 | 3204 | # encoding: utf-8
"""
Test suite for pptx.oxml.__init__.py module, primarily XML parser-related.
"""
from __future__ import print_function, unicode_literals
import pytest
from lxml import etree
from pptx.oxml import oxml_parser, parse_xml, register_element_cls
from pptx.oxml.ns import qn
from pptx.oxml.xmlchemy import BaseOxmlElement
from ..unitutil.mock import function_mock, loose_mock, var_mock
class DescribeOxmlParser(object):
def it_strips_whitespace_between_elements(self, foo, stripped_xml_bytes):
xml_bytes = etree.tostring(foo)
assert xml_bytes == stripped_xml_bytes
class DescribeParseXml(object):
def it_uses_oxml_configured_parser_to_parse_xml(
self, mock_xml_bytes, fromstring, mock_oxml_parser
):
element = parse_xml(mock_xml_bytes)
fromstring.assert_called_once_with(mock_xml_bytes, mock_oxml_parser)
assert element is fromstring.return_value
def it_prefers_to_parse_bytes(self, xml_bytes):
parse_xml(xml_bytes)
def but_accepts_unicode_providing_there_is_no_encoding_declaration(self):
non_enc_decl = '<?xml version="1.0" standalone="yes"?>'
enc_decl = '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
xml_body = "<foo><bar>føøbår</bar></foo>"
# unicode body by itself doesn't raise
parse_xml(xml_body)
# adding XML decl without encoding attr doesn't raise either
xml_text = "%s\n%s" % (non_enc_decl, xml_body)
parse_xml(xml_text)
# but adding encoding in the declaration raises ValueError
xml_text = "%s\n%s" % (enc_decl, xml_body)
with pytest.raises(ValueError):
parse_xml(xml_text)
class DescribeRegisterCustomElementClass(object):
def it_determines_cust_elm_class_constructed_for_specified_tag(self, xml_bytes):
register_element_cls("a:foo", CustElmCls)
foo = etree.fromstring(xml_bytes, oxml_parser)
assert type(foo) is CustElmCls
assert type(foo.find(qn("a:bar"))) is etree._Element
# ===========================================================================
# fixtures
# ===========================================================================
class CustElmCls(BaseOxmlElement):
pass
@pytest.fixture
def foo(xml_bytes):
return etree.fromstring(xml_bytes, oxml_parser)
@pytest.fixture
def fromstring(request):
return function_mock(request, "pptx.oxml.etree.fromstring")
@pytest.fixture
def mock_oxml_parser(request):
return var_mock(request, "pptx.oxml.oxml_parser")
@pytest.fixture
def mock_xml_bytes(request):
return loose_mock(request, "xml_bytes")
@pytest.fixture
def stripped_xml_bytes():
return (
'<a:foo xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/ma'
'in"><a:bar>foobar</a:bar></a:foo>'
).encode("utf-8")
@pytest.fixture
def xml_bytes(xml_text):
return xml_text.encode("utf-8")
@pytest.fixture
def xml_text():
return (
'<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n'
'<a:foo xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/ma'
'in">\n'
" <a:bar>foobar</a:bar>\n"
"</a:foo>\n"
)
| mit | 4,207,162,620,822,190,600 | 28.1 | 84 | 0.634802 | false |
mrocklin/termpy | termpy/unification.py | 1 | 2669 | from functools import partial
from util import transitive_get as walk
from util import assoc
from variable import Var, var, isvar
import itertools as it
from ground import new, op, args, isleaf
################
# Reificiation #
################
def reify_generator(t, s):
return it.imap(partial(reify, s=s), t)
def reify_tuple(*args):
return tuple(reify_generator(*args))
def reify_list(*args):
return list(reify_generator(*args))
def reify_dict(d, s):
return dict((k, reify(v, s)) for k, v in d.items())
reify_dispatch = {
tuple: reify_tuple,
list: reify_list,
dict: reify_dict,
}
reify_isinstance_list = []
def reify(e, s):
""" Replace variables of expression with substitution
>>> from termpy.unification import reify, var
>>> x, y = var(), var()
>>> e = (1, x, (3, y))
>>> s = {x: 2, y: 4}
>>> reify(e, s)
(1, 2, (3, 4))
>>> e = {1: x, 3: (y, 5)}
>>> reify(e, s)
{1: 2, 3: (4, 5)}
"""
if isvar(e):
return reify(s[e], s) if e in s else e
elif type(e) in reify_dispatch:
return reify_dispatch[type(e)](e, s)
elif not isleaf(e):
new_op = reify(op(e), s)
new_args = reify(args(e), s)
return new(new_op, new_args)
else:
return e
###############
# Unification #
###############
def unify_seq(u, v, s):
if len(u) != len(v):
return False
for uu, vv in zip(u, v): # avoiding recursion
s = unify(uu, vv, s)
if s is False:
return False
return s
def unify_dict(u, v, s):
if len(u) != len(v):
return False
for key, uval in u.iteritems():
if key not in v:
return False
s = unify(uval, v[key], s)
if s is False:
return False
return s
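# A hedged doctest-style example for the dict case (mirrors the unify() docstring below):
#   >>> x = var('x')
#   >>> unify_dict({1: x, 3: 5}, {1: 2, 3: 5}, {})
#   {~x: 2}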
unify_dispatch = {
(tuple, tuple): unify_seq,
(list, list): unify_seq,
(dict, dict): unify_dict,
}
def unify(u, v, s): # no check at the moment
""" Find substitution so that u == v while satisfying s
>>> from termpy.unification import unify, var
>>> x = var('x')
>>> unify((1, x), (1, 2), {})
{~x: 2}
"""
u = walk(u, s)
v = walk(v, s)
if u == v:
return s
elif isvar(u):
return assoc(s, u, v)
elif isvar(v):
return assoc(s, v, u)
types = (type(u), type(v))
if types in unify_dispatch:
return unify_dispatch[types](u, v, s)
elif not isleaf(u) and not isleaf(v):
s = unify(op(u), op(v), s)
if s is False:
return s
else:
return unify(args(u), args(v), s)
else:
return False
| bsd-3-clause | -958,728,437,775,557,600 | 22.619469 | 59 | 0.516298 | false |
opennode/nodeconductor-assembly-waldur | src/waldur_core/monitoring/models.py | 1 | 1936 | from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.translation import ugettext_lazy as _
from waldur_core.core.models import NameMixin
from waldur_core.monitoring.managers import (
ResourceItemManager,
ResourceSlaManager,
ResourceSlaStateTransitionManager,
)
class ScopeMixin(models.Model):
content_type = models.ForeignKey(on_delete=models.CASCADE, to=ContentType)
object_id = models.PositiveIntegerField()
scope = GenericForeignKey('content_type', 'object_id')
class Meta:
abstract = True
class ResourceItem(NameMixin, ScopeMixin):
value = models.FloatField()
objects = ResourceItemManager()
class Meta:
unique_together = ('name', 'content_type', 'object_id')
class ResourceSla(ScopeMixin):
period = models.CharField(max_length=10)
value = models.DecimalField(max_digits=11, decimal_places=4, null=True, blank=True)
agreed_value = models.DecimalField(
max_digits=11, decimal_places=4, null=True, blank=True
)
objects = ResourceSlaManager()
class Meta:
unique_together = ('period', 'content_type', 'object_id')
class ResourceSlaStateTransition(ScopeMixin):
period = models.CharField(max_length=10)
timestamp = models.IntegerField()
state = models.BooleanField(
default=False, help_text=_('If state is True resource became available')
)
objects = ResourceSlaStateTransitionManager()
class Meta:
unique_together = ('timestamp', 'period', 'content_type', 'object_id')
class MonitoringModelMixin(models.Model):
class Meta:
abstract = True
sla_items = GenericRelation('monitoring.ResourceSla')
monitoring_items = GenericRelation('monitoring.ResourceItem')
state_items = GenericRelation('monitoring.ResourceSlaStateTransition')
| mit | -6,042,245,834,683,955,000 | 30.737705 | 87 | 0.728822 | false |
LordFlashmeow/Collatz-Conjecture | Length_Highest_Manipulation.py | 1 | 2253 | def highest(start, stop):
begin = start
dict_max = {}
while begin <= stop:
current = set()
number = begin
if begin == 1:
number = 2
while number >= 1:
if number == 1:
max_num = int(max(current))
break
elif number % 2 == 0:
number /= 2
current.add(number)
else:
number = (number * 3) + 1
current.add(int(number))
if begin == 1:
dict_max[1] = 0
else:
dict_max[begin] = max_num
begin += 1
return dict_max
def longest(start, stop):
begin = start
dict_length = {1: 0}
while begin <= stop:
number = begin
numbers = set()
while number > 1:
if number % 2 == 0:
number /= 2
numbers.add(int(number))
else:
number = (number * 3) + 1
numbers.add(int(number))
dict_length[begin] = len(numbers)
begin += 1
return dict_length
def combined(start, stop):
dict_length = longest(start, stop)
dict_max = highest(start, stop)
final_dict = {}
for key in (dict_length.keys() | dict_max.keys()):
if key in dict_length: final_dict.setdefault(key, []).append(dict_length[key])
if key in dict_max: final_dict.setdefault(key, []).append(dict_max[key])
return final_dict
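# Small worked example (checked by hand against the three functions above):
# combined(1, 3) -> {1: [0, 0], 2: [1, 1], 3: [7, 16]}
# i.e. for each starting number the list holds [chain length, highest value reached],
# with 1 special-cased to 0 in highest().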
start_num = int(input("Enter the number to start at "))
stop_num = int(input("Enter the number to end at "))
my_list = combined(start_num, stop_num)
export_yn = input("Do you want to export the set of values? (y/n) ")
if export_yn == "y":
filename = input("Enter the name of the file to save to: ") + ".csv"
# with open(filename, 'w') as file:
# [file.write('{0},{1}\n'.format(key, value)) for key, value in my_list.items()]
    # Iterate in sorted key order so each row's starting number matches its [length, highest] pair
    with open(filename, "a") as file:
        for begin, (length, high) in sorted(my_list.items()):
            file.write("{0},{1},{2}\n".format(begin, length, high))
| gpl-3.0 | -3,609,880,505,250,593,300 | 26.814815 | 87 | 0.509987 | false |
orashi/PaintsPytorch | dev_train.py | 1 | 13516 | import argparse
import os
import random
from math import log10
import scipy.stats as stats
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torchvision.utils as vutils
from tensorboardX import SummaryWriter
from torch.autograd import Variable, grad
from models.dev_model import *
from data.nvData import CreateDataLoader
parser = argparse.ArgumentParser()
parser.add_argument('--datarootC', required=True, help='path to colored dataset')
parser.add_argument('--datarootS', required=True, help='path to sketch dataset')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
parser.add_argument('--batchSize', type=int, default=16, help='input batch size')
parser.add_argument('--imageSize', type=int, default=256, help='the height / width of the input image to network')
parser.add_argument('--cut', type=int, default=1, help='cut backup frequency')
parser.add_argument('--niter', type=int, default=700, help='number of epochs to train for')
parser.add_argument('--ngf', type=int, default=64)
parser.add_argument('--ndf', type=int, default=64)
parser.add_argument('--lrG', type=float, default=0.0001, help='learning rate, default=0.0001')
parser.add_argument('--lrD', type=float, default=0.0001, help='learning rate, default=0.0001')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--netG', default='', help="path to netG (to continue training)")
parser.add_argument('--netD', default='', help="path to netD (to continue training)")
parser.add_argument('--optim', action='store_true', help='load optimizer\'s checkpoint')
parser.add_argument('--outf', default='.', help='folder to output images and model checkpoints')
parser.add_argument('--Diters', type=int, default=1, help='number of D iters per each G iter')
parser.add_argument('--manualSeed', type=int, default=2345, help='random seed to use. Default=1234')
parser.add_argument('--baseGeni', type=int, default=2500, help='start base of pure pair L1 loss')
parser.add_argument('--geni', type=int, default=0, help='continue gen image num')
parser.add_argument('--epoi', type=int, default=0, help='continue epoch num')
parser.add_argument('--env', type=str, default=None, help='tensorboard env')
parser.add_argument('--advW', type=float, default=0.0001, help='adversarial weight, default=0.0001')
parser.add_argument('--gpW', type=float, default=10, help='gradient penalty weight')
opt = parser.parse_args()
print(opt)
####### regular set up
if torch.cuda.is_available() and not opt.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
gen_iterations = opt.geni
try:
os.makedirs(opt.outf)
except OSError:
pass
# random seed setup # !!!!!
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
if opt.cuda:
torch.cuda.manual_seed(opt.manualSeed)
cudnn.benchmark = True
####### regular set up end
writer = SummaryWriter(log_dir=opt.env, comment='this is great')
dataloader = CreateDataLoader(opt)
netG = def_netG(ngf=opt.ngf)
if opt.netG != '':
netG.load_state_dict(torch.load(opt.netG))
print(netG)
netD = def_netD(ndf=opt.ndf)
if opt.netD != '':
netD.load_state_dict(torch.load(opt.netD))
print(netD)
netF = def_netF()
print(netF)
criterion_L1 = nn.L1Loss()
criterion_L2 = nn.MSELoss()
one = torch.FloatTensor([1])
mone = one * -1
fixed_sketch = torch.FloatTensor()
fixed_hint = torch.FloatTensor()
saber = torch.FloatTensor([0.485 - 0.5, 0.456 - 0.5, 0.406 - 0.5]).view(1, 3, 1, 1)
diver = torch.FloatTensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)
if opt.cuda:
netD.cuda()
netG.cuda()
netF.cuda()
fixed_sketch, fixed_hint = fixed_sketch.cuda(), fixed_hint.cuda()
saber, diver = saber.cuda(), diver.cuda()
criterion_L1.cuda()
criterion_L2.cuda()
one, mone = one.cuda(), mone.cuda()
# setup optimizer
optimizerG = optim.Adam(netG.parameters(), lr=opt.lrG, betas=(opt.beta1, 0.9))
optimizerD = optim.Adam(netD.parameters(), lr=opt.lrD, betas=(opt.beta1, 0.9))
if opt.optim:
optimizerG.load_state_dict(torch.load('%s/optimG_checkpoint.pth' % opt.outf))
optimizerD.load_state_dict(torch.load('%s/optimD_checkpoint.pth' % opt.outf))
# schedulerG = lr_scheduler.ReduceLROnPlateau(optimizerG, mode='max', verbose=True, min_lr=0.0000005,
# patience=8) # 1.5*10^5 iter
# schedulerD = lr_scheduler.ReduceLROnPlateau(optimizerD, mode='max', verbose=True, min_lr=0.0000005,
# patience=8) # 1.5*10^5 iter
# schedulerG = lr_scheduler.MultiStepLR(optimizerG, milestones=[60, 120], gamma=0.1) # 1.5*10^5 iter
# schedulerD = lr_scheduler.MultiStepLR(optimizerD, milestones=[60, 120], gamma=0.1)
def calc_gradient_penalty(netD, real_data, fake_data):
# print "real_data: ", real_data.size(), fake_data.size()
alpha = torch.rand(opt.batchSize, 1, 1, 1)
# alpha = alpha.expand(opt.batchSize, real_data.nelement() / opt.batchSize).contiguous().view(opt.batchSize, 3, 64,
# 64)
alpha = alpha.cuda() if opt.cuda else alpha
interpolates = alpha * real_data + ((1 - alpha) * fake_data)
if opt.cuda:
interpolates = interpolates.cuda()
interpolates = Variable(interpolates, requires_grad=True)
disc_interpolates = netD(interpolates)
gradients = grad(outputs=disc_interpolates, inputs=interpolates,
grad_outputs=torch.ones(disc_interpolates.size()).cuda() if opt.cuda else torch.ones(
disc_interpolates.size()),
create_graph=True, retain_graph=True, only_inputs=True)[0]
gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * opt.gpW
return gradient_penalty
flag = 1
lower, upper = 0, 1
mu, sigma = 1, 0.01
maskS = opt.imageSize // 4
X = stats.truncnorm(
(lower - mu) / sigma, (upper - mu) / sigma, loc=mu, scale=sigma)
for epoch in range(opt.niter):
data_iter = iter(dataloader)
i = 0
while i < len(dataloader):
############################
# (1) Update D network
###########################
for p in netD.parameters(): # reset requires_grad
p.requires_grad = True # they are set to False below in netG update
for p in netG.parameters():
p.requires_grad = False # to avoid computation
# train the discriminator Diters times
Diters = opt.Diters
if gen_iterations < opt.baseGeni: # L1 stage
Diters = 0
j = 0
while j < Diters and i < len(dataloader):
j += 1
netD.zero_grad()
data = data_iter.next()
real_cim, real_vim, real_sim = data
i += 1
###############################
if opt.cuda:
real_cim, real_vim, real_sim = real_cim.cuda(), real_vim.cuda(), real_sim.cuda()
mask = torch.cat([torch.rand(1, 1, maskS, maskS).ge(X.rvs(1)[0]).float() for _ in range(opt.batchSize)],
0).cuda()
hint = torch.cat((real_vim * mask, mask), 1)
# train with fake
fake_cim = netG(Variable(real_sim, volatile=True), Variable(hint, volatile=True)).data
errD_fake = netD(Variable(torch.cat((fake_cim, real_sim), 1))).mean(0).view(1)
            errD_fake.backward(one, retain_graph=True)  # backward on score of the fake pair
errD_real = netD(Variable(torch.cat((real_cim, real_sim), 1))).mean(0).view(1)
errD_real.backward(mone, retain_graph=True) # backward on score on real
errD = errD_real - errD_fake
# gradient penalty
gradient_penalty = calc_gradient_penalty(netD, torch.cat([real_cim, real_sim], 1),
torch.cat([fake_cim, real_sim], 1))
gradient_penalty.backward()
optimizerD.step()
############################
# (2) Update G network
############################
if i < len(dataloader):
for p in netD.parameters():
p.requires_grad = False # to avoid computation
for p in netG.parameters():
p.requires_grad = True # to avoid computation
netG.zero_grad()
data = data_iter.next()
real_cim, real_vim, real_sim = data
i += 1
if opt.cuda:
real_cim, real_vim, real_sim = real_cim.cuda(), real_vim.cuda(), real_sim.cuda()
mask = torch.cat([torch.rand(1, 1, maskS, maskS).ge(X.rvs(1)[0]).float() for _ in range(opt.batchSize)],
0).cuda()
hint = torch.cat((real_vim * mask, mask), 1)
if flag: # fix samples
writer.add_image('target imgs', vutils.make_grid(real_cim.mul(0.5).add(0.5), nrow=16))
writer.add_image('sketch imgs', vutils.make_grid(real_sim.mul(0.5).add(0.5), nrow=16))
writer.add_image('hint', vutils.make_grid((real_vim * mask).mul(0.5).add(0.5), nrow=16))
vutils.save_image(real_cim.mul(0.5).add(0.5),
'%s/color_samples' % opt.outf + '.png')
vutils.save_image(real_sim.mul(0.5).add(0.5),
'%s/blur_samples' % opt.outf + '.png')
fixed_sketch.resize_as_(real_sim).copy_(real_sim)
fixed_hint.resize_as_(hint).copy_(hint)
flag -= 1
fake = netG(Variable(real_sim), Variable(hint))
if gen_iterations < opt.baseGeni:
contentLoss = criterion_L2(netF((fake.mul(0.5) - Variable(saber)) / Variable(diver)),
netF(Variable((real_cim.mul(0.5) - saber) / diver)))
contentLoss.backward()
errG = contentLoss
# contentLoss = criterion_L1(fake, Variable(real_cim))
# contentLoss.backward()
# errG = contentLoss
else:
errG = netD(torch.cat((fake, Variable(real_sim)), 1)).mean(0).view(
1) * opt.advW # TODO: what if???
errG.backward(mone, retain_graph=True)
contentLoss = criterion_L2(netF((fake.mul(0.5) - Variable(saber)) / Variable(diver)),
netF(Variable((real_cim.mul(0.5) - saber) / diver)))
contentLoss.backward()
# contentLoss = criterion_L1(fake, Variable(real_cim))
# contentLoss.backward(retain_graph=True)
optimizerG.step()
############################
# (3) Report & 100 Batch checkpoint
############################
if gen_iterations < opt.baseGeni:
writer.add_scalar('VGG MSE Loss', contentLoss.data[0], gen_iterations)
print('[%d/%d][%d/%d][%d] content %f '
% (epoch, opt.niter, i, len(dataloader), gen_iterations, contentLoss.data[0]))
else:
writer.add_scalar('VGG MSE Loss', contentLoss.data[0], gen_iterations)
writer.add_scalar('wasserstein distance', errD.data[0], gen_iterations)
writer.add_scalar('errD_real', errD_real.data[0], gen_iterations)
writer.add_scalar('errD_fake', errD_fake.data[0], gen_iterations)
writer.add_scalar('Gnet loss toward real', errG.data[0], gen_iterations)
writer.add_scalar('gradient_penalty', gradient_penalty.data[0], gen_iterations)
print('[%d/%d][%d/%d][%d] errD: %f err_G: %f err_D_real: %f err_D_fake %f content loss %f'
% (epoch, opt.niter, i, len(dataloader), gen_iterations,
errD.data[0], errG.data[0], errD_real.data[0], errD_fake.data[0], contentLoss.data[0]))
if gen_iterations % 500 == 0:
fake = netG(Variable(fixed_sketch, volatile=True), Variable(fixed_hint, volatile=True))
writer.add_image('colorized imgs', vutils.make_grid(fake.data.mul(0.5).add(0.5), nrow=16),
gen_iterations)
if gen_iterations % 2000 == 0:
for name, param in netG.named_parameters():
writer.add_histogram('netG ' + name, param.clone().cpu().data.numpy(), gen_iterations)
for name, param in netD.named_parameters():
writer.add_histogram('netD ' + name, param.clone().cpu().data.numpy(), gen_iterations)
vutils.save_image(fake.data.mul(0.5).add(0.5),
'%s/fake_samples_gen_iter_%08d.png' % (opt.outf, gen_iterations))
gen_iterations += 1
# do checkpointing
if opt.cut == 0:
torch.save(netG.state_dict(), '%s/netG_epoch_only.pth' % opt.outf)
torch.save(netD.state_dict(), '%s/netD_epoch_only.pth' % opt.outf)
elif epoch % opt.cut == 0:
torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' % (opt.outf, epoch))
torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' % (opt.outf, epoch))
torch.save(optimizerG.state_dict(), '%s/optimG_checkpoint.pth' % opt.outf)
torch.save(optimizerD.state_dict(), '%s/optimD_checkpoint.pth' % opt.outf)
| mit | -7,141,511,444,784,469,000 | 44.204013 | 119 | 0.591299 | false |
deginner/swagxample | setup.py | 1 | 1028 | from setuptools import setup
classifiers = [
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2",
"Topic :: Software Development :: Libraries",
]
setup(
name='Swagxample',
version='0.0.3.1',
packages=['swagxample'],
url='https://bitbucket.org/deginner/swagxample',
license='MIT',
classifiers=classifiers,
author='deginner',
author_email='[email protected]',
description='An HTTP server application using Swagger 2.0, bitjws, and SQLAlchemy.',
setup_requires=['pytest-runner'],
include_package_data = True,
install_requires=[
'sqlalchemy>=1.0.9',
'secp256k1==0.11',
"bitjws==0.6.3.1",
"flask>=0.10.0",
"flask-login",
"flask-cors",
"flask-bitjws>=0.1.1.4",
"alchemyjsonschema"
],
tests_require=['pytest', 'pytest-cov'],
extras_require={"build": ["flask-swagger"]},
entry_points = """
[console_scripts]
createuser = create_user:create_user
"""
)
| mit | -4,867,287,245,942,831,000 | 26.783784 | 88 | 0.603113 | false |
hcchengithub/peforth | log.txt.py | 1 | 211869 |
peforth
[x] 13:59 2017-07-31 Found the JavaScript eval() equivalent in Python
    https://stackoverflow.com/questions/701802/how-do-i-execute-a-string-containing-python-code-in-python
    It works!!
>>> mycode = 'print ("hello world")'
>>> exec(mycode)
hello world
>>>
The technique of returning a function from another function is known as currying:
https://stackoverflow.com/questions/14261474/how-do-i-write-a-function-that-returns-another-function
    Python anonymous function lambda
http://blog.csdn.net/majianfei1023/article/details/45269343
https://www.zhihu.com/question/20125256
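    A minimal sketch (my own illustration, not code from the links above) of a function
    returning another function -- the closure trick that genxt() relies on later:
        def make_adder(n):              # outer function captures n
            def add(x):                 # inner function keeps using the captured n
                return x + n
            return add                  # return the function object itself
        add5 = make_adder(5)
        print(add5(3))                  # ==> 8
        print((lambda x: x * 2)(7))     # anonymous lambda, expression-only ==> 14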
[x] review project-k , should project-k support python too?
which will be peforth.py 10:04 2019/11/26 it's projectk.py now.
[x] Ask the original pyforth author directly about license terms ---> turned out to be unnecessary.
[x] 實驗用 exec() 生成一個 function
s = '''
def show(s):
print(s)
'''
exec(s)
>>> show('abc')
abc
>>> 成功了!
[x] Try to define an python object
s = '''
class a():
vm = None
def b(self): # self is must
print(b) # b unknown
print(self)
print(a)
vm = self
c = a()
'''
exec(s)
[x] File-reading examples that peforth can borrow from
# average5 .py
def main() :
fileName = input ("What file are the numbers in? " )
infile = open (fileName, ' r ')
sum = 0
count = 0
for line in infile:
sum = sum + eval (line)
count = count + 1
print ("\nThe average Of the numbers is", sum / count)
main ( )
# average6.py
def main() :
fileName = input ("What file are the numbers in? " )
infile = open ( fileName
sum = 0.0
count = 0
line = infile.readline()
while line != ""
sum = sum + eval(line)
count = count + 1
line = infile.readline()
print("\nThe average Of the numbers is", sum / count)
main()
[x] Figured out how python modules work -- it's simple. peforth.py itself is the peforth VM.
    No need to wrap the whole VM inside a function the way javascript does, see
    GitHub\peforth\projectile.py
    python can redefine functions and methods. Functions and methods are
    variables too.
    python objects, like javascript, can add properties and methods
    by simply assigning a value to them.
>>> type(show) # show is an object
<class 'projectile.Projectile'>
>>> show
<projectile.Projectile object at 0x000001C6260D0438>
>>> show.x = 0 # assign new property to show
>>> show.y = 11
>>> show.p = 22
>>> dir(show) # check it out
['__class__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__',
'__format__', '__ge__', '__getattribute__', '__gt__', '__hash__', '__init__',
'__init_subclass__', '__le__', '__lt__', '__module__', '__ne__', '__new__',
'__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__',
'__str__', '__subclasshook__', '__weakref__', 'getHere', 'getX', 'getY', 'p',
'update', 'x', 'xpos', 'xvel', 'y', 'ypos', 'yvel']
>>>
[x] python can also 'see' a function's source code
https://stackoverflow.com/questions/427453/how-can-i-get-the-source-code-of-a-python-function
def foo(a):
x = 2 # how about a comment?
return x + a
import inspect
# inspect.getsource(foo)
# u'def foo(a):\n x = 2\n return x + a\n'
print (inspect.getsource(foo))
==> 結果完全成功, 連 comment 也被顯示出來。
==> 但是 py> py: 組合出來的 function 不行
py> tick('test').cfa ==> 1
py> dictionary[1:] ==> [.s, <function <lambda> at 0x0000024CE15810D0>,
.s, <function <lambda> at 0x0000024CE1581158>, .s, None, None]
OK py> inspect.getsource(dictionary[2]) .
could not get source code <------------------- error message
Debug? [y/N]
同一篇 stackoverflow 介紹的 dis module 也真的可行!
>>> import dis
>>> def func(x):
... print(x+1)
...
>>> func(123)
124
>>> dis.dis(func)
2 0 LOAD_GLOBAL 0 (print)
2 LOAD_FAST 0 (x)
4 LOAD_CONST 1 (1)
6 BINARY_ADD
8 CALL_FUNCTION 1
10 POP_TOP
12 LOAD_CONST 0 (None)
14 RETURN_VALUE
>>> 哇! 顯示出 function 的機械碼, 太正點了!!
[x] Python equivalent of:
Word.prototype.toString = function(){return this.name + " " + this.help}; // every word introduces itself
--> 有了, 就是去定義 __str__ prototype of the class
#------- ex2.py ---------------
class d():
def __str__(self):
return "a __str__"
def __repr__(self):
return "a __repr__"
class x():
name = 'stella'
feet = 'splender'
#------------------------------
>>> import ex2
>>> x = ex2.x()
>>> x
<ex2.x object at 0x00000170D77202B0> <---- default __repr__ 打印
>>> print(x)
<ex2.x object at 0x00000170D77202B0> <---- default __str__ 傳回值
>>> d = ex2.d()
>>> d # <--------- 執行該 obj 時, 打印 __repr__() 的傳回值
a __repr__ # 應該讓它執行該 word
>>> print(d) # <---- obj 本身的傳回值是 __str__() 的傳回值
a __str__
>>>
[x] 進一步刺探未來的 peforth.py kernel module 的特性
Ynote: 搞懂 python 的 module files globals() locals().note
[x] docode() needs to assemble a function; see how anonymous functions are defined:
    https://stackoverflow.com/questions/6629876/how-to-make-an-anonymous-function-in-python-without-christening-it
    Study built-in function exec() https://docs.python.org/3/library/functions.html#exec
    Study built-in function compile() https://docs.python.org/3/library/functions.html#compile
[x] genxt() works now
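    A rough, self-contained sketch of the idea behind genxt() (the real one in projectk.py
    differs in details such as error handling):
        def genxt(name, body):
            ns = {}
            source = "def xt(_me=None):\n{}".format(body)   # body arrives pre-indented
            exec(source, globals(), ns)
            ns['xt'].__doc__ = source       # keep the source so 'see' has something to show
            return ns['xt']
        hi = genxt('hi', "    print('hello from', _me)\n")
        hi('xt demo')                       # ==> hello from xt demo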
[x] IDLE path working directory working folder
import sys
sys.path.append('c:/Users/hcche/Documents/GitHub/peforth')
[x] 12:50 2017/08/12 已經跑起來了, debugging compiling == 'code' 的問題
--> 可能是 end-code 裡面 Word(newname,newxt) 失敗的關係 --> no, it can't fail
--> 應該是 docode 裡面, 結構不太好, 萬一 reDef 或 genxt() 失敗了會怎樣?
很多都會半途結束, 留下 compiling == 'code' 的問題。 --> all tested, behavior acceptable now
[x] "import re" in peforth.py kernel is not a good choice.
Simply letting the main program to do that. The main program is eforth.3py
--> Yeah! it works.
c:\Users\hcche\Documents\GitHub\peforth>python eforth.3py
hello eforth!!
--> 錯了, 每個 .py 檔都自己 import re, import pdb 反而是對的, see:
https://stackoverflow.com/questions/8957859/python-child-cannot-use-a-module-the-parent-imported
... Generally if you're doing simple obvious things like importing a standard module,
you should do it the simple and obvious way......
[x] reproduce the problem:
import peforth as vm
vm.dictate('code test end-code') # Try this first
vm.words['forth']
這樣是成功的,但是進入 forth command line 之後, 同樣的工作... 還是成功的。
--> 改試 vm.dictate('code test3 print("hello test3!!") end-code')
>>> vm.execute('test3') --> hello test3!! 很成功
--> 進 forth command line
>>> vm.peforth()
OK code test4 print("hello test4") end-code
OK test4
hello test4
OK
還是很成功
--> 好像要出過 error e.g. word unknown 之類才能複製問題
>>> code test5 end-code
File "<stdin>", line 1
code test5 end-code
^
SyntaxError: invalid syntax
>>>
的確是這樣!!! now I've got the SRP
--> 似乎是 w.xt(w) 執行 end-code 時出問題, 檢查此時的 end-code
RI, outer() 裡面分辨 token 是否 [int, float] 用 eval(token) 會有 exception
必須要用 try - except 處理才行。 --> Fixed !!!
[x] why after OK type 'words' no response <--- should be : Error! words unknown.
--> 結果發現, 所有的 dir(vm) attributes 都這樣!!
(Pdb) eval('pop') ==> <function pop at 0x00000178A534A730>
(Pdb) eval('dictionary') ==> [0]
(Pdb) eval('stack') ==> [{'forth': [0, code, end-code, //, stop, *debug*]}, {'forth': [0, code, end-code, //, stop, *debug*]}, {'forth': [0, code, end-code, //, stop, *debug*]}, <class 'peforth.Word'>, <function phaseA at 0x00000178A534A0D0>, <function phaseB at 0x00000178A534A158>]
所以, outer() 還要再改良。
--> eval() 的結果 + 0 就可以保證他是 number 了
[x] kernel project-k.py instead of peforth.py
[X] code word's help, not easy, keep the record.
# stack diagram
ntibwas, s = ntib, nextstring("\\(")
if s['flag']: # stack diagram is existing
pdb.set_trace()
newhelp = '( ' + nexttoken('\\)') + nexttoken() + ' '
else: # return s to tib
ntib = ntibwas
# word description
ntibwas, s = ntib, nextstring("\\")
if s['flag']: # description is existing
newhelp += nexttoken('\n|\r')
else: # return s to tib
ntib = ntibwas
code \ last().help += nexttoken('\n|\r'); end-code immediate
// ( <comment> -- ) Give help message to the new word.
code ( last().help = '( ' + nexttoken('\\)') + nexttoken() + ' ' end-code immediate
// ( -- ) Get stack diagram to the last's help.
--> v1.23 code words 可以用 # 下 help 了。
[x] In jeforth, window.colonxt is dynamicly created by definition of ':'.
Can peforth.f do that too in python? Yes!!!
>>> def test():
... globals()['cc'] = 123
...
>>> cc
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'cc' is not defined
>>> test()
>>> cc
123
>>>
[/] : test ;
'module' object does not support item assignment
Debug? [y/N] y
RI: last().xt = xt # also vm['colonxt'] <------ [/] easy, deal with this later
[x] After the above error probably, after colon definition the compiling is still True!!!
--> because forgot declare it a global.
B i n g o ! ! colon definition works now
[x] literal needs to use closure
def gen(n): # function generator
def f(): # literal run time function
print(n)
f.description = "{} {}".format(type(n),n)
return f
f = gen([11,22,33])
f()
>>> f.description
"<class 'list'> [11, 22, 33]"
# functions are not shown by __str__ and __repr__ like dict
# def str(self): # return help message
# return f.description
# def repr(self): # execute xt and return help message
# return f.description
# str.desc = "I am str"
# repr.desc = "I am repr"
# f.__str__ = str
# f.__repr__ = repr
[x] py> and py: should both switch to compile(code,"")
    compile, CN intro: http://www.th7.cn/Program/Python/201608/923063.shtml
    A lambda body cannot contain an assignment statement (a deliberate safety restriction), so
    py: cannot be built on lambda; it has to go through compile().
    https://stackoverflow.com/questions/20695745/why-use-lambdas-vs-1-line-function-declarations
    --> [x] Indeed found that py: tick('//').immediate=True no longer works!!
    --> Rewrote py: and py> with <py> </py> and </pyV> respectively, ok now
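    A minimal example of the compile()/exec()/eval() route (my own illustration; the peforth
    words wrap this differently):
        code = compile("x = 1 + 2", "<string>", "exec")     # statements need 'exec' mode
        ns = {}
        exec(code, ns)
        print(ns['x'])                                      # ==> 3
        print(eval(compile("3 * 4", "<string>", "eval")))   # expressions use 'eval' mode ==> 12
        # a lambda body is a single expression, so an assignment like "x = 1" cannot go in it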
[x] pyExec pyEval 是多餘的 --> 去除
[x] (Pdb) execute("sdfsdf")
(Pdb)
沒半點錯誤訊息, 有問題看不出來!!
--> fixed, now it's a panic.
[x] Why doesn't an undefined 'compiling' trigger the unknown error?
    --> outer() used eval(token) to decide whether a token is a number; that doesn't work,
        because token='compiling' raises no exception and simply returns its value True or False !!
    --> Switched to complex(token) -- works perfectly!
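    The trick in one self-contained snippet (illustration only):
        def is_number(token):
            try:
                complex(token)          # '123', '3.14', '1e3', '3+4j' all pass
                return True
            except ValueError:
                return False
        print(is_number('compiling'), is_number('123'))   # ==> False True
        # eval('compiling') inside outer() would just have returned that variable's value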
[x] t> >t t@
>>> line = 'Cats are smarter than dogs\n\\ 1234\n\\ 2233'
>>> matchObj = re.search( r'\n\\ (\d*)$', line)
>>> matchObj.group()
'\n\\ 2233'
>>> matchObj.group(1)
'2233'
>>> len(matchObj.group())
7
>>> line[:-7]
'Cats are smarter than dogs\n\\ 1234'
>>>
[x] [/py] [/pyV] 只分別取得 exec-code 與 eval-code 不執行, 可以用 execute 執行嗎?
[x] execute 也要能執行 exec-code 或 eval-code ---> done
[x] 這兩個都不要,應該是個 compyle ( 'source' -- code object ) \ python compiler 指令
[x] 讓 execute() 認得 code object
--> OK ' compyle .
compyle ( "source" -- exec-code ) Python compile source to exec-code object __str__ OK
OK char print('hi') compyle
OK execute
hi
OK 一次就成功了!!
[x] colon definition 裡看能不能用 comma 塞入一個 code object ?
--> : test char print('hi') compyle execute ; 成功
: test2 [ char print('hi') compyle , ] ; 也成功
: cfa ' py> dictionary[pop().cfa:] . cr ;
OK cfa test2
[ /* test2 */ <code object <module> at 0x0000019B24E1F8A0, file "", line 1>, None,
/* cfa */ ', <function xt.<locals>.<lambda> at 0x0000019B24E29C80>, ., cr, None,
None]
OK
[x] 有了 compyle 要不要改寫 <py> </py> </pyV> 等?
--> 只簡化了 </py> 一點點
[x] debug :: --> root cause 又是 branch 裡 assignment to ip 忘了加 vm.ip
OK 11 22 ' + :: xt() .s ==> [33] OK 表示 :: interpret mode 功能 ok
OK : test :: xt() ;
--Return--
> <string>(2)xt()->None
(Pdb) c
OK see-cfa test
[<code object <module> at 0x000001F1364F68A0, file "", line 1>, None, None]
OK 22 33 ' + test
OK .s
[55]
OK
[x] constant 要用到 vm.forth['varname'] 複習一下 python 語法
constant 要做的事 --> 'push(vm["forth"]["x"])'
一開始 word-list 都沒有自己的空間
(Pdb) vm['forth']
*** TypeError: 'module' object is not subscriptable
(Pdb) vm.forth
*** AttributeError: module 'projectk' has no attribute 'forth'
不能這樣 init :
(Pdb) vm['forth']={}
*** TypeError: 'module' object does not support item assignment
要這樣 init :
(Pdb) setattr(vm,'forth',{})
Object 的 attribute 不能這樣 access :
(Pdb) vm['forth'] <--- 這是 dict 的方式
*** TypeError: 'module' object is not subscriptable
要這樣 access :
(Pdb) vm.forth
{}
(Pdb) getattr(vm,'forth')
{}
(Pdb)
[x] colon definition 失敗還是會佔一個位置
OK 123 constant x
OK 345 to x
Error! Assigning to a none-value.
Debug? [y/N]
OK : test 44445555 to x ;
Error! Assigning to a none-value. <--- 馬上觸發錯誤,好。
Debug? [y/N]
OK words
0 code end-code // ...snip... to x test <--- test 佔了位置
OK : ttt ;
OK words
0 code end-code // ...snip... to x test ttt <--- 確實佔了位置
OK test
Error! test unknown. <---- colon definition 失敗, 只是沒有 reveal 而已
Debug? [y/N]
OK rescan-word-hash <---- rescan 之後它就會出現!!
OK test
OK .s
[44445555]
OK
--> jeforth 也一樣, 算了, 有警告就可以了。
--> (forget) 一下可以把它消除掉
[x] tib 平時有被 corrupted
OK char $ . rewind
OK 11 22 33 *debug* # <---- 最簡單的
(Pdb) tib
'112233*debug*' # <----- 就已經有問題了 !!!
(Pdb)
問題在 kernel nexttoken() 裡面
--> Root cause 1 : nexttoken() <--- skip leading white spaces 改寫
Root cause 2 : tib and ntib are strange <-- ntib 太大先排除
[x] writeTextFile 實驗
OK <py> open("pathname.txt", "wt")</pyV> constant f
reDef f
OK f .
<_io.TextIOWrapper name='pathname.txt' mode='wt' encoding='cp950'> OK f :> name
--> pathname.txt OK
OK f :: write("abc")
OK f :: write("123")
OK f :: write("中文")
OK f :: close()
encoding='utf-8'
[x] refill works now. Use refill to improve <text> first. Let it accept
multiple lines. ---> 最後是簡單地引進 accept2 用 Ctrl-D 切換 multiple-line mode 即可. 保留以下研究過程。
: <text>.interpret ( <multi-lines> -- "string" ) // get multi-lines string from ternimal
CR word ( s )
begin
accept if ( s line )
\ get string to s, leave </text> and the rests in tib by adjusting ntib
py> re.search("(.*)</text>(.*)",tos()) ( s line re )
py> bool(tos()) if \ line has </text> ?
( s line re )
py: vm.tib="</text>"+tos().group(2);vm.ntib=0;
\ s += re.group(1)
nip ( s re ) :> group(1) + ( s )
exit
else ( s line re )
\ s += line
drop + ( s )
else ( s )
\ s += '\n'
py> pop()+'\n'
then
refill
again ;
我發現, bool(regEx) 可以看出 re.search 的結果是否 found
[x] See MetaMoji 討論如何適當分割以上複雜的 <text>.interpret 成簡單的 一行成功; 多行輸入 兩段。
其中多行輸入是個公用 routine
[x] 實驗後綴法是否有簡化功效? 使 group(1) 成為共同的結果
\ regular expression 實例
OK <py> re.search("(.*?)</text>(.*)","aa </text>bb</text>")</pyV> ( re ) constant re
OK re bool . cr ^^^^^^ 故意加上後綴讓 re.search 總是成功
True <--- 總是成功
OK re :> group() . cr
aa </text>bb</text>
OK re :> group(1) . cr
aa <----------------------------- group(1) 為所求
OK re :> group(2) . cr
bb</text> <-------------------- group(2) 去掉後綴之後還給 tib
OK <py> re.search("(.*?)</text>(.*)","aa bb</text>")</pyV> ( re ) constant re
OK re bool . cr
True
OK re :> group() . cr
aa bb</text>
OK re :> group(1) . cr
aa bb <------------ 當 bool group(2) False 時 group(1) 仍為所求, 故確有簡化功效
OK re :> group(2) . cr
OK re :> group(2)=="" . cr
True
OK re :> group(2) bool .
False OK
[x] 多行輸入公用 routine
[x] 19:46 2020/10/04 複習需要 ^D 多行輸入 multiple lines inpue 的原因:如果是 colon definition 本來
就可以在 compiling state 多行輸入,問題出在 code ... end-code 期間需要 ^D multiple lines input.
: accepts ( "deli" <multiple lines> -- "string" ) // Get multiple lines from tib up to delimiter
( deli )
begin
accept if ( s line )
\ get string to s, leave </text> and the rests in tib by adjusting ntib
py> re.search("(.*)</text>(.*)",tos()) ( s line re )
py> bool(tos()) if \ line has </text> ?
( s line re )
py: vm.tib="</text>"+tos().group(2);vm.ntib=0;
\ s += re.group(1)
nip ( s re ) :> group(1) + ( s )
exit
else ( s line re )
\ s += line
drop + ( s )
else ( s )
\ s += '\n'
py> pop()+'\n'
then
refill
again ;
code accept2 # use Ctrl-D at the end to terminate the input. py> chr(4)=='^D' --> True
result, s = "", input()
while not chr(4) in s:
result += s
s = input()
result += s.replace(chr(4),'\n') # all ^D become \n
push(result)
push(True)
end-code // ( -- str T|F ) Read a line from terminal.
[x] accept can be single line accept1 or multiple lines accept2 , switch by Ctrl-D
8: [EOT] (<class 'str'>) <---- the Ctrl-D from input()
OK py> ord(tos()[0]) . cr
4
OK
示範 <accept> ... </accept> 的用法
------- clipboard ---------
dropall
<accept>
11
22
33
44
55
</accept>
66
77
88
99
----------------------------
OK dropall # paste 之後的樣子
OK <accept>
11
22
33
44
55
</accept>66 # 這是最後一行,注意!66 可以往前緊貼, delimiter 會整個被忽略掉。
OK 77
OK 88
OK 99
----------------------------
OK .s # 看看結果 .......
0: 11
22
33
44
55
66
(<class 'str'>)
1: True (<class 'bool'>)
2: 77 4Dh (<class 'int'>)
3: 88 58h (<class 'int'>)
4: 99 63h (<class 'int'>)
OK
[x] .s in trouble when cell is False, None ... etc
[x] peforth.py 可以直接執行 : python peforth.py
也可以由 python interpreter 執行: >>> peforth.main() 此時 exit 回到 python interpreter
bye 則會傳回 errorlevel 回到 DOS.
# 從 python interpreter 就可以看到 peforth.py module 裡的 globals
>>> dir(peforth)
['__builtins__', '__cached__', '__doc__', '__file__', '__loader__', '__name__',
'__package__', '__spec__', 'greeting', 'main', 'panic', 'readTextFile',
'vm', 'writeTextFile']
# 從 python interpreter 更可以看到 project-k vm 裡的 globals
>>> dir(peforth.vm)
['EXIT', 'RET', 'Word', '__builtins__', '__cached__', '__doc__', '__file__', '__loader__',
'__name__', '__package__', '__spec__', 'code', 'colonxt', 'comma', 'compiling', 'context',
'context_word_list', 'current', 'current_word_list', 'debug', 'dictate', 'dictionary',
'dis', 'docode', 'doendcode', 'endcode', 'execute', 'forth', 'genxt', 'greeting',
'here', 'inner', 'inspect', 'ip', 'isReDef', 'json', 'last', 'major_version', 'multiple',
'name', 'newhelp', 'newname', 'newxt', 'nextstring', 'nexttoken', 'ntib', 'order', 'os',
'outer', 'panic', 'pdb', 'phaseA', 'phaseB', 'pop', 'push', 're', 'readTextFile', 'reset',
'rstack', 'rtos', 'stack', 'stop', 'tib', 'tick', 'tos', 'version', 'vm', 'vocs', 'wordhash',
'words', 'writeTextFile']
# 從 python interpreter 也可以執行 peforth
>>> peforth.vm.dictate
<function dictate at 0x000001D1368E2510>
>>> peforth.vm.dictate('version')
p e f o r t h v1.01
source code http://github.com/hcchengithub/peforth
# 在 peforth 裡面定義的東西, 回到 python interpreter 取用:
>>> peforth.main()
OK 123 constant x
OK exit
>>> peforth.vm.forth
{'obj2dict': <function object2dict at 0x000001D136934510>, 'x': 123}
>>> peforth.vm.forth['x'] --> 123
# 用 obj2dict() 把 Word 轉成 dict, 這是 see 的準備
>>> peforth.vm.forth['obj2dict'](peforth.vm.tick('+'))
{'__class__': 'Word', '__module__': 'projectk', 'name': '+', 'xt': <function xt at 0x000001D1368F28C8>, 'immediate': False, 'help': '( a b -- a+b) Add two numbers or concatenate two strings.', 'comment': '', 'vid': 'forth', 'wid': 51, 'type': 'code'}
[x] see code words
# json 需要先給它 obj2dict() function 才能處理我們的 object
OK py> json.dumps(tick('+'),indent=4) .
Failed to run <Word '</pyV>'>: Object of type 'Word' is not JSON serializable
Continue, Debug, or Abort? [C/d/a] a ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# 從 peforth 裡面定義轉換 function
<py>
def object2dict(obj):
#convert object to a dict
d = {}
d['__class__'] = obj.__class__.__name__
d['__module__'] = obj.__module__
d.update(obj.__dict__)
return d
push(object2dict)
</py>
^D
OK .s
0: <function object2dict at 0x000001D136934510> (<class 'function'>)
OK constant obj2dict
OK exit
# 有了轉換 function 就可以讓 json 完成工作
>>> import json
>>> print(json.dumps(peforth.vm.tick('+'),default=peforth.vm.forth['obj2dict'],indent=4))
{
"__class__": "Word",
"__module__": "projectk",
"name": "+",
"xt": {
"__class__": "function",
"__module__": "projectk",
"source": "def xt(_me=None): ### + ###\n push(pop(1)+pop()) \n",
"name": "+"
},
"immediate": false,
"help": "( a b -- a+b) Add two numbers or concatenate two strings.",
"comment": "",
"vid": "forth",
"wid": 51,
"type": "code"
}
>>>
[x] code object 希望能帶 source code 以供 see
OK 45 @ dir .
['__class__', '__delattr__', '__dir__', '__doc__', '__eq__', '__format__',
'__ge__', '__getattribute__', '__gt__', '__hash__', '__init__', '__init_subclass__',
'__le__', '__lt__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__',
'__setattr__', '__sizeof__', '__str__', '__subclasshook__', 'co_argcount', 'co_cellvars',
'co_code', 'co_consts', 'co_filename', 'co_firstlineno', 'co_flags', 'co_freevars',
'co_kwonlyargcount', 'co_lnotab', 'co_name', 'co_names', 'co_nlocals', 'co_stacksize',
'co_varnames'] OK
OK --> 不行, code object 裡面不能新增 attribute 也不能改裡面的
若不行, 只好模仿 Word 弄成一個 class 來裝 code object 就可以帶上 source code
或用 closure , 也就是 genxt() 的方法也是現成已經成功的辦法。也不見得比 compyle 差。
或用 dis.dis(func) 也好, 更具視覺效果
[x] Found a way to give code objects a source-code display: introduce class Comment, similar to
    class Word but doing nothing (phaseA/phaseB take care of that); it only carries the comment,
    e.g. comma(Comment('lalala')) lies in the dictionary waiting for the see command to use it.
OK py: comma(Comment("lalala"))
OK here
OK .
637 OK 636 @ .
lalala OK 636 @ type . --> <class 'projectk.Comment'>
OK 636 @ .
lalala
OK 636 @ execute -->
Failed to run <Word 'execute'>: must be str, not Comment
Continue, Debug, or Abort? [C/d/a] a
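    A stripped-down sketch of the idea (the real Comment class in projectk.py may differ):
        class Comment:
            def __init__(self, text):
                self.comment = text     # attribute name is my guess, for illustration only
            def __str__(self):
                return self.comment
        # phaseA/phaseB will simply skip Comment cells (next item), so comma(Comment("..."))
        # can park human-readable source text in the dictionary for 'see' to display.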
[x] modify phaseA phaseB to support Comment class
--> done!
[x] modify ::, :>, </py>, and </pyV> to add comment
[x] 目前 literal 仍被當一般 function 用 dis.dis() 顯示 --> 改成顯示 literal
OK 339 @ . # 已知 339 處是個 literal function
<function xt.<locals>.f.<locals>.literal at 0x000001ED9B6579D8> OK 339 @ :> __name__ .
OK 339 @ :> str . # 印出 readable 的方法
Literal: pop(). <class 'str'> OK
--> 可以修改 toString 了
==> see 終於完成了!!!
[x] Actually the __doc__ attribute is exactly the place meant for documentation text . . .
    --> Wrong!
Failed to run <Word '</py>'>: 'code' object attribute '__doc__' is read-only
Continue, Debug, or Abort? [C/d/a]
可是我試過了呀!? 如下:
00035: RET (<class 'NoneType'>)
00036: Literal: \\n|\\r <class 'str'>
00037: RET (<class 'NoneType'>)
00038: lambda:push(eval(vm.greeting())) (<class 'projectk.Comment'>)
00039: (<class 'function'>)
7 0 LOAD_GLOBAL 0 (push)
2 LOAD_GLOBAL 1 (eval)
4 LOAD_DEREF 0 (eval_code)
6 CALL_FUNCTION 1
8 CALL_FUNCTION 1
10 RETURN_VALUE
OK 39 @ .
<function xt.<locals>.<lambda> at 0x0000017E8D269598> OK
OK 39 @ dir .
['__annotations__', '__call__', '__class__', '__closure__',
'__code__', '__defaults__', '__delattr__', '__dict__',
'__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__get__',
...snip...]
OK 39 @ :> __doc__ .
None
OK 39 @ :: __doc__="abc"
OK 39 @ :> __doc__ .
abc OK
    This is : version py> vm.greeting() ; // ( -- revision ) print the greeting message and return the revision code
    The __doc__ of the eval_code / exec_code objects produced by compile() is read-only, but
    once wrapped inside a lambda it becomes writable. <------ mystery solved!!
    --> Having </py> comma(exec_code) directly has no real benefit: it gives up __doc__ and
        needlessly complicates phaseB as well.
    --> [x] Changed it!
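    The discovery in a tiny self-contained form (illustration):
        co = compile("1 + 1", "<string>", "eval")
        # co.__doc__ = "note"             # AttributeError: code object attributes are read-only
        f = lambda: eval(co)              # wrap it in a function object instead
        f.__doc__ = "lambda:eval(1 + 1)"  # a function's __doc__ is writable, so 'see' can show it
        print(f(), f.__doc__)             # ==> 2 lambda:eval(1 + 1)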
[x] these lines are strange,
"" value description ( private ) // ( -- "text" ) description of a selftest section
[] value expected_rstack ( private ) // ( -- [..] ) an array to compare rstack in selftest
[] value expected_stack ( private ) // ( -- [..] ) an array to compare data stack in selftest
0 value test-result ( private ) // ( -- boolean ) selftest result from [d .. d]
[] value [all-pass] ( private ) // ( -- ["words"] ) array of words for all-pass in selftest
the "( private )" become prefix of their word.help !
--> value command gets stack diagram ?
--> ( command 看到 last 沒有 help 就把後續的 (...) comment 加進去了! 應該限制
compiling state 才這麼做。
[x] *** debugging, OK now. RI: constant and value were in trouble due to that I
changed the Comment word and the way to compile code objects.
[x] python shell and eforth 互相參考手上的資料
>>> peforth.main()
OK 0 constant shell # peforth 定義的變量
OK exit
# 從外面把 globals() 給它
>>> getattr(peforth.vm,'forth')['shell']=globals()
>>> peforth.vm.forth
{'obj2dict': <function obj2dict at 0x000002C8D8F5B1E0>,
'description': '', 'expected_rstack': [], 'expected_stack': [],
'test-result': 0, '[all-pass]': [],
'shell': {'__name__': '__main__', '__doc__': None, '__package__': None,
'__loader__': <class '_frozen_importlib.BuiltinImporter'>,
'__spec__': None, '__annotations__': {}, '__builtins__': <module 'builtins' (built-in)>,
'peforth': <module 'peforth' from 'c:\\Users\\hcche\\Documents\\GitHub\\peforth\\peforth.py'>}}
>>> peforth.main()
OK shell .
{'__name__': '__main__', '__doc__': None, '__package__': None,
'__loader__': <class '_frozen_importlib.BuiltinImporter'>,
'__spec__': None, '__annotations__': {},
'__builtins__': <module 'builtins' (built-in)>,
'peforth': <module 'peforth' from 'c:\\Users\\hcche\\Documents\\GitHub\\peforth\\peforth.py'>}
OK
# 從外面 DOS copy-paste 進來,一氣呵成 (不要 indent, 用 block mode)
python
import peforth
peforth.vm.dictate('0 constant shell')
peforth.vm.dictate('// ( -- dict ) 最外層 python interpreter 的 globals()')
getattr(peforth.vm,'forth')['shell']=globals()
peforth.main() # 從 python interpreter 切換進入 peforth
\ 進入了 peforth interpret state
<accept> \ 從 terminal 收取跨行 input lines
<py>
import sys
push(sys)</py> constant sys
// ( -- sys ) The sys module. Try: sys py: help(pop())
</accept> \ ( -- string T|f ) 從 terminal copy-paste 進來的 string
[if] tib.insert help sys [then]
[x] examples tools utilities goodies 範例 栗子 例子
\ 列出所有的 code words
<py> [w.name for w in words['forth'][1:] if 'code' in w.type] </pyV>
\ 列出所有的 selftest passed words
<py> [w.name for w in words['forth'][1:] if 'pass'==getattr(w,'selftest',False)] </pyV> . cr
\ 列出所有 immediate words
<py> [w.name for w in words['forth'] if getattr(w,'immediate',False) ] </pyV> . cr
\ 把尾巴 2 個 TOS 切出來成為單獨的 list (array)
( -2 ) >r py: t,vm.stack=stack[rtos(1):],stack[:rpop(1)];push(t)
--> slice
\ Execute DOS command
OK <py> exec('import os',globals(),globals())</py> # import the os module
OK py: os.system('dir')
Volume in drive C is Windows
Volume Serial Number is 2EA4-3202
Directory of c:\Users\hcche\Documents\GitHub\peforth
2017-08-23 09:31 <DIR> .
2017-08-23 09:31 <DIR> ..
2017-07-31 20:35 65 .gitattributes
2017-06-25 13:31 18,226 voc.f
2017-08-25 13:03 <DIR> __pycache__
10 File(s) 178,951 bytes
3 Dir(s) 264,579,960,832 bytes free
OK
# But after <py> os.system(r"cd c:\Users\hcche\Documents\GitHub\ML\WH300")</py>
the peforth working directory is not changed. It changes only the temperary shell.
\ copy 以下 comment (用 np++ column mode) 從 DOS box Ctrl-V 一路跑起來
<comment>
python
import peforth
peforth.vm.dictate('0 constant shell')
peforth.vm.dictate('// ( -- dict ) 最外層 python interpreter 的 globals()')
getattr(peforth.vm,'forth')['shell']=globals()
peforth.main() # 從 python interpreter 切換進入 peforth
\ 進入了 peforth interpret state
<accept> \ 從 terminal 收取跨行 input lines
<py>
import sys
push(sys)</py> constant sys
// ( -- sys ) The sys module. Try: sys py: help(pop())
</accept> \ ( -- string T|f ) 從 terminal copy-paste 進來的 string
[if] tib.insert help sys [then]
</comment>
\ DOS command line one-liner to print the path environment variable
c:\Users\hcche\Desktop>python -m peforth s' push(os.get_exec_path())' compyle execute (see) bye
[x] <accept> <py> does not work when unless putting <py> to next line <---- problem
--> rest of the line after <accept> should be the first line of the multiple lines
[x] OK include c:\Users\hcche\Documents\GitHub\ML\WH300\wh300.f
C:\Users\hcche\AppData\Local\Programs\Python\Python36\lib\site-packages\sklearn\cross_validation.py:44: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. Also note that the interface of the new CV iterators are different from that of this module. This module will be removed in 0.20.
"This module will be removed in 0.20.", DeprecationWarning)
Failed to run <Word 'sinclude'>: pop from empty list
Continue, Debug, or Abort? [C/d/a] a
OK
--> possibly because rstack are to be used to return while reset() ( stop command )
clears the rstack. --> 應該是猜對了。 stop command 只能中斷 outer loop 不能把 rstack 清掉!!
[x] let <accept> <text> auto indent. Use spaces before </accept> </text> as the common strip.
--> study <text> </text> 直接用 BL word 把 </text> 之前的 spaces 都忽略掉了, 這裡要改一下。
--> code test push(nextstring('[^ ]')) end-code test 123 得到:
0: {'str': ' ', 'flag': True} (<class 'dict'>)
1: 123 7Bh (<class 'int'>)
用來取得 </text> 之前的 spaces --> 這只是一法,也不太好。
--> 不如取所有 lines 的 leading spaces 之最大公因數,一律刪除就對了。
1. 切成 lines in an array
</text> :> splitlines() ( [lines] )
2. 算出每行的前導 spaces 個數
len - lstrip
OK s" abc" py> len(pop()) tib.
s" abc" py> len(pop()) \ ==> 7 (<class 'int'>)
OK s" abc" :> lstrip() py> len(pop()) tib.
s" abc" :> lstrip() py> len(pop()) \ ==> 3 (<class 'int'>)
OK
3. 取最小值,
OK py> min([1,2,3]) tib.
py> min([1,2,3]) \ ==> 1 (<class 'int'>)
OK
4. 每行都去除這麼多前導 spaces
[ e for e in m]
cls dropall <accept>
<text>
line1
line2
line3
line4
line5
</text> constant lines
</accept>
drop tib.insert
lines :> splitlines() constant [lines]
<py> map(lambda x:len(x)-len(x.lstrip()),vm.forth['[lines]'])</pyV>
constant leading-spaces // ( -- map ) 只能用一次!
\ 檢查 leading-spaces 有兩種方法,後者才漂亮
\ <py> [i for i in vm.forth['leading-spaces']]</pyV> tib. \ check leading-spaces
\ leading-spaces py> list(pop()) .
\ OK leading-spaces py> list(pop()) . # 如果 map 不大這個可以考慮
\ [12, 16, 16, 16, 16, 8] OK
\ OK leading-spaces py> list(pop()) . # map 之類的 iterator 都不能 rewind/reset
\ [] OK
leading-spaces py> min(pop()) constant common-indent
[lines] common-indent <py> [i[tos():] for i in pop(1)]</pyV> nip constant [result]
result py> "\n".join(pop()) constant result // ( -- string ) the cooked multi-lines string
: -indent ( multi-lines -- cooked ) // Remove common indent of the string
:> splitlines() ( [lines] )
<py> map(lambda x:len(x)-len(x.lstrip()),tos())</pyV> ( [lines] map[^spaces] )
py> min(pop()) ( [lines] indent )
<py> [i[tos():] for i in pop(1)]</pyV> nip ( [result] )
py> "\n".join(pop()) ;
code -indent
lines = pop()
array = lines.splitlines() # [lines]
spaces = map(lambda x:len(x)-len(x.lstrip()),array) # [spaces]
indent = min(spaces) # number of common indent
cut = [i[indent:] for i in array] # [cuted lines]
push("\n".join(cut)) end-code
// ( multi-lines -- cooked ) Remove common indent of the string
bingo! it works!
[x] don't need to use map in -indent, use [f(i) for i in lines.splitlines()]
should be enough --> Yes! The following two lines are equivalent:
spaces = map(lambda x:len(x)-len(x.lstrip()),array) # iterator
spaces = [len(x)-len(x.lstrip()) for x in array] # list
[x] Start to use peforth for the wh300 project . . .
用 peforth 來實現 wh300
第一個好消息就是 import module 變成 forth word 成功了!!
<py>
import numpy as np
push(np)
</py> constant np // ( -- numpy ) The numpy module
OK np .
<module 'numpy' from 'C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\lib\\site-packages\\numpy\\__init__.py'> OK
OK
--> import to globals() is the best way. The above method is interesting but not the best.
--> Done ! wh300.f works fine now.
[x] -indent 很聰明地 " "*100 的花招把 </text> 之前的線索給毀了!!! 目前變成過度 indent.
--> 過度 indent 修好了, constant 的 runtime 又出問題。因為是 runtime, root cause 很難找。
Root cause : 下面 lambda 的 code 內縮了,應該不要。所以是 -indent 有問題。
str__', '__subclasshook__'] OK py> dictionary[456].__doc__ .
lambda:exec(
source = '\tpush(getattr(vm,"{}")["{}"])'.format(current, last().name)
last().xt = genxt('constant',source)
if not getattr(vm,current,False): setattr(vm,current,{})
exec('getattr(vm,"{}")["{}"]=pop()'.format(current, last().name))
) OK 123 constant x
Failed to run <function xt.<locals>.<lambda> at 0x000001C0C39B61E0>: unexpected indent (<string>, line 2)
Continue, Debug, or Abort? [C/d/a] a
OK
--> 對照 ok of 'see constant' 可見得上面問題版的 lambda source code 裡有多的 indent
------------ Definition in dictionary ------------
00456: lambda:exec(
source = '\tpush(getattr(vm,"{}")["{}"])'.format(current, last().name)
last().xt = genxt('constant',source)
if not getattr(vm,current,False): setattr(vm,current,{})
exec('getattr(vm,"{}")["{}"]=pop()'.format(current, last().name))
)
--> 先用醜版面過關取得完整功能, 再來對付它。
--> interpret state ok, try compile --> ok too --> so what's the problem..it's clear
當 <py> 之後跟著兩個 space 時其實這個實驗就已經複製到問題了, 厲害的是要到 test 的
runtime 才會執行 lambda 從而觸發到 unexpected indent ... 難怪這麼難抓!!
: test
<py>
a=1
b=2
c=3
</py> ;
--> breakpoint 在 -indent 當 last==constant 時
code -indent
if debug and last().name=='constant': pdb.set_trace() <--- 斷到了
...snip....
--> constant 改到有問題的 <py>..</py> 版本 --> 看看這時 -indent 收到啥
|(Pdb) p lines
|' \n source = \'\\tpush(getattr(vm,"{}")["{}"])...snip...
^---- 這個 space 就是問題所在了 !!!! 真難找。
--> Root cause: in constant source code, after the <py> an extra space was there!
--> See Ynote : "peforth -indent command indent 問題探討-- 成功了! 扫描_20170828180311 _peforth_"
[X] reset() 能不能強一點? panic() 好幾次很煩....也許有意義?
[x] compyle 裡用到 lambda 來產生 function 有問題!
# 這個可以!
>>> s = '''
... dd = {'a':123,'b':456}
... print(dd)
... v = [dd[i] for i in dd] # 取得所有的 value of a dict
... print(v)
... '''
>>> exec(s) # <----------- 直接執行 exec() 很好,沒問題
{'a': 123, 'b': 456}
[123, 456]
--
# 經過 lambda 之後 local name space 就有怪現象了
# 如下不行了, 這是經過 lambda 之後產生的結果。 compyle command 不要用 lambda . . . .
... s = '''
... dd = {'a':123,'b':456}
... print(dd)
... v = [dd[i] for i in dd] # 取得所有的 value of a dict
... print(v)
... '''
>>> f = lambda:exec(s)
>>> f()
{'a': 123, 'b': 456}
NameError: name 'dd' is not defined
>>>
--> compyle 裡改用 genfunc(source) 來產生 function
----- this snippet works fine ------------
<py>
# python does not support annoymous function. But it supports closure,
# so we can recover it. genfunc("body","args") returns a function which
# is composed by the given source code and arguments.
def genfunc(body,args):
local = {}
source = "def func({}):".format(args)
# args is something like "", or 'x, y=123,z=None'
if body.strip()=="":
source = source+"\n pass\n";
else:
source = (source+'\n{}').format(body)
try:
exec(source,globals(),local)
except Exception as err:
panic("Failed in genfunc(body,{}): {}\nBody:\n{}".format(args,err,body))
local['func'].__doc__ = source
return local['func']
push(genfunc)
</py> constant genfunc // ( -- func ) function generater genfunc(body,args)
genfunc <py> pop()(' print("hi")',"")</pyV> :: ()
\ ==> hi
( arguments ) s" x,y"
( body ) <text>
result = x**2 + y**2
print(result)
</text> -indent
genfunc :> (pop(),pop()) constant f // ( -- func ) f(3,4) prints 25 which is 3^2+4^2
f :: (3,4)
\ ==> 25
----- this snippet works fine ------------
結果:
^D
hi <--- 正確,正確
25
Multiple-line mode is on, Ctrl-D switches it off.
OK
--- genfunc() 進了 project-k kernel -----------
( name ) s" lalala"
( arguments ) s" x,y"
( body ) <text>
result = x**3 + y**3
print(result)
</text> -indent
py> genfunc(pop(),pop(),pop()) constant f f :: (3,4)
# it works fine !!
--- 有問題要到 runtime 才會發現, 故 selftest 很重要 -----------
( name ) s" lalala"
( arguments ) s" x,y"
( body ) <text>
result = x*y
print(resultttttttt)
</text> -indent
py> genfunc(pop(),pop(),pop()) constant f
\ 到這裡都沒問題, 以下執行了才發現問題,而且 error message 線索差很遠
OK f :: (1,2)
Failed in </py> command: name 'resultttttttt' is not defined
Body:
pop()(1,2)
Continue, Debug, or Abort? [C/d/a]
----- it works fine --------------
[x] 改用 genfunc() 取代 lambda 之後, indent 習慣又全變了, 因為 function body
一定要 indent 而與原來的 exec(body) 相反。 共有 <py> py> py: :: :> 這些
東西受影響, 剩下 :: :> 要改 --> all done.
[x] Now without lambda (genfunc instead) test the original problem:
<py>
dd = {'a':123,'b':456}
print(dd)
v = [dd[i] for i in dd] # 取得所有的 value of a dict
print(v)
</py>
results:
{'a': 123, 'b': 456}
[123, 456] <---------------- Pass!!
[x] code compyle
execute('-indent');execute('indent')
若用 dictate('-indent indent') 則無效, 何故?
--> 以下實驗卻又都 ok !
--> RI: 因為當時在 compiling state !! 用 dictate() 的結果是把兩個 words
compile 進去了,既沒效果又出別的問題。
==> 用 dictate() 問題比較多,不能放心亂用。
這兩行 debug trick 技巧留作紀念:
if tos().find('vm.greeting')!=-1: pdb.set_trace()
dictate('-indent indent') # 奇怪, dictate 就不行???
[x] (forget) in trouble now
OK (forget)
Failed to run <function compyle_anonymous at 0x0000018230B22400>: 'Word' object has no attribute 'cfa'
--> 這問題自動好了
[x] improve the greeting when imported from python interpreter
OK py> sys.argv .
['peforth.py'] <------- run from DOS box
>>> import peforth
OK py> sys.argv .
[''] <----------------- run from python interpreter, need more help messages
[x] 整理 try - exception in peforth.f
# 從 python interpreter 裡用 genfunc() 產生 function
>>> f = peforth.vm.genfunc(" 1/0",'','test2')
>>> f
<function test2 at 0x000001B42DB13E18>
# 測試看看,確實會出錯
>>> f()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<string>", line 2, in test2
ZeroDivisionError: division by zero
>>> f
<function test2 at 0x000001B42DB13E18>
# 直接 compile 進 peforth 的字典
>>> peforth.vm.comma(f)
# 進到 peforth 一執行 error message 又是 </py> 發的!
>>> peforth.main()
OK here 1- @ :: ()
Failed in </py> (compiling=False): division by zero
Body:
pop()()
Continue, Debug, or Abort? [C/d/a] a
# 檢查看看,他確實是 test2
OK here 1- @ :> __doc__ .
def test2():
1/0 OK
--> 探討原因,似乎「誰執行的,error message 就打給誰」,這樣應該資訊比較充分。
:: 裡面 interpret state 是 </py>, compiling state 則是 compyle --> 試試看
OK here 1- @ constant f \ 取得 test2 function
OK : test f :: () ; \ 故意讓 :: 的 compiling state 表演
OK test \ 一執行,報錯的變成 phaseB()
Callable in phaseB <function compyle_anonymous at 0x000001CC3771D1E0>: division by zero
Body:
def compyle_anonymous():
pop()()
Continue, Debug, or Abort? [C/d/a] a
--> ^^^^^^^--- 這個 Body information 似乎沒啥用,好像錯了?其實沒錯。
--> 如下,這是 f :: () 這種寫法的結果,沒錯,它的 Body 當然顯示不出 f 的 source code
------------ Definition in dictionary ------------
00711: f __str__ (<class 'projectk.Word'>)
00712: def compyle_anonymous():
pop()() (<class 'function'>)
2 0 LOAD_GLOBAL 0 (pop)
2 CALL_FUNCTION 0
4 CALL_FUNCTION 0
6 POP_TOP
8 LOAD_CONST 0 (None)
10 RETURN_VALUE
00713: RET (<class 'NoneType'>)
------------ End of the difinition ---------------
--> 正確的寫法是 :
OK : test2 [ f , ] ;
OK test2
Callable in phaseB <function test2 at 0x000001CC35113E18>: division by zero
Body:
def test2(): <------------------ 果然顯示出了 除0 的 source code
1/0
Continue, Debug, or Abort? [C/d/a] a
OK see test2
{
... snip...
"cfa": 715
}
------------ Definition in dictionary ------------
00715: def test2():
1/0 (<class 'function'>)
2 0 LOAD_CONST 1 (1)
2 LOAD_CONST 2 (0)
4 BINARY_TRUE_DIVIDE
6 POP_TOP
8 LOAD_CONST 0 (None)
10 RETURN_VALUE
00716: RET (<class 'NoneType'>)
------------ End of the difinition ---------------
OK
--> 即使在 interpret state 也不一定讓 </py> 來報錯(描述不精確),如下:
OK f py: execute(pop())
Callable in phaseB <function test2 at 0x000001CC35113E18>: division by zero
Body:
def test2(): <----------------- 直接就看到真正的 source code
1/0
Continue, Debug, or Abort? [C/d/a]
--> try: exception: 以後繼續改進。。。。。。
[x] multiple lines of tib. are not showing correctly.
--> try test.f
111 tib.
222 tib.
333 tib.
--> I've got it. From clipboard is ok, from accept2 is not.
OK ^D
111 tib.
222 tib.
333 tib.
^D
111 \ ==> 111 (<class 'int'>)
111 \ ==> 222 (<class 'int'>)
111 \ ==> 333 (<class 'int'>)
OK
--> fixed
[x] RET at end of dictionary is expected but missing <--- problem!!
--> improve (dump) d dump --> ok now
[x] Oh, my God! peforth can be a debugger or an endoscope into running python code:
<py>
any python code; peforth is available e.g. push()
push(123);import peforth;peforth.main() # enter peforth break point, wonderful !!
</py>
--> The way to enter peforth interpreter is not very good, though it's clear.
--> ok now, the breakpoint usage is :
push(locals());ok('111>>')
    ==> python -i by itself already returns to the python interpreter so the result of a run
        can be analysed statically after the fact.
        Wrote endo.py (see my ynote) as an alternative to pdb, for examining program state
        right at a breakpoint.
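    A hypothetical usage sketch of the breakpoint idea (train_step/x/y are made-up names; the
    ok(prompt, loc=...) form is the one described a few entries below):
        import peforth
        def train_step(x, y):
            loss = (x - y) ** 2
            peforth.ok('bp> ', loc=locals())   # drop into the peforth REPL right here
            return loss
        train_step(3, 5)    # at the 'bp> ' prompt inspect the locals; 'exit' resumes the run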
[x] 手動 install peforth 的方法 see my ynote
[x] peforth package 裡面 __init__.py 就是 peforth.py 也就是 __main__.py
[x] 這時候要解決的是 peforth.f , quit.f 的 path , 用 __path__[0] 即可。
[x] import projectk.py as vm 要改成 from . import projectk.py as vm 把 path 指定清楚
[x] projectk.py 裡面用 vm = __import__(__name__) 在 package 裡不適用
改由 __init__.py 來填 vm.vm = vm 即可。
==> 成功了 !
手動安裝
========
1. 把本 project 的四個檔案 projectk.py quit.f peforth.f __main__.py 全部 copy 到如下新創建的 folder: c:\Users\yourname\AppData\Local\Programs\Python\Python36\Lib\site-packages\peforth
2. 把其中 __main__.py 多 copy 一份成 __init__.py 即可。
執行 peforth 有四個方式
=======================
1. 從 project folder 下執行 python __main__.py OK 後打 : test .’ hello world!’ cr ; test 印出 hello world! 打 bye 離開。
2. 從 project folder 外面執行 python peforth OK 後打 : test .’ hello world!’ cr ; test 印出 hello world! 打 bye 離開。
3. 安裝好 peforth package 之後,任意 folder 下執行 python -m peforth 後同上。
4. 安裝好 peforth package 之後,任意 folder 下執行 python 然後 import peforth 然後按照指示打 peforth.main() 進入 peforth 後同上。
[x] why peforth? why endo.py? 一個 object 用來保存被觀察的 locals 不就好了?
1. indent 自由
2. 現成的 tool, forth 可以記住很多命令, 複雜的 command 可以臨時組合而成
[x] peforth 既然可以是個 python debug 學習工具,拿 peforth 來當 breakpoint 就要盡量簡單。
--> The REPL, peforth.main(), renamed to peforth.ok()
REPL, or Read-Eval-Print-Loop.
--> peforth.ok(prompt='OK ',loc={}) for user to specify the prompt and giving the locals
at the moment.
--> at the point ok() started, TOS is the tuple with information from the caller.
The data stack was supposed to be empty, here after it won't be.
--> The TOS provides the prompt, the locals
[x] debug command 不要了, 會跟 py> debug which is vm.debug 撞名,沒必要增加這個問題。
[X] I found a python problem!!
False==0 is True, False<=0 is True, False<=0.1 is True
False<0.0001 is True, False<-0.1 is False
    This bit me when using debug to filter which breakpoints are active: debug starts out as
    False, and yet debug<=33 evaluates as true!
2019/11/25 10:26:06
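    Demonstrated in a few lines, plus the guard that avoids the trap (the name debug is just
    the one from the story above):
        print(False == 0, False <= 33, isinstance(False, int))   # ==> True True True  (bool is an int subclass)
        debug = False
        if isinstance(debug, bool):        # treat a bare False as "breakpoint disabled"
            pass
        elif debug <= 33:                  # only compare when debug is really a number
            pass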
[x] ." a" prints an extra space <--- problem
RI: dot . command 早期為了 debug 好看,有多印一個 space 可以不要了。
[x] peforth.path to indicates the home directory where peforth.f is
[x] IDLE generates keyboardinterrupts
try-except can fix it http://effbot.org/zone/stupid-exceptions-keyboardinterrupt.htm
    --> Rewrote accept with a try-except check so that the KeyboardInterrupt raised while IDLE
        resizes its window can no longer escape by accident.
    --> The resize-window KeyboardInterrupt is fixed, but Ctrl-D is unusable there; to enter
        multiple lines use <accept> ... </accept> tib.insert instead.
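    A minimal sketch of the try-except idea (not the real accept in the kernel):
        def accept():
            try:
                return input(), True
            except KeyboardInterrupt:      # IDLE raises this e.g. while the window is resized
                return "", False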
[x] How should peforth's version number be single-sourced when packaging the whl?
    The guide "Single sourcing the version" offers several options:
    https://packaging.python.org/guides/single-sourcing-package-version/#single-sourcing-the-version
    I chose the version.txt file approach, which seems similar to jeforth.3we.
    peforth/version.txt contains one python statement that every related place refers to.
    [X] As a result projectk.major_version stays in projectk.py from now on without being used directly.
    __version__ = "1.02"
    Experiments that turned out suitable for setup.py are as follows:
dropall cls
<accept>
<py>
loc = {} # locals
with open(v('package-directory')+"peforth\\"+"version.txt") as fp:
exec(fp.read(),{},loc )
# later on we use: loc['__version__']
push(loc)
print('loc[\'__version__\'] is ',loc['__version__'])
</py>
</accept>
tib.insert
.s
實際在 setup.py 裡的程式:
loc = {} # locals
with open("peforth/version.txt") as fp:
exec(fp.read(),{},loc ) # later on we use: loc['__version__']
version=loc['__version__'] # Refered in setup(...) as an argument
在 peforth/__main__.py 裡的程式:
# Get version code from peforth/version.txt for whl package
# to see the single source of version code.
exec(readTextFile(path + "version.txt"),{},locals())
vm.version = __version__
[x] Improve (see) to see source code from project-k
OK py> reset (see) <--- no good so far
{
"__class__": "function",
"__module__": "peforth.projectk"
}
py> reset.__doc__ tib. \ ==> None (<class 'NoneType'>)
py> reset.__code__ tib. \ ==> <code object reset at 0x000001CE712C2810, file "C:\Users\hcche\AppData\Local\Programs\Python\Python36\lib\site-packages\peforth\projectk.py", line 42> (<class 'code'>)
__code__ is the chance to improve.
[x] (see) only sees class and module, that can be improved to include some more e.g. __code__
==> circular reference detected 無法解決, 暫用 .members .source 應付。
[x] 決心用 3hta 寫一個 pykb.f 專門用來給 peforth 當 keyboard input to support multiple lines
find the process ID of peforth for sendkey
s" where name like '%python%'" see-process
--> 已經完成 include pykb.f 之後,用 {F7} 把 inputbox 下給 python
[x] T550 上 activate-shell 無效,sendkeys 還好。但是 git.f 卻又好好的。
--> 似乎從 __main__.py 直接執行的 python 是切不過去的,經由 DOS Box 跑起來的才可以。
> include git.f \ 對照看看為何人家可以?
> s" where name like '%python%'" list-them
python.exe:8212 \ 查出 python (直接 double click __main__.py 起來的)
> WshShell :: appActivate(8212)
> launch-git-shell
> shellId . ==> 1608 \ 查出 git shell
> WshShell :: appActivate(1608) \ 這個可以切過去
> WshShell :: appActivate(8212) \ 這個就不行
--> 如果退出 python 則該 DOS Box 能 activate 嗎?
> s" where name like '%cmd%'" list-them
TOTALCMD64.EXE:20780
cmd.exe:22848
cmd.exe:9556
> WshShell :: appActivate(20780) 可以切到 total commander
> WshShell :: appActivate(22848) 可以切到剛退出 peforth 的 DOS Box
> WshShell :: appActivate(9556) 這個不知是啥,切不過去!
用 see-process 看進去,竟然可能是 Google Chrome 的東西
string Name; cmd.exe
uint32 ProcessId; 9556
string Caption; cmd.exe
string CommandLine; C:\WINDOWS\system32\cmd.exe /d /c "C:\Users\hcche\AppData\Local\youdao\Dict\Application\stable\text_extractor_host.exe" chrome-extension://aohddidmgooofkgohkbkaohadkolgejj/ --parent-window=0 < \\.\pipe\chrome.nativeMessaging.in.53dc641bdd08e0c9 > \\.\pipe\chrome.nativeMessaging.out.53dc641bdd08e0c9
string CreationClassName; Win32_Process
--> 所以切不到某些 process 是有的,何解?
進一步研究發現,這個 python 是從 Anaconda3 run 起來的
> s" where name like '%python%'" list-them
python.exe:20092
> WshShell :: appActivate(20092)
string Name; python.exe
uint32 ProcessId; 20092
string CommandLine; C:\ProgramData\Anaconda3\python.exe "C:\Users\hcche\Documents\GitHub\peforth\__main__.py"
--> Not root cause. 即使 Anaconda 的 python 也能切過去,只要。。。
--> 把 Title 改成 peforth 吧!看看是否改得到所在的 cmd or powershell
DOS command c:\> title titlename can change the doxbox title but it's not
a process attribute so it doen't help.
--> 所以答案是: 直接跑 __main__.py 或經過 dosbox 都可能行或不行,
process ID 可以用 nnnn to processid 指定的,就算了吧!
--> 多印些 info 讓 user 自己手動設 processid, Done!
[x] improve .members --> __class__ attribute can easily be circularly deep and long
m py> inspect.getmembers(pop()) py> str(pop()) tib.
[x] try to str(obj) then json.loads(string) and then json.dumps
--> str() generates non-json 不行!
--> 暫時放棄了
[x] C:\Users\hcche\Documents\GitHub\Morvan\tutorials\tensorflowTUT\tensorflow6_session.f
如何一口氣把所有的 python section variables 都變成 forth values?
l :> keys() tib. \ ==> dict_keys(
['result2', 'result', 'sess', 'product', 'matrix2', 'matrix1', 'tf']
) (<class 'dict_keys'>)
--> 要能 programmatically 產生 constant --> 改寫 constant 得 (constant)
: (constant) ( n "name" -- ) // Create a constnat
(create) <py>
source = ' push(getattr(vm,"{}")["{}"])'.format(current, last().name)
last().xt = genxt('constant',source)
if not getattr(vm,current,False): setattr(vm,current,{})
exec('getattr(vm,"{}")["{}"]=pop()'.format(current, last().name))
</py>
reveal ;
OK 123 char x (constant)
OK x . ==> 123 OK
--> 一把就成功了! 能不能用在 colon definition 裡面?
: test 234 char y (constant) ;
test
y . ==> 234 成功!
--> 有了 (constant) 應該就可以自動產生所有的 locals() 了
==> ok now! vm.outport(loc) defined in quit.f
[x] Install peforth from source
---- 早期 (1.22 版以前) 不懂得用 python setup.py install 時的替代方法 ----
a command to update the peforth module
@ c:\Users\...\Python36\Lib\site-packages\peforth\..
Get the path
import os.path as ospath
# py> pdb :> __file__ tib. \ ==> C:\Users\hcche\AppData\Local\Programs\Python\Python36\lib\pdb.py (<class 'str'>)
# py> ospath.dirname(pdb.__file__) tib. \ ==> C:\Users\hcche\AppData\Local\Programs\Python\Python36\lib (<class 'str'>)
# py> ospath.split(pdb.__file__) tib. \ ==> ('C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\lib', 'pdb.py') (<class 'tuple'>)
# py> ospath.splitdrive(pdb.__file__) tib. \ ==> ('C:', '\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\lib\\pdb.py') (<class 'tuple'>)
# py> ospath.splitext(pdb.__file__) tib. \ ==> ('C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\lib\\pdb', '.py') (<class 'tuple'>)
# py> ospath.splitunc(pdb.__file__) tib. \ ==> ('', 'C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\lib\\pdb.py') (<class 'tuple'>)
py> ospath.dirname(pdb.__file__)+"\\site-packages\\peforth\\" ( targetPath )
getenv(key, default=None)
Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default.
key, default and the result are str.
getenv compare with py> ospath.dirname(pdb.__file__)
if same then proceed the patch program to copy all files
if not then warning and stop
算了,直接 copy 就好了
------ update.bat ------
set pythonlib=C:\Users\hcche\AppData\Local\Programs\Python\Python36\lib
copy -y version.txt %pythonlib%\site-packages\peforth
copy -y projectk.py %pythonlib%\site-packages\peforth
copy -y __main__.py %pythonlib%\site-packages\peforth
copy -y __init__.py %pythonlib%\site-packages\peforth
copy -y peforth.f %pythonlib%\site-packages\peforth
copy -y quit.f %pythonlib%\site-packages\peforth
------ ------ ------ ------ ------
[x] 發現 pip help install 列出了 pip install 的種種用法。
update.bat 直接從 project directly update peforth package 到
lib\site-packages\peforth 的方式太暴力了。
--> Try this example from pip help install :
pip install [options] [-e] <local project path> ...
[X] 有待研究 14:33 18/05/21 v1.16 試用結果,失敗:
c:\Users\hcche\Documents\GitHub>pip install -e peforth
Obtaining file:///C:/Users/hcche/Documents/GitHub/peforth
Missing build time requirements in pyproject.toml for file:///C:/Users/hcche/Documents/GitHub/peforth: 'setuptools' and 'wheel'.
This version of pip does not implement PEP 517 so it cannot build a wheel without 'setuptools' and 'wheel'.
Installing build dependencies ... done
Complete output from command python setup.py egg_info:
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:\Users\hcche\Documents\GitHub\peforth\setup.py", line 9, in <module>
with open("peforth/version.txt") as fp:
FileNotFoundError: [Errno 2] No such file or directory: 'peforth/version.txt'
----------------------------------------
Command "python setup.py egg_info" failed with error code 1 in C:\Users\hcche\Documents\GitHub\peforth\
c:\Users\hcche\Documents\GitHub>
[x] Ynote: "研究 install peforth from source 的方法" 已經成功。
[/] jump to 遙遠的下面 "---- 2018.12.15 懂得用 python setup.py install 需要修改 ----"
-
[/] 螢幕編輯器
os.get_terminal_size(...)
Return the size of the terminal window as (columns, lines).
[x] (forget) had an error
    'Word' object has no attribute 'cfa' <-- just use getattr(obj,name,None).
[x] peforth 1.3 uploaded to pypi. 準備來寫 wiki 介紹怎麼
應用 peforth 來學習 TensorFlow.
--> Done https://github.com/hcchengithub/peforth/wiki/Example-4-Examine-a-Machine-Learning-exercise
[x] 繼續完成 peforth.f 的 selftest 元件
--> string 轉譯成 array [d ... d] [r ... r] 要用到
: test2 char 123,456 s" [{}]" :> format(pop()) py> eval(pop()) ;
--> String.indexOf 改成 String.find
\ Selftest 要 redirect print() 方便取得並檢查螢幕輸出的內容。
\ 這是個 redirect print() 的有效範例
\ Selftest 要 redirect print() 方便取得並檢查螢幕輸出的內容。
\ 改寫成輸出到 buffer. See http://www.cnblogs.com/turtle-fly/p/3280519.html
<accept>
py> [""] value screen-buffer // ( -- 'string' ) Selftest screen buffer
<py>
class Screenbuffer:
def __init__(self,buf):
self.stdoutwas=sys.stdout
self.buffer=buf
def write(self, output_stream):
self.buffer[0] += output_stream
def view(self):
self.stdoutwas.write(self.buffer[0])
def reset(self):
sys.stdout=self.stdoutwas
vm.Screenbuffer=Screenbuffer
# redirection
sys.stdout=Screenbuffer(vm.forth['screen-buffer'])
# print to screen buffer
sys.stdout.stdoutwas.write("-------1111-----\n")
print( 'hello')
print( 'world')
sys.stdout.stdoutwas.write("-------2222-----\n")
# view screen buffer
sys.stdout.view()
# reset
sys.stdout.reset()
outport(locals())
</py>
</accept>
tib.insert
[x] 探討,整理,討論幾種產生 function 或執行 inline python code 的方法
1. projectk.py genxt() 有 __doc__ 專為 code word xt 硬性 _me argument
2. projectk.py genfunc() 有 __doc__ 一般用途 name args body
3. peforth.f compyle 產生一般用途的 annonymous function 沒有 args
4. <py>...</py> 前後都是 immediate 正常使用沒問題。但若想先組合好 source code 再讓
</py> or </pyV> 去執行,就有變化了。以下 try, try2 兩個都是有意義的、可以的
OK : try char 123 [compile] </pyV> ;
OK try .
123OK
OK : try2 [ char 123 ] </pyV> ;
OK try2 .
123OK
但是下面這個其實是不知所云的:
OK : try3 char 123 </pyV> ;
其結果也是莫名其妙的:
Error! try3 unknown.
OK
    5. Directly use exec() / eval() to run source strings composed on the fly, e.g. the definitions of [r [d [p.
    6. Directly use compile(), genfunc() -- probably won't be needed, I guess?
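    A minimal plain-Python sketch of approach 5 -- compose source text at run time, exec() it into a fresh
    namespace, and pull the resulting function back out. The helper name make_pusher is made up for
    illustration; it is not the actual peforth internal.
    ------ sketch: exec() a composed string ------
    def make_pusher(value):
        src = "def _anon():\n    return {!r}\n".format(value)
        ns = {}
        exec(src, ns)          # compile and run the composed source
        return ns["_anon"]     # fetch the new function object back out
    f = make_pusher(123)
    print(f())                 # 123
    ------ ------ ------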
[x] 有很嚴重的 bug
OK : test <py> 123 </pyV> ;
OK see test
...snip...
------------ Definition in dictionary ------------
00784: def compyle_anonymous():
push(123 ) (<class 'function'>)
2 0 LOAD_GLOBAL 0 (push)
2 LOAD_CONST 1 (123)
4 CALL_FUNCTION 1
6 POP_TOP
8 LOAD_CONST 0 (None)
10 RETURN_VALUE
00785: RET (<class 'NoneType'>)
------------ End of the difinition ---------------
OK
OK : test py> "abc" ;
reDef test
OK see test <----------- 沒反應!
OK ' test (see) <----------- 沒反應!
OK ' test . <----------- 沒反應!
--> 順序倒過來怎樣? 先試 : test py> "abc" ;
--> OK 一切正常
--> 再一個空的東西 : nothing ; --> 也正常!
--> 就是不能有 inline python? : test2 py> 1234 ;
--> OK 一切正常
--> 整個重來,那這樣呢?
: test <py> 123 </pyV> ; : test2 py> "abc" ;
--> 都 OK, 算了,不了了之。可能是寫 selftest 捅出來的哈哈題。
[x] python or javascript can't access by address, so how do we
    access by reference instead of by value? (call by name / call by address / call by reference)
    Yesterday, while writing the selftest, getting hold of the screen buffer meant defining it as
    py> [""] value screen-buffer // ( -- ['string'] ) Selftest screen buffer
    rather than
    py> "" value screen-buffer // ( -- 'string' ) Selftest screen buffer
    otherwise that particular string can't be reached from outside.
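    A plain-Python sketch of why the one-element list works: rebinding a str inside a function only changes
    the local name, while assigning to list element [0] mutates the shared object, so every holder of the
    list sees the new text.
    ------ sketch: why [""] instead of "" ------
    def append_to_str(buf):
        buf = buf + "hello"        # rebinds the local name only
    def append_to_list(buf):
        buf[0] = buf[0] + "hello"  # mutates the shared list object
    s = ""
    append_to_str(s)
    print(repr(s))                 # '' -- the caller never sees the change
    l = [""]
    append_to_list(l)
    print(repr(l[0]))              # 'hello' -- the change is visible everywhere
    ------ ------ ------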
[x] 照著 MetaMoji 2017-9-17 15:15 的討論, 研究把 <selftest> sections 都 dump 出來的辦法。
--> 從 quit.f 裡一查即知, 應該是一行解決:
py> tick('<selftest>').buffer char peforth-selftest.f writeTextFile stop
--> 成功!
--> 此後就是改寫 peforth-selftest.f 而已。
[x] (constant) 遇到 reDef writeTextFile 會議常中止 --> 不能用 panic 警告,用 print 即可。
[x] About to release peforth v1.4
1. py:~ py>~ ::~ :>~ are so good to have.
2. selftest not completed yet but nice to have some
Release steps see Ynote: "Pack peforth to peforth.whl" > 打包步驟。
[x] v1.4 released, from now on v1.5
[x] 有了 argv 就不要有 greeting 也不要 reDef warnings.
--> 所以要提早取得 command line, quit.f 太晚了。
--> Done!
[x] PyPI README.rst 有辦法了 可查看 rst2html 也可以 convert from markdown
https://stackoverflow.com/questions/26737222/pypi-description-markdown-doesnt-work
--> 先用 pypandoc module 用轉的看看
py:~ import pypandoc; push(pypandoc)
constant pypandoc // ( -- module )
pypandoc :: convert('README.md','rst')
Failed in </py> (compiling=False): No pandoc was found:
either install pandoc and add it to your PATH or or call
pypandoc.download_pandoc(...) or install pypandoc wheels
with included pandoc.
--> OK pypandoc :> download_pandoc py: help(pop())
Help on function download_pandoc in module pypandoc.pandoc_download:
download_pandoc(url=None, targetfolder=None, version='latest')
Download and unpack pandoc
Downloads prebuild binaries for pandoc from `url` and unpacks it into
`targetfolder`.
:param str url: URL for the to be downloaded pandoc binary distribution for
the platform under which this python runs. If no `url` is give, uses
the latest available release at the time pypandoc was released.
:param str targetfolder: directory, where the binaries should be installed
to. If no `targetfolder` is give, uses a platform specific user
location: `~/bin` on Linux, `~/Applications/pandoc` on Mac OS X, and
`~\AppData\Local\Pandoc` on Windows.
OK pypandoc :: download_pandoc()
* Downloading pandoc from https://github.com/jgm/pandoc/releases/download/1.19.2.1/pandoc-1.19.2.1-windows.msi ...
    --> the download just wouldn't finish... annoying
    --> http://pandoc.org/ has an online converter; convert README.md to README.rst by hand, a section at a time!
        pandoc.org specializes in converting between all kinds of document formats.
--> Online reStructuredText editor http://rst.ninjs.org/
--> Yes!!
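    For the record, a hedged sketch of the pypandoc route, assuming the pandoc binary is already installed
    and on PATH (the download above kept stalling). convert_file is the newer spelling of pypandoc's convert API.
    ------ sketch: README.md -> README.rst with pypandoc ------
    import pypandoc
    rst = pypandoc.convert_file("README.md", "rst")   # Markdown -> reStructuredText in memory
    with open("README.rst", "w", encoding="utf-8") as f:
        f.write(rst)
    ------ ------ ------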
[x] Release v1.5
[x] 把 update.bat setup.bat setup.py 等等統一起來
--> 抄 3we 的 setup.bat
--> done!
[x] Example in comment of the "words" command needs improvement
--> 整個改良了,如今可以接受 pattern
[x] alias 要繼承原來的 help & comment 嗎? 整個檢查看看。。。
--> 要,但是 // 改成只有現有的 help 是 "(...)" 才用尾綴的,否則都用取代的。
[x] Bug: (see) unexpectedly leaves the given tos on the data stack if it's not a Word.
[x] 發現 python 應該也能執行 WshShell 因此可能不用靠 jeforth.3hta pykb.f
[x] 錄 elearning 介紹 peforth
[ ] wiki 介紹 py: help(genxt) py> genxt .source ' + . members 等好用的東西
--> 唉,當時的層次真是,不好說啊!好是好,推薦自己的好東西,沒有照
顧 user 的需求。
[X] 把網頁或至少 3hta 變成 peforth 的 input box, 解決 multiple line input 的問題。
--> 從 peforth 去 launch 3hta include pykb.f
--> python 能不能知道自己是誰 run 的?如果知道,就可以解決 Wsh.sendkey() 不知往哪 send
的問題。
--> 有 ^D, ipython, jupyter notebook 等方法了。
[x] peforth.f selftest almost done, still many to go:
<py> [w.name for w in words['forth'][1:] if 'pass'!=getattr(w,'selftest',False)] </pyV> cr . cr
[x] 最進新發現的 bug 特別要加進 selftest
--> (see) none Word 之後 stack 沒清乾淨 --> 有兩條路的 words 都可疑!
--> 甚至,例如 words 最後有多印一個 print() 也是測試的重點。
[/] readTextFile, writeTextFile 好像都還不能用 -- haha bug 被 inport 取代掉的
[x] display-off 之後 on 不回來,突然發生,很奇怪。沒了 display 不好 debug.
display-off 之內如果是 ." hello world" 就沒問題。
是 words help 才有問題,而且是卡在 words 裡面了 <== 因為 words 出錯,導致 display-on
沒 run 到。只要在 words 後面加上 e 就好了。表示是 nexttoken() 又出問題了,從
檔案裡執行(而非console command line)時它會繞過 CRLF 往下抓到下一個 token 因此
程式都亂了,特別是被他抓走的正好就是 display-on 螢幕都沒了,所以難搞。
這類 nexttoken 可有可無的命令在 selftest 時都可能有問題。
應該都會被挑出來。
[x] review 所有用到 word 以及 nexttoken() 的地方。。。
[x] python -i -m peforth version
exit 之後會有 error ---> 不見了!可能是 WshShell win32 package
--> 又來了!!!
c:\Users\hcche\Documents\GitHub\peforth>python -i __main__.py
p e f o r t h v1.07
source code http://github.com/hcchengithub/peforth
*** Start self-test
*** End of peforth.f self-test ... pass
OK bye
Traceback (most recent call last):
File "__main__.py", line 129, in <module>
ok()
.... snip ......
File "C:\Users\hcche\AppData\Local\Programs\Python\Python36\lib\_sitebuiltins.py", line 26, in __call__
raise SystemExit(code)
SystemExit: None
>>>
--> can repro? python -i __main__.py and bye ... Yes, repro'ed
--> OK py: exit() <--- can repro
OK py: exit(0) <--- repro too!!!
--> can it repro on a simplified ~.py instead of peforth?
--> Yes!! as simple as only one statement in test.py :
c:\Users\hcche\Downloads>type test.py
exit()
c:\Users\hcche\Downloads>python -i test.py
Traceback (most recent call last):
File "test.py", line 1, in <module>
exit()
File "C:\Users\hcche\AppData\Local\Programs\Python\Python36\lib\_sitebuiltins.py", line 26, in __call__
raise SystemExit(code)
SystemExit: None
>>> exit()
c:\Users\hcche\Downloads>
    --> So it's not really a bug: this is the normal behavior of python's -i switch.
    --> Having bye use os._exit(0) instead of exit() fixes the problem.
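    A minimal sketch of the difference: exit()/sys.exit() raise SystemExit, which python -i reports as a
    traceback before dropping to >>>, while os._exit() terminates the process immediately without unwinding
    (it also skips atexit handlers), so -i never gets a chance to complain.
    ------ sketch: bye via os._exit ------
    import os
    def bye(exit_code=0):
        os._exit(exit_code)    # hard exit; bypasses SystemExit and python -i
    # exit(code) / sys.exit(code) would instead do: raise SystemExit(code)
    ------ ------ ------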
[x] exit command to set vm.exit=True to stop the ok() in __main__.py
[/] add bp('prompt') in addition to ok() to avoid the unnecesary awkward
breakpoint instruction
--> Listen to users, don't assume. ok(prompt,loc,cmd) arguments are
all very useful.
[x] how to get vm's parent? so as to show greeting message differently
for different situations. i.e. ok() or peforth.ok() to enter peforth
interpreter
--> 本來的目的不知能不能達到,有 parent 的 data 總是好的。
[x] Bug found
c:\Users\hcche\Documents\GitHub\peforth>python -i -m peforth exit
OK
OK <=== python interpreter prompte expected
--> 因為 vm.exit 有兩個!!!!
peforth module __init__.py __main__.py 的關係不是一個!!!
module 裡面的 __main__.py 專供 -m 執行用,改寫看看。。。。
==> 簡化整個執行方式,決心放棄從 project folder 執行。 ---> 2019-05-11 重新 study 有成
只保留 import peforth 或 python -m peforth 兩種。
--> Since commit c3d7677 on Oct 8, 2017
[x] 因應新檔案配置,setup.bat 的自動化晚點再做 --> Done
[x] Tests before a Release v1.07
[x] 所有 run 法帶 selftest 跑一遍
[x] Run setup.bat 做出有 selftest 的 wheel
[x] pip uninstall peforth
[x] pip install peforth <==== 啊!不行,會上網抓。
pip install 剛做好的 wheel
[x] 1. __main__.py [/] selfttest [/] greeting [/] exit [/] bye
[x] 2. python __main__.py version drop [/] .s words [/] exit [/] bye
[x] 3. python -i __main__.py [/] selfttest [/] greeting [/] exit [/] bye
[x] 4. python -i __main__.py version drop [/] .s [/] exit [/] bye
[x] 5. python -i -m peforth [/] selftest .s words exit
[x] 6. python -i -m peforth version drop
[x] 7. python import peforth
[/] selftest peforth.ok() .s words <--- no parent
[/] 1234 bye check echo %errorlevel%
[x] 所有 run 不帶 selftest 再跑一遍
[x] Run setup.bat 做出取消 selftest 的 wheel
[x] pip uninstall peforth
[x] pip install peforth <==== 啊!不行,會上網抓。
pip install 剛做好的 wheel
[x] 1. __main__.py [/] selfttest [/] greeting [/] exit [/] bye
[x] 2. python -i -m peforth [/] selftest .s words exit bye
[x] 3. python -i -m peforth .' Hello World!!' cr bye
[x] 4. python import peforth
[x] 考慮 README.rst 改良
--> GitHub 版的先弄好
[x] hello world
Ynote: 草稿 peforth wiki article hello world _wiki_
[x] README.md --> README.rst by http://rst.ninjs.org
[x] These words should be moved into selftest section
'description', 'expected_rstack', 'expected_stack', 'test-result',
'[all-pass]', '***', 'all-pass', '[r', 'r]', '[d', 'd]']
[x] while display-off dispaly-on should be moved out!
[x] a new word to include python file directly -- pyclude
supports commands after #__peforth__ comment by simply removing
all #__peforth__
Also comment out "from __future__ import print_function" lines
1. read the file
2. find all #__peforth__ replace with null
3. find "from __future__ import print_function" comment it out.
4. -indent indent
5. add <py> and </py>
6. tib.insert the string
: pyclude ( <pathname.py> -- ... ) // Run the .py file in a <PY>..</PY> space
CR word readTextFile py> re.sub("#__peforth__","",pop())
py> re.sub(r"(from\s+__future__\s+import\s+print_function)",r"#\1",pop())
-indent indent <py> " <p" + "y>\n" + pop() + "\n </p" + "y>\n" </pyV>
tib.insert ;
/// Auto-remove all #__peforth__ marks so we can add debug
/// statements what are only visible when debugging.
/// Auto comment out "from __future__ import print_function"
/// that is not allowed when in a <PY>..</PY> space.
[x] tib.insert is dictate now, an alias.
[x] Tests before a Release v1.08
[x] 所有 run 法帶 selftest 跑一遍
[x] Run setup.bat 做出有 selftest 的 wheel
[x] pip uninstall peforth
[x] pip install peforth <==== 啊!不行,會上網抓。
pip install 剛做好的 wheel
[x] 1. python -i -m peforth [/] selftest .s words exit
[x] 2. python -i -m peforth version drop
[x] 3. python import peforth
[x] selftest peforth.ok() .s words <--- no parent
[x] 1234 bye check echo %errorlevel%
[x] 所有 run 不帶 selftest 再跑一遍
[x] Run setup.bat 做出取消 selftest 的 wheel <-- 注意!改的是 site-packages\peforth
[x] pip uninstall peforth
[x] pip install peforth <==== 啊!不行,會上網抓。
pip install 剛做好的 wheel
[x] 1. python -i -m peforth [/] selftest .s words exit bye
[x] 2. python -i -m peforth .' Hello World!!' cr bye
[x] 3. python import peforth
[x] 考慮 README.rst 改良
[x] version.txt advanced to v1.09
[x] the way I get the path is not good, data files are in a separated folder
in ubuntu. I have to manually copy data files to lib/python3.5
Copy : none .py files are in ~/.local/lib/site-packages/peforth
peforth.f peforth.selftest quit.f version.txt
To : .py files are in ~/.local/lib/python3.5/site-packages/peforth
__init__.py __main__.py projectk.py
Solutions I found on Stackoverflow are bad, do it manually is fine.
[x] A wiki page discusses this. done.
[/] 有機會解掉了。Search my Ynote: "2018/01/17 16:39 插曲,意外發現查出 python
的東西都放哪裡的方法了!peforth 在 Ubuntu 上跑可能有救了。_peforth_
_ubuntu_"
[/] Study this :
c:\Users\hcche\Documents\GitHub\DeepSpeech>py -m site
sys.path = [
'c:\\Users\\hcche\\Documents\\GitHub\\DeepSpeech',
'C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\python36.zip',
'C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\DLLs',
'C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\lib',
'C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36',
'C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\lib\\site-packages',
'C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\lib\\site-packages\\win32',
'C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\lib\\site-packages\\win32\\lib',
'C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\lib\\site-packages\\Pythonwin',
]
USER_BASE: 'C:\\Users\\hcche\\AppData\\Roaming\\Python' (doesn't exist)
USER_SITE: 'C:\\Users\\hcche\\AppData\\Roaming\\Python\\Python36\\site-packages' (doesn't exist)
ENABLE_USER_SITE: True
c:\Users\hcche\Documents\GitHub\DeepSpeech>
[/] 從 c:\Users\hcche\Documents\GitHub\Morvan\tutorials\tensorflowTUT\tf17_dropout\full_code-2.py
裡,開發 harry_port() 的經驗看來,有了這麼強大的工具之後,用它臨時定義出來的
words 不希望隨著 breakpoint 結束而被 --- marker --- 清除。怎麼辦?
1. 要保留的東西放到 tutorial 的前面,或先 include 另一個 tool kit
--> 這個好!
2. 如果不用 marker (因為我的 marker 太強了,跨 vocabulary 全清!)
就是要有 forget 能單清本 current vocabulary 的 words 到 ─── 為止。
3. 而且要有 vocabulary, 把要保留的 words 定義到 root 去,平時在 tutorial
vocabulary 工作。
[x] 這個 interpreter for loop 有何問題?
OK 3 [for] t@ 100 + int digit [next]
Failed in </py> (compiling=False): pop from empty list
Body:
push(pop().data[pop()])
OK
==> 問題可能是出在 digit 裡面用到 <text>...</text> dictate 的 macro 形式
證實了,因為不用該 macro 就好了
[/] harry_port() 的使用技巧,要寫進 help 裡!像這個例子,不能用 <py>...</py> block
因為它會先 compile 而這個應用不能先被 compile :
OK <text> locals().update(harry_port());
batch_X, batch_Y = mnist.train.next_batch(100); outport(locals()) </text>
py: exec(pop())
[x] exit 不夠力,會往下做。要再補個 stop 才行。
code stop reset() end-code // ( -- ) Stop the TIB loop
code exit
if compiling: comma(EXIT)
else: vm.exit=True ; reset() <---- 補 reset() 即 stop
end-code immediate
// ( -- ) Exit this colon word.
靠!意外發現這個 bug !! 其實早就看到 exit 之後會暴衝,沒太在意。
[x] <accept> nop </accept> 同一行不行,要改良嗎? ---> Done!
[x] Tests before a Release v1.09
[x] 所有 run 法帶 selftest 跑一遍
[x] Run setup.bat 做出有 selftest 的 wheel
[x] pip uninstall peforth
[x] pip install peforth-xxxx.whl <== 注意!用剛做好的 wheel 否則會上網抓。
[x] 1. python -i -m peforth [/] selftest .s words exit
[x] 2. python -i -m peforth version drop
[x] 3. python import peforth
[x] selftest peforth.ok() .s words <--- no parent
[x] 1234 bye check echo %errorlevel%
[x] 所有 run 不帶 selftest 再跑一遍
[x] 注意!改的是 site-packages\peforth\quit.f 所以要
在 setup.bat 做 wheel 以前插入這個動作!!!!!
Run setup.bat 做出取消 selftest 的 wheel
[x] pip uninstall peforth
[x] pip install peforth-xxxx.whl <== 注意!用剛做好的 wheel 否則會上網抓。
[x] 1. python -i -m peforth [x] selftest .s words exit bye
[x] 2. python -i -m peforth .' Hello World!!' cr bye
[x] 3. python import peforth
[x] 考慮 README.rst 改良
[x] version 改成 1.11 (必須跳過 1.10 會變成 1.1)
[/] -indent 可以更聰明一點,目的讓 <text>...</text> 內部更自由。
當 </text> 所在行是 blank line 時,就用它的長度當作 -indent 的最小值,這
麼一來 <text> 之後就可以接著放東西。那它的 space 數比 </text> 之前小,就
會被「加長」到「最小值」。這樣更自由。
[x] exit stop 之外,還需要一個中止 including 的方法。或者是仔細定義 stop, exit
的差別或者合併。vm.exit 是給 ok() 看的,很明顯它用來回到 python interpreter
這已經有點頭痛了,因為 exit 同時也是給 inner loop 看的 instruction 跟 RET
等效。意思是,如果 exit 再有別的意思,恐怕連我自己都糊塗了。那只剩 stop 了,
stop 用來打斷 outer loop 也很明確。所以,需要新的 word ... break-include
因為 sinclude 是用 dictate 來處理 .f file 的,可能把 ntib 改一下就有 break-include
的效果了,試試看,把斷點斷在 xray.f 裡查看半路的 tib 含不含 tutrial 。。。
---> Bingo!!
: break-include ( -- ) // Break including .f file
py: vm.ntib=len(tib) ;
stop 就是 reset()
exit 在 comiling 時是 EXIT==RET; 否則就是 vm.exit=True 而已,把 ok() 停下來。
2020/06/03 10:34:10 該為 proeforth 寫了個 skip2 更有彈性。
[x] peforth 可以用來幫 .py import modules
py> os.getcwd() constant working-directory // ( -- "path" ) Tutorial home directory saved copy
\ my MNIST_data directory is there
cd c:\Users\hcche\Downloads
py:~ from tensorflow.examples.tutorials.mnist import input_data as mnist_data; push(mnist_data)
parent :: ['mnist_data']=pop(1) \ pop(1) 很傷腦筋, in-line 要還原成 python 才看得懂。
[x] *debug* 改寫, 不要用 pdb.set_trace() 了
不用 import 就使用 pdb 的方法
py: sys.modules['pdb'].set_trace()
: *debug* ( <prompt> -- ... ) // FORTH breakpoint
BL word ( prompt ) py: ok(pop(),cmd="cr") ;
/// How to invoke pdb:
/// py: sys.modules['pdb'].set_trace()
[x] now 11 *debug* >> 22 <== but 22 got skipped ! <----- problem
--> fixed
[x] *debug* cannot be used in compiling mode (colon definition) yet
    because the following prompt needs to read the TIB immediately
[x] Bug found,
OK help a
Word in phaseB <Word 'help'>: 'int' object has no attribute 'help'
help improved
[x] new word "import" works fine
[x] new word __main__ works fine
s" dos title " __main__ :> __file__ + CRLF + dictate drop
Note! 如果沒有 CRLF 則 dos 會抓到 dictate 之後去,連 drop 都當成 command line 的一部份
[x] release 1.11
new words import, __main__, break_include, and improved *debug* and help
[X] ( ... ) comment nested v1.23
[x] CRLF leaves '\r\n' on TOS
[x] Ignore command line when running in jupyter notebook
(Pdb) vm.commandline
'-f C:\\Users\\hcche\\AppData\\Roaming\\jupyter\\runtime\\kernel-be1c3297-f7a9-4cb2-a7aa-b06e29f158ea.json'
(Pdb) sys.argv
['c:\\users\\hcche\\appdata\\local\\programs\\python\\python36\\lib\\site-packages\\ipykernel_launcher.py', '-f', 'C:\\Users\\hcche\\AppData\\Roaming\\jupyter\\runtime\\kernel-be1c3297-f7a9-4cb2-a7aa-b06e29f158ea.json']
(Pdb) sys.argv[0]
'c:\\users\\hcche\\appdata\\local\\programs\\python\\python36\\lib\\site-packages\\ipykernel_launcher.py'
(Pdb) sys.argv[0].endswith('ipykernel_launcher.py') --> True , the key to know about the case
    Running under jupyter notebook the "Error! -f unknown." problem showed up again. Earlier it happened because
    importing peforth under jupyter notebook sees the unexpected command line shown above.
    Wasn't that fixed already? --> excluding only py> sys.argv[0].endswith('.py') is not enough;
    use py> sys.argv[0].endswith(('.py','.ipy','.ipynb'))
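    A plain-Python restatement of that check; how __init__.py actually consumes the flag is not quoted in
    these notes, so treat the surrounding wiring as an assumption.
    ------ sketch: detect a foreign command line ------
    import sys
    # argv[0] ending in .py/.ipy/.ipynb means python was started by some other
    # launcher script (e.g. ipykernel_launcher.py under jupyter notebook), so
    # whatever follows on the command line is not addressed to peforth.
    foreign_commandline = sys.argv[0].endswith(('.py', '.ipy', '.ipynb'))
    ------ ------ ------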
[x] itchat 執行中,常有這個問題發生:
Traceback (most recent call last):
File "C:\Users\hcche\AppData\Local\Programs\Python\Python36\lib\site-packages\peforth\projectk.py", line 342, in outerExecute
f = float(token) # triggers exception if token is malformed
ValueError: could not convert string to float: '<mmreader><category'
為何 try: exception: 攔不住它?
Reproducing steps (at home on my desktop) :
c:\Users\hcche\Documents\GitHub\ibrainfuck\bfinterpreter>python v1.12 at home
>>> import peforth
>>> peforth.ok()
OK sys . cr
Traceback (most recent call last):
File "C:\Users\hcche\AppData\Local\Programs\Python\Python36\lib\site-packages\peforth\projectk.py", line 342, in outerExecute
f = float(token) # triggers exception if token is malformed
ValueError: could not convert string to float: 'sys'
    Finally found the steps to reproduce it....
    --> fixed after rewriting projectk.py > outer().
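    The rewritten outer() itself is not quoted here; a minimal sketch of the idea is to keep the float()
    probe inside its own try/except so a token like 'sys' or '<mmreader><category' simply falls through to
    the unknown-word path instead of raising.
    ------ sketch: tolerant number probe ------
    def try_number(token):
        """Return int/float when the token parses as a number, else None."""
        try:
            return int(token, 0)      # handles 10, 0x0a, 0o12, 0b1010
        except ValueError:
            pass
        try:
            return float(token)
        except ValueError:
            return None
    ------ ------ ------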
[x] study how to run brainfuck interpreter
c:\Users\hcche\Documents\GitHub\ibrainfuck
--> See Ynote __brainfuck_
[x] 因 bug 發現 harry_port() 的更佳用法 (quit.f updated)
\ Study 三寶
\ 1. DOS Box title
import peforth; peforth.ok(loc=locals(),cmd="include xray.f")
\ 2. Breakpoint
peforth.ok('11> ',cmd="parent inport")
\ 3. Lab of copy-paste
<accept> <text>
# ---------------------------------------------------------------------------
all locals() can use
# ---------------------------------------------------------------------------
</text> -indent py: exec(pop(),harry_port()) # If only globals is given, locals defaults to it.
</accept> dictate
[x] msg is a forth value and also a peforth global.
blabla bla something wrong.
--> 不是因為繼承 JavaScript 的想法,object 與 dict 不分所造成的混淆。
(::) (:>) 都是中性的 obj :: methed 或 obj :: ['property'] 隨人自己
的認知而定,語法並無問題。
[x] Ipeforth kernel for Jupyter is ok now. Bring peforth to
http://nbviewer.jupyter.org/
How to install Ipeforth kernel for jupyter notebook :
Copy kernel.json to here:
%USERPROFILE%\AppData\Roaming\jupyter\kernels\peforth\kernel.json
c:\Users\hcche\AppData\Roaming\jupyter\kernels\peforth\kernel.json
manually create the directory if
%USERPROFILE%\AppData\Roaming\jupyter\kernels\
is not existing.
[x] Tests before a Release v1.13
[x] 所有 run 法不帶 selftest 跑一遍,準備要 release 的版本:
[x] 改 %USERPROFILE%\Documents\GitHub\peforth\quit.f
' <selftest> :: enabled=False
Run setup.bat 做出取消 selftest 的 wheel
[x] pip uninstall peforth
[x] pip install peforth-xxxx.whl <== 注意!用剛做好的 wheel 否則會上網抓。
[x] 1. python -i -m peforth [/] no-selftest .s words exit
[x] 2. python -i -m peforth version drop
[x] 3. python import peforth
[x] selftest peforth.ok() .s words <--- no parent
[x] 1234 bye check echo %errorlevel%
[x] 4. jupyter notebook --> .s cd help bye .s cd help exit
[x] 考慮 README.rst 改良
[x] 若有改過 README.rst 則 wheel 就要重做
--> quit.f selftest=False --> 重來
[x] 所有 run 法帶 selftest:
[x] 改 %USERPROFILE%\Documents\GitHub\peforth\quit.f
' <selftest> :: enabled=True
Run setup.bat 更新本地版本以供測試
[x] 1. python -i -m peforth [/] with-selftest .s words exit bye
[x] 2. ipython -i -m peforth .' Hello World!!' cr bye
[x] 3. ipython import peforth .s words
[x] selftest peforth.ok() .s words <--- w/parent
[x] 1234 bye check echo %errorlevel%
[x] 4. jupyter notebook --> .s cd help bye .s cd help exit
[x] 考慮 README.rst 改良
[x] 若有改過 README.rst 則 wheel 就要重做
--> quit.f selftest=False --> 重來
[x] version 改成 1.14 (必須跳過 1.10 會變成 1.1)
[x] Make a master release up to GitHub --> 用 GitHub Windows 很簡單。
[x] 讓 jupyter feature peforth --> 已經加進 jupyter 的 kernel list:
https://github.com/jupyter/jupyter/wiki/Jupyter-kernels
[ ] Like harry_port that brings all wanted variables to projectk
How to make it easier?
[ ] Study when deep in a certain module, how peforth find and bring in
specified variables?
1. debug the toy.. keras exercise, breakpoint deep in a keras module
2. instead of using the trick of loc={**locals(),**{'foo':foo,'bar':bar}}
try to find foo,bar actual parent
3. access volatile variables out of their scope may not be a good idea
but being able to access them at a peforth breakpoint is necessary.
tensor_shape is imported in C:\Users\hcche\AppData\Local\Programs\Python\Python36\Lib\site-packages\tensorflow\python\keras\_impl\keras\layers\wrappers.py
char input_shape <text> \ local variable
locals :> ['{0}'] constant {0}
__main__ :: peforth.projectk.{0}=v('{0}')
</text> :> format(pop()) dictate
char tf <text> \ global variable
__main__ :> {0} constant {0}
__main__ :: peforth.projectk.{0}=v('{0}')
</text> :> format(pop()) dictate
* 1. char foobar module ( module )
2. py: setattr(sys.modules['foobar'].projectk,'foobar',v('foobar')) \ add to peforth
* 1. import numpy constant np // ( -- numpy ) module object, method #1
py> sys.modules['numpy'] constant np // ( -- numpy ) method #2
__main__ :> np constant np // ( -- numpy ) method #3
2. np __main__ :: peforth.projectk.np=pop(1) \ peforth global
np __main__ :: np=pop(1) \ __main__ global, see 'help __main__'
* 3. py: setattr(sys.modules['peforth'].projectk,'np',v('np')) \ alt method
char child_input_shape <text> \ local variable
locals :> ['{0}'] constant {0}
__main__ :: peforth.projectk.{0}=v('{0}')
</text> :> format(pop()) dictate
\ make librosa a global in peforth
char librosa py> tick(tos()) execute py: globals()[pop()]=pop()
\ even simpler way
import librosa constant librosa char librosa librosa py: globals()[pop()]=pop()
char input_shape <text> \ local variable
locals :> ['{0}'] constant {0}
__main__ :: peforth.projectk.{0}=v('{0}')
</text> :> format(pop()) dictate
char tensor_shape <text> \ local variable
locals :> ['{0}'] constant {0}
__main__ :: peforth.projectk.{0}=v('{0}')
</text> :> format(pop()) dictate
char selfLayer <text> \ local variable
locals :> ['{0}'] constant {0}
__main__ :: peforth.projectk.{0}=v('{0}')
</text> :> format(pop()) dictate
import peforth # [ ] _debug_
peforth.ok(cmd='''
0 value Count
none value child_output_shape
exit
''')
try:
child_output_shape = child_output_shape.as_list()
except Exception as err:
peforth.ok('33> ',loc={**locals(),**{'tensor_shape':tensor_shape,'self.layer':self.layer,'err':err}})
locals :: pop('peforth') locals inport
tensor_shape :> TensorShape(v('input_shape')).as_list() constant input_shape2
tensor_shape :> TensorShape([v('input_shape2')[0]]+v('input_shape2')[2:])
constant child_input_shape
self.layer :> _compute_output_shape(v('child_input_shape')) tib. \ ==> (?, 2048) (<class 'tensorflow.python.framework.tensor_shape.TensorShape'>)
self.layer :> _compute_output_shape(v('child_input_shape')) tib. \ ==> (?, 2048) (<class 'tensorflow.python.framework.tensor_shape.TensorShape'>)
self.layer :> _compute_output_shape(v('child_input_shape')) tib. \ ==> None (<class 'NoneType'>)
self.layer :> _compute_output_shape(v('child_input_shape')) tib. \ ==> None (<class 'NoneType'>)
[x] jupyter notebook 裡無法 exit , 每次 exit 都會留下一個東西在 stack 裡,出不去。
load> exit
load> .s
0: <IPython.core.autocall.ZMQExitAutocall object at 0x0000020577BF5EF0> (<class 'IPython.core.autocall.ZMQExitAutocall'>)
load>
--> 用 .py 比較看看 --> 沒這問題。
--> 直接進去,直接出來看看 --> 馬上卡住了。
--> 簡化 the peforth cell, 比較結果 ... 在 locals inport 之後多出一個 exit
看起來還是原來的 exit 但多出來就是不對,而且 --- marker clean up 之後好了!
充分證明就是它。
--> 怎麼發生的?--> ipython case 下,當時的 locals() 就是有 exit quit 等一堆東西
正好 exit 撞上了,而 locals :> ['exit'] . cr --> <IPython.core.autocall.ZMQExitAutocall object at 0x000001DBB24B5EF0>
正是那個怪東西。
RI
[ ] 最好 inport 能用挑的。程序如下:
load2> locals keys . cr
dict_keys(['__name__', '__doc__', '__package__', '__loader__', '__spec__', '__builtin__', '__builtins__', '_ih', '_oh', '_dh', 'In', 'Out', 'get_ipython', 'exit', 'quit', '_', '__', '___', '_i', '_ii', '_iii', '_i1', 'tf', '_i2', 'tflearn', '_i3', 'speech_data', '_i4', 'time', 'peforth', 'epoch_count', 'learning_rate', 'training_iters', 'batch_size', 'width', 'height', 'classes', '_i5', 'batch', 'word_batch', '_i6', 'net', 'model', 'x', '_i7'])
\ 從上表裡面挑要用的東西
<py> ['get_ipython', 'tflearn', 'speech_data', 'time', 'epoch_count',
'learning_rate', 'training_iters', 'batch_size', 'width', 'height',
'classes', 'batch', 'word_batch', 'net', 'model', 'x']
</pyV> ( [挑過的keys] )
\ 從 locals 裡面挑這些東西出來
<py> dict([(k,v) for k,v in v('locals').items() if k in tos()])
</pyV> nip ( {挑過的locals} )
\ 可以放心地 inport 成 peforth words 了
inport
[ ] python virtualenv http://docs.python-guide.org/en/latest/dev/virtualenvs/
解決的問題也是 FORTH 的問題,參考人家怎麼解的,可以想想怎麼沿用,看如何只 include 必要的東西。
[x] Ubuntu 的問題好像有解了,
--> Ubuntu 之下
OK site :> USER_BASE . cr 不存在!
/home/hcchen5600/.local
OK site :> USER_SITE . cr 不存在!
/home/hcchen5600/.local/lib/python3.6/site-packages
OK site :> PREFIXES . cr
['/usr', '/usr']
實際東西放在
site.PREFIXES[0] + /local/lib/site-packages/peforth/
--> windows
OK site :> USER_BASE . cr 不存在!
C:\Users\hcche\AppData\Roaming\Python
OK site :> USER_SITE . cr 不存在!
C:\Users\hcche\AppData\Roaming\Python\Python36\site-packages
OK site :> PREFIXES . cr
['C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36', 'C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36']
實際東西放在
site.PREFIXES[0] + /lib/site-packages/peforth/
--> Ubuntu virtualenv
>>> import site
>>> site.PREFIXES
['/home/hcchen5600/GitHub/DeepSpeech', '/home/hcchen5600/GitHub/DeepSpeech']
>>> site.USER_BASE
'/home/hcchen5600/.local'
>>> site.USER_SITE
'/home/hcchen5600/.local/lib/python3.6/site-packages'
實際東西放在
site.PREFIXES[0] + /lib/site-packages/peforth/
也就是
\rootfs\home\hcchen5600\GitHub\DeepSpeech\lib\site-packages\peforth\..
\ Windows 下可 normalize the path
照上面實施, windows 下變成
OK py> path . cr
C:\Users\hcche\AppData\Local\Programs\Python\Python36/lib/site-packages/peforth/
\ 這可以用 ntpath.normpath() 解決
OK import ntpath
OK constant ntpath
OK ntpath dir . cr
['__all__', '__builtins__', '__cached__', '__doc__', '__file__', '__loader__', '__name__', '__package__', '__spec__', '_get_bothseps', '_getfinalpathname', '_getfullpathname', '_getvolumepathname', 'abspath', 'altsep', 'basename', 'commonpath', 'commonprefix', 'curdir', 'defpath', 'devnull', 'dirname', 'exists', 'expanduser', 'expandvars', 'extsep', 'genericpath', 'getatime', 'getctime', 'getmtime', 'getsize', 'isabs', 'isdir', 'isfile', 'islink', 'ismount', 'join', 'lexists', 'normcase', 'normpath', 'os', 'pardir', 'pathsep', 'realpath', 'relpath', 'samefile', 'sameopenfile', 'samestat', 'sep', 'split', 'splitdrive', 'splitext', 'splitunc', 'stat', 'supports_unicode_filenames', 'sys']
OK ntpath :> normpath . cr
<function normpath at 0x000001C511337E18>
OK ntpath :> normpath py: help(pop())
Help on function normpath in module ntpath:
normpath(path)
Normalize path, eliminating double slashes, etc.
OK py> path ntpath :> normpath(pop()) . cr
C:\Users\hcche\AppData\Local\Programs\Python\Python36\lib\site-packages\peforth
OK
\ 或者檢查看是否 Windows
In [8]: sys.modules.get('nt') <--- None 就是沒有,就不是 windows
In [9]: sys.modules.get('sys')
Out[9]: <module 'sys' (built-in)>
In [10]:
\ 更好的方法, yeah! this is it.
-- ubuntu --
In [12]: os.name
Out[12]: 'posix'
-- windows --
OK os :> name . cr
nt
[/] With this solution, even installing the jupyter peforth kernel could be automated -- a sketch follows below.
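    A hedged sketch of that automation, combining the sys.path trick with the os.name check above; the
    target directories and the kernel.json fields are best-effort guesses, not an official installer.
    ------ sketch: install_peforth_kernel.py ------
    import os, sys, json
    pkg = [p for p in sys.path if p.endswith('site-packages')][0] + os.sep + 'peforth'
    if os.name == 'nt':   # Windows
        kdir = os.path.expandvars(r'%APPDATA%\jupyter\kernels\peforth')
    else:                 # posix (Ubuntu etc.)
        kdir = os.path.expanduser('~/.local/share/jupyter/kernels/peforth')
    os.makedirs(kdir, exist_ok=True)
    spec = {"argv": [sys.executable, os.path.join(pkg, "peforthkernel.py"),
                     "-f", "{connection_file}"],
            "display_name": "peforth",
            "language": "forth"}
    with open(os.path.join(kdir, 'kernel.json'), 'w') as f:
        json.dump(spec, f, indent=2)
    ------ ------ ------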
[x] Ubuntu 的問題應該已經解決了,要推廣 peforth 必須趕快 release
Tests before a Release v1.14
[x] 所有 run 法不帶 selftest 跑一遍,準備要 release 的版本:
[x] 改 %USERPROFILE%\Documents\GitHub\peforth\quit.f
' <selftest> :: enabled=False
Run setup.bat 做出取消 selftest 的 wheel
[x] pip uninstall peforth
[x] pip install peforth-xxxx.whl <== 注意!用剛做好的 wheel 否則會上網抓。
[x] 1. python -i -m peforth [x] no-selftest .s words exit
[x] 2. python -i -m peforth version drop
[x] 3. python import peforth
[x] selftest peforth.ok() .s words <--- no parent
[x] 1234 bye check echo %errorlevel%
[x] 4. jupyter notebook --> .s cd help bye .s cd help exit
[x] 5. repeat 以上 in ubuntu
--> copy the wheel to WSL ubuntu
--> use virtualenv is fine
[/] 考慮 README.rst 改良
[/] 若有改過 README.rst 則 wheel 就要重做
--> quit.f selftest=False --> 重來
[x] 所有 run 法帶 selftest:
[x] 改 %USERPROFILE%\Documents\GitHub\peforth\quit.f
' <selftest> :: enabled=True
Run setup.bat 更新本地版本以供測試
[x] 1. python -i -m peforth [x] with-selftest .s words exit bye
[x] 2. ipython -i -m peforth .' Hello World!!' cr bye
[x] 3. ipython import peforth .s words
[x] selftest peforth.ok() .s words <--- w/parent
[x] 1234 bye check echo %errorlevel%
[x] 4. jupyter notebook --> .s cd help bye .s cd help exit
[x] 考慮 README.rst 改良
[x] 若有改過 README.rst 則 wheel 就要重做
--> quit.f selftest=False --> 重來
[x] version 改成 1.15 (必須跳過 1.10 會變成 1.1)
[x] 直接用測過的 wheel update Pypi
[x] Make a master release up to GitHub --> 用 GitHub Windows 很簡單。
[x] WSL Ubuntu virtualenv weired experience
when pip install peforth in a virtualenv --> permission denied
--> so I use sudo and this will success but peforth will be installed
to global instead of the virtualenv! see https://stackoverflow.com/questions/14665330/pip-requirement-already-satisfied
--> The reason why permission denied was peforth-1.14-py3-none-any.whl which
was copied by windows and it needs chmod 777
\ see the correct example below:
(DeepSpeech) hcchen5600@31ENB667:~/GitHub/DeepSpeech$ chmod 777 peforth-1.14-py3-none-any.whl
(DeepSpeech) hcchen5600@31ENB667:~/GitHub/DeepSpeech$ pip install peforth-1.14-py3-none-any.whl
Processing ./peforth-1.14-py3-none-any.whl
Installing collected packages: peforth
Successfully installed peforth-1.14
(DeepSpeech) hcchen5600@31ENB667:~/GitHub/DeepSpeech$
[x] peforth.vm.things 的 peforth.things alias
14:59 2018/03/11 讓 vm.execute() vm.dictate() peforth.ok() 都傳回 vm 以便 support function cascade
19:22 2018/03/11 除了以上,連 stack, push, words, ... etc 都加上去了。
[x] %f magic command 暫無 auto-load, 必須 import peforth 才有 --> 解決了,雖然這樣也好。
"c:\Users\hcche\OneDrive\文件\Jupyter Notebooks\Creating an IPython extension with custom magic commands.ipynb"
討論複製過來如下:
[x] 如上述加上 c.InteractiveShellApp.extensions = ["c:\\Users\\hcche\\Downloads\\csvmagic.py"] 之後,無效。參考 [stackoverflow](https://stackoverflow.com/questions/27483637/auto-reload-extension-not-reloading-on-change) 學到用 '%load_ext c:\\Users\\hcche\\Downloads\\csvmagic.py' 在 jupyter notebook 或 ipython 中試試看 . . . 果然是 path 寫法的問題。照以上範例, csvmagic.py 位在 current directory 直接 '%load_ext csvmagic' 就可以了。如果不在 crrent directory 那就是要 importable 則手動放到 site-packages 去亦可,討論如下。
[x] 又或者必須是個 -m 搆得著的 module? 對了!上述的 importable 就是這個意思。--> 手動放進 site-packages (檔名改成 __init__.py) 就 importable 了,試試看 --> 成功!但是必須跑過 '%load_ext csvmagic' 之後才有 %%csv 不會自動 load。
[x] 而且 import csvmagic 也無效;然而經過以下正確安排之後 import peforth 有效,不知何故?
[x] 如何自動 load 應該跟 peforth 的 install 方式類似,這表示 csvmagic.py 所做的工作要由 `GitHub\peforth\peforthkernel.py` 來完成 (錯!要由 peforth 的 `__init__.py` 來負責)。其中 peforth %f 具有 line magic 與 cell magic 雙重腳色,該怎麼寫?看這裡:http://ipython.readthedocs.io/en/stable/config/custommagics.html
# from IPython.core.magic import (register_line_magic, register_cell_magic)
# @register_line_magic
# def f(line):
# peforth.vm.dictate(line)
#
# @register_cell_magic
# def f(line, cell):
# peforth.vm.dictate(cell)
from IPython.core.magic import register_line_cell_magic
@register_line_cell_magic
def f(line, cell=None):
if cell is None:
peforth.vm.dictate(line)
else:
peforth.vm.dictate(cell)
def load_ipython_extension(ipython):
ipython.register_magic_function(f, 'line_cell')
# see http://ipython.readthedocs.io/en/stable/api/generated/IPython.core.interactiveshell.html?highlight=register_magic_function
[x] (錯!) 放進 GitHub\peforth\peforthkernel.py
[x] (錯!) copy 到 c:\Users\hcche\AppData\Roaming\jupyter\kernels\peforth\kernel.json 所指到的位置:"c:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\Lib\\site-packages\\peforth\\peforthkernel.py"
[x] 重新啟動 jupyter notebook --> 結果無效, 這表示上面這段 code 沒有被執行到。可能放在 GitHub\peforth\peforthkernel.py 不對(確定不對),也可能另有某個 .json 要指對地方。看 document 吧! --> 已知!c.InteractiveShellApp.extensions = ['peforth'] 就這行,所以上面這段要放在 peforth 的 __init__.py 才對 (對了)--> 再試試看 ... 還是無效,必須 import peforth 才行。目前這樣可以滿意了。
[x] 我猜是 c.InteractiveShellApp.extensions = ['csvmagic','peforth'] 所在的
profile_default\ipython_config.py 整個都無效之故。先前嘗試 "28 Jupyter
Notebook tips, tricks and shortcuts" 該檔的另一個設定也是無效。從 path 裡
有個 /test/ 看來,可能不是正確的檔案。--> 由 %f get_ipython :> ().ipython_dir
. cr 得知正確的位置是:`C:\Users\hcche\.ipython` 才對,也就是
`C:\Users\hcche\.ipython\profile_default\ipython_config.py` --> 試試看,
有沒有自動 load_ext . . . 有了!剛改好 `profile_default\ipython_config.py`
就馬上對新開的 jupyter notebook 有效。
[x] ipython 的 magic initialization in __init__.py 要防呆,避免從 python (none ipython)
執行時出問題。判斷有沒有 ipython 的方法要看在哪裡判斷的, peforth __init__.py 裡
好像太早,結果這兩個方法都 always false 而無效,不能自動 load_ext :
if 'get_ipython' in globals():
if '__IPYTHON__' in dir(__builtins__):
我看就算了,需要先 import peforth 有它的好處,例如 greeting 會出現在 import 的時候。
[x] Debugging peforth's __init__.py from inside jupyter notebook is very convenient! Use pdb.set_trace()
    to set a breakpoint right before the ipython check and inspect the two expressions above --> both were false at that point!! But I found
    one that works:
    '__IPYTHON__' in __builtins__.keys()
    B i n g o ! ! It really worked. It turns out the definition of __builtins__ changes later on, and
    __builtin__ doesn't even exist yet at that moment.
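    A small sketch of that check; the isinstance guard is my own extra precaution, since __builtins__ is a
    dict when __init__.py is imported as a module but a module object in other contexts.
    ------ sketch: am I under IPython/Jupyter? ------
    def under_ipython():
        b = __builtins__
        names = b.keys() if isinstance(b, dict) else dir(b)
        return '__IPYTHON__' in names
    ------ ------ ------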
[x] Tests before a Release v1.15
[x] 所有 run 法不帶 selftest 跑一遍,準備要 release 的版本:
[x] 改 %USERPROFILE%\Documents\GitHub\peforth\quit.f
' <selftest> :: enabled=False
Run setup.bat 做出取消 selftest 的 wheel
[x] pip uninstall peforth
[x] pip install peforth-xxxx.whl <== 注意!用剛做好的 wheel 否則會上網抓。
[x] 1. python -i -m peforth [x] no-selftest .s words exit
[x] 2. python -i -m peforth version 12345 bye --> check errorlevel
[x] 3. python import peforth
[x] selftest peforth.ok() .s words <--- no parent
[x] 1234 bye check echo %errorlevel%
[x] 4. jupyter notebook --> .s cd help bye .s cd help exit
%f %%f magic command
[x] 5. repeat 以上 in ubuntu
--> pip3 install (/mnt/...the wheel) to WSL ubuntu
--> use virtualenv is fine
[/] 考慮 README.rst 改良
[/] 若有改過 README.rst 則 wheel 就要重做
--> quit.f selftest=False --> 重來
[x] 所有 run 法帶 selftest:
[x] 改 %USERPROFILE%\Documents\GitHub\peforth\quit.f
' <selftest> :: enabled=True
Run setup.bat 更新本地版本以供測試
[x] 1. python -i -m peforth [x] with-selftest .s words exit bye
[x] 2. ipython -i -m peforth .' Hello World!!' cr bye
[x] 3. ipython import peforth .s words
[x] selftest peforth.ok() .s words <--- w/parent
[x] 1234 bye check echo %errorlevel%
[x] 4. jupyter notebook --> .s cd help bye .s cd help exit
[/] 考慮 README.rst 改良
[/] 若有改過 README.rst 則 wheel 就要重做
--> quit.f selftest=False --> 重來
[x] 直接用測過的 wheel update Pypi
[x] version 改成 1.16 (必須跳過 1.10 會變成 1.1)
[x] Make a master release up to GitHub --> 用 GitHub Windows 很簡單。
[x] README.md needs to improve the installation guide for jupyter notebook support
Install peforth kernel for Jupyter Notebook
If you have ipython and jupyter installed, do following steps to add peforth
as a kernel of Jupyter Notebook,
Install peforth kernel for Jupyter Notebook
1. install peforth
pip install peforth
2. copy
c:\Users\yourname\AppData\Local\Programs\Python\Python36\Lib\site-packages\peforth\kernel.json
到
c:\Users\yourname\AppData\Roaming\jupyter\kernels\peforth\kernel.json
如果上面的 target 目錄 kernels\ 或 peforth\ 不存在,則請手動建立這些目錄
3. 編輯剛才這個檔案
c:\Users\yourname\AppData\Roaming\jupyter\kernels\peforth\kernel.json
照您的電腦實際情況,訂正其中的這個 path
c:\Users\yourname\AppData\Local\Programs\Python\Python36\Lib\site-packages\peforth\peforthkernel.py
以上是我的電腦的範例
[/] 希望這個 installation 能自動化
refer to Ynote : "怎麼加 javascript kernel 進 jupyter notebook" _ijavascript_
[x] setup.bat update 上 Pypi 成功之後,有個 error :batch not found 之類。
upload v1.15 時發現的。應該是因為把 bye comment 掉了,往下看到 batch 的東西了。
[/] v1.15 %f 也發生了 comment 之後如果沒有 whitespace 會被下一行看到的問題
%f __main__ :> census_train['age'].head(2) . cr \ 奇怪,它怎知這 dtype 是 int64?
13:34 18/05/22 複製不出來, 上面這法都忘了怎來的了。
[x] Unknown words are now automatically looked up in __main__ <-- works! v1.16
    Unknown words automatically looked up in locals
    Unknown words automatically looked up in globals
    Probably in project-k, or some way to plug in a sequence of methods for handling unknown words
    --> execute a word named unknown ( 'token' -- thing Y|n)
        returning True means it was handled, returning False means it wasn't (the default), in which case the
        usual unknown-token message is shown.
    --> do the __main__ case first, it's the easier one
: unknown py> getattr(sys.modules['__main__'],pop(),"Ûnknôwn")
py> str(tos())=='Ûnknôwn' if drop false else true then ;
// ( token -- thing Y|N) Try to find the unknown in __main__
[x] 開始 support jupyter magics 之後冒出問題,直接跑 ipython -m peforth 出 error 如下。
先進 ipython 之後再 import peforth 就沒問題。
c:\Users\hcche\Documents\GitHub>ipython -i -m peforth
Python 3.6.0 (v3.6.0:41df79263a11, Dec 23 2016, 08:06:12) [MSC v.1900 64 bit (AMD64)]
Type 'copyright', 'credits' or 'license' for more information
IPython 6.2.1 -- An enhanced Interactive Python. Type '?' for help.
p e f o r t h v1.16
source code http://github.com/hcchengithub/peforth
Type 'peforth.ok()' to enter forth interpreter, 'exit' to come back.
---------------------------------------------------------------------------
NameError Traceback (most recent call last)
c:\users\hcche\appdata\local\programs\python\python36\lib\runpy.py in run_module(mod_name, init_globals, run_name, alter_sys)
199 Returns the resulting top level namespace dictionary
200 """
--> 201 mod_name, mod_spec, code = _get_module_details(mod_name)
202 if run_name is None:
203 run_name = mod_name
c:\users\hcche\appdata\local\programs\python\python36\lib\runpy.py in _get_module_details(mod_name, error)
140 try:
141 pkg_main_name = mod_name + ".__main__"
--> 142 return _get_module_details(pkg_main_name, error)
143 except error as e:
144 if mod_name not in sys.modules:
c:\users\hcche\appdata\local\programs\python\python36\lib\runpy.py in _get_module_details(mod_name, error)
107 # Try importing the parent to avoid catching initialization errors
108 try:
--> 109 __import__(pkg_name)
110 except ImportError as e:
111 # If the parent or higher ancestor package is missing, let the
c:\Users\hcche\Documents\GitHub\peforth\__init__.py in <module>()
166 # Define peforth magic command, %f.
167 @register_line_cell_magic
--> 168 def f(line, cell=None):
169 if cell is None:
170 vm.dictate(line)
c:\users\hcche\appdata\local\programs\python\python36\lib\site-packages\IPython\core\magic.py in magic_deco(arg)
227 break
228 else:
--> 229 raise NameError('Decorator can only run in context where '
230 '`get_ipython` exists')
231
NameError: Decorator can only run in context where `get_ipython` exists
c:\users\hcche\appdata\local\programs\python\python36\lib\site-packages\IPython\core\interactiveshell.py:2598: UserWarning: Unknown failure executing module: <peforth>
warn('Unknown failure executing module: <%s>' % mod_name)
[x] ipython -m peforth 會出問題,可能是因為 get_ipython 當時還沒有 ready <-- 對
NameError: Decorator can only run in context where `get_ipython` exists
c:\users\hcche\appdata\local\programs\python\python36\lib\site-packages\IPython\core\interactiveshell.py:2598: UserWarning: Unknown failure executing module: <peforth>
warn('Unknown failure executing module: <%s>' % mod_name)
只要進了 ipython command prompt or jupyter notebook 都沒問題
In [2]: 'get_ipython' in globals()
Out[2]: True
--> 用對的方法檢查 ipython magic 存不存在即可,以上 error message 提供了線索
查看 python token 是否 defined 必須用 try-except:
try:
flag = "InteractiveShell" in str(get_ipython)
except:
flag = False
if flag:
from IPython.core.magic import register_line_cell_magic
... snip ....
注意, 解掉問題之後,如今:
1. jupyter notebook 完全沒問題。
2. 用 ipython -i -m peforth 跑起來的,exit 到 ipython 不認得 magic commands:
In [1]: %f
UsageError: Line magic function `%f` not found.
3. 先進 ipython 然後 import peforth 的才認得 magic commands.
[x] Tests before releasing v1.16
[x] 所有 run 法帶 selftest:
[x] 改 %USERPROFILE%\Documents\GitHub\peforth\quit.f
' <selftest> :: enabled=True
[x] 直接從 GitHub folder 執行 python peforth --> .s cd help exit
[x] Run setup.bat 更新本地 pip installed 版本以供測試
[x] 1. python -i -m peforth [x] with-selftest .s words exit bye
[/] 2. ipython -i -m peforth .' Hello World!!' cr bye --> 目前有問題
[x] 3. ipython import peforth .s words
[x] selftest peforth.ok() .s words <--- w/parent
[x] 1234 bye check echo %errorlevel% <-- 從 ipython 下直接出來也無誤。
[x] 4. jupyter notebook
import peforth
%f ." Hello FORTH!"
%%f Now we redefine the 'unknown' command that was doing nothing
: unknown ( token -- thing Y|N) // Try to find the unknown token in __main__
py> getattr(sys.modules['__main__'],pop(),"Ûnknôwn")
py> str(tos())=="Ûnknôwn" if drop false else true then ;
\ here after, when FORTH come accross an unknown token, instead of an error
\ message, it try to find the token in python __main__ module name space.
y = 'abc'
%f y . cr
%f yy . cr
[x] 考慮 README.rst 改良
[x] 若有改過 README.rst 則 wheel 就要重做
--> quit.f selftest=False --> 重來
[x] 所有 run 法不帶 selftest 跑一遍,準備要 release 的版本:
[x] 改 %USERPROFILE%\Documents\GitHub\peforth\quit.f
' <selftest> :: enabled=False
[x] Run setup.bat 直接從 GitHub folder 執行 python peforth 先確定一把 --> .s cd help exit
[x] Run setup.bat 做出取消 selftest 的 wheel
[x] pip uninstall peforth
[x] pip install peforth-xxxx.whl <== 注意!用剛做好的 wheel 否則會上網抓。
[x] 1. python -i -m peforth [x] no-selftest .s words exit
[x] 2. python -i -m peforth version 12345 bye --> echo %errorlevel%
[x] 3. python import peforth
[x] selftest peforth.ok() .s words <--- no parent
[x] 1234 bye check echo %errorlevel%
[x] 4. jupyter notebook --> .s cd help exit
%f %%f magic command
[/] 5. repeat 以上 in ubuntu
--> pip3 install (/mnt/...the wheel) to WSL ubuntu
--> use virtualenv is fine
[/] 考慮 README.rst 改良
[/] 若有改過 README.rst 則 wheel 就要重做
--> quit.f selftest=False --> 重來
[x] 直接用測過的 wheel update Pypi
twine upload dist/*
ID, password search my Ynote with pypi _account_
[x] version 改成 1.17 (必須跳過 1.10 會變成 1.1)
[x] test mybinder.org to view peforth > notebook > *.ipynb
不行, 猜測還是 _the_path_issue_ 的問題 <--- no, setup.py issue, see below.
[x] Make a master release up to GitHub --> 用 GitHub Windows 很簡單。
[x] v1.16 released
[x] (create) in peforth.f assumes that when there is a command line there should be no reDef warning, to keep the screen clean,
    since reDef is routine in that case. But under jupyter notebook there is always a command line;
    under jupyter notebook,
    %f py> commandline.strip() tib. ==> -f C:\Users\hcche\AppData\Roaming\jupyter\runtime\kernel-17e1c697-6363-49d3-b3af-81708a468835.json (<class 'str'>)
    so the reDef warnings all disappeared there too, which is also wrong. Under jupyter notebook the command line is completely
    useless, so the original test can stay, but the jupyter notebook case has to be excluded.
    The conclusion is --> ('jupyter' in str(sys.modules) or not commandline.strip())
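    The same condition restated in plain Python (the FORTH word (create) consults it; the function name is
    illustrative): warn about reDef when running under Jupyter or when there is no genuine command line,
    i.e. stay quiet only for a real batch-style invocation.
    ------ sketch: when to show the reDef warning ------
    import sys
    def want_redef_warning(commandline):
        return 'jupyter' in str(sys.modules) or not commandline.strip()
    ------ ------ ------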
[x] Tests before releasing v1.17
[x] 所有 run 法帶 selftest:
[x] 改 %USERPROFILE%\Documents\GitHub\peforth\quit.f
' <selftest> :: enabled=True
[x] 直接從 GitHub folder 執行 python peforth --> .s cd help exit
[x] Run setup.bat 更新本地 pip installed 版本以供測試
[x] 1. python -i -m peforth [x] with-selftest .s words exit bye
[x] 2. ipython -i -m peforth .' Hello World!!' cr bye
[x] 3. ipython import peforth .s words
[x] selftest peforth.ok() .s words <--- w/parent
[x] 1234 bye check echo %errorlevel% <-- 從 ipython 下直接出來也無誤。
[x] 4. jupyter notebook
kernel > restart and clear outputs
import peforth
%%f 擴充、修訂一下 peforth 的行為模式,讓它認得 jupyter notebook 下的 globals. Dot . 也改寫了,適合 jupyter notebook 學習環境使用。
: unknown ( token -- thing Y|N) // Try to find the unknown token in __main__
py> getattr(sys.modules['__main__'],pop(),"Ûnknôwn")
py> str(tos())=="Ûnknôwn" if drop false else true then ;
/// here after, when FORTH come accross an unknown token, instead of alerting
/// it try to find the token in python __main__ module name space.
: . tib. ; // ( tos -- ) A better dot that also prints the entire command line
/// For experiments that need to show both question and result.
/// "" . prints the command line only, w/o the TOS.
: path-to-find-modules ( <path> -- ) // Add path to sys.path so "import module-name" can find the module
CR word trim ( "path" ) py: sys.path.append(pop()) ;
code \ print(nexttoken('\n')) end-code // Redefine \ command to print the comment line
x = 123
%f x .
x . \ ==> 123 (<class 'int'>)
[/] 考慮 README.rst 改良
[/] 若有改過 README.rst 則 wheel 就要重做
--> quit.f selftest=False --> 重來
[x] 所有 run 法不帶 selftest 跑一遍,準備要 release 的版本:
[x] 改 %USERPROFILE%\Documents\GitHub\peforth\quit.f
' <selftest> :: enabled=False
[x] Run setup.bat 直接從 GitHub folder 執行 python peforth 先確定一把 --> .s cd help exit
[x] Run setup.bat 做出取消 selftest 的 wheel
[x] pip uninstall peforth
[x] pip install peforth-xxxx.whl <== 注意!用剛做好的 wheel 否則會上網抓。
[x] 1. python -i -m peforth [x] no-selftest .s words exit
[x] 2. python -i -m peforth version 12345 bye --> echo %errorlevel%
[x] 3. python import peforth
[x] no selftest, peforth.ok() .s words <--- no parent
[x] 1234 bye check echo %errorlevel%
[x] 4. jupyter notebook --> *debug* ok> .s cd help exit
%f %%f magic command
[x] 5. repeat 以上 in ubuntu
[x] pip uninstall peforth
[x] pip install (/mnt/...the wheel) to WSL ubuntu
[x] ipython -m peforth
[x] ipython , import peforth , magic commands
[x] 直接用測過的 wheel update Pypi
繼續剛才的 setup.bat 即可,必要時: twine upload dist/*
ID, password search my Ynote with pypi _account_
--> 出錯! GFW?
HTTPError: 403 Client Error: Invalid or non-existent authentication information. for url: https://upload.pypi.org/legacy/
--> retry 看看 ... 這次就成功了!
c:\Users\hcche\Desktop\peforth-master>twine upload dist/*
Uploading distributions to https://upload.pypi.org/legacy/
Enter your username: hcchen5600
Enter your password:
Uploading peforth-1.17-py3-none-any.whl
12%|...snip....
c:\Users\hcche\Desktop\peforth-master>
--> 很奇怪,pypi.org 網頁上已經 upgraded 到 1.17 版了, WSL Ubuntu 下試過
pip uninstall peforth -> pip install peforth 也到 1.17 版了,就是
Windows DOS 下怎麼試都還是 1.16 ! 不管了,晚點再看 --> 真的過幾分鐘就好了!!
[x] version 改成 1.18 (必須跳過 1.10 會變成 1.1)
[x] test mybinder.org
[http://github.com/hcchengithub/peforth][master][notebook]
不行, 看來是 setup.py 的問題 --> see Ynote: "mybinder.org FileNotFoundErErrorno 2 No such file or directory"
--> RI: 不是 bug, setup.py 改名不要讓 mybinder.org 看到即可。
2018.12.15 這可能是為何名為 setup.py.whl 的原因,我正在研究 command line:
python setup.py install
也許就是 peforth 的 install from source 的正解。
[x] Make a master release up to GitHub --> 用 GitHub Windows 很簡單。
[x] v1.17 released --> verion.txt 跳成 v1.18
[x] v1.14 v1.15 v1.16 on WSL Ubuntu, virtualenv , _the_path_issue_
ipython still failed, message:
...snip...
~/tmp/deepspeech-venv/lib/python3.6/site-packages/peforth/__init__.py in readTextFile(pathname)
33
34 def readTextFile(pathname):
---> 35 f = open(pathname,'r',encoding='utf-8')
36 # for line in f:
37 s = f.read()
FileNotFoundError: [Errno 2] No such file or directory:
'/usr/local/lib/site-packages/peforth/version.txt' <--- 因為 .py 與其他 files 被分開放了
...snip...
[x] https://stackoverflow.com/questions/122327/how-do-i-find-the-location-of-my-python-site-packages-directory
[x] v1.17 還是用 site.getsitepackages() 加上一點暴力
deli = '\\' if os.name == 'nt' else '/'
path = "something wrong peforth path not found"
for p in (pp for pp in site.getsitepackages() if pp.endswith("site-packages")):
dirs = p.split(deli)
if dirs[-2] != 'lib': # expecting 'lib'
dirs = dirs[:-2] + [dirs[-1]]; # if -2 is not 'lib' then remove it (pythonM.N or the likes)
if 'lib' in dirs: # extra check, may not be necessary
path = deli.join(dirs) + deli + "peforth" + deli
[x] test with WSL Ubuntu virtualenv --> failed
[x] v1.17 failed for WSL Ubuntu in both with and without virtualenv. <-- v1.21 FP
問題點:
When without virtualenv:
hcchen5600@WKS-4AEN0404:~$ python -m peforth
Traceback (most recent call last):
...snip...
File "/home/hcchen5600/.local/lib/python3.6/site-packages/peforth/__init__.py", line 67, in <module>
exec(readTextFile(path + "version.txt"),{},locals())
File "/home/hcchen5600/.local/lib/python3.6/site-packages/peforth/__init__.py", line 35, in readTextFile
f = open(pathname,'r',encoding='utf-8')
FileNotFoundError: [Errno 2] No such file or directory: 'something wrong peforth path not foundversion.txt'
When with virtualenv:
(playground) hcchen5600@WKS-4AEN0404:~$ python -m peforth
Traceback (most recent call last):
...snip...
File "/home/hcchen5600/playground/lib/python3.6/site-packages/peforth/__init__.py", line 57, in <module>
for p in (pp for pp in site.getsitepackages() if pp.endswith("site-packages")):
AttributeError: module 'site' has no attribute 'getsitepackages'
答案:
還是這篇文章:https://stackoverflow.com/questions/122327/how-do-i-find-the-location-of-my-python-site-packages-directory
[x] 正確答案先直接列出來
w/o virtualenv /home/hcchen5600/.local/lib/site-packages/peforth/version.txt
with virtualenv /home/hcchen5600/playground/lib/site-packages/peforth/version.txt
w/o virtualenv C:\Users\hcche\AppData\Local\Programs\Python\Python36\Lib\site-packages\peforth\version.txt
[x] Method 1: sys.path -- the definitive answer
    Ubuntu with virtualenv: works (strip the "python3.6" segment)
    >>> import sys
    >>> [f for f in sys.path if f.endswith('site-packages')]
    ['/home/hcchen5600/playground/lib/python3.6/site-packages']
    Ubuntu w/o virtualenv: works (strip the "python3.6" segment)
    >>> import sys
    >>> [f for f in sys.path if f.endswith('site-packages')]
    ['/home/hcchen5600/.local/lib/python3.6/site-packages']
    Windows w/o virtualenv: correct
    >>> [f for f in sys.path if f.endswith('site-packages')]
    ['C:\\Users\\hcche\\AppData\\Roaming\\Python\\Python36\\site-packages',
    'C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\lib\\site-packages']
    --> with this method all that is needed is to change "site.getsitepackages()" in v1.17's __init__.py
        to "sys.path" -- that's it, honestly! (a sketch follows the three methods below)
[x] Method 2: site.getsitepackages() <--- what v1.16 used; failed, the worst of the three, good grief!
python -c "import site; print([f for f in site.getsitepackages() if f.endswith('site-packages')])"
Windows w/o virtualenv 正確
python -c "import site; print([f for f in site.getsitepackages() if f.endswith('site-packages')])"
['C:\\Users\\hcche\\AppData\\Local\\Programs\\Python\\Python36\\lib\\site-packages']
Ubuntu w/o virtualenv 錯誤!
hcchen5600@WKS-4AEN0404:~$ python
Python 3.6.5 (default, May 3 2018, 10:08:28)
[GCC 5.4.0 20160609] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import site
>>> site.getsitepackages()
['/usr/local/lib/python3.6/dist-packages', '/usr/lib/python3/dist-packages', '/usr/lib/python3.6/dist-packages']
Ubuntu with virtualenv 直接陣亡,根本不 support 這個命令!
(playground) hcchen5600@WKS-4AEN0404:~/playground/bin$ python -c "import site; print([f for f in site.getsitepackages() if f.endswith('site-packages')])"
Traceback (most recent call last):
File "<string>", line 1, in <module>
AttributeError: module 'site' has no attribute 'getsitepackages'
[x] Method 3: no good! <--- python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())"
Windows w/o virtualenv 正確
c:\Users\hcche\Downloads>python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())"
C:\Users\hcche\AppData\Local\Programs\Python\Python36\Lib\site-packages
Ubuntu w/o virtualenv 錯誤!
hcchen5600@WKS-4AEN0404:~$ python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())"
/usr/lib/python3/dist-packages <--- 錯了,不能用。
Ubuntu with virtualenv 可用(要剃除"python3.6")
(playground) hcchen5600@WKS-4AEN0404:~/playground/bin$ python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())"
/home/hcchen5600/playground/lib/python3.6/site-packages
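    Putting method 1 together with the v1.17 loop shown earlier gives roughly the following -- the same
    code with site.getsitepackages() swapped for sys.path; the "python3.6"-stripping matches where
    setup.py's data_files actually land.
    ------ sketch: locate .../site-packages/peforth/ via sys.path ------
    import os, sys
    deli = '\\' if os.name == 'nt' else '/'
    path = "something wrong peforth path not found"
    for p in (pp for pp in sys.path if pp.endswith("site-packages")):
        dirs = p.split(deli)
        if dirs[-2] != 'lib':              # expecting '.../lib/site-packages'
            dirs = dirs[:-2] + [dirs[-1]]  # drop 'python3.6' or the like
        if 'lib' in dirs:
            path = deli.join(dirs) + deli + "peforth" + deli
    ------ ------ ------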
[x] 意外發現 python -m peforth include 1.f 時, 1.f 裡面不認得 ok() vm.ok()
RI: the recent __init__.py "run once" section that runs quit.f that runs command line
arguments is *before* the definition of ok()! --> I move it down to the bottom
then problem is gone. This solution will be released with v1.18.
[x] mybinder.org 跑不起來, peforth/version.txt file not found <--- RI: setup.py 改名就好了, expecting v1.18
See my Ynote: "mybinder.org FileNotFoundErErrorno 2 No such file or directory"
--> 我猜是 setup.py 的檔案結構,在 Desktop\peforth-master\ 處多用了一個 peforth
folder 如此一來, 從 project 本身的 setup.py 所在之處來看 version.txt 就不在
peforth/version.txt 而直接在 version.txt 才對。 v1.16 先直接修改
Desktop\peforth-master\setup.py
做一版 wheel 在 local 看成不成功, 若成功就證明研究生的檔案結構多一個 peforth
是沒必要的了,改掉就有機會了。
--> 真的做成了, 把 peforth/ 裡的東西都移上來, setup.py 改掉,不要 peforth/,
如下從 working directory 執行, 成功了!
c:\Users\hcche\Desktop\peforth-master>pip wheel --wheel-dir=dist .
Processing c:\users\hcche\desktop\peforth-master
Building wheels for collected packages: peforth
Running setup.py bdist_wheel for peforth ... done
Stored in directory: c:\users\hcche\desktop\peforth-master\dist
Successfully built peforth
c:\Users\hcche\Desktop\peforth-master>
--> 這表示根本不必搞到 Desktop\peforth-master 直接用 local GitHub repo 就可以
了 --> 錯錯錯! Desktop\peforth-master\peforth folder 是必須的
--> peforth/version.txt file not found 應該還是 _the_path_issue_
[x] 做成 1.17 release 以便查看 mybinder.org 解了沒? --> failed !!
see also : Ynote : "研究 peforth 的 path 到底正確該如何"
[x] setup.py 是研究生為了做出 peforth 的 whl 而設。既然 mybinder.org 也要來
看,就一定要更講究一點,我想就是這個原因....
--> 可能要兩個 setup.py, 一個 at peforth folder, the other is for building .whl.
when building .whl, the setup.py is at parent folder and is that a must ?
--> anywhere>pip wheel --wheel-dir=dist c:\Users\hcche\Desktop\peforth-master
c:\Users\hcche\Desktop\peforth-master>pip wheel --wheel-dir=dist peforth
以上都可以 build 出 peforthxxxxx.whl
--> peforth/setup.py 被 mybinder.org 看到了才出的問題, 把它改名成 setup.py.disabled 看看...
RI: 把 setup.py 名字改掉就好了!!! 不要讓 mybinder.org 看到 setup.py 即可。
--> 如前述,我們的 setup.py 是用來做 .whl 的,要給 pip 看的,不是要給 mybinder.org
看的。
[x] Merge 到 master 但可以不必急著 release, 純粹是 setup.py 的問題,跟程式無關。
只要讓 mybinder.org 能跑,改 github 上的 setup.py 成 setup.py.whl 即可。
--> expecting v1.18
[x] Tests before a Release v1.18 <--- on pypi.org already
[x] 所有 run 法帶 selftest:
[x] 改 %USERPROFILE%\Documents\GitHub\peforth\quit.f
' <selftest> :: enabled=True
[取消] 直接從 GitHub folder 執行 python peforth --> 等於是 -m peforth
[x] Run setup.bat 更新本地 pip installed 版本以供測試
[x] 1. python -i -m peforth [x] with-selftest .s words exit bye
[x] 2. ipython -i -m peforth .' Hello World!!' cr bye
[x] 3. ipython import peforth .s words
[x] selftest peforth.ok() .s words <--- w/parent
[x] 1234 bye check echo %errorlevel% <-- 從 ipython 下直接出來也無誤。
[x] 4. jupyter notebook
kernel > restart and clear outputs
import peforth
%%f 擴充、修訂一下 peforth 的行為模式,讓它認得 jupyter notebook 下的 globals. Dot . 也改寫了,適合 jupyter notebook 學習環境使用。
: unknown ( token -- thing Y|N) // Try to find the unknown token in __main__
py> getattr(sys.modules['__main__'],pop(),"Ûnknôwn")
py> str(tos())=="Ûnknôwn" if drop false else true then ;
/// here after, when FORTH come accross an unknown token, instead of alerting
/// it try to find the token in python __main__ module name space.
: . tib. ; // ( tos -- ) A better dot that also prints the entire command line
/// For experiments that need to show both question and result.
/// "" . prints the command line only, w/o the TOS.
: path-to-find-modules ( <path> -- ) // Add path to sys.path so "import module-name" can find the module
CR word trim ( "path" ) py: sys.path.append(pop()) ;
code \ print(nexttoken('\n')) end-code // Redefine \ command to print the comment line
x = 123
%f x .
x . \ ==> 123 (<class 'int'>)
[x] 5. jupyter notebook --> peforth kernel
[/] 考慮 README.rst 改良
[/] 若有改過 README.rst 則 wheel 就要重做
--> quit.f selftest=False --> 重來
[x] 所有 run 法不帶 selftest 跑一遍,準備要 release 的版本:
[x] 改 %USERPROFILE%\Documents\GitHub\peforth\quit.f
' <selftest> :: enabled=False
[x] Run setup.bat 做出取消 selftest 的 wheel
[x] pip uninstall peforth
[x] pip install peforth-xxxx.whl <== 注意!用剛做好的 wheel 否則會上網抓。
[x] 1. python -i -m peforth [x] no-selftest .s words exit
[/] 2. python -i -m peforth version 12345 bye --> echo %errorlevel%
[/] 3. python import peforth
[/] no selftest, peforth.ok() .s words <--- no parent
[/] 1234 bye check echo %errorlevel%
[x] 4. jupyter notebook --> *debug* ok> .s cd help exit
%f %%f magic command
[x] 5. repeat 以上 in ubuntu
[x] pip uninstall peforth
[x] pip install (/mnt/...the wheel) to WSL ubuntu
[x] ipython -m peforth
[x] ipython , import peforth , magic commands
[x] 直接用測過的 wheel update Pypi
繼續剛才的 setup.bat 即可,必要時: twine upload dist/*
ID, password search my Ynote with pypi _account_
[x] test mybinder.org @ [http://github.com/hcchengithub/peforth][develop][notebook]
這個跟 pypi.org 無關,只要 github 有 push 上去馬上生效。
[x] pypi.org 網頁上已經 upgraded 到 1.18 版了, WSL Ubuntu 下試過
pip uninstall peforth -> pip install peforth 也到 1.17 版了,就是
Windows DOS 下怎麼試都還是 1.16 ! 不管了,晚點再看 --> 真的過幾分鐘就好了!!
[x] WSL Ubuntu w/o virtualenv --> python -m peforth ... ok
[x] WSL Ubuntu with virtualenv --> python -m peforth ... ok
[/] test colab --> v1.18 還是 failed 還是 path 的問題 :-(
[x] version 改成 1.19 (必須跳過 1.10 會變成 1.1)
[x] Make a master release up to GitHub --> 用 GitHub Windows 很簡單。
[x] test colab --> v1.18 還是 failed 還是 path 的問題 :-(
v1.18 is failed on colab, the chance is that v1.16 works fine on colab.
[x] use v1.16 (pip install peforth==1.16 on colab) to check sys.path & site.getsitepackages()
---- from collab with peforth v1.16 ----
import site
site.getsitepackages()
['/usr/local/lib/python3.6/dist-packages',
'/usr/lib/python3/dist-packages',
'/usr/lib/python3.6/dist-packages']
import sys
sys.path
['',
'/env/python',
'/usr/lib/python36.zip',
'/usr/lib/python3.6',
'/usr/lib/python3.6/lib-dynload',
'/usr/local/lib/python3.6/dist-packages',
'/usr/lib/python3/dist-packages',
'/usr/local/lib/python3.6/dist-packages/IPython/extensions',
'/content/.ipython']
-------- actual peforth path on Google colab ---------------
!ls /usr/local/lib/python3.6/dist-packages/peforth
__init__.py __main__.py peforthkernel.py projectk.py __pycache__ setup.py
!ls /usr/local/lib/site-packages/peforth
kernel.json peforthkernel.py __pycache__ version.txt
peforth.f peforth.selftest quit.f
[/] So, the answer is clear here . . . try all possible directories with some
guess to find /peforth/version.txt that's doable
[x] can be setup.py's problem. I don't think all modules are facing the same
annoying problem. --> try to simplify setup.py.whl
--> RTFD : https://packaging.python.org/guides/distributing-packages-using-setuptools/?highlight=data_files#data-files
[x] testing c:\Users\hcche\Desktop\peforth-master\setup.py.improved that uses
package_data={...} instead of data_files=[...] in sety.py
--> 用改過的 setup.py 重作 wheel
很奇怪,必須用 github\peforth\setup.bat 做否則 pip wheel 根本不 build 總之有個辦法可行做出了 v1.19
See Ynote: "Pack peforth to peforth.whl" > "2018/07/02 13:06" 的討論。
--> 直接看 ~.whl (zip檔)就知道成功了!
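            A hedged sketch of the setup.py change discussed above: package_data (paths relative to
            the package directory) instead of data_files, so version.txt, peforth.f, quit.f etc. land
            inside site-packages/peforth/ next to __init__.py. The file list below is illustrative,
            not the exact final setup.py.
                # setup.py -- illustrative fragment only
                from setuptools import setup

                setup(
                    name='peforth',
                    packages=['peforth'],
                    package_data={
                        # relative to the peforth package directory
                        'peforth': ['version.txt', 'peforth.f', 'quit.f',
                                    'kernel.json', 'peforth.selftest'],
                    },
                    # data_files=[...]  # the old approach that put files outside the package
                )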
[x] v1.18 用 sys.path 的加工不對了 --> 改掉
[x] path="" 只有 setup.bat 要看才出錯,真的不行嗎?
--> 真的不行,讀 version.txt 時的 os.getcwd() 真的就是當時的 working directory,這樣不行。
--> 所以用 sys.path 的方法還是要用 --> windows 本來就沒錯了呀!
[x] 改掉 setup.py 的好處是 data files 與 .py 都在一起了,但是 path 如何取得
還是個問題 -- Ubuntu and colab 不能兩全 --> 用 sys.path 去 serch peforth/version.txt
還是唯一的辦法 ... 不難:
path = "something wrong peforth path not found"
for p in (pp for pp in sys.path if pp.endswith("site-packages")):
if os.path.isfile(p + deli + 'peforth' + deli + 'version.txt'):
path = p + deli + 'peforth' + deli
break
vm.path = path
pdb.set_trace() # *debug*
[x] windows (none anaconda virtualenv), WSL Ubuntu w/o virtualenv, with virtualenv
--> all pass!
[x] Tests before a Release v1.19 --> v1.21 actually
[x] 所有 run 法帶 selftest:
[x] 改 GitHub\peforth\quit.f
' <selftest> :: enabled=True
[x] Run setup.bat 更新本地 pip installed 版本以供測試
[x] 1. python -i -m peforth [x] with-selftest .s words exit bye
[x] 2. ipython -i -m peforth .' Hello World!!' cr bye
[x] 3. ipython import peforth .s words
[x] selftest peforth.ok() .s words <--- w/parent
[x] 1234 bye check echo %errorlevel% <-- 從 ipython 下直接出來也無誤。
[x] 4. jupyter notebook
kernel > restart and clear outputs
import peforth
%%f 擴充、修訂一下 peforth 的行為模式,讓它認得 jupyter notebook 下的 globals. Dot . 也改寫了,適合 jupyter notebook 學習環境使用。
: unknown ( token -- thing Y|N) // Try to find the unknown token in __main__
py> getattr(sys.modules['__main__'],pop(),"Ûnknôwn")
py> str(tos())=="Ûnknôwn" if drop false else true then ;
/// here after, when FORTH come accross an unknown token, instead of alerting
/// it try to find the token in python __main__ module name space.
: . tib. ; // ( tos -- ) A better dot that also prints the entire command line
/// For experiments that need to show both question and result.
/// "" . prints the command line only, w/o the TOS.
: path-to-find-modules ( <path> -- ) // Add path to sys.path so "import module-name" can find the module
CR word trim ( "path" ) py: sys.path.append(pop()) ;
code \ print(nexttoken('\n')) end-code // Redefine \ command to print the comment line
x = 123
%f x .
x . \ ==> 123 (<class 'int'>)
[x] 5. jupyter notebook --> peforth kernel --> .s words
[/] 考慮 README.rst 改良
[/] 若有改過 README.rst 則 wheel 就要重做
--> quit.f selftest=False --> 重來
[x] 所有 run 法不帶 selftest 跑一遍,準備要 release 的版本:
[x] 改 GitHub\peforth\quit.f
' <selftest> :: enabled=False
[x] Run setup.bat 做出取消 selftest 的 wheel
[x] pip uninstall peforth
[x] pip install peforth-xxxx.whl <== 注意!用剛做好的 wheel 否則會上網抓。
[x] 1. python -i -m peforth [x] no-selftest .s words exit
[x] 2. python -i -m peforth version 12345 bye --> echo %errorlevel%
[x] 3. python import peforth
[x] no selftest, peforth.ok() .s words <--- no parent
[x] 1234 bye check echo %errorlevel%
[x] 4. jupyter notebook --> *debug* ok> .s cd help exit
%f %%f magic command
[x] 5. repeat 以上 in ubuntu
[x] pip uninstall peforth
[x] pip install (/mnt/...the wheel) to WSL ubuntu
[/] ipython -m peforth
[/] ipython , import peforth , magic commands
[x] 直接用測過的 wheel update Pypi
繼續剛才的 setup.bat 即可,必要時: twine upload dist/*
ID, password search my Ynote with pypi _account_
[x] pypi.org 網頁上已經 upgraded 到 1.19 版了,
若不行,晚點再看,過幾分鐘就好。
[x] WSL Ubuntu 下試 pip uninstall peforth -> pip install peforth
[x] WSL Ubuntu with and w/o w/o virtualenv --> python -m peforth
[x] Windows DOS 下試
[x] test mybinder.org @ [http://github.com/hcchengithub/peforth][develop][notebook]
這個跟 pypi.org 無關,只要 github 有 push 上去馬上生效。
[x] test colab --> v1.19 --> shit, 又錯了! 不能限定要 site-packages, dist-packages 也要接受
deli = '\\' if os.name == 'nt' else '/'
path = "wrong"
for p in sys.path:
if os.path.isfile(p + deli + 'peforth' + deli + 'version.txt'):
path = p + deli + 'peforth' + deli
break
以上這改就對了,出 v1.21 版吧! Shit shit . . .
[x] __init__.py
[x] rebuild setup.bat
[x] release v1.21 to pypi.org
[x] test colab ... !pip install peforth==1.21 要等一等。。。 v1.21 成功了! 嗚嗚嗚
[x] Make a master release up to GitHub --> 用 GitHub Windows 很簡單。
[x] version 改成 1.22 (必須跳過 1.20 會變成 1.2)
[x] 14:48 2018-12-09 python object (attributes -> values) and hash table or
dictionary (keys --> values) are confusing me especially when JavaScript sees
both the samething. The python 'dir' function lists an object's attributes and
JSON can stringify a hash table to a readable string. Let's make an experient:
\ o1 is a dict
py> {'a':11,'b':22} constant o1
OK o1 tib. --> {'a': 11, 'b': 22} \ it's a dict so it's shown as a dict
OK o1 :> keys() . cr --> dict_keys(['a', 'b']) \ dict has keys
OK o1 :> values() . cr --> dict_values([11, 22]) \ dict has values
\ it's also an ojbect
OK o1 dir . cr \ so it has attributes
--> ['clear', 'copy', 'fromkeys', 'get', 'items', 'keys', 'pop', 'popitem', 'setdefault', 'update', 'values']
OK o1 stringify . cr
{
"a": 11,
"b": 22
}
OK
這樣看來,dict 與 object 的混淆是 JavaScript user 的問題。 任何東西都是 object 而只有 dict 才有 hash table.
用 (see) dir .members 查看 object 的 attributes, 用 (see) keys values 查看 dict. 用 stringify 查看 dict'fy 之後的
任何東西。
--> 結論是在 help (see) 裡講清楚就好了。 來自 jeforth 的 obj>keys 與 dir 或 keys 重複所以很少用了。
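            The same comparison in plain Python, mirroring the FORTH session above: every value is an
            object with attributes, but only a dict has keys/values, and json can stringify it into a
            readable string (output formatting may differ slightly from peforth's stringify).
                import json
                o1 = {'a': 11, 'b': 22}
                print(o1.keys(), o1.values())    # dict view: dict_keys(['a','b']) dict_values([11,22])
                print(dir(o1))                   # object view: attributes 'clear','copy','get',...
                print(json.dumps(o1, indent=4))  # readable string, like stringify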
[x] Install peforth from source
---- 2018.12.15 懂得用 python setup.py install 需要修改 ----
[x] Ynote: "研究 install peforth from source 的方法" 已經成功。
[x] 結論是: peforth/ 目錄結構要遷就研究生的安排,改變原先其實不太自然的執行方式:
C:\Users\hcche\Documents\GitHub\>python peforth
            變成從 peforth 目錄裡面執行,這很好哇!
        [X] v1.22 1.23 __main__.py 還是用 import peforth 的,沒意思 --> 有 support test.py 取代 __main__.py 供 developing debugging 用
[x] pywinio repo 裡面也是又有一個 pywinio/ folder, 將來 peforth 也是這樣。
[x] 照研究生的目錄結構改 GitHub/peforth
c:\Users\hcche\Documents\GitHub\peforth\..
Directories Files
-------------------- ---------------------------
.git\ .gitattributes
.ipynb_checkpoints\ LICENCE
__pycache__\ admin.bat
notebook\ requirements.txt
peforth\ LICENSE
peforth.egg-info\ README.md
playground\ README.rst
setup.bat
setup.py
setup.py.whl
log.txt
.gitignore
c:\Users\hcche\Documents\GitHub\peforth\peforth\..
Directories Files
-------------------- ---------------------------
__main__.py
kernel.json
peforthkernel.py
projectk.py
peforth.selftest
version.txt
__init__.py
quit.f
peforth.f
[x] remove existing peforth so aso to try setup.py install
Python on my computer at home is anaconda, so though that I have to remove it
by "conda uninstall" command. That was wrong. Do it by pip as usual works fine.
see Ynote:"研究 install peforth from source 的方法" for the log.
[x] now try "python setup.py install"
it works !!!!
如何查看 setup.py 的 help: c:\Users\hcche\Documents\GitHub\peforth>python setup.py --help
[x] setup.bat 可以大簡化了。
[V1.22之後的新版] 打包步驟 2018/12/16 11:02
See my Ynote: "Pack peforth to peforth.whl"
1. 檢查 ~\GitHub\peforth\setup.py 看有沒有漏掉新檔案,有沒有要去掉的檔案。
2. 跑 c:\Users\hcche\Documents\GitHub\peforth>python setup.py sdist bdist_wheel
得到 peforth.whl in c:\Users\hcche\Documents\GitHub\peforth\dist
3. 執行 c:\Users\hcche\Documents\GitHub\peforth>twine upload dist/*
需要帳號密碼,看這裡 Ynote: "python pypi 研究 -- upload to PyPI ok now.note"
4. pip uninstall peforth 然後再 pip install peforth 試驗看看。
5. 完成!
[x] 13:27 2019-03-06 code ... end-code 可以取 xt.__doc__ 2nd line 當作 help
code txt2json # ( txt -- dict ) Convert given string to dictionary
push(json.loads("".join([ c if c != "'" else '"' for c in pop()])))
end-code
' txt2json :> xt :> __doc__ --> def xt(_me=None): ### txt2json ###
# ( txt -- dict ) Convert given string to dictionary
push(json.loads("".join([ c if c != "'" else '"' for c in pop()]))) (<class 'str'>)
18:04 2019-05-09 寫好了:
# projectk.py 裡面
# The basic FORTH word 'end-code's run time.
def doendcode(_me=None):
global compiling
if compiling!="code":
panic("Error! 'end-code' a none code word.")
current_word_list().append(Word(newname,newxt))
last().vid = current;
last().wid = len(current_word_list())-1;
last().type = 'code';
# ---------
mm = re.match(r"^.*?#\s*(.*)$", last().xt.__doc__.split('\n')[1])
last().help = mm.groups()[0] if mm and mm.groups()[0] else ""
# ---------
wordhash[last().name] = last();
compiling = False;
--> py> doendcode .source <---- 看到對的 source code 了
[x] 試驗定義一個 code word 查看他的 help 果然第一行的 # foo bar 有被抓進去當 help 了。
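        A standalone check of the regex used in doendcode() above: it takes the second line of
        xt.__doc__ and keeps whatever follows the first '#', which becomes the word's help text.
        The docstring below is a hand-made sample, not generated by genxt().
            import re
            doc = '''def xt(_me=None): ### txt2json ###
                # ( txt -- dict ) Convert given string to dictionary
                push(json.loads(...))'''
            mm = re.match(r"^.*?#\s*(.*)$", doc.split('\n')[1])
            help_text = mm.groups()[0] if mm and mm.groups()[0] else ""
            print(help_text)   # ( txt -- dict ) Convert given string to dictionary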
[X] unkown debug locals() 的說明 copy 過來
older unsync'ed notes on my LRV2
v1.22 既然 peforth 主要都是用來配合 jupyter notebook trace code, set breakpoints, ... etc.
unknown and ... and # should be added into the built-in words, plus the ability to
view local variables.
[x] I remember that I have done making 'unknown' predefined . . . no.
16:51 2019-01-12 I am now working on making 'unknown' to try locals. __main__ is
an object so global variables are accessed by getattr() however locals and globals
are dictionary that should be accessed by dict.get(key,default) instead.
see https://stackoverflow.com/questions/3089186/python-getattr-equivalent-for-dictionaries
[x] done an example @
http://localhost:8888/notebooks/OneDrive/%E6%96%87%E4%BB%B6/Jupyter%20Notebooks/Siraj%20make_a_neural_net_live_demo.ipynb
Source Code
===========
none value _locals_ // ( -- dict ) locals passed down from ok()
false value debug // ( -- flag ) enable/disable the ok() breakpoint
: unknown ( token -- thing Y|N) // Try to find the unknown token in __main__ or _locals_
_locals_ if \ in a function
( token ) _locals_ :> get(tos(),"Ûnknôwn") ( token, local )
py> str(tos())!="Ûnknôwn" ( token, local, unknown? )
if ( token, local ) nip true exit ( return local Y ) else drop ( token ) then
then
( token ) py> getattr(sys.modules['__main__'],pop(),"Ûnknôwn") ( thing )
py> str(tos())=="Ûnknôwn" if ( thing ) drop false else true then ;
/// Example: Set a breakpoint in python code like this:
/// if peforth.execute('debug').pop() : peforth.push(locals()).ok("bp>",cmd='to _locals_')
/// Example: Save locals for investigations:
/// if peforth.execute('debug').pop() : peforth.push(locals()).dictate('to _locals_')
/// That enters peforth that knows variables in __main__ and locals at the breakpoint.
/// 'exit' to leave the breakpoint and forget locals.
: exit ( -- ) // ( -- ) Exit the breakpoint forget locals and continue the process
none to _locals_ py: vm.exit=True ;
code # print(nexttoken('\n')+'\n') end-code // print the comment line after #
: --> ( result -- ) // Print the result with the command line.
py> tib[:ntib].rfind("\n") py> tib[max(pop(),0):ntib].strip() ( result cmd-line )
s" {} {} ({})" :> format(pop(),tos(),type(pop())) . cr ;
/// Good for experiments that need to show command line and the result.
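        A small usage sketch of the breakpoint idiom from the /// comments above, seen from the
        Python side. It assumes the debug/_locals_ words defined above are already loaded; the
        function name and variables are made up for the example, and 'debug' has to be set true
        first (e.g. peforth.dictate('true to debug')) or the breakpoint never fires.
            import peforth

            def train_step(x):                   # hypothetical application function
                loss = x * 0.5
                # drop a breakpoint that can see this function's locals
                if peforth.execute('debug').pop():
                    peforth.push(locals()).ok("bp>", cmd='to _locals_')
                return loss

            train_step(42)   # at the bp> prompt: x --> , loss --> , 'exit' to continue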
[X] 10:48 2019-05-11 older note
開發中,不要動到 pip'ed peforth 出錯很麻煩,所以想要從 working folder 執行
不要每次都得先 pip install 改入 site-packages
[x] __main__.py 當初為何他媽 import peforth 有屁用?就是要跑本地版本試驗改過的東西才有意義呀!
--> 15:48 2019-05-11 應該是 path 搞不定,簡化問題 (Since commit c3d7677 on Oct 8, 2017)。
__main__.py 是用 python -m peforth 執行時的 entry,必須照顧。
11:26 2019-05-11 while __init.py__ is 'import peforth' entry point.
--> 11:24 2019-05-11 __main__.py 就是 run
c:\Users\hcche\Documents\GitHub\peforth>python peforth
and
c:\Users\hcche\Documents>python -m peforth
時被執行的入口
see https://www.tuicool.com/articles/iYRfe2
https://stackoverflow.com/questions/44977227/how-to-configure-main-py-init-py-and-setup-py-for-a-basic-package
--> 11:51 2019-05-11 how about to have test.py that does what __main__.py is supposed to do when
running ~GitHub\peforth>python peforth?
--> this is a good idea, but the path in __init__.py will be wrong, deal with it!!
--> 從 __init__.py 裡面處理 path 處添加可能找到 version.txt 的地方即可。 成功。
--> 成功了,能直接執行就好,不一定要堅持像早期一樣執行 peforth 目錄。
執行方法: c:\Users\hcche\Documents\GitHub\peforth\peforth>python test.py
__run__.py --> 最終命名為 test.py 最自然
# 各種方法都試過,最後還是用 exec(open().read()) 最像 include
# from . import __init__
# from __init__ import ok
# import subprocess; subprocess.call("__init__.py", shell=True)
exec(open("__init__.py").read()) # this is like: include __init__.py
ok('\n')
[X] __main__.py 還是要用 import peforth 的,若不然一開始 open("__init__.py") 就 file not found 了。
而 test.py 當然是在對的 directory 之下才能執行,所以叫做 test.py ;-D
[x] 若要餵進 "python test.py foo bar" 執行 command line 則 test.py 就要用來分辨
是否「從 ipython, jupyternotebook 執行」 (參見 quit.f) 所以 test.py 檔名
就不能改了,要改連 quit.f 也要一起改。或者改進 quit.f 裡分辨 ipython 的方法。
\ ~~~~~~ quit.f ~~~~~~
\ When in ipython or jupyter notebook the command line is used by
\ ipython already. In jupyter notebook, it looks like:
\
\ vm.commandline ----------------------------------------------------------------------------------.
\ sys.argv[0] --------. |
\ | |
\ V V
\ --------------------------------------------------------------------------------------------- --------------------------------------------------------------------------------------------------
\ c:\users\hcche\appdata\local\programs\python\python36\lib\site-packages\ipykernel_launcher.py -f C:\Users\hcche\AppData\Roaming\jupyter\runtime\kernel-4be53345-1ddd-47c2-bef2-5e9801688f3f.json
\ So peforth can't support command line statements for ipython and jupyter notebook.
\ For none ipython cases, I have no better idea than to check sys.argv[0] for '.py'
\ and the likes so far 2019-05-15. See the following code, the filename 'test.py' is
\ fixed-coded here therefore.
\
[X] command line 也是跑 site-package 之外的 .f 檔的方法,例如:
c:\Users\hcche\Documents\GitHub\peforth\peforth>python test.py include ..\playground\misc.f
c:\Users\hcche\Documents\>python -m peforth include GitHub\peforth\playground\misc.f
這兩行都可以。
[x] 18:35 2019-05-09 我忘了 peforth 要怎麼 maintain 了!!!! 以上程式要改到哪裡去?
--> 直接在 github working directory 修改
--> 這樣 run 到的還是 installed 到 site-packages 的版本,因為 __main__.py 其實是 import peforth
c:\Users\hcche\Documents\GitHub\peforth>python peforth
16:48 2019-05-11 這個早期的 run 法如今 改成
c:\Users\hcche\Documents\GitHub\peforth\peforth> python test.py
--> 16:48 2019-05-10 奇怪 LRV2 OA 上 pip list 看到的 peforth 是 1.21!!
但是 python -m peforth 跑到的是 1.22,經過 pip uninstall peforth 之後
馬上 pip list 卻看到了 peforth 1.23 (對了)
[x] 16:38 2019-05-22 release v1.23 時在 T550 又看到類似現象: pip uninstall peforth
之後有把 python setup.py install 灌上的 v1.23 uninstall 掉,但是 site-packages 裡面一查,
仍有 v1.22 的 egg 存在 --> 直接再 pip uninstall peforth 一次,才把它 uninstall 掉。
--> pip install peforth 下來的在 site-packages 裡面就沒有 egg 字樣,如此可供分辨。同時
也證實 pip uninstall 不會 remove egg 版的 (python setup.py install上去的) 要下
多次 pip uninstall peforth 才輪得到舊版。
--> 我猜: 剛才改好程式之後用 ~\GitHub\peforth>python setup.py install 安裝進 site-package
的 1.23 並沒有蓋掉原來的, 因為這時裝上的是 egg, path 與用 whl 裝上的不同!
[X] 經過 c:\Users\hcche\Documents\GitHub\peforth>python setup.py install
之後,確實會直接有類似 pip install 的效果 --> 可以 python -m peforth 執行了,但是 path 不同
pip install 的 c:\Users\hcche\AppData\Local\Programs\Python\Python36\lib\site-packages\peforth\version.txt
setup.py 的 c:\Users\hcche\AppData\Local\Programs\Python\Python36\lib\site-packages\peforth-1.23-py3.6.egg\peforth\version.txt
[X] 用 ~\GitHub\peforth>python setup.py install 安裝進 site-package 雖然 path 不同,jupyter notebook
完全沒問題,頂多 Kernel > Restart 一下,馬上生效。完全符合我 「從 source 直接 install」 的期待,免去 pip install
或先前暴力 setup.bat 的麻煩。
結論:
1. 直接修改 GitHub source code (善用 GitHub 保障各版本安全)
2. pip uninstall peforth 把舊的清乾淨
3. c:\Users\hcche\Documents\GitHub\peforth>python setup.py install 從 source code 安裝
4. 有兩種方式執行、測試
a. 用 Jupyter Notebook 試驗,只要 Kernel > Restart 新版就生效了。
b. 執行 c:\Users\hcche\Documents\GitHub\peforth\peforth>python test.py
5. repeat
[X] 19:07 2019-05-13 這段 code 在 peforth.f 裡面本應處裡 alias 的新 // help, 但是又有問題
\
\ Redefine // to "replace" alias' help message instead of "append".
\
\ Append if last().help has stack diagram but no help message, otherewise replace.
\ Stack diagram might be unexpectedly given again. That can be resolved by putting
\ complete help message to the original word or use the trick of // dummy and then
\ // again or simply don't give it again in the alias' help message.
\
<py>
'''
m = re.match("(?P<before>.*?)(?P<stackdiagram>\(.*\))(?P<after>.*)", last().help)
if m and (m.groupdict()['before'] + m.groupdict()['after']).strip()=="":
last().help += nexttoken('\\n|\\r');
else:
last().help = nexttoken('\\n|\\r');
'''
</pyV> -indent ' // py: pop().xt=genxt("//",pop(1))
問題如下,有些東西 help 裡面的 stack diagram 不見了!!
[r Prepare an array of data to compare with rstack in selftest.
Example: [r 1,2,3 r] [d True d] [p 'word1','word2' p]
[r...r] section is optional, [d...d] section is the judge.
--> 點掉也沒用! --> 13:34 2019-05-15 misc.f 裡面的新 ( comment ) 造成的。
--> 19:15 2019-05-15 已經乾脆放棄讓 (comment) 自動進 help 了,要 help 用 // 就好了。
(comment) 直接改成 nested 的,更好。 v1.23
[X] 14:06 2019-05-15 現在覺得原來的 (comment) 沒有我 gist words4jupyter.py 的 nested (comment) 好。
何必搞個這麼難懂的 (comment) 就會了讓 stack diagram 進 last.help 而已,有 // 就夠了!
[X] 16:39 2019-05-16 本來的 // 一直想著前面有 (comment) 已經進 help 了!所以他是用 += 的,
難怪有這個問題,不要了,直接用 last().help = nexttoken('\n|\r'); 就好了。 v1.23
\ to be
code ( # ( <str> -- ) // Comment down to ')' which can be nested if balanced
nextstring('\(|\)')['str'] # skip TIB to the next delimiter
cc = tib[ntib] # cc must be delimiter '(', ')', or '\n'
vm.ntib+=1 # skip any of them
if cc=='(':
execute(_me) # recursion of (
execute(_me) # recursion of )
end-code immediate
\ was
code ( # ( <stack diagram> -- ) Get stack diagram to the last's help.
a = nexttoken('\\)')
b = nexttoken() # the ')'
if compiling and last().help=="": # skip if help alreay exists
last().help = '( ' + a + b + ' '
end-code immediate
/// Nested not allowed yet.
[X] 經 marker 刪除的 value & constant 留在 vm[context] 裡面的 garbage
沒有回收! marker 還要再加強,forget 也要注意。
--> 123 value x char abc value ss vm.forth dict>keys -->
dict_keys(['CRLF', 'obj2dict', '_locals_', 'debug', 'screen-buffer',
'description', 'expected_rstack', 'expected_stack', 'test-result',
'[all-pass]', 'xxx', 'x', 'y', 'ss'])
^^^ ^^^^ 有在 vm.forth 裡面
--> 執行 marker --> words 裡沒有 x, ss 了, 當然 --> 但是 vm.forth 裡還是存在,造成堆積!!
v1.23 還是有這個問題,不知道該怎麼做。。。。
FP, see below 2020/07/27 08:38:15 value constant to 要重新定義. . . . .
[X] 改寫所有的 code words 把彆扭的 help 用新的 # 功能改自然點。
done! v1.23
[X] quit.f 裡的怪東西都不要了 --> inport, outport, harry_port v1.23
[X] 把 gist 上的東西 include 進來,最主要的是有 support nesting 的 (comment) v1.23
[X] 取消 colon definition 中第一個 ( ... ) 的作用,只用 // 即可留 help
--> 唉,試了就知道,很醜! v1.23 真的實現了
Notepad++ ^h replace regular expression
Find what: "(\(\s+.*\))\s+(//)"
Replace with: "// \1"
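        The same replacement expressed with Python's re module, in case it needs to be scripted
        instead of done interactively in Notepad++ (this only restates the find/replace above;
        the sample line is made up):
            import re
            line = ': foo ( a b -- c ) // add things ;'
            print(re.sub(r"(\(\s+.*\))\s+(//)", r"// \1", line))
            # : foo // ( a b -- c ) add things ;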
[x] dos , cd 太重要了,從 misc.f 移進 peforth.f
[X] 17:53 2019-05-11 接下來考慮出 v1.23 版。
[X] complete self-tests for new words , many are commented out.
[X] 評估 misc.f unknown.f quit.f 的內容要怎麼分配 --> 全部放進 misc.f 加個 marker 全自動 load 進去。
--> 不要的人只要跑一下 marker 就可以全清掉。
--> 這些東西的 self-test 就要自己做,不能放 peforth.selftest 裡。
[X] peforth.f source code 裡還有很多中文
[X] 好像 *debug* 出不來....
--> 喔喔 是給 breakpoint 用的 exit 出的問題。
--> 趁放進 misc.f 的機會給它改名吧! quit
[X] 測試 jupyter notebook
[x] established the method to include misc.f from within quit.f
[X] 測試 ipython (DOS box)
[X] 進 ipython 之後 import peforth 看起來 self-test 都 ok, 但是從此之後 ipython 就無法輸出了。
執行 ipython -m peforth 也一樣。
--> ipython 自己的 display 也被關了,執行 peforth.dictate('display-on') 即可恢復。
--> 是 selftest 的 display-off 造成的? --> 槓掉 self-test 試試看... 真的好了!
連 ipython -m peforth 也好了。
--> 執行 c:\Users\hcche\Documents\GitHub\peforth\peforth>ipython test.py self-test 與之後的
功能都沒問題.
[X] self-test on > 做出問題 > 然後下達 display-on 之後,治好了!證實 root cause 是 display-off. (最後發現,錯!不是這樣)
but where? --> 從 quit.f 裡把 misc.f comment out 也好了, 故問題在他裡面。--> 找到了, pyclude 的
self-test 之前 stop 就好了 --> 查 display-off 怎麼弄的? --> display-on 只是 reset sys.stdout 而已
無可挑剔。算了,有 workaround 就好了。
[x] WSL Ubuntu 之下 display-off 之後的斷點 *debug* 也怪怪的,本想在其中下 display-on 再回來繼續,
結果一 exit 回來就回 Shell 了。試過 time delay 如下也無效。
<py>
# 拖時間
factorial = 1
for i in range(2,10000):
factorial = factorial * i
</py>
[X] 15:24 2019-05-22 靠! 連 Windows DOS 下也出現了這個問題, SRP: working directory 的差別
有問題 c:\Users\hcche\Documents\GitHub\peforth>python -m peforth
沒問題 c:\Users\hcche\Documents\GitHub\peforth\peforth>python -m peforth
發生在 *** (pyclude) 之前 --> 故意先做個 display-off on 看看....
RI: Bingo! Shit! selftest 裡 pyclude hello.py 必須以其所在位置為 working directory
[x] 15:55 2019-05-22 發現這個 root cause 是耐心跑 v1.23 release check-list 時發現的,所以
那個 check-list 還是要好好做。
[/] 測試 ubuntu 的 ipython ---> 放棄,error message 如下:
hcchen@WKS-4AEN0404:/mnt/c/Users/hcche/Documents/GitHub/peforth$ ipython
Command 'ipython' not found, but can be installed with:
sudo apt install ipython
hcchen@WKS-4AEN0404:/mnt/c/Users/hcche/Documents/GitHub/peforth$ sudo apt install ipython
[sudo] password for hcchen:
Reading package lists... Done
Building dependency tree
Reading state information... Done
Package ipython is not available, but is referred to by another package.
This may mean that the package is missing, has been obsoleted, or
is only available from another source
E: Package 'ipython' has no installation candidate
[X] Error! tib. unknown! --> 改成 "-->" 了 1.23
[X] 改寫 pypi.org 上的 readme.rst 本來的例子不太好了, pdb 其實很強。
改用 Azure notebook 介紹 ipython 的 magic command 比較好。
[X] 17:33 2019-05-19 改了 Github.com 上的 README.md , local 的 .rst
[X] jupyter notebook 用 import peforth 就很好用了,
把 readme.md 裡沒有的 peforth kernel 拿掉,移進 Wiki 裡去。
--> 17:34 2019-05-19 done
[X] 把 misc.f hello.py 等都加進 package
[X] Test ubuntu 發現 cd 有必要進 peforth.f 但 dos 就該留在 misc.f 裡,且要判斷 os 是哪個。
--> py> os.name . cr ( posix or nt )
[X] v1.23 測試 ubuntu --> 靠!都忘了怎麼測試了,可以不經過 pip 版嗎?
09:35 2019-05-22
--> T550 ubuntu 16.04 連 pip 都沒有, python 版本也搞不清, 更不用說 virtualenv 了。
--> 感覺用 Linux 很恐慌,乾脆把 T550 上的 Ubuntu 16.04 remove 掉,改用新版的,希望可以避開
python 版本的問題。(See Ynote:"[筆記] Install Mozilla DeepSpeech Project" > "wsl ubuntu install python3.6.txt")
--> 09:39 2019-05-22 T550 Ubuntu removed --> The most recent is still 18.04 on
Microsoft Store, so be it --> 10:43 2019-05-22 WSL installed
--> how's the built-in python? --> See Ynote "好久沒玩 WSL Ubuntu, 為了 release peforth v1.23 測試整個再玩一次"
[X] 有 python 3.6.5 built-in 沒有 pip <--- 先不管它,只測 python test.py 過了再說。 --> 一番折騰,過了!
[/] 沒有 pip 可以 python -m peforth 嗎? 試試 python setup.py install 結果失敗
hcchen@WKS-4AEN0404:/mnt/c/Users/hcche/Documents/GitHub/peforth$ python setup.py install
Traceback (most recent call last):
File "setup.py", line 4, in <module>
from setuptools import setup
ModuleNotFoundError: No module named 'setuptools' <------------------
hcchen@WKS-4AEN0404:/mnt/c/Users/hcche/Documents/GitHub/peforth$
[X] 看了這篇 https://askubuntu.com/questions/861265/python-3-importerror-no-module-named-setuptools-ubuntu-14-04-lts
決定放棄,有測過 test.py 就好了。
[/] 即使上了 pypi.org 也還需要 pip (但 18.04 default 沒有), 不管了,有測過 test.py 就好了。
[/] 上了 pypi.org 之後,再用 Azure Notebooks 測試。
[X] Tests before a Release v1.23
[X] setup.py 裡的 copy right 年份要改成 2019
*** 打包上 pypi.org 的方法 setup.bat 可以大簡化了。
[V1.22之後的新版] 打包步驟 2018/12/16 11:02
See my Ynote: "Pack peforth to peforth.whl"
1. 檢查 ~\GitHub\peforth\setup.py 看有沒有漏掉新檔案,有沒有要去掉的檔案。
2. 跑 c:\Users\hcche\Documents\GitHub\peforth>python setup.py sdist bdist_wheel
得到 peforth.whl in c:\Users\hcche\Documents\GitHub\peforth\dist
3. 執行 c:\Users\hcche\Documents\GitHub\peforth>twine upload dist/*
需要帳號密碼,看這裡 Ynote: "python pypi 研究 -- upload to PyPI ok now.note"
4. pip uninstall peforth 然後再 pip install peforth 試驗看看。
5. 完成!
[X] See (15:55 2019-05-22) 這個 check-list 要耐心好好做完!
[X] 所有 run 法帶 selftest:
[X] 改 GitHub\peforth\quit.f
' <selftest> :: enabled=True
[X] Run python setup.py install 更新本地 site-package 版本以供測試
[X] 1. python -i -m peforth [/] with-selftest .s words exit bye
[X] 2. ipython -i -m peforth .' Hello World!!' cr bye
[X] 3. ipython import peforth .s words
[x] selftest peforth.ok() .s words <--- w/parent
[x] 1234 bye check echo %errorlevel% <-- 從 ipython 下直接出來也無誤。
[X] 4. jupyter notebook
kernel > restart and clear outputs
x = 123
%f x .
x . \ ==> 123 (<class 'int'>)
[/] 5. jupyter notebook --> peforth kernel --> .s words
[/] 考慮 README.rst 改良
[X] 若有改過 README.rst 則 wheel 就要重做
--> quit.f selftest=False --> 重來
[X] 所有 run 法不帶 selftest 跑一遍,準備要 release 的版本:
[X] 改 GitHub\peforth\quit.f
' <selftest> :: enabled=False
[X] 做出取消 selftest 的 wheel
See my Ynote: "Pack peforth to peforth.whl"
[x] 1. 檢查 ~\GitHub\peforth\setup.py 看有沒有漏掉新檔案,有沒有要去掉的檔案。
test.py hello.py misc.f
[X] 2. 跑 c:\Users\hcche\Documents\GitHub\peforth>python setup.py sdist bdist_wheel
得到 peforth.whl in c:\Users\hcche\Documents\GitHub\peforth\dist
[X] pip uninstall peforth
[X] 切 CD 到 c:\Users\hcche\Documents\GitHub\peforth\dist>
pip install peforth-1.23-py3-none-any.whl <== 注意!用剛做好的 wheel 否則會上網抓。
[X] 1. (i)python -i -m peforth [/] no-selftest .s words exit
[X] 2. (i)python -i -m peforth version 12345 bye --> echo %errorlevel%
[X] 3. (i)python import peforth
[X] no selftest, peforth.ok() .s words <--- no parent
[X] 1234 bye check echo %errorlevel%
[X] 4. jupyter notebook --> *debug* ok> .s cd help exit
%f %%f magic command
[/] 5. repeat 以上 in ubuntu <------- Ubuntu 18.04 沒有 pip built-in 不想搞了
[/] pip uninstall peforth
[/] pip install (use /mnt/...the wheel) to WSL ubuntu
[/] ipython -m peforth
[/] ipython , import peforth , magic commands
[X] 直接用測過的 wheel update Pypi
執行 c:\Users\hcche\Documents\GitHub\peforth>twine upload dist/*
需要帳號密碼,看這裡 Ynote: "python pypi 研究 -- upload to PyPI ok now.note"
ID, password search my Ynote with pypi _account_
[X] 查看 pypi.org 網頁,若不行,晚點 (過幾分鐘就好) 再看。
[/] WSL Ubuntu 下試 pip uninstall peforth -> pip install peforth
[/] WSL Ubuntu with and w/o w/o virtualenv --> python -m peforth
[X] Windows DOS 下試
[X] Test Azure Online Jupyter Notebooks
https://peforthplayground-hcchen1471.notebooks.azure.com/j/notebooks/peforth-playground.ipynb
!pip install peforth
import peforth
%f version drop
x = 12345
%f x --> \ 查看 unknown 的效果
[X] Make a master release up to GitHub --> 用 GitHub Windows 很簡單。
1. 先 commit 上 develop branch, upload 上網上 Github.
2. 切到 master
3. 用 GitHub for Windows desktop 的 Branch > Merge into current branch 選 develop 把它 merge 過來。
解決 conflicts 之後完成 merge.
4. 再 repeat 2-3 但切到 develop 把 master merge 過去。
Master 上的應該是些 README.md 的修改。
[X] version 改成 1.24 (必須跳過 1.20 直接到 1.21 否則會變成 1.2)
[X] 11:28 2019-05-26 make a master merge for the article of Febenacci and Decorator
[X] rename the article to 'peforth helps to understand python Decorator'
[/] 11:35 2019-05-26 write an article to introduce 'unknown'
--> forget this, covered already.
[/] 11:35 2019-05-26 find the video I introduce 'unknown' and the other thing
--> forget this, covered already.
[X] 09:11 2019-11-21 本來跑 GitHub\peforth\setup.bat 讓改好的新版生效,在 anaconda 之下還行嗎?
1. 跑 anaconda's prompt make sure python runable
2. peforth runable too, check path
3. cd to GitHub\peforth run setup
4. check peforth
OneNote 筆記:
"Develop peforth in an Anaconda virtual environment"
https://onedrive.live.com/view.aspx?resid=A796EA18AC8C1DA9%2112289&id=documents&wd=target%28Anaconda.one%7CB4E0DFAB-84F7-43D2-A5AB-515B43314252%2FDevelop%20peforth%20in%20an%20Anaconda%20virtual%20environment%7C99DE5C5F-B36D-4949-9471-BC7A857E3C2B%2F%29
[X] 16:54 2019-07-22 從這裡 https://jupyter-contrib-nbextensions.readthedocs.io/en/latest/install.html 讀到
有從 github repo 上直接 pip install 的方法,e.g.:
pip install https://github.com/ipython-contrib/jupyter_contrib_nbextensions/tarball/master
試試看 peforth 可不可以這樣 install ? 可以的話就不用上 pypi 了
pip install https://github.com/hcchengithub/peforth/master
or
pip install https://github.com/hcchengithub/peforth
==> 結果兩個都失敗
[ ] GitHub 有開始做 package hosting 了:
https://help.github.com/en/github/managing-packages-with-github-packages/about-github-packages#supported-clients-and-formats
[X] 2019/11/24 06:10:22 projectk.py 裡 import 好多它本身不用的 modules (它自己只
用到 re regular expression 一個) 我的註解說:
import re # import whatever we want, don't rely on parent module e.g. peforth __init__.py
也是有理,因為 projectk.py kernel 有自己的 space. 然而 modules 應該是 global
的, 不是嗎?從 forth code 裡 import 不行嗎? --> 試了就知道,把 projectk.py 裡
多餘的 imports comment 掉 --> 出問題的時候很晚,只要是 native modules 有機會解決.....
來自 help import 的 hints
\ import os __main__ :: peforth.projectk.os=pop(1) \ peforth global , does not work when run by 'python test.py'
import os py> vm :: os=pop(1) \ this works! when run by 'python test.py'
import inspect py> vm :: inspect=pop(1)
import dis py> vm :: dis=pop(1)
import json py> vm :: json=pop(1)
但是 sys 太根本了必須要在 projectk.py 裡 import 好。
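        What the hint lines above boil down to on the Python side (a simplified sketch; the real vm
        is projectk's VM object, faked here with a tiny class): "import os py> vm :: os=pop(1)"
        imports the module from FORTH and then attaches it as an attribute of vm, so later py>/py:
        snippets can reach vm.os even though projectk.py itself never imported it.
            # sketch_vm_import.py -- illustrative only
            import os

            class FakeVM: pass
            vm = FakeVM()

            # equivalent of:  import os  py> vm :: os=pop(1)
            setattr(vm, 'os', os)      # vm.os now works inside py>/py: snippets

            print(vm.os.getcwd())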
[X] setup.py 裡的 copy right 年份要改成 2019
[/] 2019/11/24 05:20 用 Anaconda 之後似乎 kernel.json 也有問題?
裡面描述的 peforthkernel.py path 是寫死的,在我 OA、Anaconda 上就不對了。
好像只要無意把 peforth 加進 JupyterNotebook 的 kernel 就沒問題。
[X] 05:29 2019-11-21 projectk.py 裡面的 local, Comment, debug 這三個 global token 好像是多
餘的, 有空檢討看看.
[X] local 可能是 ok(prompt='OK ', loc={}, glo={}, cmd="") 或 redefined unknown 用的 <--- 不是
13:47 2019/11/25 delete all suspected things from projectk.py --> dos ok, jupyternotebook ok.
* 注意!setup.bat 不會更新 site-packages 的 peforth\ folder 要手動從 peforth-1.24-py3.7.egg <== 2020.7.28 解了! see OneNote2020 > "Develop peforth in an Anaconda virtual environment"
copy peforth\ 來蓋過 site-packages 裡的 peforth\ folder.
refer to https://onedrive.live.com/view.aspx?resid=A796EA18AC8C1DA9%2112289&id=documents&wd=target%28Anaconda.one%7CB4E0DFAB-84F7-43D2-A5AB-515B43314252%2FDevelop%20peforth%20in%20an%20Anaconda%20virtual%20environment%7C99DE5C5F-B36D-4949-9471-BC7A857E3C2B%2F%29
[X] 14:41 2019/11/25 quit.f 裡這種東西應該要改良,太笨了:
import os py> vm :: os=pop(1) \ 太笨
import os \ 應該改良成這樣
[X] 15:21 2019/11/25 整理 peforth.f quit.f peforth.selftest 的關係,更有系統了。
__init__.py 只 load 進基本的 peforth.f quit.f 其他的都由 quit.f 負責,使 quit.f
成為 eforth 系統的 main program, 統籌者。
[X] 16:07 2019/11/25 一舉搞懂 pop(1)
code test # ( a b c -- ) print given things
print(pop(), pop(), pop()) end-code
1 2 3 test
3 2 1 <-- 結果,顯示三個 pop() 是從左到右抓取 TOS 的。
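        The same experiment in plain Python, showing why "1 2 3 test" prints "3 2 1": print()'s
        arguments are evaluated left to right, and each pop() removes the current top of stack.
        A toy list stands in for the data stack here.
            stack = [1, 2, 3]          # toy data stack, TOS is the right end
            pop = stack.pop            # each call removes and returns the current TOS
            print(pop(), pop(), pop()) # 3 2 1 -- leftmost argument evaluated first, gets 3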
[X] Tests before a Release v1.24
[X] setup.py 裡的 copy right 年份要改成 2019
*** 打包上 pypi.org 的方法 setup.bat 可以大簡化了。
[V1.22之後的新版] 打包步驟 2018/12/16 11:02
See my Ynote: "Pack peforth to peforth.whl"
1. 檢查 ~\GitHub\peforth\setup.py 看有沒有漏掉新檔案,有沒有要去掉的檔案。
2. (記得先把 dist , build , peforth.egg-info 等 folder 先殺掉) 跑
c:\Users\hcche\Documents\GitHub\peforth>python setup.py sdist bdist_wheel
得到 peforth.whl in c:\Users\hcche\Documents\GitHub\peforth\dist
3. 執行 c:\Users\hcche\Documents\GitHub\peforth>twine upload dist/*
需要帳號密碼,看這裡 Ynote: "python pypi 研究 -- upload to PyPI ok now.note"
4. pip uninstall peforth 然後再 pip install peforth 試驗看看。
5. 完成!
[X] See (15:55 2019-05-22) 這個 check-list 要耐心好好做完!
[X] 所有 run 法帶 selftest:
[X] 改 GitHub\peforth\quit.f
' <selftest> :: enabled=True
[X] 先通過最基本的 selftest: GitHub\peforth\peforth>python test.py
[X] Run python setup.py install 更新本地 site-package 版本以供測試
[X] 可能要 (Anaconda virtualenv 之下) 從 site-packages\peforth-1.24-py3.7.egg
裡 copy peforth\ 去蓋掉 site-packages\peforth\ 這樣 upgrade 才有生效。
[X] 1. python -i -m peforth [X] with-selftest .s words exit bye
[X] 2. ipython -i -m peforth .' Hello World!!' cr bye
[X] 3. ipython import peforth .s words
[X] selftest peforth.ok() .s words <--- w/parent
[X] 1234 bye check echo %errorlevel% <-- 從 ipython 下直接出來也無誤。
[X] 4. jupyter notebook
kernel > restart and clear outputs
x = 123
%f x .
x . \ ==> 123 (<class 'int'>)
[X] 5. jupyter notebook --> peforth kernel --> .s words
[/] 考慮 README.rst 改良
[/] 若有改過 README.rst 則 wheel 就要重做
--> quit.f selftest=False --> 重來
[X] 所有 run 法不帶 selftest 跑一遍,準備要 release 的版本:
[X] 改 GitHub\peforth\quit.f
' <selftest> :: enabled=False
[X] 同上 python test.py 先試試看
[X] Run python setup.py install 更新本地 site-package 版本以供測試
[X] 可能要 (Anaconda virtualenv 之下) 從 site-packages\peforth-1.24-py3.7.egg
裡 copy peforth\ 去蓋掉 site-packages\peforth\ 這樣 upgrade 才有生效。
[X] 同上 repeat 1) python -m peforth 2) ipython -m peforth
[X] 做出取消 selftest 的 wheel
See my Ynote: "Pack peforth to peforth.whl"
[X] 1. 檢查 ~\GitHub\peforth\setup.py 看有沒有漏掉新檔案,有沒有要去掉的檔案。
test.py hello.py misc.f
[X] 2. 跑 c:\Users\hcche\Documents\GitHub\peforth>python setup.py sdist bdist_wheel
得到 peforth.whl in c:\Users\hcche\Documents\GitHub\peforth\dist
[X] pip uninstall peforth
site-packages 下兩個 peforth folder 刪掉了。
setup.bat 建立的 EGG 檔 peforth-1.24-py3.7.egg 也刪掉,否則 pip install 會
被 skip 過去。
[X] 切 CD 到 c:\Users\hcche\Documents\GitHub\peforth\dist>
pip install peforth-1.23-py3-none-any.whl <== 注意!用剛做好的 wheel 否則會上網抓。
[X] 1. (i)python -i -m peforth [X] no-selftest .s words exit
[X] 2. (i)python -i -m peforth version 12345 bye --> echo %errorlevel%
[X] 3. (i)python import peforth
[X] no selftest, peforth.ok() .s words <--- no parent
[X] 1234 bye check echo %errorlevel%
[X] 4. jupyter notebook --> *debug* ok> .s cd help exit
%f %%f magic command
[/] 5. repeat 以上 in ubuntu <------- Ubuntu 18.04 沒有 pip built-in 不想搞了
[/] pip uninstall peforth 已知 Colab & Azure 都是 Ubuntu 故不必自己多測了
[/] pip install (use /mnt/...the wheel) to WSL ubuntu
[/] ipython -m peforth
[/] ipython , import peforth , magic commands
[X] 直接用測過的 wheel update Pypi
執行 c:\Users\hcche\Documents\GitHub\peforth>twine upload dist/*
需要帳號密碼,看這裡 Ynote: "python pypi 研究 -- upload to PyPI ok now.note"
ID, password search my Ynote with pypi _account_
Note: Anaconda base 沒有 twine, 在 Anaconda Navigator 裡找到 twine 把它勾起來 Apply.
[X] 查看 pypi.org 網頁,若不行,晚點 (過幾分鐘就好) 再看。
[X] Windows DOS 下試
[/] WSL Ubuntu 下試 pip uninstall peforth -> pip install peforth
[/] WSL Ubuntu with and w/o w/o virtualenv --> python -m peforth
[X] Test Online Jupyter Notebooks Google Colab, Microsoft Azure, and Notebooks.ai
!pip install peforth
import peforth
%f version drop
x = 12345
%f x --> \ 查看 unknown 的效果
\ Colab & Azure 都用 Ubuntu 查版本, Notebooks.ai 用 Debian 都可用這行指令
!cat /etc/os-release
%f py> path --> \ 查看 path 發現 Azure 就是用 Anaconda 所以它有 support Ubuntu!
%pwd \ 查看 working directory
[x] Colab https://colab.research.google.com/drive/1nZpybQryEiwYzpMvG1tHg4qbNnd_rMey#scrollTo=yAuF9DZcrFaT
[X] Azure https://peforthplayground-hcchen1471.notebooks.azure.com/j/notebooks/peforth-playground.ipynb
[X] notebooks.ai 也測測看
[X] Make a master release up to GitHub --> 用 GitHub Windows 很簡單。
1. 先 commit 上 develop branch, upload 上網上 Github.
2. 切到 master
3. 用 GitHub for Windows desktop 的 Branch > Merge into current branch 選 develop 把它 merge 過來。
解決 conflicts 之後完成 merge.
4. 再 repeat 2-3 但切到 develop 把 master merge 過去。
Master 上的應該是些 README.md 的修改。
[X] version 改成 1.25 (必須跳過 1.20 直接到 1.21 否則會變成 1.2)
[X] 要不要把 projectk.py sync 回 project-k
(很早以前) projectk.py 改了一點,忘了如何 sync 回 project-k 的?
05:30 2019-11-21 peforth source code 裡的 projectk.py 本身不是從 github 直接下來的, 而是
硬放上去的,因此不會與 project-k github 自動同步 <--- 想想看怎麼辦。
[ ] 15:56 2019/11/25 經常要 (see) 東西都會出這個問題:
Callable in phaseB <function compyle_anonymous at 0x00000232B1164A68>: Circular reference detected
問問看有沒有 workaround ?
[ ] 13:42 2019/11/27 下回 release 要在 README.rst ~.md 裡明列有測過的系統:
1. Windows Anaconda DOSBox pyhon 3.7, DOSBox ipython, JupyterNotebook, JupyterLab
2. Colab (Ubuntu,Anaconda), Azure notebooks (Ubuntu), Notebooks.ai (Debian)
[X] 2020/07/27 08:33 可以把 [obj>keys] 'keys' 定義成 dir | dict>keys 這樣就不會與 dir 重複了。又可以與 jeforth 相容。
[X] 2020/07/27 08:38:15 value constant to 要重新定義,不要再用 vm.forth 存放了,改用 variable 自己 word.
See OneNote2020 > "Jeforth variable 變革" --> 成功了。
[ ] 考慮 projectk.py 本身也上 pypi , 可以 pip install projectk 更有意義!
[X] 07:49 2020/10/04 參考 KsanaVM 發現我原先對 prompt 的時機有誤解,改好了。
[ ] 15:49 2020/10/24 v1.25 好了以後 projectk.py 要 sync 回 projectk
[X] 15:30 2020/10/24 準備 release v1.25 to pypi so as to allow gom to have it easily
[X] 15:54 2020/10/24 先試試看 gom ok? --> Pass, 連 selftest 也都 pass.
[X] setup.py 裡的 copy right 年份要改成 2019
*** 打包上 pypi.org 的方法 setup.bat 可以大簡化了。
[V1.22之後的新版] 打包步驟 2018/12/16 11:02
See my Ynote: "Pack peforth to peforth.whl"
1. 檢查 ~\GitHub\peforth\setup.py 看有沒有漏掉新檔案,有沒有要去掉的檔案。
2. (記得先把 dist , build , peforth.egg-info 等 folder 先殺掉) 跑
c:\Users\hcche\Documents\GitHub\peforth>python setup.py sdist bdist_wheel
得到 peforth.whl in c:\Users\hcche\Documents\GitHub\peforth\dist
3. 若無 twine 則 pip install twine 很快很順
執行 c:\Users\hcche\Documents\GitHub\peforth>twine upload dist/*
需要帳號密碼,看這裡 Ynote or Evernote: "python pypi 研究 -- upload to PyPI ok now.note"
4. pip uninstall peforth 然後再 pip install peforth 試驗看看。
5. 完成!
[X] See (15:55 2019-05-22) 這個 check-list 要耐心好好做完!
[ ] 所有 run 法帶 selftest:
[X] 改 GitHub\peforth\quit.f
' <selftest> :: enabled=True
[X] 先通過最基本的 selftest: GitHub\peforth\peforth>python test.py
[X] Run python setup.py install 更新本地 site-package 版本以供測試
[X] 要 (Anaconda virtualenv 之下) 從 site-packages\peforth-1.24-py3.7.egg
裡 copy peforth\ 去蓋掉 site-packages\peforth\ 這樣 upgrade 才有生效。
16:27 2020/10/24 不必這樣,因為 python setup.py install 灌好的 peforth v1.25 是
c:\Users\8304018\AppData\Local\Continuum\anaconda3\lib\site-packages\peforth-1.25-py3.7.egg
而 pip install peforth 灌好的是另一個 peforth v1.25
c:\Users\8304018\AppData\Local\Continuum\anaconda3\lib\site-packages\peforth\
兩個可以並存!而且後者優先。只要把後者 directory name 改成 peforth.disabled
就可以讓前者生效,前者是 local install 測試時有其方便性。
[X] 1. python -i -m peforth [X] with-selftest .s words exit bye
[X] 2. ipython -i -m peforth .' Hello World!!' cr bye
[/] 3. ipython import peforth .s words
[/] selftest peforth.ok() .s words <--- w/parent
[/] 1234 bye check echo %errorlevel% <-- 從 ipython 下直接出來也無誤。
[X] 4. jupyter notebook
kernel > restart and clear outputs
x = 123
%f x .
x . \ ==> 123 (<class 'int'>)
[X] 5. jupyter notebook --> peforth kernel --> .s words
[X] 6. Gom 手動移除現有的 peforth directories from:
c:\Users\8304018\AppData\Roaming\gom\2020\python\..
然後從 SCRIPTING > Script Choice > pip install peforth > Tools > Install Python Package 灌 peforth 很快很順
import peforth, peforth_gom_port
執行 peforth.ok() 無誤。
新增 peforth_gom_port.py 放到 peforth repo 的 playground directory 裡。
[/] 考慮 README.rst 改良
[/] 若有改過 README.rst 則 wheel 就要重做
--> quit.f selftest=False --> 重來
[ ] 所有 run 法不帶 selftest 跑一遍,準備要 release 的版本:
[ ] 改 GitHub\peforth\quit.f
' <selftest> :: enabled=False
[ ] 同上 python test.py 先試試看
[ ] Run python setup.py install 更新本地 site-package 版本以供測試
[X] 可能要 (Anaconda virtualenv 之下) 從 site-packages\peforth-1.24-py3.7.egg
裡 copy peforth\ 去蓋掉 site-packages\peforth\ 這樣 upgrade 才有生效。
[ ] 同上 repeat 1) python -m peforth 2) ipython -m peforth
[ ] 做出取消 selftest 的 wheel
See my Ynote: "Pack peforth to peforth.whl"
[ ] 1. 檢查 ~\GitHub\peforth\setup.py 看有沒有漏掉新檔案,有沒有要去掉的檔案。
test.py hello.py misc.f
[ ] 2. 跑 c:\Users\hcche\Documents\GitHub\peforth>python setup.py sdist bdist_wheel
得到 peforth.whl in c:\Users\hcche\Documents\GitHub\peforth\dist
[ ] pip uninstall peforth
site-packages 下兩個 peforth folder 刪掉了。
setup.bat 建立的 EGG 檔 peforth-1.24-py3.7.egg 也刪掉,否則 pip install 會
被 skip 過去。
[ ] 切 CD 到 c:\Users\hcche\Documents\GitHub\peforth\dist>
pip install peforth-1.23-py3-none-any.whl <== 注意!用剛做好的 wheel 否則會上網抓。
[ ] 1. (i)python -i -m peforth [ ] no-selftest .s words exit
[ ] 2. (i)python -i -m peforth version 12345 bye --> echo %errorlevel%
[ ] 3. (i)python import peforth
[ ] no selftest, peforth.ok() .s words <--- no parent
[ ] 1234 bye check echo %errorlevel%
[ ] 4. jupyter notebook --> *debug* ok> .s cd help exit
%f %%f magic command
[ ] 5. repeat 以上 in ubuntu <------- Ubuntu 18.04 沒有 pip built-in 不想搞了
[ ] pip uninstall peforth 已知 Colab & Azure 都是 Ubuntu 故不必自己多測了
[ ] pip install (use /mnt/...the wheel) to WSL ubuntu
[ ] ipython -m peforth
[ ] ipython , import peforth , magic commands
[ ] 直接用測過的 wheel update Pypi
執行 c:\Users\hcche\Documents\GitHub\peforth>twine upload dist/*
需要帳號密碼,看這裡 Ynote: "python pypi 研究 -- upload to PyPI ok now.note"
ID, password search my Ynote with pypi _account_
Note: Anaconda base 沒有 twine, 在 Anaconda Navigator 裡找到 twine 把它勾起來 Apply.
[ ] 查看 pypi.org 網頁,若不行,晚點 (過幾分鐘就好) 再看。
[ ] Windows DOS 下試
[ ] WSL Ubuntu 下試 pip uninstall peforth -> pip install peforth
[ ] WSL Ubuntu with and w/o w/o virtualenv --> python -m peforth
[ ] Test Online Jupyter Notebooks Google Colab, Microsoft Azure, and Notebooks.ai
!pip install peforth
import peforth
%f version drop
x = 12345
%f x --> \ 查看 unknown 的效果
\ Colab & Azure 都用 Ubuntu 查版本, Notebooks.ai 用 Debian 都可用這行指令
!cat /etc/os-release
%f py> path --> \ 查看 path 發現 Azure 就是用 Anaconda 所以它有 support Ubuntu!
%pwd \ 查看 working directory
[ ] Colab https://colab.research.google.com/drive/1nZpybQryEiwYzpMvG1tHg4qbNnd_rMey#scrollTo=yAuF9DZcrFaT
[ ] Azure https://peforthplayground-hcchen1471.notebooks.azure.com/j/notebooks/peforth-playground.ipynb
[ ] notebooks.ai 也測測看
[ ] Make a master release up to GitHub --> 用 GitHub Windows 很簡單。
1. 先 commit 上 develop branch, upload 上網上 Github.
2. 切到 master
3. 用 GitHub for Windows desktop 的 Branch > Merge into current branch 選 develop 把它 merge 過來。
解決 conflicts 之後完成 merge.
4. 再 repeat 2-3 但切到 develop 把 master merge 過去。
Master 上的應該是些 README.md 的修改。
[X] version 改成 1.25 (必須跳過 1.20 直接到 1.21 否則會變成 1.2)
[ ] 要不要把 projectk.py sync 回 project-k
(很早以前) projectk.py 改了一點,忘了如何 sync 回 project-k 的?
05:30 2019-11-21 peforth source code 裡的 projectk.py 本身不是從 github 直接下來的, 而是
硬放上去的,因此不會與 project-k github 自動同步 <--- 想想看怎麼辦。
[X] 17:01 2020/10/24 v1.25 已經上了 pypi 也測過 Gom 成功,以上測試慢慢做,先上 github 再說。
[X] 14:22 2020/10/29 vm.prompt 是要給 gom port dialog 知道目前 prompt 否則只在 ok() 肚子裡。
[X] 13:52 2020/11/23 把 pypi 的 v1.25 直接換成 local 的 v1.26
--> 直接 copy __init__.py version.txt 蓋過去 c:\Users\8304018\AppData\Roaming\gom\2020\python\peforth
--> 11> <py> ok() </py> --> prompt 變成 ok , exit --> prompt 變回 11> 成功! 這就是 v1.26 無誤。
[X] 10:26 2020/11/26 改良 breakpoint 不需要改 peforth, 從 application 端外掛就可以了。
Usage of breakpoint:
peforth.bp(22,locals()) # drop breakpoint 22 with locals()
for i in [11,22,33]: peforth.bps[i]=0 # disable breakpoints 11,22,33
for i in [11,22,33]: peforth.bps[i]=i # enable breakpoints 11,22,33
peforth.bps=[i for i in range(1000)] # reload and enable all breakpoints
'exit' or ESC leaves the breakpoint and continue running.
'bye' to totally stop the script session.
# breakpoint
# peforth.bp() # drop a breakpoint using default prompt bp>
# peforth.bp(11) # drop a breakpoint using prompt bp11> w/p passing locals()
# peforth.bp(22,locals()) # drop a breakpoint using prompt bp22> with locals()
# peforth.bps=[] # disable all breakpoints
# peforth.dictate("peforth :: bps=[]") # disable all breakpoints
# peforth.dictate("peforth :: bps=[123,345,567]") # enable only listed breakpoints
# peforth.dictate("peforth :: bps[123]=0") # disable the breakpoint 123
# peforth.dictate("peforth :: pop(111)") # disable the breakpoint 111
# for i in [11,22,33]: peforth.bps[i]=0 # disable breakpoints 11,22,33
# peforth.bps=[i for i in range(1000)] # reload and enable all breakpoints
def bp(id=None,locals=None):
if id==None:
id = 0
prompt='bp> '
else:
prompt="bp{}>".format(id)
if id in peforth.bps: peforth.push(locals).ok(prompt, cmd="to _locals_")
peforth.bp = bp
peforth.bps = [i for i in range(1000)]
[X] 17:33 2020/12/07 配合 peforth.bp(22,locals()) 新增 bl be bd be* bd* 等指令
[ ] 17:34 2020/12/07 release v1.26 to pypi
[X] 17:37 2020/12/07 先試試看 gom ok? --> Pass, 連 selftest 也都 pass.
[X] setup.py 裡的 copy right 年份要改成 2019
*** 打包上 pypi.org 的方法 setup.bat 可以大簡化了。
[V1.22之後的新版] 打包步驟 2018/12/16 11:02
See my Ynote: "Pack peforth to peforth.whl"
1. 檢查 ~\GitHub\peforth\setup.py 看有沒有漏掉新檔案,有沒有要去掉的檔案。
2. (記得先把 dist , build , peforth.egg-info 等 folder 先殺掉) 跑
c:\Users\hcche\Documents\GitHub\peforth>python setup.py sdist bdist_wheel
得到 peforth.whl in c:\Users\hcche\Documents\GitHub\peforth\dist
3. 若無 twine 則 pip install twine 很快很順
執行 c:\Users\hcche\Documents\GitHub\peforth>twine upload dist/*
需要帳號密碼,看這裡 Ynote or Evernote: "python pypi 研究 -- upload to PyPI ok now.note"
4. pip uninstall peforth 然後再 pip install peforth 試驗看看。
5. 完成!
[ ] See (15:55 2019-05-22) 這個 check-list 要耐心好好做完!
[ ] 所有 run 法帶 selftest:
[ ] 改 GitHub\peforth\quit.f
' <selftest> :: enabled=True
[ ] 先通過最基本的 selftest: GitHub\peforth\peforth>python test.py
[ ] Run python setup.py install 更新本地 site-package 版本以供測試
[ ] 要 (Anaconda virtualenv 之下) 從 site-packages\peforth-1.24-py3.7.egg
裡 copy peforth\ 去蓋掉 site-packages\peforth\ 這樣 upgrade 才有生效。
16:27 2020/10/24 不必這樣,因為 python setup.py install 灌好的 peforth v1.25 是
c:\Users\8304018\AppData\Local\Continuum\anaconda3\lib\site-packages\peforth-1.25-py3.7.egg
而 pip install peforth 灌好的是另一個 peforth v1.25
c:\Users\8304018\AppData\Local\Continuum\anaconda3\lib\site-packages\peforth\
兩個可以並存!而且後者優先。只要把後者 directory name 改成 peforth.disabled
就可以讓前者生效,前者是 local install 測試時有其方便性。
[ ] 1. python -i -m peforth [X] with-selftest .s words exit bye
[ ] 2. ipython -i -m peforth .' Hello World!!' cr bye
[ ] 3. ipython import peforth .s words
[/] selftest peforth.ok() .s words <--- w/parent
[/] 1234 bye check echo %errorlevel% <-- 從 ipython 下直接出來也無誤。
[ ] 4. jupyter notebook
kernel > restart and clear outputs
x = 123
%f x .
x . \ ==> 123 (<class 'int'>)
[ ] 5. jupyter notebook --> peforth kernel --> .s words
[ ] 6. Gom 手動移除現有的 peforth directories from:
c:\Users\8304018\AppData\Roaming\gom\2020\python\..
然後從 SCRIPTING > Script Choice > pip install peforth > Tools > Install Python Package 灌 peforth 很快很順
import peforth, peforth_gom_port
執行 peforth.ok() 無誤。
新增 peforth_gom_port.py 放到 peforth repo 的 playground directory 裡。
[ ] 考慮 README.rst 改良
[ ] 若有改過 README.rst 則 wheel 就要重做
--> quit.f selftest=False --> 重來
[ ] 所有 run 法不帶 selftest 跑一遍,準備要 release 的版本:
[ ] 改 GitHub\peforth\quit.f
' <selftest> :: enabled=False
[ ] 同上 python test.py 先試試看
[ ] Run python setup.py install 更新本地 site-package 版本以供測試
[X] 可能要 (Anaconda virtualenv 之下) 從 site-packages\peforth-1.24-py3.7.egg
裡 copy peforth\ 去蓋掉 site-packages\peforth\ 這樣 upgrade 才有生效。
[ ] 同上 repeat 1) python -m peforth 2) ipython -m peforth
[ ] 做出取消 selftest 的 wheel
See my Ynote: "Pack peforth to peforth.whl"
[ ] 1. 檢查 ~\GitHub\peforth\setup.py 看有沒有漏掉新檔案,有沒有要去掉的檔案。
test.py hello.py misc.f
[ ] 2. 跑 c:\Users\hcche\Documents\GitHub\peforth>python setup.py sdist bdist_wheel
得到 peforth.whl in c:\Users\hcche\Documents\GitHub\peforth\dist
[ ] pip uninstall peforth
site-packages 下兩個 peforth folder 刪掉了。
setup.bat 建立的 EGG 檔 peforth-1.24-py3.7.egg 也刪掉,否則 pip install 會
被 skip 過去。
[ ] 切 CD 到 c:\Users\hcche\Documents\GitHub\peforth\dist>
pip install peforth-1.23-py3-none-any.whl <== 注意!用剛做好的 wheel 否則會上網抓。
[ ] 1. (i)python -i -m peforth [ ] no-selftest .s words exit
[ ] 2. (i)python -i -m peforth version 12345 bye --> echo %errorlevel%
[ ] 3. (i)python import peforth
[ ] no selftest, peforth.ok() .s words <--- no parent
[ ] 1234 bye check echo %errorlevel%
[ ] 4. jupyter notebook --> *debug* ok> .s cd help exit
%f %%f magic command
[ ] 5. repeat 以上 in ubuntu <------- Ubuntu 18.04 沒有 pip built-in 不想搞了
[ ] pip uninstall peforth 已知 Colab & Azure 都是 Ubuntu 故不必自己多測了
[ ] pip install (use /mnt/...the wheel) to WSL ubuntu
[ ] ipython -m peforth
[ ] ipython , import peforth , magic commands
[ ] 直接用測過的 wheel update Pypi
執行 c:\Users\hcche\Documents\GitHub\peforth>twine upload dist/*
需要帳號密碼,看這裡 Ynote: "python pypi 研究 -- upload to PyPI ok now.note"
ID, password search my Ynote with pypi _account_
Note: Anaconda base 沒有 twine, 在 Anaconda Navigator 裡找到 twine 把它勾起來 Apply.
[ ] 查看 pypi.org 網頁,若不行,晚點 (過幾分鐘就好) 再看。
[ ] Windows DOS 下試
[ ] WSL Ubuntu 下試 pip uninstall peforth -> pip install peforth
[ ] WSL Ubuntu with and w/o w/o virtualenv --> python -m peforth
[ ] Test Online Jupyter Notebooks Google Colab, Microsoft Azure, and Notebooks.ai
!pip install peforth
import peforth
%f version drop
x = 12345
%f x --> \ 查看 unknown 的效果
\ Colab & Azure 都用 Ubuntu 查版本, Notebooks.ai 用 Debian 都可用這行指令
!cat /etc/os-release
%f py> path --> \ 查看 path 發現 Azure 就是用 Anaconda 所以它有 support Ubuntu!
%pwd \ 查看 working directory
[ ] Colab https://colab.research.google.com/drive/1nZpybQryEiwYzpMvG1tHg4qbNnd_rMey#scrollTo=yAuF9DZcrFaT
[ ] Azure https://peforthplayground-hcchen1471.notebooks.azure.com/j/notebooks/peforth-playground.ipynb
[ ] notebooks.ai 也測測看
[ ] Make a master release up to GitHub --> 用 GitHub Windows 很簡單。
1. 先 commit 上 develop branch, upload 上網上 Github.
2. 切到 master
3. 用 GitHub for Windows desktop 的 Branch > Merge into current branch 選 develop 把它 merge 過來。
解決 conflicts 之後完成 merge.
4. 再 repeat 2-3 但切到 develop 把 master merge 過去。
Master 上的應該是些 README.md 的修改。
[ ] 要不要把 projectk.py sync 回 project-k
(很早以前) projectk.py 改了一點,忘了如何 sync 回 project-k 的?
05:30 2019-11-21 peforth source code 裡的 projectk.py 本身不是從 github 直接下來的, 而是
硬放上去的,因此不會與 project-k github 自動同步 <--- 想想看怎麼辦。
[X] version 改成 1.27 (必須跳過 1.20 直接到 1.21 否則會變成 1.2)
| mit | 4,925,997,226,757,895,000 | 48.328847 | 698 | 0.578373 | false |
ocaisa/easybuild-easyblocks | easybuild/easyblocks/p/psi.py | 1 | 8503 | ##
# Copyright 2013-2016 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing PSI, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
@author: Ward Poelmans (Ghent University)
"""
from distutils.version import LooseVersion
import glob
import os
import shutil
import tempfile
import easybuild.tools.environment as env
from easybuild.easyblocks.generic.cmakemake import CMakeMake
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.framework.easyconfig import BUILD
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.modules import get_software_root
class EB_PSI(CMakeMake):
"""
Support for building and installing PSI
"""
def __init__(self, *args, **kwargs):
"""Initialize class variables custom to PSI."""
super(EB_PSI, self).__init__(*args, **kwargs)
self.psi_srcdir = None
self.install_psi_objdir = None
self.install_psi_srcdir = None
@staticmethod
def extra_options():
"""Extra easyconfig parameters specific to PSI."""
extra_vars = {
# always include running PSI unit tests (takes about 2h or less)
'runtest': ["tests TESTFLAGS='-u -q'", "Run tests included with PSI, without interruption.", BUILD],
}
return CMakeMake.extra_options(extra_vars)
def configure_step(self):
"""
Configure build outside of source directory.
"""
try:
objdir = os.path.join(self.builddir, 'obj')
os.makedirs(objdir)
os.chdir(objdir)
except OSError, err:
raise EasyBuildError("Failed to prepare for configuration of PSI build: %s", err)
env.setvar('F77FLAGS', os.getenv('F90FLAGS'))
# In order to create new plugins with PSI, it needs to know the location of the source
# and the obj dir after install. These env vars give that information to the configure script.
self.psi_srcdir = os.path.basename(self.cfg['start_dir'].rstrip(os.sep))
self.install_psi_objdir = os.path.join(self.installdir, 'obj')
self.install_psi_srcdir = os.path.join(self.installdir, self.psi_srcdir)
env.setvar('PSI_OBJ_INSTALL_DIR', self.install_psi_objdir)
env.setvar('PSI_SRC_INSTALL_DIR', self.install_psi_srcdir)
# explicitely specify Python binary to use
pythonroot = get_software_root('Python')
if not pythonroot:
raise EasyBuildError("Python module not loaded.")
# Use EB Boost
boostroot = get_software_root('Boost')
if not boostroot:
raise EasyBuildError("Boost module not loaded.")
# pre 4.0b5, they were using autotools, on newer it's CMake
if LooseVersion(self.version) <= LooseVersion("4.0b5") and self.name == "PSI":
self.log.info("Using configure based build")
env.setvar('PYTHON', os.path.join(pythonroot, 'bin', 'python'))
env.setvar('USE_SYSTEM_BOOST', 'TRUE')
if self.toolchain.options.get('usempi', None):
# PSI doesn't require a Fortran compiler itself, but may require it to link to BLAS/LAPACK correctly
# we should always specify the sequential Fortran compiler,
# to avoid problems with -lmpi vs -lmpi_mt during linking
fcompvar = 'F77_SEQ'
else:
fcompvar = 'F77'
# update configure options
# using multi-threaded BLAS/LAPACK is important for performance,
# cfr. http://sirius.chem.vt.edu/psi4manual/latest/installfile.html#sec-install-iii
opt_vars = [
('cc', 'CC'),
('cxx', 'CXX'),
('fc', fcompvar),
('libdirs', 'LDFLAGS'),
('blas', 'LIBBLAS_MT'),
('lapack', 'LIBLAPACK_MT'),
]
for (opt, var) in opt_vars:
self.cfg.update('configopts', "--with-%s='%s'" % (opt, os.getenv(var)))
# -DMPICH_IGNORE_CXX_SEEK dances around problem with order of stdio.h and mpi.h headers
# both define SEEK_SET, this makes the one for MPI be ignored
self.cfg.update('configopts', "--with-opt='%s -DMPICH_IGNORE_CXX_SEEK'" % os.getenv('CFLAGS'))
# specify location of Boost
self.cfg.update('configopts', "--with-boost=%s" % boostroot)
# enable support for plugins
self.cfg.update('configopts', "--with-plugins")
ConfigureMake.configure_step(self, cmd_prefix=self.cfg['start_dir'])
else:
self.log.info("Using CMake based build")
self.cfg.update('configopts', ' -DPYTHON_INTERPRETER=%s' % os.path.join(pythonroot, 'bin', 'python'))
self.cfg.update('configopts', ' -DCMAKE_BUILD_TYPE=Release')
if self.toolchain.options.get('usempi', None):
self.cfg.update('configopts', " -DENABLE_MPI=ON")
if get_software_root('impi'):
self.cfg.update('configopts', " -DENABLE_CSR=ON -DBLAS_TYPE=MKL")
if self.name == 'PSI4':
pcmsolverroot = get_software_root('PCMSolver')
if pcmsolverroot:
self.cfg.update('configopts', " -DENABLE_PCMSOLVER=ON -DPCMSOLVER_ROOT=%s" % pcmsolverroot)
chempsroot = get_software_root('CheMPS2')
if chempsroot:
self.cfg.update('configopts', " -DENABLE_CHEMPS2=ON -DCHEMPS2_ROOT=%s" % chempsroot)
CMakeMake.configure_step(self, srcdir=self.cfg['start_dir'])
def install_step(self):
"""Custom install procedure for PSI."""
super(EB_PSI, self).install_step()
# the obj and unpacked sources must remain available for working with plugins
try:
for subdir in ['obj', self.psi_srcdir]:
# copy symlinks as symlinks to work around broken symlinks
shutil.copytree(os.path.join(self.builddir, subdir), os.path.join(self.installdir, subdir),
symlinks=True)
except OSError, err:
raise EasyBuildError("Failed to copy obj and unpacked sources to install dir: %s", err)
def test_step(self):
"""
Run the testsuite of PSI4
"""
testdir = tempfile.mkdtemp()
env.setvar('PSI_SCRATCH', testdir)
super(EB_PSI, self).test_step()
try:
shutil.rmtree(testdir)
except OSError, err:
raise EasyBuildError("Failed to remove test directory %s: %s", testdir, err)
def sanity_check_step(self):
"""Custom sanity check for PSI."""
custom_paths = {
'files': ['bin/psi4'],
'dirs': ['include', ('share/psi', 'share/psi4')],
}
super(EB_PSI, self).sanity_check_step(custom_paths=custom_paths)
def make_module_extra(self):
"""Custom variables for PSI module."""
txt = super(EB_PSI, self).make_module_extra()
share_dir = os.path.join(self.installdir, 'share')
if os.path.exists(share_dir):
psi4datadir = glob.glob(os.path.join(share_dir, 'psi*'))
if len(psi4datadir) == 1:
txt += self.module_generator.set_environment('PSI4DATADIR', psi4datadir[0])
else:
raise EasyBuildError("Failed to find exactly one PSI4 data dir: %s", psi4datadir)
return txt
| gpl-2.0 | 2,786,891,578,329,963,000 | 40.478049 | 116 | 0.618958 | false |
tobegit3hub/cinder_docker | cinder/volume/drivers/san/hp/hp_3par_iscsi.py | 1 | 33445 | # (c) Copyright 2012-2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Volume driver for HP 3PAR Storage array.
This driver requires 3.1.3 firmware on the 3PAR array, using
the 3.x version of the hp3parclient.
You will need to install the python hp3parclient.
sudo pip install --upgrade "hp3parclient>=3.1"
Set the following in the cinder.conf file to enable the
3PAR iSCSI Driver along with the required flags:
volume_driver=cinder.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver
"""
import re
import sys
try:
from hp3parclient import exceptions as hpexceptions
except ImportError:
hpexceptions = None
from oslo_log import log as logging
import six
from cinder import exception
from cinder.i18n import _, _LE, _LW
from cinder.volume import driver
from cinder.volume.drivers.san.hp import hp_3par_common as hpcommon
from cinder.volume.drivers.san import san
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
DEFAULT_ISCSI_PORT = 3260
CHAP_USER_KEY = "HPQ-cinder-CHAP-name"
CHAP_PASS_KEY = "HPQ-cinder-CHAP-secret"
class HP3PARISCSIDriver(driver.TransferVD,
driver.ManageableVD,
driver.ExtendVD,
driver.SnapshotVD,
driver.MigrateVD,
driver.ConsistencyGroupVD,
driver.BaseVD):
"""OpenStack iSCSI driver to enable 3PAR storage array.
Version history:
1.0 - Initial driver
1.1 - QoS, extend volume, multiple iscsi ports, remove domain,
session changes, faster clone, requires 3.1.2 MU2 firmware.
1.2.0 - Updated the use of the hp3parclient to 2.0.0 and refactored
the drivers to use the new APIs.
1.2.1 - Synchronized extend_volume method.
1.2.2 - Added try/finally around client login/logout.
1.2.3 - log exceptions before raising
1.2.4 - Fixed iSCSI active path bug #1224594
1.2.5 - Added metadata during attach/detach bug #1258033
1.2.6 - Use least-used iscsi n:s:p for iscsi volume attach bug #1269515
This update now requires 3.1.2 MU3 firmware
1.3.0 - Removed all SSH code. We rely on the hp3parclient now.
2.0.0 - Update hp3parclient API uses 3.0.x
2.0.2 - Add back-end assisted volume migrate
2.0.3 - Added support for managing/unmanaging of volumes
2.0.4 - Added support for volume retype
2.0.5 - Added CHAP support, requires 3.1.3 MU1 firmware
and hp3parclient 3.1.0.
2.0.6 - Fixing missing login/logout around attach/detach bug #1367429
2.0.7 - Add support for pools with model update
2.0.8 - Migrate without losing type settings bug #1356608
2.0.9 - Removing locks bug #1381190
2.0.10 - Add call to queryHost instead SSH based findHost #1398206
2.0.11 - Added missing host name during attach fix #1398206
2.0.12 - Removed usage of host name cache #1398914
2.0.13 - Update LOG usage to fix translations. bug #1384312
2.0.14 - Do not allow a different iSCSI IP (hp3par_iscsi_ips) to be
used during live-migration. bug #1423958
2.0.15 - Added support for updated detach_volume attachment.
2.0.16 - Added encrypted property to initialize_connection #1439917
2.0.17 - Python 3 fixes
2.0.18 - Improved VLUN creation and deletion logic. #1469816
2.0.19 - Changed initialize_connection to use getHostVLUNs. #1475064
2.0.20 - Adding changes to support 3PAR iSCSI multipath.
2.0.21 - Adds consistency group support
2.0.22 - Update driver to use ABC metaclasses
2.0.23 - Added update_migrated_volume. bug # 1492023
"""
VERSION = "2.0.23"
def __init__(self, *args, **kwargs):
super(HP3PARISCSIDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(hpcommon.hp3par_opts)
self.configuration.append_config_values(san.san_opts)
def _init_common(self):
return hpcommon.HP3PARCommon(self.configuration)
def _login(self):
common = self._init_common()
common.do_setup(None)
common.client_login()
return common
def _logout(self, common):
common.client_logout()
def _check_flags(self, common):
"""Sanity check to ensure we have required options set."""
required_flags = ['hp3par_api_url', 'hp3par_username',
'hp3par_password', 'san_ip', 'san_login',
'san_password']
common.check_flags(self.configuration, required_flags)
def get_volume_stats(self, refresh=False):
common = self._login()
try:
self._stats = common.get_volume_stats(
refresh,
self.get_filter_function(),
self.get_goodness_function())
self._stats['storage_protocol'] = 'iSCSI'
self._stats['driver_version'] = self.VERSION
backend_name = self.configuration.safe_get('volume_backend_name')
self._stats['volume_backend_name'] = (backend_name or
self.__class__.__name__)
return self._stats
finally:
self._logout(common)
def do_setup(self, context):
common = self._init_common()
common.do_setup(context)
self._check_flags(common)
common.check_for_setup_error()
common.client_login()
try:
self.initialize_iscsi_ports(common)
finally:
self._logout(common)
def initialize_iscsi_ports(self, common):
# map iscsi_ip-> ip_port
# -> iqn
# -> nsp
self.iscsi_ips = {}
temp_iscsi_ip = {}
# use the 3PAR ip_addr list for iSCSI configuration
if len(self.configuration.hp3par_iscsi_ips) > 0:
# add port values to ip_addr, if necessary
for ip_addr in self.configuration.hp3par_iscsi_ips:
ip = ip_addr.split(':')
if len(ip) == 1:
temp_iscsi_ip[ip_addr] = {'ip_port': DEFAULT_ISCSI_PORT}
elif len(ip) == 2:
temp_iscsi_ip[ip[0]] = {'ip_port': ip[1]}
else:
LOG.warning(_LW("Invalid IP address format '%s'"), ip_addr)
# add the single value iscsi_ip_address option to the IP dictionary.
# This way we can see if it's a valid iSCSI IP. If it's not valid,
# we won't use it and won't bother to report it, see below
if (self.configuration.iscsi_ip_address not in temp_iscsi_ip):
ip = self.configuration.iscsi_ip_address
ip_port = self.configuration.iscsi_port
temp_iscsi_ip[ip] = {'ip_port': ip_port}
# get all the valid iSCSI ports from 3PAR
# when found, add the valid iSCSI ip, ip port, iqn and nsp
# to the iSCSI IP dictionary
iscsi_ports = common.get_active_iscsi_target_ports()
for port in iscsi_ports:
ip = port['IPAddr']
if ip in temp_iscsi_ip:
ip_port = temp_iscsi_ip[ip]['ip_port']
self.iscsi_ips[ip] = {'ip_port': ip_port,
'nsp': port['nsp'],
'iqn': port['iSCSIName']
}
del temp_iscsi_ip[ip]
# if the single value iscsi_ip_address option is still in the
# temp dictionary it's because it defaults to $my_ip which doesn't
# make sense in this context. So, if present, remove it and move on.
if (self.configuration.iscsi_ip_address in temp_iscsi_ip):
del temp_iscsi_ip[self.configuration.iscsi_ip_address]
# lets see if there are invalid iSCSI IPs left in the temp dict
if len(temp_iscsi_ip) > 0:
LOG.warning(_LW("Found invalid iSCSI IP address(s) in "
"configuration option(s) hp3par_iscsi_ips or "
"iscsi_ip_address '%s.'"),
(", ".join(temp_iscsi_ip)))
if not len(self.iscsi_ips) > 0:
msg = _('At least one valid iSCSI IP address must be set.')
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
def check_for_setup_error(self):
"""Setup errors are already checked for in do_setup so return pass."""
pass
def create_volume(self, volume):
common = self._login()
try:
return common.create_volume(volume)
finally:
self._logout(common)
def create_cloned_volume(self, volume, src_vref):
"""Clone an existing volume."""
common = self._login()
try:
return common.create_cloned_volume(volume, src_vref)
finally:
self._logout(common)
def delete_volume(self, volume):
common = self._login()
try:
common.delete_volume(volume)
finally:
self._logout(common)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot.
TODO: support using the size from the user.
"""
common = self._login()
try:
return common.create_volume_from_snapshot(volume, snapshot)
finally:
self._logout(common)
def create_snapshot(self, snapshot):
common = self._login()
try:
common.create_snapshot(snapshot)
finally:
self._logout(common)
def delete_snapshot(self, snapshot):
common = self._login()
try:
common.delete_snapshot(snapshot)
finally:
self._logout(common)
def initialize_connection(self, volume, connector):
"""Assigns the volume to a server.
Assign any created volume to a compute node/host so that it can be
used from that host.
This driver returns a driver_volume_type of 'iscsi'.
The format of the driver data is defined in _get_iscsi_properties.
Example return value:
{
'driver_volume_type': 'iscsi'
'data': {
'encrypted': False,
'target_discovered': True,
'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
'target_portal': '127.0.0.1:3260',
'volume_id': 1,
}
}
Steps to export a volume on 3PAR
* Get the 3PAR iSCSI iqn
* Create a host on the 3par
* create vlun on the 3par
"""
common = self._login()
try:
# we have to make sure we have a host
host, username, password = self._create_host(
common,
volume,
connector)
if connector['multipath']:
ready_ports = common.client.getiSCSIPorts(
state=common.client.PORT_STATE_READY)
target_portals = []
target_iqns = []
target_luns = []
# Target portal ips are defined in cinder.conf.
target_portal_ips = self.iscsi_ips.keys()
# Collect all existing VLUNs for this volume/host combination.
existing_vluns = common.find_existing_vluns(volume, host)
# Cycle through each ready iSCSI port and determine if a new
# VLUN should be created or an existing one used.
for port in ready_ports:
iscsi_ip = port['IPAddr']
if iscsi_ip in target_portal_ips:
vlun = None
# check for an already existing VLUN matching the
# nsp for this iSCSI IP. If one is found, use it
# instead of creating a new VLUN.
for v in existing_vluns:
portPos = common.build_portPos(
self.iscsi_ips[iscsi_ip]['nsp'])
if v['portPos'] == portPos:
vlun = v
break
else:
vlun = common.create_vlun(
volume, host, self.iscsi_ips[iscsi_ip]['nsp'])
iscsi_ip_port = "%s:%s" % (
iscsi_ip, self.iscsi_ips[iscsi_ip]['ip_port'])
target_portals.append(iscsi_ip_port)
target_iqns.append(port['iSCSIName'])
target_luns.append(vlun['lun'])
else:
LOG.warning(_LW("iSCSI IP: '%s' was not found in "
"hp3par_iscsi_ips list defined in "
"cinder.conf."), iscsi_ip)
info = {'driver_volume_type': 'iscsi',
'data': {'target_portals': target_portals,
'target_iqns': target_iqns,
'target_luns': target_luns,
'target_discovered': True
}
}
else:
least_used_nsp = None
# check if a VLUN already exists for this host
existing_vlun = common.find_existing_vlun(volume, host)
if existing_vlun:
# We override the nsp here on purpose to force the
# volume to be exported out the same IP as it already is.
# This happens during nova live-migration, we want to
# disable the picking of a different IP that we export
# the volume to, or nova complains.
least_used_nsp = common.build_nsp(existing_vlun['portPos'])
if not least_used_nsp:
least_used_nsp = self._get_least_used_nsp_for_host(
common,
host['name'])
vlun = None
if existing_vlun is None:
# now that we have a host, create the VLUN
vlun = common.create_vlun(volume, host, least_used_nsp)
else:
vlun = existing_vlun
if least_used_nsp is None:
LOG.warning(_LW("Least busy iSCSI port not found, "
"using first iSCSI port in list."))
iscsi_ip = self.iscsi_ips.keys()[0]
else:
iscsi_ip = self._get_ip_using_nsp(least_used_nsp)
iscsi_ip_port = self.iscsi_ips[iscsi_ip]['ip_port']
iscsi_target_iqn = self.iscsi_ips[iscsi_ip]['iqn']
info = {'driver_volume_type': 'iscsi',
'data': {'target_portal': "%s:%s" %
(iscsi_ip, iscsi_ip_port),
'target_iqn': iscsi_target_iqn,
'target_lun': vlun['lun'],
'target_discovered': True
}
}
if self.configuration.hp3par_iscsi_chap_enabled:
info['data']['auth_method'] = 'CHAP'
info['data']['auth_username'] = username
info['data']['auth_password'] = password
encryption_key_id = volume.get('encryption_key_id', None)
info['data']['encrypted'] = encryption_key_id is not None
return info
finally:
self._logout(common)
def terminate_connection(self, volume, connector, **kwargs):
"""Driver entry point to unattach a volume from an instance."""
common = self._login()
try:
hostname = common._safe_hostname(connector['host'])
common.terminate_connection(
volume,
hostname,
iqn=connector['initiator'])
self._clear_chap_3par(common, volume)
finally:
self._logout(common)
def _clear_chap_3par(self, common, volume):
"""Clears CHAP credentials on a 3par volume.
Ignore exceptions caused by the keys not being present on a volume.
"""
vol_name = common._get_3par_vol_name(volume['id'])
try:
common.client.removeVolumeMetaData(vol_name, CHAP_USER_KEY)
except hpexceptions.HTTPNotFound:
pass
except Exception:
raise
try:
common.client.removeVolumeMetaData(vol_name, CHAP_PASS_KEY)
except hpexceptions.HTTPNotFound:
pass
except Exception:
raise
def _create_3par_iscsi_host(self, common, hostname, iscsi_iqn, domain,
persona_id):
"""Create a 3PAR host.
Create a 3PAR host, if there is already a host on the 3par using
the same iqn but with a different hostname, return the hostname
used by 3PAR.
"""
# first search for an existing host
host_found = None
hosts = common.client.queryHost(iqns=[iscsi_iqn])
if hosts and hosts['members'] and 'name' in hosts['members'][0]:
host_found = hosts['members'][0]['name']
if host_found is not None:
return host_found
else:
if isinstance(iscsi_iqn, six.string_types):
iqn = [iscsi_iqn]
else:
iqn = iscsi_iqn
persona_id = int(persona_id)
common.client.createHost(hostname, iscsiNames=iqn,
optional={'domain': domain,
'persona': persona_id})
return hostname
def _modify_3par_iscsi_host(self, common, hostname, iscsi_iqn):
mod_request = {'pathOperation': common.client.HOST_EDIT_ADD,
'iSCSINames': [iscsi_iqn]}
common.client.modifyHost(hostname, mod_request)
def _set_3par_chaps(self, common, hostname, volume, username, password):
"""Sets a 3PAR host's CHAP credentials."""
if not self.configuration.hp3par_iscsi_chap_enabled:
return
mod_request = {'chapOperation': common.client.HOST_EDIT_ADD,
'chapOperationMode': common.client.CHAP_INITIATOR,
'chapName': username,
'chapSecret': password}
common.client.modifyHost(hostname, mod_request)
def _create_host(self, common, volume, connector):
"""Creates or modifies existing 3PAR host."""
# make sure we don't have the host already
host = None
username = None
password = None
hostname = common._safe_hostname(connector['host'])
cpg = common.get_cpg(volume, allowSnap=True)
domain = common.get_domain(cpg)
# Get the CHAP secret if CHAP is enabled
if self.configuration.hp3par_iscsi_chap_enabled:
vol_name = common._get_3par_vol_name(volume['id'])
username = common.client.getVolumeMetaData(
vol_name, CHAP_USER_KEY)['value']
password = common.client.getVolumeMetaData(
vol_name, CHAP_PASS_KEY)['value']
try:
host = common._get_3par_host(hostname)
except hpexceptions.HTTPNotFound:
# get persona from the volume type extra specs
persona_id = common.get_persona_type(volume)
# host doesn't exist, we have to create it
hostname = self._create_3par_iscsi_host(common,
hostname,
connector['initiator'],
domain,
persona_id)
self._set_3par_chaps(common, hostname, volume, username, password)
host = common._get_3par_host(hostname)
else:
if 'iSCSIPaths' not in host or len(host['iSCSIPaths']) < 1:
self._modify_3par_iscsi_host(
common, hostname,
connector['initiator'])
self._set_3par_chaps(
common,
hostname,
volume,
username,
password)
host = common._get_3par_host(hostname)
elif (not host['initiatorChapEnabled'] and
self.configuration.hp3par_iscsi_chap_enabled):
LOG.warning(_LW("Host exists without CHAP credentials set and "
"has iSCSI attachments but CHAP is enabled. "
"Updating host with new CHAP credentials."))
self._set_3par_chaps(
common,
hostname,
volume,
username,
password)
return host, username, password
def _do_export(self, common, volume):
"""Gets the associated account, generates CHAP info and updates."""
model_update = {}
if not self.configuration.hp3par_iscsi_chap_enabled:
model_update['provider_auth'] = None
return model_update
# CHAP username will be the hostname
chap_username = volume['host'].split('@')[0]
chap_password = None
try:
# Get all active VLUNs for the host
vluns = common.client.getHostVLUNs(chap_username)
# Host has active VLUNs... is CHAP enabled on host?
host_info = common.client.getHost(chap_username)
if not host_info['initiatorChapEnabled']:
LOG.warning(_LW("Host has no CHAP key, but CHAP is enabled."))
except hpexceptions.HTTPNotFound:
chap_password = volume_utils.generate_password(16)
LOG.warning(_LW("No host or VLUNs exist. Generating new "
"CHAP key."))
else:
# Get a list of all iSCSI VLUNs and see if there is already a CHAP
# key assigned to one of them. Use that CHAP key if present,
# otherwise create a new one. Skip any VLUNs that are missing
# CHAP credentials in metadata.
chap_exists = False
active_vluns = 0
for vlun in vluns:
if not vlun['active']:
continue
active_vluns += 1
# iSCSI connections start with 'iqn'.
if ('remoteName' in vlun and
re.match('iqn.*', vlun['remoteName'])):
try:
chap_password = common.client.getVolumeMetaData(
vlun['volumeName'], CHAP_PASS_KEY)['value']
chap_exists = True
break
except hpexceptions.HTTPNotFound:
LOG.debug("The VLUN %s is missing CHAP credentials "
"but CHAP is enabled. Skipping.",
vlun['remoteName'])
else:
LOG.warning(_LW("Non-iSCSI VLUN detected."))
if not chap_exists:
chap_password = volume_utils.generate_password(16)
LOG.warning(_LW("No VLUN contained CHAP credentials. "
"Generating new CHAP key."))
# Add CHAP credentials to the volume metadata
vol_name = common._get_3par_vol_name(volume['id'])
common.client.setVolumeMetaData(
vol_name, CHAP_USER_KEY, chap_username)
common.client.setVolumeMetaData(
vol_name, CHAP_PASS_KEY, chap_password)
model_update['provider_auth'] = ('CHAP %s %s' %
(chap_username, chap_password))
return model_update
def create_export(self, context, volume, connector):
common = self._login()
try:
return self._do_export(common, volume)
finally:
self._logout(common)
def ensure_export(self, context, volume):
"""Ensure the volume still exists on the 3PAR.
Also retrieves CHAP credentials, if present on the volume
"""
common = self._login()
try:
vol_name = common._get_3par_vol_name(volume['id'])
common.client.getVolume(vol_name)
except hpexceptions.HTTPNotFound:
LOG.error(_LE("Volume %s doesn't exist on array."), vol_name)
else:
metadata = common.client.getAllVolumeMetaData(vol_name)
username = None
password = None
model_update = {}
model_update['provider_auth'] = None
for member in metadata['members']:
if member['key'] == CHAP_USER_KEY:
username = member['value']
elif member['key'] == CHAP_PASS_KEY:
password = member['value']
if username and password:
model_update['provider_auth'] = ('CHAP %s %s' %
(username, password))
return model_update
finally:
self._logout(common)
def remove_export(self, context, volume):
pass
def _get_least_used_nsp_for_host(self, common, hostname):
"""Get the least used NSP for the current host.
Steps to determine which NSP to use.
* If only one iSCSI NSP, return it
* If there is already an active vlun to this host, return its NSP
* Return NSP with fewest active vluns
"""
iscsi_nsps = self._get_iscsi_nsps()
# If there's only one path, use it
if len(iscsi_nsps) == 1:
return iscsi_nsps[0]
# Try to reuse an existing iscsi path to the host
vluns = common.client.getVLUNs()
for vlun in vluns['members']:
if vlun['active']:
if vlun['hostname'] == hostname:
temp_nsp = common.build_nsp(vlun['portPos'])
if temp_nsp in iscsi_nsps:
# this host already has an iscsi path, so use it
return temp_nsp
# Calculate the least used iscsi nsp
least_used_nsp = self._get_least_used_nsp(common,
vluns['members'],
self._get_iscsi_nsps())
return least_used_nsp
def _get_iscsi_nsps(self):
"""Return the list of candidate nsps."""
nsps = []
for value in self.iscsi_ips.values():
nsps.append(value['nsp'])
return nsps
def _get_ip_using_nsp(self, nsp):
"""Return IP associated with given nsp."""
for (key, value) in self.iscsi_ips.items():
if value['nsp'] == nsp:
return key
def _get_least_used_nsp(self, common, vluns, nspss):
"""Return the nsp that has the fewest active vluns."""
# return only the nsp (node:server:port)
# count the number of nsps
nsp_counts = {}
for nsp in nspss:
# initialize counts to zero
nsp_counts[nsp] = 0
current_least_used_nsp = None
for vlun in vluns:
if vlun['active']:
nsp = common.build_nsp(vlun['portPos'])
if nsp in nsp_counts:
nsp_counts[nsp] = nsp_counts[nsp] + 1
# identify key (nsp) of least used nsp
current_smallest_count = sys.maxint
for (nsp, count) in nsp_counts.items():
if count < current_smallest_count:
current_least_used_nsp = nsp
current_smallest_count = count
return current_least_used_nsp
def extend_volume(self, volume, new_size):
common = self._login()
try:
common.extend_volume(volume, new_size)
finally:
self._logout(common)
def create_consistencygroup(self, context, group):
common = self._login()
try:
common.create_consistencygroup(context, group)
finally:
self._logout(common)
def create_consistencygroup_from_src(self, context, group, volumes,
cgsnapshot=None, snapshots=None,
source_cg=None, source_vols=None):
common = self._login()
try:
return common.create_consistencygroup_from_src(
context, group, volumes, cgsnapshot, snapshots, source_cg,
source_vols)
finally:
self._logout(common)
def delete_consistencygroup(self, context, group):
common = self._login()
try:
return common.delete_consistencygroup(context, group)
finally:
self._logout(common)
def update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
common = self._login()
try:
return common.update_consistencygroup(context, group, add_volumes,
remove_volumes)
finally:
self._logout(common)
def create_cgsnapshot(self, context, cgsnapshot):
common = self._login()
try:
return common.create_cgsnapshot(context, cgsnapshot)
finally:
self._logout(common)
def delete_cgsnapshot(self, context, cgsnapshot):
common = self._login()
try:
return common.delete_cgsnapshot(context, cgsnapshot)
finally:
self._logout(common)
def manage_existing(self, volume, existing_ref):
common = self._login()
try:
return common.manage_existing(volume, existing_ref)
finally:
self._logout(common)
def manage_existing_get_size(self, volume, existing_ref):
common = self._login()
try:
return common.manage_existing_get_size(volume, existing_ref)
finally:
self._logout(common)
def unmanage(self, volume):
common = self._login()
try:
common.unmanage(volume)
finally:
self._logout(common)
def attach_volume(self, context, volume, instance_uuid, host_name,
mountpoint):
common = self._login()
try:
common.attach_volume(volume, instance_uuid)
finally:
self._logout(common)
def detach_volume(self, context, volume, attachment=None):
common = self._login()
try:
common.detach_volume(volume, attachment)
finally:
self._logout(common)
def retype(self, context, volume, new_type, diff, host):
"""Convert the volume to be of the new type."""
common = self._login()
try:
return common.retype(volume, new_type, diff, host)
finally:
self._logout(common)
def migrate_volume(self, context, volume, host):
if volume['status'] == 'in-use':
protocol = host['capabilities']['storage_protocol']
if protocol != 'iSCSI':
LOG.debug("3PAR ISCSI driver cannot migrate in-use volume "
"to a host with storage_protocol=%s.", protocol)
return False, None
common = self._login()
try:
return common.migrate_volume(volume, host)
finally:
self._logout(common)
def update_migrated_volume(self, context, volume, new_volume,
original_volume_status):
"""Update the name of the migrated volume to it's new ID."""
common = self._login()
try:
return common.update_migrated_volume(context, volume, new_volume,
original_volume_status)
finally:
self._logout(common)
def get_pool(self, volume):
common = self._login()
try:
return common.get_cpg(volume)
except hpexceptions.HTTPNotFound:
reason = (_("Volume %s doesn't exist on array.") % volume)
LOG.error(reason)
raise exception.InvalidVolume(reason)
finally:
self._logout(common)
| apache-2.0 | 910,044,834,579,531,300 | 37.844367 | 79 | 0.535297 | false |
PyBossa/app-twitter | get_tweets.py | 1 | 1365 | # -*- coding: utf-8 -*-
# This file is part of PyBOSSA.
#
# PyBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBOSSA. If not, see <http://www.gnu.org/licenses/>.
from twitter import Twitter, OAuth
def get_tweets(hashtag="#finalcopatve"):
"""
Gets tweets for a given hashtag
:arg string hashtag: Twitter hashtag to get the tweets
:returns: A list of Tweets.
:rtype: list
"""
try:
import config
t = Twitter(auth=OAuth(config.OAUTH_TOKEN, config.OAUTH_SECRET,
config.CONSUMER_KEY, config.CONSUMER_SECRET))
tweets = t.search.tweets(q=hashtag)
return tweets['statuses']
except:
raise Exception("config.py file not found, please copy config.py.template \
to config.py and fill in the OAuth parameters")
| agpl-3.0 | 6,916,551,567,034,951,000 | 35.891892 | 83 | 0.685714 | false |
marcus-oscarsson/mxcube3 | mxcube3/routes/ra.py | 1 | 7501 | # -*- coding: utf-8 -*-
import gevent
import logging
from flask import (
session,
jsonify,
Response,
request,
make_response,
copy_current_request_context,
)
from mxcube3 import socketio
from mxcube3 import mxcube
from mxcube3 import server
from mxcube3 import blcontrol
from mxcube3.core import loginutils
@server.route("/mxcube/api/v0.1/ra/request_control", methods=["POST"])
@server.restrict
def request_control():
"""
"""
@copy_current_request_context
def handle_timeout_gives_control(sid, timeout=30):
gevent.sleep(timeout)
if mxcube.TIMEOUT_GIVES_CONTROL:
user = loginutils.get_user_by_sid(sid)
# Pass control to user if still waiting
if user.get("requestsControl"):
toggle_operator(sid, "Timeout expired, you have control")
data = request.get_json()
remote_addr = loginutils.remote_addr()
# Is someone already asking for control
for observer in loginutils.get_observers():
if observer["requestsControl"] and observer["host"] != remote_addr:
msg = "Another user is already asking for control"
return make_response(msg, 409)
user = loginutils.get_user_by_sid(session.sid)
user["name"] = data["name"]
user["requestsControl"] = data["control"]
user["message"] = data["message"]
observers = loginutils.get_observers()
gevent.spawn(handle_timeout_gives_control, session.sid, timeout=10)
socketio.emit("observersChanged", observers, namespace="/hwr")
return make_response("", 200)
@server.route("/mxcube/api/v0.1/ra/take_control", methods=["POST"])
@server.restrict
def take_control():
"""
"""
# Already master do nothing
if loginutils.is_operator(session.sid):
return make_response("", 200)
# Not inhouse user so not allowed to take control by force,
# return error code
if not session["loginInfo"]["loginRes"]["Session"]["is_inhouse"]:
return make_response("", 409)
toggle_operator(session.sid, "You were given control")
return make_response("", 200)
@server.route("/mxcube/api/v0.1/ra/give_control", methods=["POST"])
@server.restrict
def give_control():
"""
"""
sid = request.get_json().get("sid")
toggle_operator(sid, "You were given control")
return make_response("", 200)
def toggle_operator(new_op_sid, message):
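# Hand the operator (master) role to the user with session id `new_op_sid`:
# record the new operator, notify the outgoing and incoming operators on their
# own socket.io rooms, and broadcast the refreshed observer list to all clients.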
current_op = loginutils.get_operator()
new_op = loginutils.get_user_by_sid(new_op_sid)
loginutils.set_operator(new_op["sid"])
new_op["message"] = message
observers = loginutils.get_observers()
# Append the new data path so that it can be updated on the client
new_op["rootPath"] = blcontrol.beamline.session.get_base_image_directory()
# Current op might have logged out, while this is happening
if current_op:
current_op["rootPath"] = blcontrol.beamline.session.get_base_image_directory()
current_op["message"] = message
socketio.emit(
"setObserver", current_op, room=current_op["socketio_sid"], namespace="/hwr"
)
socketio.emit("observersChanged", observers, namespace="/hwr")
socketio.emit("setMaster", new_op, room=new_op["socketio_sid"], namespace="/hwr")
def remain_observer(observer_sid, message):
observer = loginutils.get_user_by_sid(observer_sid)
observer["message"] = message
socketio.emit(
"setObserver", observer, room=observer["socketio_sid"], namespace="/hwr"
)
@server.route("/mxcube/api/v0.1/ra", methods=["GET"])
@server.restrict
def observers():
"""
"""
data = {
"observers": loginutils.get_observers(),
"sid": session.sid,
"master": loginutils.is_operator(session.sid),
"observerName": loginutils.get_observer_name(),
"allowRemote": mxcube.ALLOW_REMOTE,
"timeoutGivesControl": mxcube.TIMEOUT_GIVES_CONTROL,
}
return jsonify(data=data)
@server.route("/mxcube/api/v0.1/ra/allow_remote", methods=["POST"])
@server.restrict
def allow_remote():
"""
"""
allow = request.get_json().get("allow")
if mxcube.ALLOW_REMOTE and allow == False:
socketio.emit("forceSignoutObservers", {}, namespace="/hwr")
mxcube.ALLOW_REMOTE = allow
return Response(status=200)
@server.route("/mxcube/api/v0.1/ra/timeout_gives_control", methods=["POST"])
@server.restrict
def timeout_gives_control():
"""
"""
control = request.get_json().get("timeoutGivesControl")
mxcube.TIMEOUT_GIVES_CONTROL = control
return Response(status=200)
def observer_requesting_control():
observer = None
for o in loginutils.get_observers():
if o["requestsControl"]:
observer = o
return observer
@server.route("/mxcube/api/v0.1/ra/request_control_response", methods=["POST"])
@server.restrict
def request_control_response():
"""
"""
data = request.get_json()
new_op = observer_requesting_control()
# Request was denied
if not data["giveControl"]:
remain_observer(new_op["sid"], data["message"])
else:
toggle_operator(new_op["sid"], data["message"])
new_op["requestsControl"] = False
return make_response("", 200)
@server.route("/mxcube/api/v0.1/ra/chat", methods=["POST"])
@server.restrict
def append_message():
message = request.get_json().get("message", "")
sid = request.get_json().get("sid", "")
if message and sid:
loginutils.append_message(message, sid)
return Response(status=200)
@server.route("/mxcube/api/v0.1/ra/chat", methods=["GET"])
@server.restrict
def get_all_mesages():
return jsonify({"messages": loginutils.get_all_messages()})
@socketio.on("connect", namespace="/hwr")
@server.ws_restrict
def connect():
user = loginutils.get_user_by_sid(session.sid)
# Make sure the user is logged in; the session may have been closed, e.g. by a timeout
if user:
user["socketio_sid"] = request.sid
# (Note: User is logged in if operator)
if loginutils.is_operator(session.sid):
loginutils.emit_pending_events()
if (
not blcontrol.beamline.queue_manager.is_executing()
and not loginutils.DISCONNECT_HANDLED
):
loginutils.DISCONNECT_HANDLED = True
socketio.emit("resumeQueueDialog", namespace="/hwr")
msg = "Client reconnected, Queue was previously stopped, asking "
msg += "client for action"
logging.getLogger("HWR").info(msg)
@socketio.on("disconnect", namespace="/hwr")
@server.ws_restrict
def disconnect():
if (
loginutils.is_operator(session.sid)
and blcontrol.beamline.queue_manager.is_executing()
):
loginutils.DISCONNECT_HANDLED = False
blcontrol.beamline.queue_manager.pause(True)
logging.getLogger("HWR").info("Client disconnected, pausing queue")
@socketio.on("setRaMaster", namespace="/hwr")
@server.ws_restrict
def set_master(data):
loginutils.emit_pending_events()
return session.sid
@socketio.on("setRaObserver", namespace="/hwr")
@server.ws_restrict
def set_observer(data):
name = data.get("name", "")
observers = loginutils.get_observers()
observer = loginutils.get_user_by_sid(session.sid)
if observer and name:
observer["name"] = name
socketio.emit("observerLogin", observer, include_self=False, namespace="/hwr")
socketio.emit("observersChanged", observers, namespace="/hwr")
return session.sid
| gpl-2.0 | -4,123,180,664,764,723,000 | 26.47619 | 88 | 0.654313 | false |
joshspeagle/frankenz | frankenz/bruteforce.py | 1 | 23629 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Object used to fit data and compute PDFs using brute-force methods.
"""
from __future__ import (print_function, division)
import six
from six.moves import range
import sys
import os
import warnings
import math
import numpy as np
from .pdf import *
try:
from scipy.special import logsumexp
except ImportError:
from scipy.misc import logsumexp
__all__ = ["BruteForce"]
class BruteForce():
"""
Fits data and generates predictions using a simple brute-force approach.
"""
def __init__(self, models, models_err, models_mask):
"""
Load the model data into memory.
Parameters
----------
models : `~numpy.ndarray` of shape (Nmodel, Nfilt)
Model values.
models_err : `~numpy.ndarray` of shape (Nmodel, Nfilt)
Associated errors on the model values.
models_mask : `~numpy.ndarray` of shape (Nmodel, Nfilt)
Binary mask (0/1) indicating whether the model value was observed.
"""
# Initialize values.
self.models = models
self.models_err = models_err
self.models_mask = models_mask
self.NMODEL, self.NDIM = models.shape
self.fit_lnprior = None
self.fit_lnlike = None
self.fit_lnprob = None
self.fit_Ndim = None
self.fit_chi2 = None
self.fit_scale = None
self.fit_scale_err = None
def fit(self, data, data_err, data_mask, lprob_func=None,
lprob_args=None, lprob_kwargs=None, track_scale=False,
verbose=True):
"""
Fit all input models to the input data to compute the associated
log-posteriors.
Parameters
----------
data : `~numpy.ndarray` of shape (Ndata, Nfilt)
Data values.
data_err : `~numpy.ndarray` of shape (Ndata, Nfilt)
Associated errors on the data values.
data_mask : `~numpy.ndarray` of shape (Ndata, Nfilt)
Binary mask (0/1) indicating whether the data value was observed.
lprob_func : str or func, optional
Log-posterior function to be used. Must return ln(prior), ln(like),
ln(post), Ndim, chi2, and (optionally) scale and std(scale).
If not provided, `~frankenz.pdf.logprob` will be used.
lprob_args : args, optional
Arguments to be passed to `lprob_func`.
lprob_kwargs : kwargs, optional
Keyword arguments to be passed to `lprob_func`.
track_scale : bool, optional
Whether `lprob_func` also returns the scale-factor. Default is
`False`.
verbose : bool, optional
Whether to print progress to `~sys.stderr`. Default is `True`.
"""
# Initialize values.
if lprob_func is None:
lprob_func = logprob
if lprob_args is None:
lprob_args = []
if lprob_kwargs is None:
lprob_kwargs = dict()
Ndata = len(data)
# Fit data.
for i, results in enumerate(self._fit(data, data_err, data_mask,
lprob_func=lprob_func,
lprob_args=lprob_args,
lprob_kwargs=lprob_kwargs,
track_scale=track_scale,
save_fits=True)):
if verbose:
sys.stderr.write('\rFitting object {0}/{1}'.format(i+1, Ndata))
sys.stderr.flush()
if verbose:
sys.stderr.write('\n')
sys.stderr.flush()
def _fit(self, data, data_err, data_mask, lprob_func=None,
lprob_args=None, lprob_kwargs=None, track_scale=False,
save_fits=True):
"""
Internal generator used to compute fits.
Parameters
----------
data : `~numpy.ndarray` of shape (Ndata, Nfilt)
Data values.
data_err : `~numpy.ndarray` of shape (Ndata, Nfilt)
Associated errors on the data values.
data_mask : `~numpy.ndarray` of shape (Ndata, Nfilt)
Binary mask (0/1) indicating whether the data value was observed.
lprob_func : str or func, optional
Log-posterior function to be used. Must return ln(prior), ln(like),
ln(post), Ndim, chi2, and (optionally) scale and std(scale).
If not provided, `~frankenz.pdf.logprob` will be used.
lprob_args : args, optional
Arguments to be passed to `lprob_func`.
lprob_kwargs : kwargs, optional
Keyword arguments to be passed to `lprob_func`.
track_scale : bool, optional
Whether `lprob_func` also returns the scale-factor. Default is
`False`.
save_fits : bool, optional
Whether to save fits internally while computing predictions.
Default is `True`.
Returns
-------
results : tuple
Output of `lprob_func` yielded from the generator.
"""
# Initialize values.
if lprob_func is None:
lprob_func = logprob
if lprob_args is None:
lprob_args = []
if lprob_kwargs is None:
lprob_kwargs = dict()
Ndata = len(data)
Nmodels = self.NMODEL
self.NDATA = Ndata
if save_fits:
self.fit_lnprior = np.zeros((Ndata, Nmodels), dtype='float')
self.fit_lnlike = np.zeros((Ndata, Nmodels), dtype='float')
self.fit_lnprob = np.zeros((Ndata, Nmodels), dtype='float')
self.fit_Ndim = np.zeros((Ndata, Nmodels), dtype='int')
self.fit_chi2 = np.zeros((Ndata, Nmodels), dtype='float')
self.fit_scale = np.ones((Ndata, Nmodels), dtype='float')
self.fit_scale_err = np.zeros((Ndata, Nmodels), dtype='float')
# Fit data.
for i, (x, xe, xm) in enumerate(zip(data, data_err, data_mask)):
results = lprob_func(x, xe, xm, self.models, self.models_err,
self.models_mask, *lprob_args, **lprob_kwargs)
if save_fits:
self.fit_lnprior[i] = results[0] # ln(prior)
self.fit_lnlike[i] = results[1] # ln(like)
self.fit_lnprob[i] = results[2] # ln(prob)
self.fit_Ndim[i] = results[3] # dimensionality of fit
self.fit_chi2[i] = results[4] # chi2
if track_scale:
self.fit_scale[i] = results[5] # scale-factor
self.fit_scale_err[i] = results[6] # std(s)
yield results
def predict(self, model_labels, model_label_errs, label_dict=None,
label_grid=None, logwt=None, kde_args=None, kde_kwargs=None,
return_gof=False, verbose=True):
"""
Compute photometric 1-D predictions to the target distribution.
Parameters
----------
model_labels : `~numpy.ndarray` of shape (Nmodel)
Model label values.
model_label_errs : `~numpy.ndarray` of shape (Nmodel)
Associated errors on the model labels.
label_dict : `~frankenz.pdf.PDFDict` object, optional
Dictionary of pre-computed stationary kernels. If provided,
:meth:`~frankenz.pdf.gauss_kde_dict` will be used for KDE.
label_grid : `~numpy.ndarray` of shape (Ngrid), optional
Grid points to evaluate the 1-D PDFs over. Only used when
`label_dict` is not provided, at which point
:meth:`~frankenz.pdf.gauss_kde` will be used for KDE.
logwt : `~numpy.ndarray` of shape (Ndata, Nmodel), optional
A new set of log-weights used to compute the marginalized 1-D
PDFs in place of the log-probability.
kde_args : args, optional
Arguments to be passed to the KDE function.
kde_kwargs : kwargs, optional
Keyword arguments to be passed to the KDE function.
return_gof : bool, optional
Whether to return a tuple containing the ln(MAP) and
ln(evidence) values for the predictions
along with the pdfs. Default is `False`.
verbose : bool, optional
Whether to print progress to `~sys.stderr`. Default is `True`.
Returns
-------
pdfs : `~numpy.ndarray` of shape (Nobj, Ngrid)
Collection of 1-D PDFs for each object.
(lmap, levid) : 2-tuple of `~numpy.ndarray` with shape (Nobj), optional
Set of ln(MAP) and ln(evidence) values for each object.
"""
# Initialize values.
if kde_args is None:
kde_args = []
if kde_kwargs is None:
kde_kwargs = dict()
if logwt is None:
logwt = self.fit_lnprob
if label_dict is None and label_grid is None:
raise ValueError("`label_dict` or `label_grid` must be specified.")
if self.fit_lnprob is None and logwt is None:
raise ValueError("Fits have not been computed and weights have "
"not been provided.")
if label_dict is not None:
Nx = label_dict.Ngrid
else:
Nx = len(label_grid)
Ndata = self.NDATA
pdfs = np.zeros((Ndata, Nx))
if return_gof:
lmap = np.zeros(Ndata)
levid = np.zeros(Ndata)
# Compute PDFs.
for i, res in enumerate(self._predict(model_labels, model_label_errs,
label_dict=label_dict,
label_grid=label_grid,
logwt=logwt, kde_args=kde_args,
kde_kwargs=kde_kwargs)):
pdf, gof = res
pdfs[i] = pdf
if return_gof:
lmap[i], levid[i] = gof
if verbose:
sys.stderr.write('\rGenerating PDF {0}/{1}'
.format(i+1, Ndata))
sys.stderr.flush()
if verbose:
sys.stderr.write('\n')
sys.stderr.flush()
if return_gof:
return pdfs, (lmap, levid)
else:
return pdfs
def _predict(self, model_labels, model_label_errs, label_dict=None,
label_grid=None, logwt=None, kde_args=None, kde_kwargs=None):
"""
Internal generator used to compute photometric 1-D predictions.
Parameters
----------
model_labels : `~numpy.ndarray` of shape (Nmodel)
Model label values.
model_label_errs : `~numpy.ndarray` of shape (Nmodel)
Associated errors on the model labels.
label_dict : `~frankenz.pdf.PDFDict` object, optional
Dictionary of pre-computed stationary kernels. If provided,
:meth:`~frankenz.pdf.gauss_kde_dict` will be used for KDE.
label_grid : `~numpy.ndarray` of shape (Ngrid), optional
Grid points to evaluate the 1-D PDFs over. Only used when
`label_dict` is not provided, at which point
:meth:`~frankenz.pdf.gauss_kde` will be used for KDE.
logwt : `~numpy.ndarray` of shape (Ndata, Nmodel), optional
A new set of log-weights used to compute the marginalized 1-D
PDFs in place of the log-posterior.
kde_args : args, optional
Arguments to be passed to the KDE function.
kde_kwargs : kwargs, optional
Keyword arguments to be passed to the KDE function.
Returns
-------
pdf : `~numpy.ndarray` of shape (Ngrid)
1-D PDF yielded by the generator.
(lmap, levid) : 2-tuple of floats
ln(MAP) and ln(evidence) values yielded by the generator.
"""
# Initialize values.
if kde_args is None:
kde_args = []
if kde_kwargs is None:
kde_kwargs = dict()
if logwt is None:
logwt = self.fit_lnprob
if label_dict is None and label_grid is None:
raise ValueError("`label_dict` or `label_grid` must be specified.")
if label_dict is not None:
y_idx, y_std_idx = label_dict.fit(model_labels, model_label_errs)
# Generate PDFs.
for i, lwt in enumerate(logwt):
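# The log-evidence from logsumexp doubles as the normalization constant:
# subtracting it before exponentiating keeps the weights numerically stable,
# while max(lwt) is retained as the ln(MAP) goodness-of-fit value.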
lmap, levid = max(lwt), logsumexp(lwt)
wt = np.exp(lwt - levid)
if label_dict is not None:
# Use dictionary if available.
pdf = gauss_kde_dict(label_dict, y_idx=y_idx,
y_std_idx=y_std_idx, y_wt=wt,
*kde_args, **kde_kwargs)
else:
# Otherwise just use KDE.
pdf = gauss_kde(model_labels, model_label_errs, label_grid,
y_wt=wt, *kde_args, **kde_kwargs)
pdf /= pdf.sum()
yield pdf, (lmap, levid)
def fit_predict(self, data, data_err, data_mask, model_labels,
model_label_errs, lprob_func=None, label_dict=None,
label_grid=None, kde_args=None, kde_kwargs=None,
lprob_args=None, lprob_kwargs=None, return_gof=False,
track_scale=False, verbose=True, save_fits=True):
"""
Fit all input models to the input data to compute the associated
log-posteriors and 1-D predictions.
Parameters
----------
data : `~numpy.ndarray` of shape (Ndata, Nfilt)
Data values.
data_err : `~numpy.ndarray` of shape (Ndata, Nfilt)
Associated errors on the data values.
data_mask : `~numpy.ndarray` of shape (Ndata, Nfilt)
Binary mask (0/1) indicating whether the data value was observed.
model_labels : `~numpy.ndarray` of shape (Nmodel)
Model label values.
model_label_errs : `~numpy.ndarray` of shape (Nmodel)
Associated errors on the model labels.
lprob_func : str or func, optional
Log-posterior function to be used. Must return ln(prior), ln(like),
ln(post), Ndim, chi2, and (optionally) scale and std(scale).
If not provided, `~frankenz.pdf.logprob` will be used.
label_dict : `~frankenz.pdf.PDFDict` object, optional
Dictionary of pre-computed stationary kernels. If provided,
:meth:`~frankenz.pdf.gauss_kde_dict` will be used for KDE.
label_grid : `~numpy.ndarray` of shape (Ngrid), optional
Grid points to evaluate the 1-D PDFs over. Only used when
`label_dict` is not provided, at which point
:meth:`~frankenz.pdf.gauss_kde` will be used for KDE.
kde_args : args, optional
Arguments to be passed to the KDE function.
kde_kwargs : kwargs, optional
Keyword arguments to be passed to the KDE function.
lprob_args : args, optional
Arguments to be passed to `lprob_func`.
lprob_kwargs : kwargs, optional
Keyword arguments to be passed to `lprob_func`.
return_gof : bool, optional
Whether to return a tuple containing the ln(MAP) and
ln(evidence) values for the predictions
along with the pdfs. Default is `False`.
track_scale : bool, optional
Whether `lprob_func` also returns the scale-factor. Default is
`False`.
verbose : bool, optional
Whether to print progress to `~sys.stderr`. Default is `True`.
save_fits : bool, optional
Whether to save fits internally while computing predictions.
Default is `True`.
Returns
-------
pdfs : `~numpy.ndarray` of shape (Nobj, Ngrid)
Collection of 1-D PDFs for each object.
(lmap, levid) : 2-tuple of `~numpy.ndarray` with shape (Nobj), optional
Set of ln(MAP) and ln(evidence) values for each object.
"""
# Initialize values.
if lprob_func is None:
lprob_func = logprob
if lprob_args is None:
lprob_args = []
if lprob_kwargs is None:
lprob_kwargs = dict()
if kde_args is None:
kde_args = []
if kde_kwargs is None:
kde_kwargs = dict()
if label_dict is None and label_grid is None:
raise ValueError("`label_dict` or `label_grid` must be specified.")
if label_dict is not None:
Nx = label_dict.Ngrid
else:
Nx = len(label_grid)
Ndata = len(data)
pdfs = np.zeros((Ndata, Nx))
if return_gof:
lmap = np.zeros(Ndata)
levid = np.zeros(Ndata)
# Generate predictions.
for i, res in enumerate(self._fit_predict(data, data_err, data_mask,
model_labels,
model_label_errs,
lprob_func=lprob_func,
label_dict=label_dict,
label_grid=label_grid,
kde_args=kde_args,
kde_kwargs=kde_kwargs,
lprob_args=lprob_args,
lprob_kwargs=lprob_kwargs,
track_scale=track_scale,
save_fits=save_fits)):
pdf, gof = res
pdfs[i] = pdf
if return_gof:
lmap[i], levid[i] = gof # save gof metrics
if verbose:
sys.stderr.write('\rGenerating PDF {0}/{1}'
.format(i+1, Ndata))
sys.stderr.flush()
if verbose:
sys.stderr.write('\n')
sys.stderr.flush()
if return_gof:
return pdfs, (lmap, levid)
else:
return pdfs
def _fit_predict(self, data, data_err, data_mask, model_labels,
model_label_errs, lprob_func=None, label_dict=None,
label_grid=None, kde_args=None, kde_kwargs=None,
lprob_args=None, lprob_kwargs=None,
track_scale=False, save_fits=True):
"""
Internal generator used to fit and compute predictions.
Parameters
----------
data : `~numpy.ndarray` of shape (Ndata, Nfilt)
Data values.
data_err : `~numpy.ndarray` of shape (Ndata, Nfilt)
Associated errors on the data values.
data_mask : `~numpy.ndarray` of shape (Ndata, Nfilt)
Binary mask (0/1) indicating whether the data value was observed.
model_labels : `~numpy.ndarray` of shape (Nmodel)
Model label values.
model_label_errs : `~numpy.ndarray` of shape (Nmodel)
Associated errors on the model labels.
lprob_func : str or func, optional
Log-posterior function to be used. Must return ln(prior), ln(like),
ln(post), Ndim, chi2, and (optionally) scale and std(scale).
If not provided, `~frankenz.pdf.logprob` will be used.
label_dict : `~frankenz.pdf.PDFDict` object, optional
Dictionary of pre-computed stationary kernels. If provided,
:meth:`~frankenz.pdf.gauss_kde_dict` will be used for KDE.
label_grid : `~numpy.ndarray` of shape (Ngrid), optional
Grid points to evaluate the 1-D PDFs over. Only used when
`label_dict` is not provided, at which point
:meth:`~frankenz.pdf.gauss_kde` will be used for KDE.
kde_args : args, optional
Arguments to be passed to the KDE function.
kde_kwargs : kwargs, optional
Keyword arguments to be passed to the KDE function.
lprob_args : args, optional
Arguments to be passed to `lprob_func`.
lprob_kwargs : kwargs, optional
Keyword arguments to be passed to `lprob_func`.
track_scale : bool, optional
Whether `lprob_func` also returns the scale-factor. Default is
`False`.
save_fits : bool, optional
Whether to save fits internally while computing predictions.
Default is `True`.
Returns
-------
pdfs : `~numpy.ndarray` of shape (Ngrid)
1-D PDF for each object yielded by the generator.
(lmap, levid) : 2-tuple of floats
ln(MAP) and ln(evidence) values for each object.
"""
# Initialize values.
if lprob_func is None:
lprob_func = logprob
if lprob_args is None:
lprob_args = []
if lprob_kwargs is None:
lprob_kwargs = dict()
if kde_args is None:
kde_args = []
if kde_kwargs is None:
kde_kwargs = dict()
if label_dict is None and label_grid is None:
raise ValueError("`label_dict` or `label_grid` must be specified.")
Ndata = len(data)
Nmodels = self.NMODEL
if save_fits:
self.fit_lnprior = np.zeros((Ndata, Nmodels), dtype='float')
self.fit_lnlike = np.zeros((Ndata, Nmodels), dtype='float')
self.fit_lnprob = np.zeros((Ndata, Nmodels), dtype='float')
self.fit_Ndim = np.zeros((Ndata, Nmodels), dtype='int')
self.fit_chi2 = np.zeros((Ndata, Nmodels), dtype='float')
self.fit_scale = np.ones((Ndata, Nmodels), dtype='float')
self.fit_scale_err = np.zeros((Ndata, Nmodels), dtype='float')
self.NDATA = Ndata
if label_dict is not None:
y_idx, y_std_idx = label_dict.fit(model_labels, model_label_errs)
# Run generator.
for i, (x, xe, xm) in enumerate(zip(data, data_err, data_mask)):
# Compute fit.
results = lprob_func(x, xe, xm, self.models, self.models_err,
self.models_mask, *lprob_args, **lprob_kwargs)
if save_fits:
self.fit_lnprior[i] = results[0] # ln(prior)
self.fit_lnlike[i] = results[1] # ln(like)
self.fit_lnprob[i] = results[2] # ln(prob)
self.fit_Ndim[i] = results[3] # dimensionality of fit
self.fit_chi2[i] = results[4] # chi2
if track_scale:
self.fit_scale[i] = results[5] # scale-factor
self.fit_scale_err[i] = results[6] # std(s)
lnprob = results[2]
# Compute PDF and GOF metrics.
lmap, levid = max(lnprob), logsumexp(lnprob)
wt = np.exp(lnprob - levid)
if label_dict is not None:
pdf = gauss_kde_dict(label_dict, y_idx=y_idx,
y_std_idx=y_std_idx, y_wt=wt,
*kde_args, **kde_kwargs)
else:
pdf = gauss_kde(model_labels, model_label_errs,
label_grid, y_wt=wt,
*kde_args, **kde_kwargs)
pdf /= pdf.sum()
yield pdf, (lmap, levid)
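# Minimal usage sketch (illustrative only; the input arrays and the label grid
# below are assumptions that match the shapes documented in the docstrings,
# not values supplied by this module):
#
#   bf = BruteForce(models, models_err, models_mask)
#   pdfs, (lmap, levid) = bf.fit_predict(data, data_err, data_mask,
#                                        model_labels, model_label_errs,
#                                        label_grid=np.linspace(0., 6., 601),
#                                        return_gof=True)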
| mit | -5,926,666,856,018,205,000 | 36.44691 | 79 | 0.534936 | false |
kevinpt/ripyl | test/test_support.py | 1 | 5322 | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''Ripyl protocol decode library
test support functions
'''
# Copyright © 2013 Kevin Thibedeau
# This file is part of Ripyl.
# Ripyl is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
# Ripyl is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with Ripyl. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, division
import struct
import os
import array
import sys
import unittest
import random
import time
import gc
from ripyl.util.eng import eng_si
import ripyl.util.color as color
def relativelyEqual(a, b, epsilon):
''' Adapted from: http://floating-point-gui.de/errors/comparison/ '''
if a == b: # take care of the infinities
return True
elif a * b == 0.0: # either a or b is zero
return abs(a - b) < epsilon ** 2
else: # relative error
return abs(a - b) / (abs(a) + abs(b)) < epsilon
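# RandomSeededTestCase below seeds the random module from the TEST_SEED
# environment variable when it is set (see setUp()), so the seed printed by a
# failing run can be exported to reproduce the same random sequence; otherwise
# a fresh seed is drawn and reported.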
class RandomSeededTestCase(unittest.TestCase):
def __init__(self, methodName='runTest', seedVarName='TEST_SEED'):
unittest.TestCase.__init__(self, methodName=methodName)
self.seed_var_name = seedVarName
self.test_name = 'Unnamed test'
self.trial = 0
self.trial_count = 0
def setUp(self):
# In subclasses, use the following to call this setUp() from an overridden setUp()
# super(<sub-class>, self).setUp()
# Use seed from environment if it is set
try:
seed = long(os.environ[self.seed_var_name])
except KeyError:
random.seed()
seed = long(random.random() * 1e9)
print(color.note('\n * Random seed: {} *'.format(seed)))
random.seed(seed)
def update_progress(self, cur_trial, dotted=True):
self.trial = cur_trial
if not dotted:
print('\r {} {} / {} '.format(self.test_name, self.trial, self.trial_count), end='')
else:
if self.trial == 1:
print(' {} '.format(self.test_name), end='')
endc = '' if self.trial % 100 else '\n'
print('.', end=endc)
sys.stdout.flush()
def assertRelativelyEqual(self, a, b, epsilon, msg=None):
if not relativelyEqual(a, b, epsilon):
if msg is None:
msg = '{} != {}'.format(a, b)
raise self.failureException(msg)
def timedtest(f):
'''Decorator that times execution of a test case'''
def wrapper(self, *args, **kwargs):
gc.disable()
try:
t_start = time.time()
result = f(self, *args, **kwargs)
t_end = time.time()
try:
_t_start = self._t_start
t_start = _t_start if isinstance(_t_start, float) else t_start
self._t_start = None
except:
pass
finally:
gc.enable()
delta = t_end - t_start
iterations = None
units_processed = 1
unit_name = 'units'
if result:
try:
if len(result) >= 2:
iterations = result[0]
units_processed = result[1]
if len(result) >= 3:
unit_name = result[2]
except TypeError:
iterations = result
if iterations:
per_iter = delta / iterations
else:
per_iter = delta
processing_rate = units_processed / delta
print('* Test duration: total {}, per iteration {}, rate {}'.format( \
eng_si(delta, 's'), eng_si(per_iter, 's'), eng_si(processing_rate, unit_name + '/s') ))
return wrapper
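# Illustrative use of the decorator (the test class, body and returned counts
# are placeholders, not part of this module): returning a tuple of
# (iterations, units_processed, unit_name) from the wrapped test feeds the
# per-iteration time and processing-rate figures printed above.
#
#   class DecodeTest(RandomSeededTestCase):
#       @timedtest
#       def test_decode(self):
#           ...
#           return (trials, samples_processed, 'samples')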
def write_bin_file(fname, samples, sample_period, start_time):
'''Write samples to binary file'''
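# File layout (all little-endian 32-bit floats): sample_period, start_time,
# then the sample values themselves; read_bin_file() below expects the same
# ordering.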
with open(fname, 'wb') as fo:
fo.write(struct.pack('<f', sample_period))
fo.write(struct.pack('<f', start_time))
for s in samples:
fo.write(struct.pack('<f', s))
def read_bin_file(fname):
'''Read samples from binary file'''
with open(fname, 'rb') as fo:
sample_period = struct.unpack('<f', fo.read(4))[0]
start_time = struct.unpack('<f', fo.read(4))[0]
num_samples = (os.path.getsize(fname) - (2 * 4)) // 4
samples = array.array('f')
try:
samples.fromfile(fo, num_samples)
except EOFError:
raise EOFError('Missing samples in file')
# On a big-endian machine the samples need to be byteswapped
if sys.byteorder == 'big':
samples.byteswap()
return (samples, sample_period, start_time)
| lgpl-3.0 | 2,962,859,695,577,808,000 | 29.672619 | 99 | 0.549145 | false |
archatas/whoosh | whoosh/highlight.py | 1 | 17889 | #===============================================================================
# Copyright 2008 Matt Chaput
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
"""The highlight module contains classes and functions for displaying short
excerpts from hit documents in the search results you present to the user, with
query terms highlighted.
"""
from __future__ import division
from heapq import nlargest
from cgi import escape as htmlescape
from whoosh.util.anyall import *
# Fragment object
class Fragment(object):
"""Represents a fragment (extract) from a hit document. This object is
mainly used to keep track of the start and end points of the fragment; it
does not contain the text of the fragment or do much else.
"""
def __init__(self, tokens, charsbefore=0, charsafter=0, textlen=999999):
"""
:param tokens: list of the Token objects in the fragment.
:param charsbefore: approx. how many characters before the start of the
first matched term to include in the fragment.
:param charsafter: approx. how many characters after the end of the
last matched term to include in the fragment.
:param textlen: length in characters of the document text.
"""
#: index of the first character of the fragment in the original
# document
self.startchar = max(0, tokens[0].startchar - charsbefore)
#: index after the last character of the fragment in the original
#document
self.endchar = min(textlen, tokens[-1].endchar + charsafter)
self.matches = [t for t in tokens if t.matched]
self.matched_terms = frozenset(t.text for t in self.matches)
def __len__(self):
return self.endchar - self.startchar
def overlaps(self, fragment):
sc = self.startchar
ec = self.endchar
fsc = fragment.startchar
fec = fragment.endchar
return (fsc > sc and fsc < ec) or (fec > sc and fec < ec)
def overlapped_length(self, fragment):
sc = self.startchar
ec = self.endchar
fsc = fragment.startchar
fec = fragment.endchar
return max(ec, fec) - min(sc, fsc)
def has_matches(self):
        return any(t.matched for t in self.matches)
# Filters
def copyandmatchfilter(termset, tokens):
for t in tokens:
t = t.copy()
t.matched = t.text in termset
yield t
# Fragmenters
def NullFragmenter(text, tokens):
"""Doesn't fragment the token stream. This object just returns the entire
stream as one "fragment". This is useful if you want to highlight the
entire text.
"""
tokens = list(tokens)
before = after = 0
if tokens:
before = tokens[0].startchar
after = len(text) - tokens[-1].endchar
return [Fragment(tokens, charsbefore=before, charsafter=after)]
class SimpleFragmenter(object):
"""Simply splits the text into roughly equal sized chunks.
"""
def __init__(self, size=70):
"""
:param size: size (in characters) to chunk to. The chunking is based on
tokens, so the fragments will usually be smaller.
"""
self.size = size
def __call__(self, text, tokens):
size = self.size
first = None
frag = []
for t in tokens:
if first is None:
first = t.startchar
if t.endchar - first > size:
first = None
if frag:
yield Fragment(frag)
frag = []
frag.append(t)
if frag:
yield Fragment(frag)
class SentenceFragmenter(object):
"""Breaks the text up on sentence end punctuation characters
(".", "!", or "?"). This object works by looking in the original text for a
sentence end as the next character after each token's 'endchar'.
When highlighting with this fragmenter, you should use an analyzer that
does NOT remove stop words, for example::
sa = StandardAnalyzer(stoplist=None)
"""
def __init__(self, maxchars=200, sentencechars=".!?"):
"""
:param maxchars: The maximum number of characters allowed in a fragment.
"""
self.maxchars = maxchars
self.sentencechars = frozenset(sentencechars)
def __call__(self, text, tokens):
maxchars = self.maxchars
sentencechars = self.sentencechars
textlen = len(text)
first = None
frag = []
for t in tokens:
if first is None:
first = t.startchar
endchar = t.endchar
if endchar - first > maxchars:
first = None
if frag:
yield Fragment(frag)
frag = []
frag.append(t)
if frag and endchar < textlen and text[endchar] in sentencechars:
# Don't break for two periods in a row (e.g. ignore "...")
if endchar + 1 < textlen and text[endchar + 1] in sentencechars:
continue
yield Fragment(frag, charsafter=0)
frag = []
first = None
if frag:
yield Fragment(frag)
class ContextFragmenter(object):
"""Looks for matched terms and aggregates them with their surrounding
context.
This fragmenter only yields fragments that contain matched terms.
"""
def __init__(self, termset, maxchars=200, surround=20):
"""
:param termset: A collection (probably a set or frozenset) containing
the terms you want to match to token.text attributes.
:param maxchars: The maximum number of characters allowed in a
fragment.
:param surround: The number of extra characters of context to add both
before the first matched term and after the last matched term.
"""
self.maxchars = maxchars
self.charsbefore = self.charsafter = surround
def __call__(self, text, tokens):
maxchars = self.maxchars
charsbefore = self.charsbefore
charsafter = self.charsafter
current = []
currentlen = 0
countdown = -1
for t in tokens:
if t.matched:
countdown = charsafter
current.append(t)
length = t.endchar - t.startchar
currentlen += length
if countdown >= 0:
countdown -= length
if countdown < 0 or currentlen >= maxchars:
yield Fragment(current)
current = []
currentlen = 0
else:
while current and currentlen > charsbefore:
t = current.pop(0)
currentlen -= t.endchar - t.startchar
if countdown >= 0:
yield Fragment(current)
#class VectorFragmenter(object):
# def __init__(self, termmap, maxchars=200, charsbefore=20, charsafter=20):
# """
# :param termmap: A dictionary mapping the terms you're looking for to
# lists of either (posn, startchar, endchar) or
# (posn, startchar, endchar, boost) tuples.
# :param maxchars: The maximum number of characters allowed in a fragment.
# :param charsbefore: The number of extra characters of context to add before
# the first matched term.
# :param charsafter: The number of extra characters of context to add after
# the last matched term.
# """
#
# self.termmap = termmap
# self.maxchars = maxchars
# self.charsbefore = charsbefore
# self.charsafter = charsafter
#
# def __call__(self, text, tokens):
# maxchars = self.maxchars
# charsbefore = self.charsbefore
# charsafter = self.charsafter
# textlen = len(text)
#
# vfrags = []
# for term, data in self.termmap.iteritems():
# if len(data) == 3:
# t = Token(startchar = data[1], endchar = data[2])
# elif len(data) == 4:
# t = Token(startchar = data[1], endchar = data[2], boost = data[3])
# else:
# raise ValueError(repr(data))
#
# newfrag = VFragment([t], charsbefore, charsafter, textlen)
# added = False
#
# for vf in vfrags:
# if vf.overlaps(newfrag) and vf.overlapped_length(newfrag) < maxchars:
# vf.merge(newfrag)
# added = True
# break
# Fragment scorers
def BasicFragmentScorer(f):
# Add up the boosts for the matched terms in this passage
score = sum(t.boost for t in f.matches)
# Favor diversity: multiply score by the number of separate
# terms matched
score *= len(f.matched_terms) * 100
return score
# Fragment sorters
def SCORE(fragment):
"Sorts higher scored passages first."
return None
def FIRST(fragment):
"Sorts passages from earlier in the document first."
return fragment.startchar
def LONGER(fragment):
"Sorts longer passages first."
return 0 - len(fragment)
def SHORTER(fragment):
"Sort shorter passages first."
return len(fragment)
# Formatters
class UppercaseFormatter(object):
"""Returns a string in which the matched terms are in UPPERCASE.
"""
def __init__(self, between="..."):
"""
:param between: the text to add between fragments.
"""
self.between = between
def _format_fragment(self, text, fragment):
output = []
index = fragment.startchar
for t in fragment.matches:
if t.startchar > index:
output.append(text[index:t.startchar])
ttxt = text[t.startchar:t.endchar]
if t.matched: ttxt = ttxt.upper()
output.append(ttxt)
index = t.endchar
output.append(text[index:fragment.endchar])
return "".join(output)
def __call__(self, text, fragments):
return self.between.join((self._format_fragment(text, fragment)
for fragment in fragments))
class HtmlFormatter(object):
"""Returns a string containing HTML formatting around the matched terms.
This formatter wraps matched terms in an HTML element with two class names.
The first class name (set with the constructor argument ``classname``) is
the same for each match. The second class name (set with the constructor
argument ``termclass`` is different depending on which term matched. This
allows you to give different formatting (for example, different background
colors) to the different terms in the excerpt.
>>> hf = HtmlFormatter(tagname="span", classname="match", termclass="term")
>>> hf(mytext, myfragments)
"The <span class="match term0">template</span> <span class="match term1">geometry</span> is..."
This object maintains a dictionary mapping terms to HTML class names (e.g.
``term0`` and ``term1`` above), so that multiple excerpts will use the same
class for the same term. If you want to re-use the same HtmlFormatter
object with different searches, you should call HtmlFormatter.clear()
between searches to clear the mapping.
"""
template = '<%(tag)s class=%(q)s%(cls)s%(tn)s%(q)s>%(t)s</%(tag)s>'
def __init__(self, tagname="strong", between="...",
classname="match", termclass="term", maxclasses=5,
attrquote='"'):
"""
:param tagname: the tag to wrap around matching terms.
:param between: the text to add between fragments.
:param classname: the class name to add to the elements wrapped around
matching terms.
:param termclass: the class name prefix for the second class which is
different for each matched term.
:param maxclasses: the maximum number of term classes to produce. This
limits the number of classes you have to define in CSS by recycling
term class names. For example, if you set maxclasses to 3 and have
5 terms, the 5 terms will use the CSS classes ``term0``, ``term1``,
``term2``, ``term0``, ``term1``.
"""
self.between = between
self.tagname = tagname
self.classname = classname
self.termclass = termclass
self.attrquote = attrquote
self.maxclasses = maxclasses
self.seen = {}
def _format_fragment(self, text, fragment, seen):
htmlclass = " ".join((self.classname, self.termclass))
output = []
index = fragment.startchar
for t in fragment.matches:
if t.startchar > index:
output.append(text[index:t.startchar])
ttxt = htmlescape(text[t.startchar:t.endchar])
if t.matched:
if t.text in seen:
termnum = seen[t.text]
else:
termnum = len(seen) % self.maxclasses
seen[t.text] = termnum
ttxt = self.template % {"tag": self.tagname,
"q": self.attrquote,
"cls": htmlclass,
"t": ttxt, "tn": termnum}
output.append(ttxt)
index = t.endchar
if index < fragment.endchar:
output.append(text[index:fragment.endchar])
return "".join(output)
def __call__(self, text, fragments):
seen = self.seen
return self.between.join(self._format_fragment(text, fragment, seen)
for fragment in fragments)
def clear(self):
"""Clears the dictionary mapping terms to HTML classnames.
"""
self.seen = {}
class GenshiFormatter(object):
"""Returns a Genshi event stream containing HTML formatting around the
matched terms.
"""
def __init__(self, qname="strong", between="..."):
"""
:param qname: the QName for the tag to wrap around matched terms.
:param between: the text to add between fragments.
"""
self.qname = qname
self.between = between
from genshi.core import START, END, TEXT, Attrs, Stream #@UnresolvedImport
self.START, self.END, self.TEXT = START, END, TEXT
self.Attrs, self.Stream = Attrs, Stream
def _add_text(self, text, output):
if output and output[-1][0] == self.TEXT:
output[-1] = (self.TEXT, output[-1][1] + text, output[-1][2])
else:
output.append((self.TEXT, text, (None, -1, -1)))
def _format_fragment(self, text, fragment):
START, TEXT, END, Attrs = self.START, self.TEXT, self.END, self.Attrs
qname = self.qname
output = []
index = fragment.startchar
lastmatched = False
for t in fragment.matches:
if t.startchar > index:
if lastmatched:
output.append((END, qname, (None, -1, -1)))
lastmatched = False
self._add_text(text[index:t.startchar], output)
ttxt = text[t.startchar:t.endchar]
if not lastmatched:
output.append((START, (qname, Attrs()), (None, -1, -1)))
lastmatched = True
output.append((TEXT, ttxt, (None, -1, -1)))
index = t.endchar
if lastmatched:
output.append((END, qname, (None, -1, -1)))
return output
def __call__(self, text, fragments):
output = []
first = True
for fragment in fragments:
if not first:
self._add_text(self.between, output)
first = False
output += self._format_fragment(text, fragment)
return self.Stream(output)
# Highlighting
def top_fragments(text, terms, analyzer, fragmenter, top=3,
scorer=BasicFragmentScorer, minscore=1):
termset = frozenset(terms)
tokens = copyandmatchfilter(termset, analyzer(text, chars=True,
keeporiginal=True))
scored_frags = nlargest(top, ((scorer(f), f)
for f in fragmenter(text, tokens)))
return [sf for score, sf in scored_frags if score > minscore]
def highlight(text, terms, analyzer, fragmenter, formatter, top=3,
scorer=BasicFragmentScorer, minscore=1,
order=FIRST):
fragments = top_fragments(text, terms, analyzer, fragmenter,
top=top, minscore=minscore)
fragments.sort(key=order)
return formatter(text, fragments)
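# Minimal usage sketch for highlight(); the analyzer import and sample values
# are assumptions, not part of this module:
#
#     from whoosh.analysis import StandardAnalyzer
#
#     text = "The template geometry is defined by the template mesh."
#     terms = frozenset(["template", "geometry"])
#     excerpt = highlight(text, terms,
#                         StandardAnalyzer(stoplist=None),
#                         SentenceFragmenter(),
#                         UppercaseFormatter())
#     # Matched terms come back upper-cased and fragments are joined with "..."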
if __name__ == '__main__':
pass
| apache-2.0 | -1,190,348,764,071,662,300 | 33.335893 | 99 | 0.565711 | false |
JensTimmerman/easybuild-easyblocks | easybuild/easyblocks/n/ncl.py | 1 | 6733 | ##
# Copyright 2009-2012 Ghent University
# Copyright 2009-2012 Stijn De Weirdt
# Copyright 2010 Dries Verdegem
# Copyright 2010-2012 Kenneth Hoste
# Copyright 2011 Pieter De Baets
# Copyright 2011-2012 Jens Timmerman
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing NCL, implemented as an easyblock
"""
import fileinput
import os
import re
import sys
from distutils.version import LooseVersion
from easybuild.framework.easyblock import EasyBlock
from easybuild.tools.filetools import run_cmd
from easybuild.tools.modules import get_software_root, get_software_version
class EB_NCL(EasyBlock):
"""Support for building/installing NCL."""
def configure_step(self):
"""Configure build:
- create Makefile.ini using make and run ymake script to create config file
- patch config file with correct settings, and add missing config entries
- create config/Site.local file to avoid interactive install
        - generate Makefile using config/ymkmf script
"""
try:
os.chdir('config')
except OSError, err:
self.log.error("Failed to change to the 'config' dir: %s" % err)
cmd = "make -f Makefile.ini"
run_cmd(cmd, log_all=True, simple=True)
cmd = "./ymake -config $PWD"
run_cmd(cmd, log_all=True, simple=True)
# figure out name of config file
cfg_regexp = re.compile('^\s*SYSTEM_INCLUDE\s*=\s*"(.*)"\s*$', re.M)
f = open("Makefile", "r")
txt = f.read()
f.close()
cfg_filename = cfg_regexp.search(txt).group(1)
# adjust config file as needed
ctof_libs = ''
ifort = get_software_root('ifort')
if ifort:
if LooseVersion(get_software_version('ifort')) < LooseVersion('2011.4'):
ctof_libs = '-lm -L%s/lib/intel64 -lifcore -lifport' % ifort
else:
ctof_libs = '-lm -L%s/compiler/lib/intel64 -lifcore -lifport' % ifort
elif get_software_root('GCC'):
ctof_libs = '-lgfortran -lm'
macrodict = {
'CCompiler': os.getenv('CC'),
'FCompiler': os.getenv('F77'),
'CcOptions': '-ansi %s' % os.getenv('CFLAGS'),
'FcOptions': os.getenv('FFLAGS'),
'COptimizeFlag': os.getenv('CFLAGS'),
'FOptimizeFlag': os.getenv('FFLAGS'),
'ExtraSysLibraries': os.getenv('LDFLAGS'),
'CtoFLibraries': ctof_libs
}
# replace config entries that are already there
for line in fileinput.input(cfg_filename, inplace=1, backup='%s.orig' % cfg_filename):
for (key, val) in macrodict.items():
regexp = re.compile("(#define %s\s*).*" % key)
match = regexp.search(line)
if match:
line = "#define %s %s\n" % (key, val)
macrodict.pop(key)
sys.stdout.write(line)
# add remaining config entries
f = open(cfg_filename, "a")
for (key, val) in macrodict.items():
f.write("#define %s %s\n" % (key, val))
f.close()
f = open(cfg_filename, "r")
self.log.debug("Contents of %s: %s" % (cfg_filename, f.read()))
f.close()
# configure
try:
os.chdir(self.cfg['start_dir'])
except OSError, err:
self.log.error("Failed to change to the build dir %s: %s" % (self.cfg['start_dir'], err))
# instead of running the Configure script that asks a zillion questions,
# let's just generate the config/Site.local file ourselves...
# order of deps is important
# HDF needs to go after netCDF, because both have a netcdf.h include file
deps = ["HDF5", "JasPer", "netCDF", "HDF", "g2lib", "g2clib", "Szip"]
libs = ''
includes = ''
for dep in deps:
root = get_software_root(dep)
if not root:
self.log.error('%s not available' % dep)
libs += ' -L%s/lib ' % root
includes += ' -I%s/include ' % root
        cfgtxt = """#ifdef FirstSite
#endif /* FirstSite */
#ifdef SecondSite
#define YmakeRoot %(installdir)s
#define LibSearch %(libs)s
#define IncSearch %(includes)s
#define BuildNCL 1
#define HDFlib
#define HDFEOSlib
#define UdUnitslib
#define BuildGRIB2 1
#define BuildRasterHDF 0
#define BuildHDF4 0
#define BuildTRIANGLE 0
#define BuildUdunits 0
#define BuildHDFEOS 0
#define BuildHDFEOS5 0
#endif /* SecondSite */
""" % {
'installdir': self.installdir,
'libs': libs,
'includes': includes
}
f = open("config/Site.local", "w")
f.write(cfgtxt)
f.close()
# generate Makefile
cmd = "./config/ymkmf"
run_cmd(cmd, log_all=True, simple=True)
def build_step(self):
"""Building is done in install_step."""
pass
def install_step(self):
"""Build in install dir using build_step."""
cmd = "make Everything"
run_cmd(cmd, log_all=True, simple=True)
def sanity_check_step(self):
"""
Custom sanity check for NCL
"""
custom_paths = {
'files': ["bin/ncl", "lib/libncl.a", "lib/libncarg.a"],
'dirs': ["include/ncarg"]
}
super(EB_NCL, self).sanity_check_step(custom_paths=custom_paths)
def make_module_extra(self):
"""Set NCARG_ROOT environment variable in module."""
txt = super(EB_NCL, self).make_module_extra()
txt += "setenv\tNCARG_ROOT\t$root\n"
return txt
| gpl-2.0 | 1,901,864,687,388,564 | 32.167488 | 101 | 0.596168 | false |
detly/dumat | dumat/cubicsuperpath.py | 1 | 5417 | #!/usr/bin/env python
# Copyright (C) 2005 Aaron Spike, [email protected]
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from dumat import simplepath
from math import *
def matprod(mlist):
prod=mlist[0]
for m in mlist[1:]:
a00=prod[0][0]*m[0][0]+prod[0][1]*m[1][0]
a01=prod[0][0]*m[0][1]+prod[0][1]*m[1][1]
a10=prod[1][0]*m[0][0]+prod[1][1]*m[1][0]
a11=prod[1][0]*m[0][1]+prod[1][1]*m[1][1]
prod=[[a00,a01],[a10,a11]]
return prod
def rotmat(teta):
return [[cos(teta),-sin(teta)],[sin(teta),cos(teta)]]
def applymat(mat, pt):
x=mat[0][0]*pt[0]+mat[0][1]*pt[1]
y=mat[1][0]*pt[0]+mat[1][1]*pt[1]
pt[0]=x
pt[1]=y
def norm(pt):
return sqrt(pt[0]*pt[0]+pt[1]*pt[1])
def ArcToPath(p1,params):
A=p1[:]
rx,ry,teta,longflag,sweepflag,x2,y2=params[:]
teta = teta*pi/180.0
B=[x2,y2]
if rx==0 or ry==0 or A==B:
return([[A[:],A[:],A[:]],[B[:],B[:],B[:]]])
mat=matprod((rotmat(teta),[[1/rx,0],[0,1/ry]],rotmat(-teta)))
applymat(mat, A)
applymat(mat, B)
k=[-(B[1]-A[1]),B[0]-A[0]]
d=k[0]*k[0]+k[1]*k[1]
k[0]/=sqrt(d)
k[1]/=sqrt(d)
d=sqrt(max(0,1-d/4))
if longflag==sweepflag:
d*=-1
O=[(B[0]+A[0])/2+d*k[0],(B[1]+A[1])/2+d*k[1]]
OA=[A[0]-O[0],A[1]-O[1]]
OB=[B[0]-O[0],B[1]-O[1]]
start=acos(OA[0]/norm(OA))
if OA[1]<0:
start*=-1
end=acos(OB[0]/norm(OB))
if OB[1]<0:
end*=-1
if sweepflag and start>end:
end +=2*pi
if (not sweepflag) and start<end:
end -=2*pi
NbSectors=int(abs(start-end)*2/pi)+1
dTeta=(end-start)/NbSectors
#v=dTeta*2/pi*0.552
#v=dTeta*2/pi*4*(sqrt(2)-1)/3
v = 4*tan(dTeta/4)/3
#if not sweepflag:
# v*=-1
p=[]
for i in range(0,NbSectors+1,1):
angle=start+i*dTeta
v1=[O[0]+cos(angle)-(-v)*sin(angle),O[1]+sin(angle)+(-v)*cos(angle)]
pt=[O[0]+cos(angle) ,O[1]+sin(angle) ]
v2=[O[0]+cos(angle)- v *sin(angle),O[1]+sin(angle)+ v *cos(angle)]
p.append([v1,pt,v2])
p[ 0][0]=p[ 0][1][:]
p[-1][2]=p[-1][1][:]
mat=matprod((rotmat(teta),[[rx,0],[0,ry]],rotmat(-teta)))
for pts in p:
applymat(mat, pts[0])
applymat(mat, pts[1])
applymat(mat, pts[2])
return(p)
def CubicSuperPath(simplepath):
csp = []
subpath = -1
subpathstart = []
last = []
lastctrl = []
for s in simplepath:
cmd, params = s
if cmd == 'M':
if last:
csp[subpath].append([lastctrl[:],last[:],last[:]])
subpath += 1
csp.append([])
subpathstart = params[:]
last = params[:]
lastctrl = params[:]
elif cmd == 'L':
csp[subpath].append([lastctrl[:],last[:],last[:]])
last = params[:]
lastctrl = params[:]
elif cmd == 'C':
csp[subpath].append([lastctrl[:],last[:],params[:2]])
last = params[-2:]
lastctrl = params[2:4]
elif cmd == 'Q':
q0=last[:]
q1=params[0:2]
q2=params[2:4]
x0= q0[0]
x1=1./3*q0[0]+2./3*q1[0]
x2= 2./3*q1[0]+1./3*q2[0]
x3= q2[0]
y0= q0[1]
y1=1./3*q0[1]+2./3*q1[1]
y2= 2./3*q1[1]+1./3*q2[1]
y3= q2[1]
csp[subpath].append([lastctrl[:],[x0,y0],[x1,y1]])
last = [x3,y3]
lastctrl = [x2,y2]
elif cmd == 'A':
arcp=ArcToPath(last[:],params[:])
arcp[ 0][0]=lastctrl[:]
last=arcp[-1][1]
lastctrl = arcp[-1][0]
csp[subpath]+=arcp[:-1]
elif cmd == 'Z':
csp[subpath].append([lastctrl[:],last[:],last[:]])
last = subpathstart[:]
lastctrl = subpathstart[:]
#append final superpoint
csp[subpath].append([lastctrl[:],last[:],last[:]])
return csp
def unCubicSuperPath(csp):
a = []
for subpath in csp:
if subpath:
a.append(['M',subpath[0][1][:]])
for i in range(1,len(subpath)):
a.append(['C',subpath[i-1][2][:] + subpath[i][0][:] + subpath[i][1][:]])
return a
def parsePath(d):
return CubicSuperPath(simplepath.parsePath(d))
def formatPath(p, terminate=False):
# Modified by JH to add 'Z' termination when needed
simple_path = unCubicSuperPath(p)
if terminate:
simple_path.append(['Z', []])
return simplepath.formatPath(simple_path)
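# Round-trip sketch (the path string is a hypothetical example):
#
#     csp = parsePath('M 10 10 L 20 10 C 25 10 30 15 30 20 Z')
#     # csp is a list of subpaths; each point is a [ctrl_before, point,
#     # ctrl_after] triple of [x, y] pairs, i.e. every segment is represented
#     # as a cubic Bezier, arcs and quadratics included.
#     d = formatPath(csp, terminate=True)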
# vim: expandtab shiftwidth=4 tabstop=8 softtabstop=4 fileencoding=utf-8 textwidth=99
| gpl-3.0 | -6,287,155,482,373,087,000 | 30.678363 | 88 | 0.512461 | false |
pytorch/text | benchmark/benchmark_sentencepiece.py | 1 | 1914 | import time
import argparse
from torchtext.experimental.transforms import load_sp_model as load_pybind_sp_model
from torchtext.data.functional import load_sp_model as load_torchbind_sp_model
from torchtext.utils import download_from_url
from torchtext.datasets import DATASETS
def benchmark_sentencepiece(args):
def _run_benchmark(train, spm_processor):
t0 = time.monotonic()
for (_, text) in train:
spm_processor(text)
print("Sentencepiece processor time:", time.monotonic() - t0)
# Download a pretrained sentencepiece model
sp_model_path = download_from_url('https://pytorch.s3.amazonaws.com/models/text/pretrained_spm/text_unigram_15000.model')
# existing sentencepiece model with torchbind
train = DATASETS[args.dataset](split='train')
sp_model = load_torchbind_sp_model(sp_model_path)
print("SentencePiece EncodeAsIds - torchbind")
_run_benchmark(train, sp_model.EncodeAsIds)
# experimental sentencepiece model with pybind
train = DATASETS[args.dataset](split='train')
sp_model = load_pybind_sp_model(sp_model_path)
print("SentencePiece EncodeAsIds - pybind")
_run_benchmark(train, sp_model.EncodeAsIds)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='SentencePiece benchmark')
parser.add_argument('--dataset', type=str, default='AG_NEWS',
help='Dataset for performance benchmark')
args = parser.parse_args()
benchmark_sentencepiece(args)
# Running with AG_NEWS
# SentencePiece EncodeAsIds - torchbind
# Sentencepiece processor time: 11.536989663727582
# SentencePiece EncodeAsIds - pybind
# Sentencepiece processor time: 11.38821320142597
# Running with YelpReviewFull
# SentencePiece EncodeAsIds - torchbind
# Sentencepiece processor time: 224.23954573180526
# SentencePiece EncodeAsIds - pybind
# Sentencepiece processor time: 217.134037473239
| bsd-3-clause | -530,106,896,059,488,200 | 38.061224 | 125 | 0.738767 | false |
google/flax | flax/core/nn/attention.py | 1 | 18495 | # Copyright 2021 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Attention core modules for Flax."""
from collections.abc import Iterable # pylint: disable=g-importing-member
import functools
import warnings
from typing import Any
from . import stochastic
from flax import jax_utils
from flax import struct
from flax.nn import initializers
from flax.core import Scope
import jax
from jax import lax
from jax import random
import jax.numpy as jnp
from .linear import default_kernel_init
from .linear import dense_general
import numpy as np
def dot_product_attention(scope,
query,
key,
value,
dtype=jnp.float32,
bias=None,
axis=None,
broadcast_dropout=True,
dropout_rng=None,
dropout_rate=0.,
deterministic=False,
precision=None):
"""Computes dot-product attention given query, key, and value.
This is the core function for applying attention based on
https://arxiv.org/abs/1706.03762. It calculates the attention weights given
query and key and combines the values using the attention weights. This
function supports multi-dimensional inputs.
Args:
query: queries for calculating attention with shape of `[batch_size, dim1,
dim2, ..., dimN, num_heads, mem_channels]`.
key: keys for calculating attention with shape of `[batch_size, dim1, dim2,
..., dimN, num_heads, mem_channels]`.
value: values to be used in attention with shape of `[batch_size, dim1,
dim2,..., dimN, num_heads, value_channels]`.
dtype: the dtype of the computation (default: float32)
bias: bias for the attention weights. This can be used for incorporating
autoregressive mask, padding mask, proximity bias.
    axis: axes over which the attention is applied.
broadcast_dropout: bool: use a broadcasted dropout along batch dims.
dropout_rng: JAX PRNGKey: to be used for dropout
dropout_rate: dropout rate
deterministic: bool, deterministic or not (to apply dropout)
precision: numerical precision of the computation see `jax.lax.Precision`
for details.
Returns:
    Output of shape `[bs, dim1, dim2, ..., dimN, num_heads, value_channels]`.
"""
assert key.shape[:-1] == value.shape[:-1]
assert (query.shape[0:1] == key.shape[0:1] and
query.shape[-1] == key.shape[-1])
if axis is None:
axis = tuple(range(1, key.ndim - 2))
if not isinstance(axis, Iterable):
axis = (axis,)
assert key.ndim == query.ndim
assert key.ndim == value.ndim
for ax in axis:
if not (query.ndim >= 3 and 1 <= ax < query.ndim - 2):
raise ValueError('Attention axis must be between the batch '
'axis and the last-two axes.')
depth = query.shape[-1]
n = key.ndim
# batch_dims is <bs, <non-attention dims>, num_heads>
batch_dims = tuple(np.delete(range(n), axis + (n - 1,)))
# q & k -> (bs, <non-attention dims>, num_heads, <attention dims>, channels)
qk_perm = batch_dims + axis + (n - 1,)
key = key.transpose(qk_perm)
query = query.transpose(qk_perm)
# v -> (bs, <non-attention dims>, num_heads, channels, <attention dims>)
v_perm = batch_dims + (n - 1,) + axis
value = value.transpose(v_perm)
query = query / jnp.sqrt(depth).astype(dtype)
batch_dims_t = tuple(range(len(batch_dims)))
attn_weights = lax.dot_general(
query,
key, (((n - 1,), (n - 1,)), (batch_dims_t, batch_dims_t)),
precision=precision)
  # apply attention bias: masking, dropout, proximity bias, etc.
if bias is not None:
attn_weights = attn_weights + bias
# normalize the attention weights
norm_dims = tuple(range(attn_weights.ndim - len(axis), attn_weights.ndim))
attn_weights = lax.exp(
attn_weights -
jax.scipy.special.logsumexp(attn_weights, axis=norm_dims, keepdims=True))
attn_weights = attn_weights.astype(dtype)
# apply dropout
if not deterministic and dropout_rate > 0.:
if dropout_rng is None:
dropout_rng = scope.make_rng('dropout')
keep_prob = jax.lax.tie_in(attn_weights, 1.0 - dropout_rate)
if broadcast_dropout:
# dropout is broadcast across the batch+head+non-attention dimension
dropout_dims = attn_weights.shape[-(2 * len(axis)):]
dropout_shape = (tuple([1] * len(batch_dims_t)) + dropout_dims)
keep = random.bernoulli(dropout_rng, keep_prob, dropout_shape)
else:
keep = random.bernoulli(dropout_rng, keep_prob, attn_weights.shape)
multiplier = (keep.astype(attn_weights.dtype) /
jnp.asarray(keep_prob, dtype=dtype))
attn_weights = attn_weights * multiplier
# compute the new values given the attention weights
wv_contracting_dims = (norm_dims, range(value.ndim - len(axis), value.ndim))
y = lax.dot_general(
attn_weights,
value, (wv_contracting_dims, (batch_dims_t, batch_dims_t)),
precision=precision)
# back to (bs, dim1, dim2, ..., dimN, num_heads, channels)
perm_inv = _invert_perm(qk_perm)
y = y.transpose(perm_inv)
return y
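# Shape sketch for dot_product_attention (illustrative values): with query,
# key and value of shape [batch, length, num_heads, depth] and the default
# axis=(1,), the intermediate attention weights have shape
# [batch, num_heads, q_length, kv_length] and the returned array is back to
# [batch, length, num_heads, depth].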
def _invert_perm(perm):
perm_inv = [0] * len(perm)
for i, j in enumerate(perm):
perm_inv[j] = i
return tuple(perm_inv)
@struct.dataclass
class CacheEntry:
key: np.ndarray
value: np.ndarray
i: np.ndarray
def multi_head_dot_product_attention(
scope: Scope,
inputs_q,
inputs_kv,
num_heads,
dtype=jnp.float32,
qkv_features=None,
out_features=None,
attention_axis=None,
causal_mask=False,
padding_mask=None,
key_padding_mask=None,
segmentation=None,
key_segmentation=None,
cache=False,
broadcast_dropout=True,
dropout_rng=None,
dropout_rate=0.,
deterministic=False,
precision=None,
kernel_init=default_kernel_init,
bias_init=initializers.zeros,
bias=True,
attention_fn=dot_product_attention):
"""Applies multi-head dot product attention on the input data.
Projects the inputs into multi-headed query, key, and value vectors,
applies dot-product attention and project the results to an output vector.
This can be used for encoder-decoder attention by specifying both `inputs_q`
  and `inputs_kv`, or for self-attention by only specifying `inputs_q` and
setting `inputs_kv` to None.
Args:
inputs_q: input queries of shape `[bs, dim1, dim2, ..., dimN, features]`.
inputs_kv: key/values of shape `[bs, dim1, dim2, ..., dimN, features]`
      or None for self-attention, in which case key/values will be derived
from inputs_q.
num_heads: number of attention heads. Features (i.e. inputs_q.shape[-1])
should be divisible by the number of heads.
dtype: the dtype of the computation (default: float32)
qkv_features: dimension of the key, query, and value.
out_features: dimension of the last projection
attention_axis: axes over which the attention is applied ( 'None' means
attention over all axes, but batch, heads, and features).
causal_mask: boolean specifying whether to apply a causal mask on the
attention weights. If True, the output at timestep `t` will not depend
on inputs at timesteps strictly greater than `t`.
padding_mask: boolean specifying query tokens that are pad token.
key_padding_mask: boolean specifying key-value tokens that are pad token.
segmentation: segment indices for packed inputs_q data.
key_segmentation: segment indices for packed inputs_kv data.
cache: an instance of `flax.nn.attention.Cache` used for efficient
autoregressive decoding.
broadcast_dropout: bool: use a broadcasted dropout along batch dims.
dropout_rng: JAX PRNGKey: to be used for dropout
dropout_rate: dropout rate
deterministic: bool, deterministic or not (to apply dropout)
precision: numerical precision of the computation see `jax.lax.Precision`
for details.
kernel_init: initializer for the kernel of the Dense layers.
bias_init: initializer for the bias of the Dense layers.
bias: bool: whether pointwise QKVO dense transforms use bias.
attention_fn: dot_product_attention or compatible function. Accepts
query, key, value, and returns output of shape
      `[bs, dim1, dim2, ..., dimN, num_heads, value_channels]`
Returns:
output of shape `[bs, dim1, dim2, ..., dimN, features]`.
"""
assert causal_mask or not cache, (
'Caching is only support for causal attention.')
if inputs_kv is None:
inputs_kv = inputs_q
if attention_axis is None:
attention_axis = tuple(range(1, inputs_q.ndim - 1))
features = out_features or inputs_q.shape[-1]
qkv_features = qkv_features or inputs_q.shape[-1]
assert qkv_features % num_heads == 0, (
'Memory dimension must be divisible by number of heads.')
head_dim = qkv_features // num_heads
dense = functools.partial(
dense_general,
axis=-1,
dtype=dtype,
features=(num_heads, head_dim),
kernel_init=kernel_init,
bias_init=bias_init,
bias=bias,
precision=precision)
# project inputs_q to multi-headed q/k/v
# dimensions are then [bs, dims..., n_heads, n_features_per_head]
query = scope.child(dense, 'query')(inputs_q)
key = scope.child(dense, 'key')(inputs_kv)
value = scope.child(dense, 'value')(inputs_kv)
if cache:
if not scope.has_variable('cache', 'entry'):
ndim, tail_shape = (key.ndim, key.shape[-2:])
def init_fn(shape, dtype=jnp.float32):
full_shape = shape + tail_shape
if len(full_shape) != ndim:
raise ValueError('Shape should be a tuple with the shape of the batch'
'and attention dims.')
return CacheEntry(
key=jnp.zeros(full_shape, dtype),
value=jnp.zeros(full_shape, dtype),
i=jnp.zeros((), jnp.uint32))
cache_entry = init_fn
else:
cache_entry = scope.get_variable('cache', 'entry')
if not isinstance(cache_entry, CacheEntry):
raise ValueError('Cache is not initialized.')
expected_shape = list(cache_entry.key.shape[:-2])
for attn_dim in attention_axis:
expected_shape[attn_dim] = 1
expected_shape = tuple(expected_shape) + inputs_q.shape[-1:]
if expected_shape != inputs_q.shape:
raise ValueError('Invalid shape provided, '
'expected shape %s instead got %s.' %
(expected_shape, inputs_q.shape))
cshape = cache_entry.key.shape
indices = [0] * len(cshape)
i = cache_entry.i
attn_size = np.prod(np.take(cshape, attention_axis))
for attn_dim in attention_axis:
attn_size //= cshape[attn_dim]
indices[attn_dim] = i // attn_size
i = i % attn_size
key = lax.dynamic_update_slice(cache_entry.key, key, indices)
value = lax.dynamic_update_slice(cache_entry.value, value, indices)
one = jnp.array(1, jnp.uint32)
cache_entry = cache_entry.replace(i=cache_entry.i + one,
key=key,
value=value)
# TODO(levskaya): verify this is still needed in translation decoding.
key_padding_mask = jnp.broadcast_to(
(jnp.arange(cshape[1]) < cache_entry.i), cshape[:2])
key_padding_mask = key_padding_mask.astype(jnp.float32)[..., None]
scope.put_variable('cache', 'entry', cache_entry)
# create attention masks
mask_components = []
if causal_mask:
if cache and isinstance(cache_entry, CacheEntry):
bias_pre_shape = (1,) * (key.ndim - 1)
attn_shape = tuple(np.take(key.shape, attention_axis))
attn_size = np.prod(attn_shape)
ii = jnp.arange(attn_size, dtype=jnp.uint32)
mask = ii < cache_entry.i
mask_components.append(mask.reshape(bias_pre_shape + attn_shape))
else:
mask_components.append(_make_causal_mask(key, attention_axis))
if padding_mask is not None:
if key_padding_mask is None:
key_padding_mask = padding_mask
padding_mask = make_padding_mask(
padding_mask_query=padding_mask,
padding_mask_key=key_padding_mask,
query_shape=query.shape,
key_shape=key.shape,
attention_axis=attention_axis)
mask_components.append(padding_mask)
if segmentation is not None:
if key_segmentation is None:
key_segmentation = segmentation
segmentation_mask = make_padding_mask(
padding_mask_query=segmentation,
padding_mask_key=key_segmentation,
query_shape=query.shape,
key_shape=key.shape,
attention_axis=attention_axis,
segmentation_mask=True)
mask_components.append(segmentation_mask)
if mask_components:
attention_mask = mask_components[0]
for component in mask_components[1:]:
attention_mask = jnp.logical_and(attention_mask, component)
# attention mask in the form of attention bias
attention_bias = lax.select(
attention_mask > 0, jnp.full(attention_mask.shape, 0.).astype(dtype),
jnp.full(attention_mask.shape, -1e10).astype(dtype))
else:
attention_bias = None
# apply attention
x = scope.child(attention_fn)(
query,
key,
value,
dtype=dtype,
axis=attention_axis,
bias=attention_bias,
precision=precision,
dropout_rng=dropout_rng,
dropout_rate=dropout_rate,
broadcast_dropout=broadcast_dropout,
deterministic=deterministic)
# back to the original inputs dimensions
out = scope.child(dense_general, name='out')(
x,
features=features,
axis=(-2, -1),
kernel_init=kernel_init,
bias_init=bias_init,
bias=bias,
dtype=dtype,
precision=precision)
return out
# TODO(flax-dev): Consider refactoring MultiHeadDotProductAttention and moving
# causal_mask and cache support into this class instead.
#SelfAttention = MultiHeadDotProductAttention.partial(inputs_kv=None)
def make_padding_mask(padding_mask_query,
padding_mask_key,
query_shape,
key_shape,
attention_axis=None,
segmentation_mask=False):
"""Makes padding mask for attention weights.
In case of 1d inputs (i.e., `[bs, len, features]`, the attention weights will
be `[bs, len, len]` and this function makes a square matrix [len, len].
Args:
padding_mask_query: padding mask of query <bs, qdim1,.., qdimn>
padding_mask_key: padding mask of query <bs, key1,.., keyn>
query_shape: shape of the query
key_shape: shape of the key, which is equal to the shape of value.
attention_axis: axis over which attention is applied.
segmentation_mask: bool: if true use equality on cartesian product rather
than outer product for constructing segmentation masks.
Returns:
The padding mask for attention weights.
"""
assert query_shape[0] == key_shape[0]
assert len(query_shape) == len(key_shape)
ndim = len(key_shape)
if attention_axis is None:
attention_axis = tuple(range(1, ndim - 2))
assert isinstance(attention_axis, tuple)
for ax in attention_axis:
if not (ndim >= 3 and 1 <= ax < ndim - 2):
raise ValueError(
'Attention axis must be between the batch axis and the last-two axes.'
)
  mask_shape_final = (query_shape[0], 1)  # batch_size, 1 (for all heads)
for ax in attention_axis:
mask_shape_final += (query_shape[ax],)
for ax in attention_axis:
mask_shape_final += (key_shape[ax],)
padding_mask_query = padding_mask_query[..., None]
padding_mask_key = padding_mask_key[..., None]
perm = (0,) + tuple(np.flip(np.arange(padding_mask_key.ndim)))[:-1]
if segmentation_mask:
mask = jnp.equal(padding_mask_query, padding_mask_key.transpose(perm))
else:
mask = jnp.multiply(padding_mask_query, padding_mask_key.transpose(perm))
mask = mask.reshape(mask_shape_final)
mask = jax.lax.convert_element_type(mask, jnp.float32)
return mask
def _make_causal_mask(key, attention_axis=None, self_mask=False):
"""Makes a causal mask, to be used for masking out the future for attention.
In case of 1d inputs (i.e., `[bs, len, features]`, the attention weights will
be `[bs, len, len]` and this function makes a square matrix [len, len] with
zeros in upper triangle and ones in lower triangle.
Args:
key: shape of the key, which is equal to the shape of value and is
assumed to be equal to the shape of the query (since this is used in
self-attention when decoding).
attention_axis: axis over which attention is applied.
self_mask: if mask out the diagonal or not.
Returns:
A causal mask to be used to mask out future positions.
"""
if attention_axis is None:
attention_axis = tuple(range(1, key.ndim - 2))
assert isinstance(attention_axis, tuple)
for ax in attention_axis:
if not (key.ndim >= 3 and 1 <= ax < key.ndim - 2):
raise ValueError(
'Attention axis must be between the batch axis and the last-two axes.'
)
mask_shape = tuple([1] * (key.ndim - len(attention_axis) - 1))
mask_shape_final = mask_shape
for _ in range(2):
flatten_dim = 1
for ax in attention_axis:
mask_shape_final += (key.shape[ax],)
flatten_dim *= key.shape[ax]
mask_shape += (flatten_dim,)
def tri(n, m, k=0):
# Tie in the key to avoid the mask becoming a constant.
# This way XLA can construct the mask during computation and fuse it
# with the attention ops.
x = lax.tie_in(key, jnp.arange(n, dtype=jnp.int32))
y = lax.tie_in(key, jnp.arange(m, dtype=jnp.int32))
mask = lax.ge(
(lax.broadcast_in_dim(x, shape=(n, m), broadcast_dimensions=(0,))) + k,
lax.broadcast(y, [n]))
return mask
k = -1 if self_mask else 0
mask = tri(*mask_shape[-2:], k=k).reshape(mask_shape_final)
return mask
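# Illustration of _make_causal_mask for a single 1-d attention axis
# (hypothetical length 4): the mask broadcasts to shape [1, 1, 4, 4] and is
# lower-triangular,
#
#     [[1, 0, 0, 0],
#      [1, 1, 0, 0],
#      [1, 1, 1, 0],
#      [1, 1, 1, 1]]
#
# so position t can only attend to positions <= t.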
| apache-2.0 | -5,751,349,227,438,841,000 | 35.916168 | 80 | 0.656394 | false |
priestc/giotto | giotto/controllers/__init__.py | 1 | 7907 | from collections import deque
import inspect
import json
from giotto import get_config
from giotto.exceptions import (GiottoException, InvalidInput, ProgramNotFound,
MockNotFound, ControlMiddlewareInterrupt, NotAuthorized, InvalidInvocation)
from giotto.primitives import GiottoPrimitive, RAW_INVOCATION_ARGS
from giotto.keyvalue import DummyKeyValue
from giotto.control import GiottoControl
class GiottoController(object):
middleware_interrupt = None
persist_data = None
def __init__(self, request, manifest, model_mock=False, errors=None):
self.request = request
self.model_mock = model_mock
self.cache = get_config('cache_engine', DummyKeyValue())
self.errors = errors
self.manifest = manifest
self.middleware_interrupt_exc = None
self.middleware_control = None
self.display_data = 'Not calculated yet'
# the program that corresponds to this invocation
invocation = self.get_invocation()
name = self.get_controller_name()
parsed = self.manifest.parse_invocation(invocation, controller_tag=name)
self.raw_args = parsed['raw_args']
self.program = parsed['program']
self.program.name_on_manifest = parsed['program_name']
self.path_args = parsed['args']
if parsed['superformat']:
self.mimetype = parsed['superformat_mime'] or parsed['superformat']
else:
self.mimetype = self.mimetype_override() or self.default_mimetype
def get_response(self):
"""
High level function for getting a response. This is what the concrete
controller should call. Returns a controller specific response.
"""
last_good_request = self.request
middleware_result = None
try:
last_good_request, middleware_result = self.program.execute_input_middleware_stream(self.request, self)
except GiottoException as exc:
# save this exception so it can be re-raised from within
# get_data_response() so that get_concrete_response() can handle it
self.middleware_interrupt_exc = exc
self.request = last_good_request
else:
self.request = middleware_result # middleware ended cleanly
if GiottoControl in type(middleware_result).mro():
# middleware returned a control object
self.middleware_control = middleware_result
self.request = last_good_request
response = self.get_concrete_response()
if self.persist_data:
response = self.persist(self.persist_data, response)
return self.program.execute_output_middleware_stream(self.request, response, self)
def get_data_response(self):
"""
Execute the model and view, and handle the cache.
Returns controller-agnostic response data.
"""
if self.middleware_interrupt_exc:
## the middleware raised an exception, re-raise it here so
## get_concrete_response (defined in subclasses) can catch it.
raise self.middleware_interrupt_exc
if self.middleware_control:
## this redirect object came from middleware but return it as if it
## came from a view.
return {'body': self.middleware_control}
if self.model_mock and self.program.has_mock_defined():
model_data = self.program.get_model_mock()
else:
args, kwargs = self.program.get_model_args_kwargs()
data = self.get_data_for_model(args, kwargs)
self.display_data = data # just for displaying in __repr__
if self.program.cache and not self.errors:
key = self.get_cache_key(data)
hit = self.cache.get(key)
if hit:
return hit
model_data = self.program.execute_model(data)
response = self.program.execute_view(model_data, self.mimetype, self.errors)
if self.program.cache and not self.errors and not self.model_mock:
self.cache.set(key, response, self.program.cache)
if 'persist' in response:
self.persist_data = response['persist']
return response
def get_data_for_model(self, args, kwargs):
"""
In comes args and kwargs expected for the model. Out comes the data from
this invocation that will go to the model.
In other words, this function does the "data negotiation" between the
controller and the model.
"""
kwargs_from_invocation = self.get_raw_data()
args_from_invocation = deque(self.path_args)
defaults = kwargs
values = args + list(kwargs.keys())
output = {}
raw = False
for i, field in enumerate(values):
## going through each bit of data that the model needs
## `field` here is the name of each needed var.
# the 'default' value that may be defined in the model.
# this variable might be a string or int or might even be a primitive object.
# NotImplemented here is used as to preserve if a default value is None.
# it is used here as a sort of MetaNone.
default_defined_in_model = defaults.get(field, NotImplemented)
# the value in kwarg arguments such as --values and GET params
from_data_kwargs = kwargs_from_invocation.get(field, None)
# The value that will end up being used.
value_to_use = None
if default_defined_in_model == RAW_INVOCATION_ARGS:
# flag that the RAW_INVOCATION_ARGS primitive has been invoked
# used later to suppress errors for unused program args
# when this primitive is invoked, all positional args are invalid.
raw = True
if type(default_defined_in_model) == GiottoPrimitive:
value_to_use = self.get_primitive(default_defined_in_model.name)
elif from_data_kwargs:
value_to_use = from_data_kwargs
elif not raw and args_from_invocation:
value_to_use = args_from_invocation.popleft()
elif default_defined_in_model is not NotImplemented:
value_to_use = default_defined_in_model
else:
raise InvalidInvocation("Data Missing For Program. Missing: %s" % field)
output[field] = value_to_use
if args_from_invocation and not raw:
msg = "Too many arguments. Program `%s` takes %s arguments, %s given" % (
self.program.name, len(args) + len(kwargs), len(args_from_invocation)
)
raise InvalidInvocation(msg)
return output
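    # Sketch of the negotiation above (model signature and invocation are
    # hypothetical): for a model such as
    #
    #     def blog_post(post_id, fmt='html', user=<some GiottoPrimitive>):
    #         ...
    #
    # `post_id` is filled from the positional path args, `fmt` from the parsed
    # kwargs (e.g. GET params or --fmt on the command line), and `user` by
    # resolving the primitive default through self.get_primitive().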
    def persist(self, values, response):
        """
        Persist this data between the user and the server.
        """
        raise NotImplementedError("This controller does not support persistence")
def __repr__(self):
controller = self.get_controller_name()
model = self.program.name
data = self.display_data
return "<%s %s - %s - %s>" % (
self.__class__.__name__, controller, model, data
)
def mimetype_override(self):
"""
In some circumstances, the returned mimetype can be changed. Return that here.
Otherwise the default or superformat will be used.
"""
return None
def get_cache_key(self, data):
try:
controller_args = json.dumps(data, separators=(',', ':'), sort_keys=True)
except TypeError:
# controller contains info that can't be json serialized:
controller_args = str(data)
program = self.program.name
return "%s(%s)(%s)" % (controller_args, program, self.mimetype)
| bsd-2-clause | 1,328,981,605,514,870,500 | 38.934343 | 115 | 0.615404 | false |
EuroPython/ep-tools | tasks.py | 1 | 4203 | """
Invoke tasks to be run from the command line.
"""
import os
from invoke import task
from eptools import talks, people
from eptools.gspread_utils import get_api_key_file
from eptools.config import (
conference,
sponsors_billing_worksheet,
finaid_submissions_worksheet
)
@task
def sponsor_agreement(ctx, company_name, output_dir, template_file="", api_key_file=""):
""" Call docstamp to produce a sponsor agreement for `company_name`
using `template_file`. The output will be saved in `output_dir`.
Parameters
----------
company_name: str
Can be a substring of the company name in the spreadsheet.
template_file: str
output_dir: str
api_key_file: str
The path to the Google Credentials json file.
If left empty will try to look for its path in the config.py file.
"""
from eptools.sponsors import (
get_sponsor,
get_sponsors_ws_data,
create_sponsor_agreement,
contract_template,
company_name_column,
)
if not template_file:
template_file = contract_template
if not api_key_file:
api_key_file = get_api_key_file()
output_dir = os.path.abspath(output_dir)
responses = get_sponsors_ws_data(api_key_file=api_key_file, doc_key=sponsors_billing_worksheet[0])
try:
sponsor_data = get_sponsor(sponsor_name=company_name, sponsors=responses, col_name=company_name_column)
except Exception:
raise KeyError("Could not find data for sponsor {}.".format(company_name))
else:
fpath = create_sponsor_agreement(sponsor_data, template_file=template_file, output_dir=output_dir)
print("Created {}.".format(fpath))
@task
def finaid_receipt(ctx, applicant_name, output_dir, template_file="", api_key_file=""):
""" Call docstamp to produce a financial aid receipt
for `applicant_name` using `template_file`.
The output will be saved in `output_dir`.
Parameters
----------
applicant_name: str
template_file: str
output_dir: str
api_key_file: str
Path to the Google credentials json file.
If left empty will try to look for its path in the config.py file.
"""
from eptools.finaid import get_finaid_ws_data, get_applicant, receipt_template_spa, create_receipt
if not template_file:
template_file = receipt_template_spa
if not api_key_file:
api_key_file = get_api_key_file()
output_dir = os.path.abspath(output_dir)
responses = get_finaid_ws_data(api_key_file=api_key_file, doc_key=finaid_submissions_worksheet[0])
try:
applicant_data = get_applicant(applicant_name=applicant_name, submissions=responses, col_name="full_name")
except Exception:
raise KeyError("Could not find data for applicant {}.".format(applicant_name))
else:
fpath = create_receipt(applicant_data, template_file=template_file, output_dir=output_dir)
print("Created {}.".format(fpath))
@task
def fetch_ticket_profiles(ctx, out_filepath, conf=conference, status="all", nondups=False, raise_=False, ticket_id=""):
""" Create a json file with the all the tickets of the conference.
make_option('--status',
choices=['all', 'complete', 'incomplete'],
help='Status of the orders related with the tickets.',
make_option('--nondups',
help='If enables will remove the tickets with '
'same owner/email.',
make_option('--raise',
help='If enabled will raise any error that it may find.',
make_option('--ticket-id',
help='Will output the profile of the given ticket only.',
"""
return people.fetch_files(out_filepath, conf=conf, status=status, nondups=nondups, raise_=raise_, ticket_id=ticket_id)
@task
def fetch_talks_json(ctx, out_filepath="", status="proposed", conf=conference, host="europython.io", with_votes=False):
""" Return the talks in a json format. `status` choices: ['accepted', 'proposed']
"""
return talks.fetch_talks_json(out_filepath=out_filepath, status=status, conf=conf, host=host, with_votes=with_votes)
| mit | 804,876,676,924,397,200 | 32.624 | 122 | 0.656674 | false |
guildai/guild | guild/op.py | 1 | 14996 | # Copyright 2017-2021 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import logging
import os
import subprocess
import sys
import time
from guild import config
from guild import exit_code
from guild import log as loglib
from guild import op_dep
from guild import op_util
from guild import run as runlib
from guild import util
log = logging.getLogger("guild")
OP_RUNFILE_PATHS = [
["guild", "external"],
]
PROC_TERM_TIMEOUT_SECONDS = 30
LOG_WAITING_DELAY_SECONDS = 2
###################################################################
# Exception classes
###################################################################
class InvalidOpDef(ValueError):
def __init__(self, opdef, msg):
super(InvalidOpDef, self).__init__(opdef, msg)
self.opdef = opdef
self.msg = msg
def __str__(self):
return self.msg
class ProcessError(Exception):
pass
###################################################################
# State
###################################################################
class Operation(object):
def __init__(self):
self.opref = None
self.cmd_args = []
self.cmd_env = {}
self.private_env = []
self.sourcecode_paths = []
self.run_dir = None
self.run_attrs = {}
self.deps = []
self.callbacks = None
class OperationCallbacks(object):
def __init__(self, init_output_summary=None, run_initialized=None):
self.init_output_summary = init_output_summary
self.run_initialized = run_initialized
def _callback(name, op, *rest_args):
if op.callbacks:
cb = getattr(op.callbacks, name, None)
if cb:
cb(op, *rest_args)
###################################################################
# Init run
###################################################################
def init_run(op, run_dir=None):
run = _op_init_pending_run(op, run_dir)
_op_init_run_attrs(op, run)
_callback("run_initialized", op, run)
return run
def _op_init_pending_run(op, run_dir):
run_dir = run_dir or op.run_dir
run = op_util.init_run(run_dir)
log.debug("initializing run in %s", run.dir)
run.init_skel()
op_util.set_run_pending(run)
return run
def _op_init_run_attrs(op, run):
run.write_opref(op.opref)
run.write_attr("cmd", op.cmd_args)
for name, val in (op.run_attrs or {}).items():
run.write_attr(name, val)
###################################################################
# Stage
###################################################################
def stage(op, continue_on_deps_error=False):
run = init_run(op)
try:
_stage_run_proc_env(op, run)
_resolve_deps(op, run, for_stage=True, continue_on_error=continue_on_deps_error)
op_util.set_run_staged(run)
finally:
op_util.clear_run_pending(run)
return run
def _stage_run_proc_env(op, run):
env = _op_proc_env(op, run)
skip_env = ("PWD", "_")
with open(run.guild_path("ENV"), "w") as out:
for name in sorted(env):
if name in skip_env:
continue
out.write("export %s=%s\n" % (name, util.env_var_quote(env[name])))
###################################################################
# Run
###################################################################
def run(
op,
quiet=False,
pidfile=None,
stop_after=None,
extra_env=None,
continue_on_deps_error=False,
):
run = init_run(op)
op_util.clear_run_marker(run, "STAGED")
try:
_resolve_deps(op, run, continue_on_error=continue_on_deps_error)
finally:
op_util.clear_run_pending(run)
op_util.set_run_started(run)
if pidfile:
_run_op_in_background(run, op, pidfile, quiet, stop_after, extra_env)
return run, None
else:
exit_status = _run_op(run, op, quiet, stop_after, extra_env)
return run, exit_status
def _run_op_in_background(run, op, pidfile, quiet, stop_after, extra_env):
import daemonize
action = lambda: _run_op(run, op, quiet, stop_after, extra_env)
daemon = daemonize.Daemonize(
app="guild_op", action=action, pid=pidfile, chdir=config.cwd()
)
# Need to log before starting daemon, otherwise output isn't
# visible.
if not quiet:
log.info(
"%s started in background as %s (pidfile %s)",
run.opref.to_opspec(config.cwd()),
run.id,
pidfile,
)
try:
daemon.start()
except SystemExit:
op_util.clear_run_pending(run)
raise
def _run_op(run, op, quiet, stop_after, extra_env):
proc = _op_start_proc(op, run, quiet, extra_env)
exit_status = _op_wait_for_proc(op, proc, run, quiet, stop_after)
_op_finalize_run_attrs(run, exit_status)
return exit_status
def _op_start_proc(op, run, quiet, extra_env):
env = _op_proc_env(op, run)
if extra_env:
env.update(extra_env)
run.write_attr("env", _safe_env(env, op))
log.debug("starting run %s in %s", run.id, run.dir)
log.debug("operation command: %s", op.cmd_args)
log.debug("operation env: %s", _hide_secret_env(env))
stdout, stderr = _proc_streams(quiet)
try:
proc = subprocess.Popen(
op.cmd_args,
env=env,
cwd=run.dir,
stdout=stdout,
stderr=stderr,
)
except OSError as e:
raise ProcessError(e)
else:
op_util.write_proc_lock(proc.pid, run)
return proc
def _safe_env(env, op):
return _remove_secrets(_remove_private_env(env, op))
def _remove_private_env(env, op):
return {name: env[name] for name in env if name not in op.private_env}
def _remove_secrets(env):
def is_secret_env(name):
is_secret = _is_secret_env(name)
if is_secret:
log.debug("found op env secret %r - will be removed", name)
return is_secret
return {name: env[name] for name in env if not is_secret_env(name)}
def _is_secret_env(name):
"""Returns True if name might be a secret.
Uses a simple heuristic to test for a secret name.
"""
name_lower = name.lower()
secret_parts = ("password", "token", "secret")
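    # e.g. "DB_PASSWORD", "GITHUB_TOKEN" and "AWS_SECRET_ACCESS_KEY" would all be flagged.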
return any(part in name_lower for part in secret_parts)
def _hide_secret_env(env):
return {name: _maybe_hide_secret(name, env) for name in env}
def _maybe_hide_secret(name, env):
if _is_secret_env(name):
return "***"
return env[name]
def _proc_streams(quiet):
"""Returns a tuple of stdout, stderr streams for use in op subprocess."""
if os.getenv("NO_RUN_OUTPUT") == "1":
if quiet:
return _devnull(), _devnull()
else:
return None, None
elif os.getenv("SYNC_RUN_OUTPUT") == "1":
return subprocess.PIPE, subprocess.STDOUT
else:
return subprocess.PIPE, subprocess.PIPE
def _devnull():
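    # subprocess.DEVNULL is Python 3 only; fall back to opening os.devnull on Python 2.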
try:
from subprocess import DEVNULL
except ImportError:
return open(os.devnull, 'wb')
else:
return DEVNULL
def _op_wait_for_proc(op, proc, run, quiet, stop_after):
try:
return _op_watch_proc(op, proc, run, quiet, stop_after)
except KeyboardInterrupt:
return _handle_proc_keyboard_interrupt(proc)
def _op_watch_proc(op, proc, run, quiet, stop_after):
if os.getenv("NO_RUN_OUTPUT") != "1":
output_summary = _output_summary_for_run(run, op)
return _proc_wait_with_run_output(proc, run, quiet, output_summary, stop_after)
else:
return _proc_wait(proc, stop_after)
def _output_summary_for_run(run, op):
if not op.callbacks or not op.callbacks.init_output_summary:
return None
return op.callbacks.init_output_summary(op, run)
def _proc_wait_with_run_output(proc, run, quiet, output_summary, stop_after):
with _RunOutput(run, proc, quiet, output_summary):
return _proc_wait(proc, stop_after)
class _RunOutput(object):
def __init__(self, run, proc, quiet, output_summary):
self._output = None
self._run = run
self._proc = proc
self._quiet = quiet
self._output_summary = output_summary
def __enter__(self):
self._output = op_util.RunOutput(self._run, self._quiet, self._output_summary)
self._output.open(self._proc)
def __exit__(self, *_exc):
assert self._output
self._output.wait_and_close()
self._output = None
def _proc_wait(proc, stop_after):
if stop_after is None:
return proc.wait()
else:
return _proc_wait_minutes(proc, stop_after)
def _proc_wait_minutes(proc, minutes):
poll_interval = util.get_env("STOP_AFTER_POLL_INTERVAL", float)
kill_delay = util.get_env("STOP_AFTER_KILL_DELAY", float)
return op_util.wait_for_proc(
proc, minutes, poll_interval=poll_interval, kill_delay=kill_delay
)
def _handle_proc_keyboard_interrupt(proc):
log_waiting_after = time.time() + LOG_WAITING_DELAY_SECONDS
kill_after = time.time() + PROC_TERM_TIMEOUT_SECONDS
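    # Give the process a grace period to exit on its own before force-killing its tree.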
while time.time() < kill_after:
if log_waiting_after and time.time() > log_waiting_after:
if os.getenv("NO_OP_INTERRUPTED_MSG") != "1":
log.info("Operation interrupted - waiting for process to exit")
log_waiting_after = None
if proc.poll() is not None:
break
time.sleep(1)
if proc.poll() is None:
log.warning("Operation process did not exit - stopping forcefully")
util.kill_process_tree(proc.pid, force=True)
return exit_code.KEYBOARD_INTERRUPT
def _op_exit_status(proc_exit_status, opdef):
if (
proc_exit_status in (exit_code.SIGTERM, exit_code.KEYBOARD_INTERRUPT)
and opdef.stoppable
):
return 0
return proc_exit_status
def _op_finalize_run_attrs(run, exit_status):
if not os.path.exists(run.dir):
log.warning("run directory has been deleted, unable to finalize")
return
if not os.path.exists(run.guild_path()):
log.warning("run Guild directory has been deleted, unable to finalize")
return
stopped = runlib.timestamp()
run.write_attr("exit_status", exit_status)
run.write_attr("stopped", stopped)
op_util.delete_proc_lock(run)
# =================================================================
# Proc env
# =================================================================
def _op_proc_env(op, run):
"""Returns the proc env for op and associated run.
Proc env is made up of system env, op env, and run env. System env
is passed through unless otherwise defined by op env or run
    env. Run env takes precedence when it conflicts with op env.

"""
env = {}
env.update(_op_proc_system_env())
env.update(_op_proc_op_env(op))
env.update(_op_proc_run_env(run))
return env
def _op_proc_system_env():
return util.safe_osenv()
def _op_proc_op_env(op):
env = {}
env.update(op.cmd_env)
if op.opref:
env["GUILD_OP"] = op.opref.to_opspec()
env["GUILD_HOME"] = config.guild_home()
env["GUILD_SOURCECODE"] = _guild_sourcecode_env(op)
env["LOG_LEVEL"] = _log_level()
env["PYTHONPATH"] = _python_path(op)
env["CMD_DIR"] = os.getcwd()
return env
def _op_proc_run_env(run):
return {
"RUN_DIR": run.dir,
"RUN_ID": run.id,
}
def _guild_sourcecode_env(op):
return os.path.pathsep.join(op.sourcecode_paths)
def _log_level():
try:
return os.environ["LOG_LEVEL"]
except KeyError:
return str(logging.getLogger().getEffectiveLevel())
def _python_path(op):
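    # Order matters: op-supplied PYTHONPATH first, then source code paths, then
    # Guild's own paths, then the ambient PYTHONPATH.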
paths = _op_pythonpath_env(op) + op.sourcecode_paths + _guild_paths() + _env_paths()
return os.path.pathsep.join(paths)
def _op_pythonpath_env(op):
env = op.cmd_env.get("PYTHONPATH")
if not env:
return []
return env.split(os.path.pathsep)
def _guild_paths():
guild_path = os.path.dirname(os.path.dirname(__file__))
abs_guild_path = os.path.abspath(guild_path)
return _runfile_paths() + [abs_guild_path]
def _runfile_paths():
return [os.path.abspath(path) for path in sys.path if _is_runfile_pkg(path)]
def _is_runfile_pkg(path):
for runfile_path in OP_RUNFILE_PATHS:
split_path = path.split(os.path.sep)
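        # The path qualifies when its trailing components match a known runfile package path.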
if split_path[-len(runfile_path) :] == runfile_path:
return True
return False
def _env_paths():
env = os.getenv("PYTHONPATH")
return env.split(os.path.pathsep) if env else []
# =================================================================
# Resolve deps
# =================================================================
def _resolve_deps(op, run, for_stage=False, continue_on_error=False):
resolve_context = op_dep.ResolveContext(run)
deps_attr = run.get("deps") or {}
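    # Resolution results are accumulated in the run's "deps" attr so that restarts can skip already-resolved sources.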
for dep in op.deps or []:
resolved_sources = deps_attr.setdefault(dep.resdef.name, {})
try:
_apply_resolve_dep_sources(
dep, resolve_context, run, for_stage, resolved_sources
)
except op_dep.OpDependencyError as e:
if not continue_on_error:
raise
log.warning("a dependency was not met: %s", e)
run.write_attr("deps", deps_attr)
def _apply_resolve_dep_sources(dep, resolve_context, run, for_stage, resolved):
log.info(loglib.dim("Resolving %s dependency"), dep.resdef.name)
for source in dep.resdef.sources:
if source.name in resolved:
log.info(
"Skipping resolution of %s because it's already resolved", source.name
)
continue
if for_stage and _is_operation_source(source):
log.info("Skipping resolution of %s because it's being staged", source.name)
continue
run_rel_resolved_paths = _resolve_dep_source(source, dep, resolve_context, run)
resolved[source.name] = source_info = {
"uri": source.uri,
"paths": run_rel_resolved_paths,
}
if dep.config:
source_info["config"] = dep.config
def _resolve_dep_source(source, dep, resolve_context, run):
resolved_abs_paths = op_dep.resolve_source(source, dep, resolve_context)
return [os.path.relpath(path, run.dir) for path in resolved_abs_paths]
def _is_operation_source(source):
return source.uri.startswith("operation:")
| apache-2.0 | -2,611,442,354,210,539,500 | 27.783109 | 88 | 0.584823 | false |
previtus/MGR-Project-Code | Settings/independent_experiments/effect_of_data_shuffling/shuffle_effective_1200.py | 1 | 3067 | def Setup(Settings,DefaultModel):
# shuffle_effective_1200.py
    # - in this case the shuffled variants are always better than the unshuffled ones
    # - among them, osm-only validates best, the img+osm mix is second and img-only is last
Settings["experiment_name"] = "Test_Shuffling_3 models vs 3 models_1200x_markable_299x299_shuffleNowInMod6"
Settings["graph_histories"] = ['together', [0,3], [1,4], [2,5],[0,1,2],[3,4,5]]
n=0
Settings["models"][n]["dataset_name"] = "1200x_markable_299x299" # "1200x_markable_299x299", "5556x_mark_res_299x299", "5556x_markable_640x640"
Settings["models"][n]["pixels"] = 299
Settings["models"][n]["model_type"] = 'img_osm_mix'
Settings["models"][n]["unique_id"] = 'notShuffled_mix'
Settings["models"][n]["top_repeat_FC_block"] = 2
Settings["models"][n]["epochs"] = 300
Settings["models"][n]["shuffle_dataset"] = False
Settings["models"].append(DefaultModel.copy())
n=1
Settings["models"][n]["dataset_pointer"] = 0 # 0 - reuse the first dataset
Settings["models"][n]["model_type"] = 'osm_only'
Settings["models"][n]["unique_id"] = 'notShuffled_osm_only'
Settings["models"][n]["top_repeat_FC_block"] = 2
Settings["models"][n]["epochs"] = 300
Settings["models"][n]["shuffle_dataset"] = False
Settings["models"].append(DefaultModel.copy())
n=2
Settings["models"][n]["dataset_pointer"] = 0 # 0 - reuse the first dataset
Settings["models"][n]["model_type"] = 'simple_cnn_with_top'
Settings["models"][n]["unique_id"] = 'notShuffled_img_only'
Settings["models"][n]["top_repeat_FC_block"] = 2
Settings["models"][n]["epochs"] = 300
Settings["models"][n]["shuffle_dataset"] = False
Settings["models"].append(DefaultModel.copy())
n=3
    Settings["models"][n]["dataset_pointer"] = -1 # -1 - do not reuse an earlier dataset; this model loads its own
Settings["models"][n]["dataset_name"] = "1200x_markable_299x299" # "1200x_markable_299x299", "5556x_mark_res_299x299", "5556x_markable_640x640"
Settings["models"][n]["pixels"] = 299
Settings["models"][n]["model_type"] = 'img_osm_mix'
Settings["models"][n]["unique_id"] = 'Shuffled_img_osm_mix'
Settings["models"][n]["top_repeat_FC_block"] = 2
Settings["models"][n]["epochs"] = 300
Settings["models"][n]["shuffle_dataset"] = True
Settings["models"].append(DefaultModel.copy())
n=4
Settings["models"][n]["dataset_pointer"] = 1
Settings["models"][n]["model_type"] = 'osm_only'
Settings["models"][n]["unique_id"] = 'Shuffled_osm_only'
Settings["models"][n]["top_repeat_FC_block"] = 2
Settings["models"][n]["epochs"] = 300
Settings["models"][n]["shuffle_dataset"] = True
Settings["models"].append(DefaultModel.copy())
n=5
Settings["models"][n]["dataset_pointer"] = 1
Settings["models"][n]["model_type"] = 'simple_cnn_with_top'
Settings["models"][n]["unique_id"] = 'Shuffled_img_only'
Settings["models"][n]["top_repeat_FC_block"] = 2
Settings["models"][n]["epochs"] = 300
Settings["models"][n]["shuffle_dataset"] = True
return Settings
| mit | 7,533,434,955,248,147,000 | 41.597222 | 147 | 0.632214 | false |
skasamatsu/vaspgrid | zav.py | 1 | 2327 | # Beware! Only tested for non-spin-polarized case
import re
import sys
import rlcompleter
import readline
#from numpy import *
from enterfi import enterfi
from outputfi import outputfi
gridfname = enterfi("Enter VASP field data (CHGCAR, LOCPOT, etc.)")
outfname = outputfi("Enter output file name ")
gridfi = open(gridfname,"r")
gridfi.readline() # Skip system name
# Read lattice scaling constant
li = gridfi.readline().split()
scale = [0.0,0.0,0.0]
if len(li) == 1:
li = float(li[0])
for i in range(3):
scale[i] = li
elif len(li) == 3:
    for i in range(3):
        scale[i] = float(li[i])
# Read lattice vectors
latcons = [[0.0,0.0,0.0],[0.0,0.0,0.0],[0.0,0.0,0.0]]
for i in range(3):
li = gridfi.readline().split()
for j in range(3):
latcons[i][j] = float(li[j])*scale[j]
print latcons
# The lattice must be orthorhombic in the z direction for a plane average along z to make sense.
assert abs(latcons[0][2]) <= 1.0e-8
assert abs(latcons[1][2]) <= 1.0e-8
assert abs(latcons[2][0]) <= 1.0e-8
assert abs(latcons[2][1]) <= 1.0e-8
# Read number of atoms
# Is this from vasp5 or vasp4? vasp5 has element names on the sixth line
# while vasp 4 does not.
li = gridfi.readline().split()
if re.match("[0-9]",li[0].strip()):
# It's vasp4
nspecs = len(li)
natoms = 0
for i in range(nspecs):
li[i] = int(li[i])
natoms = natoms + li[i]
else:
# It's vasp5. Read one more line.
li = gridfi.readline().split()
nspecs = len(li)
natoms = 0
for i in range(nspecs):
li[i] = int(li[i])
natoms = natoms + li[i]
print natoms
gridfi.readline() # Skip one line. It probably says "Direct".
for i in range(natoms+1):
gridfi.readline() # Skip the atom coordinates plus 1 blank line
# Read the grid dimensions
grid = gridfi.readline().split()
for i in range(len(grid)):
grid[i]=int(grid[i])
ngrid = grid[0] * grid[1] * grid[2]
dz = latcons[2][2]/grid[2]
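# Grid spacing along z (valid because the cell is orthorhombic in z, as asserted above).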
# Now read the rest of the file
data=gridfi.read().split()
for i in range(ngrid):
data[i]=float(data[i])
zavg=[]
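# VASP grid data is ordered with x varying fastest, then y, then z, so each
# consecutive block of grid[0]*grid[1] values is one z-plane; average each block.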
for i in range(grid[2]):
zavgtmp=0.0
for j in range(grid[0]*grid[1]):
zavgtmp+=data[i*grid[0]*grid[1]+j]
zavgtmp=zavgtmp/(grid[0]*grid[1])
zavg.append(zavgtmp)
outfi = open(outfname,"w")
for i in range(len(zavg)):
    outfi.write(str(dz*i) + " " + str(zavg[i]) + "\n")
outfi.close()
gridfi.close()
#print zavg
| mit | -8,205,839,579,492,689,000 | 23.755319 | 72 | 0.62312 | false |
harikishen/addons-server | src/olympia/addons/models.py | 1 | 81352 | # -*- coding: utf-8 -*-
import collections
import itertools
import json
import os
import posixpath
import re
import time
from operator import attrgetter
from datetime import datetime
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.core.files.storage import default_storage as storage
from django.db import models, transaction
from django.db.models import F, Max, Q, signals as dbsignals
from django.dispatch import receiver
from django.utils.functional import cached_property
from django.utils.translation import trans_real, ugettext_lazy as _
import caching.base as caching
from django_extensions.db.fields.json import JSONField
from django_statsd.clients import statsd
from jinja2.filters import do_dictsort
import olympia.core.logger
from olympia import activity, amo, core
from olympia.amo.models import (
SlugField, OnChangeMixin, ModelBase, ManagerBase, manual_order)
from olympia.access import acl
from olympia.addons.utils import (
get_creatured_ids, get_featured_ids, generate_addon_guid)
from olympia.amo import helpers
from olympia.amo.decorators import use_master, write
from olympia.amo.utils import (
attach_trans_dict, cache_ns_key, chunked,
no_translation, send_mail, slugify, sorted_groupby, timer, to_language,
urlparams, find_language, AMOJSONEncoder)
from olympia.amo.urlresolvers import get_outgoing_url, reverse
from olympia.constants.categories import CATEGORIES, CATEGORIES_BY_ID
from olympia.files.models import File
from olympia.files.utils import (
extract_translations, resolve_i18n_message, parse_addon)
from olympia.reviews.models import Review
from olympia.tags.models import Tag
from olympia.translations.fields import (
LinkifiedField, PurifiedField, save_signal, TranslatedField, Translation)
from olympia.users.models import UserForeignKey, UserProfile
from olympia.versions.compare import version_int
from olympia.versions.models import inherit_nomination, Version
from . import signals
log = olympia.core.logger.getLogger('z.addons')
def clean_slug(instance, slug_field='slug'):
"""Cleans a model instance slug.
This strives to be as generic as possible as it's used by Addons
and Collections, and maybe more in the future.
"""
slug = getattr(instance, slug_field, None) or instance.name
if not slug:
# Initialize the slug with what we have available: a name translation,
# or the id of the instance, or in last resort the model name.
translations = Translation.objects.filter(id=instance.name_id)
if translations.exists():
slug = translations[0]
elif instance.id:
slug = str(instance.id)
else:
slug = instance.__class__.__name__
max_length = instance._meta.get_field_by_name(slug_field)[0].max_length
slug = slugify(slug)[:max_length]
if DeniedSlug.blocked(slug):
slug = slug[:max_length - 1] + '~'
# The following trick makes sure we are using a manager that returns
# all the objects, as otherwise we could have a slug clash on our hands.
# Eg with the "Addon.objects" manager, which doesn't list deleted addons,
# we could have a "clean" slug which is in fact already assigned to an
# already existing (deleted) addon. Also, make sure we use the base class.
manager = models.Manager()
manager.model = instance._meta.proxy_for_model or instance.__class__
qs = manager.values_list(slug_field, flat=True) # Get list of all slugs.
if instance.id:
qs = qs.exclude(pk=instance.id) # Can't clash with itself.
# We first need to make sure there's a clash, before trying to find a
# suffix that is available. Eg, if there's a "foo-bar" slug, "foo" is still
# available.
clash = qs.filter(**{slug_field: slug})
if clash.exists():
# Leave space for 99 clashes.
slug = slugify(slug)[:max_length - 2]
# There is a clash, so find a suffix that will make this slug unique.
lookup = {'%s__startswith' % slug_field: slug}
clashes = qs.filter(**lookup)
# Try numbers between 1 and the number of clashes + 1 (+ 1 because we
# start the range at 1, not 0):
# if we have two clashes "foo1" and "foo2", we need to try "foox"
# for x between 1 and 3 to be absolutely sure to find an available one.
for idx in range(1, len(clashes) + 2):
new = ('%s%s' % (slug, idx))[:max_length]
if new not in clashes:
slug = new
break
else:
# This could happen. The current implementation (using
            # ``[:max_length - 2]``) only works for the first 100 clashes in the
            # worst case (if the slug is equal to or longer than
            # ``max_length - 2`` chars).
# After that, {verylongslug}-100 will be trimmed down to
# {verylongslug}-10, which is already assigned, but it's the last
# solution tested.
raise RuntimeError
setattr(instance, slug_field, slug)
return instance
class AddonQuerySet(caching.CachingQuerySet):
def id_or_slug(self, val):
"""Get add-ons by id or slug."""
if isinstance(val, basestring) and not val.isdigit():
return self.filter(slug=val)
return self.filter(id=val)
def enabled(self):
"""Get add-ons that haven't been disabled by their developer(s)."""
return self.filter(disabled_by_user=False)
def public(self):
"""Get public add-ons only"""
return self.filter(self.valid_q([amo.STATUS_PUBLIC]))
def valid(self):
"""Get valid, enabled add-ons only"""
return self.filter(self.valid_q(amo.VALID_ADDON_STATUSES))
def valid_and_disabled_and_pending(self):
"""
Get valid, pending, enabled and disabled add-ons.
Used to allow pending theme pages to still be viewed.
"""
statuses = (list(amo.VALID_ADDON_STATUSES) +
[amo.STATUS_DISABLED, amo.STATUS_PENDING])
return (self.filter(Q(status__in=statuses) | Q(disabled_by_user=True))
.exclude(type=amo.ADDON_EXTENSION,
_current_version__isnull=True))
def featured(self, app, lang=None, type=None):
"""
Filter for all featured add-ons for an application in all locales.
"""
ids = get_featured_ids(app, lang, type)
return manual_order(self.listed(app), ids, 'addons.id')
def listed(self, app, *status):
"""
Return add-ons that support a given ``app``, have a version with a file
matching ``status`` and are not disabled.
"""
if len(status) == 0:
status = [amo.STATUS_PUBLIC]
return self.filter(self.valid_q(status), appsupport__app=app.id)
def valid_q(self, status=None, prefix=''):
"""
Return a Q object that selects a valid Addon with the given statuses.
An add-on is valid if not disabled and has a current version.
``prefix`` can be used if you're not working with Addon directly and
need to hop across a join, e.g. ``prefix='addon__'`` in
CollectionAddon.
"""
if not status:
status = [amo.STATUS_PUBLIC]
def q(*args, **kw):
if prefix:
kw = dict((prefix + k, v) for k, v in kw.items())
return Q(*args, **kw)
return q(q(_current_version__isnull=False),
disabled_by_user=False, status__in=status)
class AddonManager(ManagerBase):
def __init__(self, include_deleted=False):
# DO NOT change the default value of include_deleted unless you've read
# through the comment just above the Addon managers
# declaration/instantiation and understand the consequences.
ManagerBase.__init__(self)
self.include_deleted = include_deleted
def get_queryset(self):
qs = super(AddonManager, self).get_queryset()
qs = qs._clone(klass=AddonQuerySet)
if not self.include_deleted:
qs = qs.exclude(status=amo.STATUS_DELETED)
return qs.transform(Addon.transformer)
def id_or_slug(self, val):
"""Get add-ons by id or slug."""
return self.get_queryset().id_or_slug(val)
def enabled(self):
"""Get add-ons that haven't been disabled by their developer(s)."""
return self.get_queryset().enabled()
def public(self):
"""Get public add-ons only"""
return self.get_queryset().public()
def valid(self):
"""Get valid, enabled add-ons only"""
return self.get_queryset().valid()
def valid_and_disabled_and_pending(self):
"""
Get valid, pending, enabled and disabled add-ons.
Used to allow pending theme pages to still be viewed.
"""
return self.get_queryset().valid_and_disabled_and_pending()
def featured(self, app, lang=None, type=None):
"""
Filter for all featured add-ons for an application in all locales.
"""
return self.get_queryset().featured(app, lang=lang, type=type)
def listed(self, app, *status):
"""
Return add-ons that support a given ``app``, have a version with a file
matching ``status`` and are not disabled.
"""
return self.get_queryset().listed(app, *status)
class Addon(OnChangeMixin, ModelBase):
STATUS_CHOICES = amo.STATUS_CHOICES_ADDON
guid = models.CharField(max_length=255, unique=True, null=True)
slug = models.CharField(max_length=30, unique=True, null=True)
name = TranslatedField(default=None)
default_locale = models.CharField(max_length=10,
default=settings.LANGUAGE_CODE,
db_column='defaultlocale')
type = models.PositiveIntegerField(
choices=amo.ADDON_TYPE.items(), db_column='addontype_id', default=0)
status = models.PositiveIntegerField(
choices=STATUS_CHOICES.items(), db_index=True, default=0)
icon_type = models.CharField(max_length=25, blank=True,
db_column='icontype')
homepage = TranslatedField()
support_email = TranslatedField(db_column='supportemail')
support_url = TranslatedField(db_column='supporturl')
description = PurifiedField(short=False)
summary = LinkifiedField()
developer_comments = PurifiedField(db_column='developercomments')
eula = PurifiedField()
privacy_policy = PurifiedField(db_column='privacypolicy')
the_reason = PurifiedField()
the_future = PurifiedField()
average_rating = models.FloatField(max_length=255, default=0, null=True,
db_column='averagerating')
bayesian_rating = models.FloatField(default=0, db_index=True,
db_column='bayesianrating')
total_reviews = models.PositiveIntegerField(default=0,
db_column='totalreviews')
weekly_downloads = models.PositiveIntegerField(
default=0, db_column='weeklydownloads', db_index=True)
total_downloads = models.PositiveIntegerField(
default=0, db_column='totaldownloads')
hotness = models.FloatField(default=0, db_index=True)
average_daily_downloads = models.PositiveIntegerField(default=0)
average_daily_users = models.PositiveIntegerField(default=0)
last_updated = models.DateTimeField(
db_index=True, null=True,
help_text='Last time this add-on had a file/version update')
disabled_by_user = models.BooleanField(default=False, db_index=True,
db_column='inactive')
view_source = models.BooleanField(default=True, db_column='viewsource')
public_stats = models.BooleanField(default=False, db_column='publicstats')
admin_review = models.BooleanField(default=False, db_column='adminreview')
external_software = models.BooleanField(default=False,
db_column='externalsoftware')
dev_agreement = models.BooleanField(
default=False, help_text="Has the dev agreement been signed?")
auto_repackage = models.BooleanField(
default=True, help_text='Automatically upgrade jetpack add-on to a '
'new sdk version?')
target_locale = models.CharField(
max_length=255, db_index=True, blank=True, null=True,
help_text="For dictionaries and language packs")
locale_disambiguation = models.CharField(
max_length=255, blank=True, null=True,
help_text="For dictionaries and language packs")
wants_contributions = models.BooleanField(default=False)
paypal_id = models.CharField(max_length=255, blank=True)
charity = models.ForeignKey('Charity', null=True)
suggested_amount = models.DecimalField(
max_digits=9, decimal_places=2, blank=True,
null=True, help_text=_('Users have the option of contributing more '
'or less than this amount.'))
total_contributions = models.DecimalField(max_digits=9, decimal_places=2,
blank=True, null=True)
annoying = models.PositiveIntegerField(
choices=amo.CONTRIB_CHOICES, default=0,
help_text=_(u'Users will always be asked in the Add-ons'
u' Manager (Firefox 4 and above).'
u' Only applies to desktop.'))
enable_thankyou = models.BooleanField(
default=False, help_text='Should the thank you note be sent to '
'contributors?')
thankyou_note = TranslatedField()
authors = models.ManyToManyField('users.UserProfile', through='AddonUser',
related_name='addons')
categories = models.ManyToManyField('Category', through='AddonCategory')
dependencies = models.ManyToManyField('self', symmetrical=False,
through='AddonDependency',
related_name='addons')
_current_version = models.ForeignKey(Version, db_column='current_version',
related_name='+', null=True,
on_delete=models.SET_NULL)
whiteboard = models.TextField(blank=True)
is_experimental = models.BooleanField(default=False,
db_column='experimental')
reputation = models.SmallIntegerField(default=0, null=True)
# The order of those managers is very important:
# The first one discovered, if it has "use_for_related_fields = True"
# (which it has if it's inheriting from caching.base.CachingManager), will
# be used for relations like `version.addon`. We thus want one that is NOT
# filtered in any case, we don't want a 500 if the addon is not found
# (because it has the status amo.STATUS_DELETED for example).
# The CLASS of the first one discovered will also be used for "many to many
# relations" like `collection.addons`. In that case, we do want the
# filtered version by default, to make sure we're not displaying stuff by
# mistake. You thus want the CLASS of the first one to be filtered by
# default.
# We don't control the instantiation, but AddonManager sets include_deleted
# to False by default, so filtering is enabled by default. This is also why
# it's not repeated for 'objects' below.
unfiltered = AddonManager(include_deleted=True)
objects = AddonManager()
class Meta:
db_table = 'addons'
@staticmethod
def __new__(cls, *args, **kw):
try:
type_idx = Addon._meta._type_idx
except AttributeError:
type_idx = (idx for idx, f in enumerate(Addon._meta.fields)
if f.attname == 'type').next()
Addon._meta._type_idx = type_idx
return object.__new__(cls)
def __unicode__(self):
return u'%s: %s' % (self.id, self.name)
def __init__(self, *args, **kw):
super(Addon, self).__init__(*args, **kw)
if self.type == amo.ADDON_PERSONA:
self.STATUS_CHOICES = Persona.STATUS_CHOICES
def save(self, **kw):
self.clean_slug()
super(Addon, self).save(**kw)
@classmethod
def search_public(cls):
"""Legacy search method for public add-ons.
Note that typically, code using this method do a search in ES but then
will fetch the relevant objects from the database using Addon.objects,
so deleted addons won't be returned no matter what ES returns. See
amo.search.ES and amo.search.ObjectSearchResults for more details.
In new code, use elasticsearch-dsl instead.
"""
return cls.search().filter(
is_disabled=False,
status__in=amo.REVIEWED_STATUSES,
current_version__exists=True)
@use_master
def clean_slug(self, slug_field='slug'):
if self.status == amo.STATUS_DELETED:
return
clean_slug(self, slug_field)
def is_soft_deleteable(self):
return self.status or Version.unfiltered.filter(addon=self).exists()
@transaction.atomic
def delete(self, msg='', reason=''):
# To avoid a circular import
from . import tasks
# Check for soft deletion path. Happens only if the addon status isn't
# 0 (STATUS_INCOMPLETE) with no versions.
soft_deletion = self.is_soft_deleteable()
if soft_deletion and self.status == amo.STATUS_DELETED:
# We're already done.
return
id = self.id
# Fetch previews before deleting the addon instance, so that we can
# pass the list of files to delete to the delete_preview_files task
# after the addon is deleted.
previews = list(Preview.objects.filter(addon__id=id)
.values_list('id', flat=True))
if soft_deletion:
# /!\ If we ever stop using soft deletion, and remove this code, we
# need to make sure that the logs created below aren't cascade
# deleted!
log.debug('Deleting add-on: %s' % self.id)
to = [settings.FLIGTAR]
user = core.get_user()
# Don't localize email to admins, use 'en-US' always.
with no_translation():
# The types are lazy translated in apps/constants/base.py.
atype = amo.ADDON_TYPE.get(self.type).upper()
context = {
'atype': atype,
'authors': [u.email for u in self.authors.all()],
'adu': self.average_daily_users,
'guid': self.guid,
'id': self.id,
'msg': msg,
'reason': reason,
'name': self.name,
'slug': self.slug,
'total_downloads': self.total_downloads,
'url': helpers.absolutify(self.get_url_path()),
'user_str': ("%s, %s (%s)" % (user.display_name or
user.username, user.email,
user.id) if user else "Unknown"),
}
email_msg = u"""
The following %(atype)s was deleted.
%(atype)s: %(name)s
URL: %(url)s
DELETED BY: %(user_str)s
ID: %(id)s
GUID: %(guid)s
AUTHORS: %(authors)s
TOTAL DOWNLOADS: %(total_downloads)s
AVERAGE DAILY USERS: %(adu)s
NOTES: %(msg)s
REASON GIVEN BY USER FOR DELETION: %(reason)s
""" % context
log.debug('Sending delete email for %(atype)s %(id)s' % context)
subject = 'Deleting %(atype)s %(slug)s (%(id)d)' % context
# Update or NULL out various fields.
models.signals.pre_delete.send(sender=Addon, instance=self)
self._reviews.all().delete()
# The last parameter is needed to automagically create an AddonLog.
activity.log_create(amo.LOG.DELETE_ADDON, self.pk,
unicode(self.guid), self)
self.update(status=amo.STATUS_DELETED, slug=None,
_current_version=None, modified=datetime.now())
models.signals.post_delete.send(sender=Addon, instance=self)
send_mail(subject, email_msg, recipient_list=to)
else:
# Real deletion path.
super(Addon, self).delete()
for preview in previews:
tasks.delete_preview_files.delay(preview)
return True
@classmethod
def initialize_addon_from_upload(cls, data, upload, channel):
fields = cls._meta.get_all_field_names()
guid = data.get('guid')
old_guid_addon = None
if guid: # It's an extension.
# Reclaim GUID from deleted add-on.
try:
old_guid_addon = Addon.unfiltered.get(guid=guid)
old_guid_addon.update(guid=None)
except ObjectDoesNotExist:
pass
generate_guid = (
not data.get('guid', None) and
data.get('is_webextension', False)
)
if generate_guid:
data['guid'] = guid = generate_addon_guid()
data = cls.resolve_webext_translations(data, upload)
addon = Addon(**dict((k, v) for k, v in data.items() if k in fields))
addon.status = amo.STATUS_NULL
locale_is_set = (addon.default_locale and
addon.default_locale in (
settings.AMO_LANGUAGES +
settings.HIDDEN_LANGUAGES) and
data.get('default_locale') == addon.default_locale)
if not locale_is_set:
addon.default_locale = to_language(trans_real.get_language())
addon.save()
if old_guid_addon:
old_guid_addon.update(guid='guid-reused-by-pk-{}'.format(addon.pk))
old_guid_addon.save()
return addon
@classmethod
def create_addon_from_upload_data(cls, data, upload, channel, user=None,
**kwargs):
addon = cls.initialize_addon_from_upload(data, upload, channel,
**kwargs)
AddonUser(addon=addon, user=user).save()
return addon
@classmethod
def from_upload(cls, upload, platforms, source=None,
channel=amo.RELEASE_CHANNEL_LISTED, parsed_data=None):
if not parsed_data:
parsed_data = parse_addon(upload)
addon = cls.initialize_addon_from_upload(parsed_data, upload, channel)
if upload.validation_timeout:
addon.update(admin_review=True)
Version.from_upload(upload, addon, platforms, source=source,
channel=channel)
activity.log_create(amo.LOG.CREATE_ADDON, addon)
log.debug('New addon %r from %r' % (addon, upload))
return addon
@classmethod
def resolve_webext_translations(cls, data, upload):
"""Resolve all possible translations from an add-on.
This returns a modified `data` dictionary accordingly with proper
translations filled in.
"""
default_locale = find_language(data.get('default_locale'))
if not data.get('is_webextension') or not default_locale:
# Don't change anything if we don't meet the requirements
return data
fields = ('name', 'homepage', 'summary')
messages = extract_translations(upload)
for field in fields:
data[field] = {
locale: resolve_i18n_message(
data[field],
locale=locale,
default_locale=default_locale,
messages=messages)
for locale in messages
}
return data
def get_url_path(self, more=False, add_prefix=True):
if not self.current_version:
return ''
# If more=True you get the link to the ajax'd middle chunk of the
# detail page.
view = 'addons.detail_more' if more else 'addons.detail'
return reverse(view, args=[self.slug], add_prefix=add_prefix)
def get_dev_url(self, action='edit', args=None, prefix_only=False):
args = args or []
prefix = 'devhub'
type_ = 'themes' if self.type == amo.ADDON_PERSONA else 'addons'
if not prefix_only:
prefix += '.%s' % type_
view_name = '{prefix}.{action}'.format(prefix=prefix,
action=action)
return reverse(view_name, args=[self.slug] + args)
def get_detail_url(self, action='detail', args=None):
if args is None:
args = []
return reverse('addons.%s' % action, args=[self.slug] + args)
def meet_the_dev_url(self):
return reverse('addons.meet', args=[self.slug])
@property
def reviews_url(self):
return helpers.url('addons.reviews.list', self.slug)
def get_ratings_url(self, action='list', args=None, add_prefix=True):
return reverse('ratings.themes.%s' % action,
args=[self.slug] + (args or []),
add_prefix=add_prefix)
@classmethod
def get_type_url(cls, type):
try:
type = amo.ADDON_SLUGS[type]
except KeyError:
return None
return reverse('browse.%s' % type)
def type_url(self):
"""The url for this add-on's type."""
return Addon.get_type_url(self.type)
def share_url(self):
return reverse('addons.share', args=[self.slug])
@cached_property
def listed_authors(self):
return UserProfile.objects.filter(
addons=self,
addonuser__listed=True).order_by('addonuser__position')
@classmethod
def get_fallback(cls):
return cls._meta.get_field('default_locale')
@property
def reviews(self):
return Review.objects.filter(addon=self, reply_to=None)
def get_category(self, app_id):
categories = self.app_categories.get(amo.APP_IDS.get(app_id))
return categories[0] if categories else None
def language_ascii(self):
lang = trans_real.to_language(self.default_locale)
return settings.LANGUAGES.get(lang)
@property
def valid_file_statuses(self):
if self.status == amo.STATUS_PUBLIC:
return [amo.STATUS_PUBLIC]
return amo.VALID_FILE_STATUSES
def find_latest_public_listed_version(self):
"""Retrieve the latest public listed version of an addon.
If the add-on is not public, it can return a listed version awaiting
review (since non-public add-ons should not have public versions)."""
if self.type == amo.ADDON_PERSONA:
return
try:
statuses = self.valid_file_statuses
status_list = ','.join(map(str, statuses))
fltr = {
'channel': amo.RELEASE_CHANNEL_LISTED,
'files__status__in': statuses
}
return self.versions.no_cache().filter(**fltr).extra(
where=["""
NOT EXISTS (
SELECT 1 FROM files AS f2
WHERE f2.version_id = versions.id AND
f2.status NOT IN (%s))
""" % status_list])[0]
except (IndexError, Version.DoesNotExist):
return None
def find_latest_version(
self, channel, exclude=(amo.STATUS_DISABLED, amo.STATUS_BETA)):
"""Retrieve the latest version of an add-on for the specified channel.
If channel is None either channel is returned.
Keyword arguments:
exclude -- exclude versions for which all files have one
of those statuses (default STATUS_DISABLED, STATUS_BETA)."""
# If the add-on is deleted or hasn't been saved yet, it should not
# have a latest version.
if not self.id or self.status == amo.STATUS_DELETED:
return None
# We can't use .exclude(files__status=excluded_statuses) because that
# would exclude a version if *any* of its files match but if there is
# only one file that doesn't have one of the excluded statuses it
# should be enough for that version to be considered.
statuses_no_disabled_or_beta = (
set(amo.STATUS_CHOICES_FILE.keys()) - set(exclude))
try:
latest_qs = (
Version.objects.filter(addon=self)
.filter(files__status__in=statuses_no_disabled_or_beta))
if channel is not None:
latest_qs = latest_qs.filter(channel=channel)
latest = latest_qs.latest()
latest.addon = self
except Version.DoesNotExist:
latest = None
return latest
@write
def update_version(self, ignore=None, _signal=True):
"""
Update the current_version field on this add-on if necessary.
Returns True if we updated the current_version field.
        The optional ``ignore`` parameter, if present, is a version
to not consider as part of the update, since it may be in the
process of being deleted.
        Pass ``_signal=False`` if you want no signals fired at all.
"""
if self.is_persona():
            # Themes should only have a single version. So, if there is no
# current version set, we just need to copy over the latest version
# to current_version and we should never have to set it again.
if not self._current_version:
latest_version = self.find_latest_version(None)
if latest_version:
self.update(_current_version=latest_version, _signal=False)
return True
return False
new_current_version = self.find_latest_public_listed_version()
updated = {}
send_signal = False
if self._current_version != new_current_version:
updated['_current_version'] = new_current_version
send_signal = True
# update_version can be called by a post_delete signal (such
# as File's) when deleting a version. If so, we should avoid putting
# that version-being-deleted in any fields.
if ignore is not None:
updated = {k: v for k, v in updated.iteritems() if v != ignore}
if updated:
diff = [self._current_version, new_current_version]
# Pass along _signal to the .update() to prevent it from firing
# signals if we don't want them.
updated['_signal'] = _signal
try:
self.update(**updated)
if send_signal and _signal:
signals.version_changed.send(sender=self)
log.info(u'Version changed from current: %s to %s '
u'for addon %s'
% tuple(diff + [self]))
except Exception, e:
log.error(u'Could not save version changes current: %s to %s '
u'for addon %s (%s)' %
tuple(diff + [self, e]))
return bool(updated)
def increment_theme_version_number(self):
"""Increment theme version number by 1."""
latest_version = self.find_latest_version(None)
version = latest_version or self.current_version
version.version = str(float(version.version) + 1)
# Set the current version.
self.update(_current_version=version.save())
def invalidate_d2c_versions(self):
"""Invalidates the cache of compatible versions.
Call this when there is an event that may change what compatible
versions are returned so they are recalculated.
"""
key = cache_ns_key('d2c-versions:%s' % self.id, increment=True)
log.info('Incrementing d2c-versions namespace for add-on [%s]: %s' % (
self.id, key))
@property
def current_version(self):
"""Return the latest public listed version of an addon
If the add-on is not public, it can return a listed version awaiting
review (since non-public add-ons should not have public versions).
If the add-on has not been created yet or is deleted, it returns None.
"""
if not self.id or self.status == amo.STATUS_DELETED:
return None
try:
return self._current_version
except ObjectDoesNotExist:
pass
return None
@cached_property
def latest_unlisted_version(self):
"""Shortcut property for Addon.find_latest_version(
channel=RELEASE_CHANNEL_UNLISTED)."""
return self.find_latest_version(channel=amo.RELEASE_CHANNEL_UNLISTED)
@cached_property
def binary(self):
"""Returns if the current version has binary files."""
version = self.current_version
if version:
return version.files.filter(binary=True).exists()
return False
@cached_property
def binary_components(self):
"""Returns if the current version has files with binary_components."""
version = self.current_version
if version:
return version.files.filter(binary_components=True).exists()
return False
def get_icon_dir(self):
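        # Icon files are sharded into one directory per 1000 add-on ids to keep directories small.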
return os.path.join(helpers.user_media_path('addon_icons'),
'%s' % (self.id / 1000))
def get_icon_url(self, size, use_default=True):
"""
Returns the addon's icon url according to icon_type.
If it's a persona, it will return the icon_url of the associated
Persona instance.
If it's a theme and there is no icon set, it will return the default
theme icon.
If it's something else, it will return the default add-on icon, unless
use_default is False, in which case it will return None.
"""
icon_type_split = []
if self.icon_type:
icon_type_split = self.icon_type.split('/')
# Get the closest allowed size without going over
if (size not in amo.ADDON_ICON_SIZES and
size >= amo.ADDON_ICON_SIZES[0]):
size = [s for s in amo.ADDON_ICON_SIZES if s < size][-1]
elif size < amo.ADDON_ICON_SIZES[0]:
size = amo.ADDON_ICON_SIZES[0]
# Figure out what to return for an image URL
if self.type == amo.ADDON_PERSONA:
return self.persona.icon_url
if not self.icon_type:
if self.type == amo.ADDON_THEME:
icon = amo.ADDON_ICONS[amo.ADDON_THEME]
return "%simg/icons/%s" % (settings.STATIC_URL, icon)
else:
if not use_default:
return None
return self.get_default_icon_url(size)
elif icon_type_split[0] == 'icon':
return '{0}img/addon-icons/{1}-{2}.png'.format(
settings.STATIC_URL,
icon_type_split[1],
size
)
else:
# [1] is the whole ID, [2] is the directory
split_id = re.match(r'((\d*?)\d{1,3})$', str(self.id))
modified = int(time.mktime(self.modified.timetuple()))
path = '/'.join([
split_id.group(2) or '0',
'{0}-{1}.png?modified={2}'.format(self.id, size, modified),
])
return helpers.user_media_url('addon_icons') + path
def get_default_icon_url(self, size):
return '{0}img/addon-icons/{1}-{2}.png'.format(
settings.STATIC_URL, 'default', size
)
@write
def update_status(self, ignore_version=None):
self.reload()
if (self.status in [amo.STATUS_NULL, amo.STATUS_DELETED] or
self.is_disabled or self.is_persona()):
self.update_version(ignore=ignore_version)
return
versions = self.versions.filter(channel=amo.RELEASE_CHANNEL_LISTED)
status = None
if not versions.exists():
status = amo.STATUS_NULL
reason = 'no listed versions'
elif not versions.filter(
files__status__in=amo.VALID_FILE_STATUSES).exists():
status = amo.STATUS_NULL
reason = 'no listed version with valid file'
elif (self.status == amo.STATUS_PUBLIC and
not versions.filter(files__status=amo.STATUS_PUBLIC).exists()):
if versions.filter(
files__status=amo.STATUS_AWAITING_REVIEW).exists():
status = amo.STATUS_NOMINATED
reason = 'only an unreviewed file'
else:
status = amo.STATUS_NULL
reason = 'no reviewed files'
elif self.status == amo.STATUS_PUBLIC:
latest_version = self.find_latest_version(
channel=amo.RELEASE_CHANNEL_LISTED)
if (latest_version and latest_version.has_files and
(latest_version.all_files[0].status ==
amo.STATUS_AWAITING_REVIEW)):
# Addon is public, but its latest file is not (it's the case on
# a new file upload). So, call update, to trigger watch_status,
# which takes care of setting nomination time when needed.
status = self.status
reason = 'triggering watch_status'
if status is not None:
log.info('Changing add-on status [%s]: %s => %s (%s).'
% (self.id, self.status, status, reason))
self.update(status=status)
activity.log_create(amo.LOG.CHANGE_STATUS,
self.get_status_display(), self)
self.update_version(ignore=ignore_version)
@staticmethod
def attach_related_versions(addons, addon_dict=None):
if addon_dict is None:
addon_dict = dict((a.id, a) for a in addons)
all_ids = set(filter(None, (a._current_version_id for a in addons)))
versions = list(Version.objects.filter(id__in=all_ids).order_by())
for version in versions:
try:
addon = addon_dict[version.addon_id]
except KeyError:
log.debug('Version %s has an invalid add-on id.' % version.id)
continue
if addon._current_version_id == version.id:
addon._current_version = version
version.addon = addon
@staticmethod
def attach_listed_authors(addons, addon_dict=None):
if addon_dict is None:
addon_dict = dict((a.id, a) for a in addons)
qs = (UserProfile.objects.no_cache()
.filter(addons__in=addons, addonuser__listed=True)
.extra(select={'addon_id': 'addons_users.addon_id',
'position': 'addons_users.position'}))
qs = sorted(qs, key=lambda u: (u.addon_id, u.position))
for addon_id, users in itertools.groupby(qs, key=lambda u: u.addon_id):
addon_dict[addon_id].listed_authors = list(users)
# FIXME: set listed_authors to empty list on addons without listed
# authors.
@staticmethod
def attach_previews(addons, addon_dict=None, no_transforms=False):
if addon_dict is None:
addon_dict = dict((a.id, a) for a in addons)
qs = Preview.objects.filter(addon__in=addons,
position__gte=0).order_by()
if no_transforms:
qs = qs.no_transforms()
qs = sorted(qs, key=lambda x: (x.addon_id, x.position, x.created))
for addon, previews in itertools.groupby(qs, lambda x: x.addon_id):
addon_dict[addon].all_previews = list(previews)
# FIXME: set all_previews to empty list on addons without previews.
@staticmethod
def attach_static_categories(addons, addon_dict=None):
if addon_dict is None:
addon_dict = dict((a.id, a) for a in addons)
qs = AddonCategory.objects.values_list(
'addon', 'category').filter(addon__in=addon_dict)
qs = sorted(qs, key=lambda x: (x[0], x[1]))
for addon_id, cats_iter in itertools.groupby(qs, key=lambda x: x[0]):
# The second value of each tuple in cats_iter are the category ids
# we want.
addon_dict[addon_id].category_ids = [c[1] for c in cats_iter]
addon_dict[addon_id].all_categories = [
CATEGORIES_BY_ID[cat_id] for cat_id
in addon_dict[addon_id].category_ids
if cat_id in CATEGORIES_BY_ID]
@staticmethod
@timer
def transformer(addons):
if not addons:
return
addon_dict = {a.id: a for a in addons}
# Attach categories. This needs to be done before separating addons
# from personas, because Personas need categories for the theme_data
        # JSON dump; the rest of the add-ons need their first category to be
        # displayed on the detail page / API.
Addon.attach_static_categories(addons, addon_dict=addon_dict)
personas = [a for a in addons if a.type == amo.ADDON_PERSONA]
addons = [a for a in addons if a.type != amo.ADDON_PERSONA]
# Set _current_version.
Addon.attach_related_versions(addons, addon_dict=addon_dict)
# Attach listed authors.
Addon.attach_listed_authors(addons, addon_dict=addon_dict)
# Persona-specific stuff
for persona in Persona.objects.no_cache().filter(addon__in=personas):
addon = addon_dict[persona.addon_id]
addon.persona = persona
addon.weekly_downloads = persona.popularity
# Attach previews.
Addon.attach_previews(addons, addon_dict=addon_dict)
return addon_dict
@property
def show_beta(self):
return self.status == amo.STATUS_PUBLIC and self.current_beta_version
def show_adu(self):
return self.type != amo.ADDON_SEARCH
@cached_property
def current_beta_version(self):
"""Retrieves the latest version of an addon, in the beta channel."""
versions = self.versions.filter(files__status=amo.STATUS_BETA)[:1]
if versions:
return versions[0]
@property
def icon_url(self):
return self.get_icon_url(32)
def authors_other_addons(self, app=None):
"""
Return other addons by the author(s) of this addon,
optionally takes an app.
"""
if app:
qs = Addon.objects.listed(app)
else:
qs = Addon.objects.valid()
return (qs.exclude(id=self.id)
.filter(addonuser__listed=True,
authors__in=self.listed_authors)
.distinct())
@property
def contribution_url(self, lang=settings.LANGUAGE_CODE,
app=settings.DEFAULT_APP):
return reverse('addons.contribute', args=[self.slug])
@property
def thumbnail_url(self):
"""
Returns the addon's thumbnail url or a default.
"""
try:
preview = self.all_previews[0]
return preview.thumbnail_url
except IndexError:
return settings.STATIC_URL + '/img/icons/no-preview.png'
def can_request_review(self):
"""Return whether an add-on can request a review or not."""
if (self.is_disabled or
self.status in (amo.STATUS_PUBLIC,
amo.STATUS_NOMINATED,
amo.STATUS_DELETED)):
return False
latest_version = self.find_latest_version(
amo.RELEASE_CHANNEL_LISTED, exclude=(amo.STATUS_BETA,))
return latest_version is not None and latest_version.files.exists()
def is_persona(self):
return self.type == amo.ADDON_PERSONA
@property
def is_disabled(self):
"""True if this Addon is disabled.
It could be disabled by an admin or disabled by the developer
"""
return self.status == amo.STATUS_DISABLED or self.disabled_by_user
@property
def is_deleted(self):
return self.status == amo.STATUS_DELETED
def is_unreviewed(self):
return self.status in amo.UNREVIEWED_ADDON_STATUSES
def is_public(self):
return self.status == amo.STATUS_PUBLIC and not self.disabled_by_user
def has_complete_metadata(self, has_listed_versions=None):
"""See get_required_metadata for has_listed_versions details."""
return all(self.get_required_metadata(
has_listed_versions=has_listed_versions))
def get_required_metadata(self, has_listed_versions=None):
"""If has_listed_versions is not specified this method will return the
current (required) metadata (truthy values if present) for this Addon.
If has_listed_versions is specified then the method will act as if
Addon.has_listed_versions() returns that value. Used to predict if the
addon will require extra metadata before a version is created."""
if has_listed_versions is None:
has_listed_versions = self.has_listed_versions()
if not has_listed_versions:
# Add-ons with only unlisted versions have no required metadata.
return []
# We need to find out if the add-on has a license set. We prefer to
# check the current_version first because that's what would be used for
# public pages, but if there isn't any listed version will do.
version = self.current_version or self.find_latest_version(
channel=amo.RELEASE_CHANNEL_LISTED, exclude=())
return [
self.all_categories,
self.summary,
(version and version.license),
]
def should_redirect_to_submit_flow(self):
return (
self.status == amo.STATUS_NULL and
not self.has_complete_metadata() and
self.find_latest_version(channel=amo.RELEASE_CHANNEL_LISTED))
def is_pending(self):
return self.status == amo.STATUS_PENDING
def is_rejected(self):
return self.status == amo.STATUS_REJECTED
def can_be_deleted(self):
return not self.is_deleted
def has_listed_versions(self):
return self.versions.filter(
channel=amo.RELEASE_CHANNEL_LISTED).exists()
def has_unlisted_versions(self):
return self.versions.filter(
channel=amo.RELEASE_CHANNEL_UNLISTED).exists()
@classmethod
def featured_random(cls, app, lang):
return get_featured_ids(app, lang)
@property
def requires_restart(self):
"""Whether the add-on current version requires a browser restart to
work."""
return self.current_version and self.current_version.requires_restart
def is_featured(self, app, lang=None):
"""Is add-on globally featured for this app and language?"""
if app:
return self.id in get_featured_ids(app, lang)
def has_full_profile(self):
"""Is developer profile public (completed)?"""
return self.the_reason and self.the_future
def has_profile(self):
"""Is developer profile (partially or entirely) completed?"""
return self.the_reason or self.the_future
@cached_property
def tags_partitioned_by_developer(self):
"""Returns a tuple of developer tags and user tags for this addon."""
tags = self.tags.not_denied()
if self.is_persona:
return [], tags
user_tags = tags.exclude(addon_tags__user__in=self.listed_authors)
dev_tags = tags.exclude(id__in=[t.id for t in user_tags])
return dev_tags, user_tags
@cached_property
def compatible_apps(self):
"""Shortcut to get compatible apps for the current version."""
# Search providers and personas don't list their supported apps.
if self.type in amo.NO_COMPAT:
return dict((app, None) for app in
amo.APP_TYPE_SUPPORT[self.type])
if self.current_version:
return self.current_version.compatible_apps
else:
return {}
def accepts_compatible_apps(self):
"""True if this add-on lists compatible apps."""
return self.type not in amo.NO_COMPAT
def incompatible_latest_apps(self):
"""Returns a list of applications with which this add-on is
incompatible (based on the latest version of each app).
"""
return [app for app, ver in self.compatible_apps.items() if ver and
version_int(ver.max.version) < version_int(app.latest_version)]
def has_author(self, user, roles=None):
"""True if ``user`` is an author with any of the specified ``roles``.
``roles`` should be a list of valid roles (see amo.AUTHOR_ROLE_*). If
not specified, has_author will return true if the user has any role.
"""
if user is None or user.is_anonymous():
return False
if roles is None:
roles = dict(amo.AUTHOR_CHOICES).keys()
return AddonUser.objects.filter(addon=self, user=user,
role__in=roles).exists()
@property
def takes_contributions(self):
return (self.status == amo.STATUS_PUBLIC and
self.wants_contributions and
(self.paypal_id or self.charity_id))
@classmethod
def _last_updated_queries(cls):
"""
Get the queries used to calculate addon.last_updated.
"""
status_change = Max('versions__files__datestatuschanged')
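        # "public": public add-ons, keyed to the latest status change of their public files.
        # "exp": add-ons outside the valid statuses, keyed to their latest valid file's creation date.
        # "personas": themes, keyed to their own creation date.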
public = (
Addon.objects.no_cache().filter(
status=amo.STATUS_PUBLIC,
versions__files__status=amo.STATUS_PUBLIC)
.exclude(type=amo.ADDON_PERSONA)
.values('id').annotate(last_updated=status_change))
stati = amo.VALID_ADDON_STATUSES
exp = (Addon.objects.no_cache().exclude(status__in=stati)
.filter(versions__files__status__in=amo.VALID_FILE_STATUSES)
.values('id')
.annotate(last_updated=Max('versions__files__created')))
personas = (Addon.objects.no_cache().filter(type=amo.ADDON_PERSONA)
.extra(select={'last_updated': 'created'}))
return dict(public=public, exp=exp, personas=personas)
@cached_property
def all_categories(self):
return filter(
None, [cat.to_static_category() for cat in self.categories.all()])
@cached_property
def all_previews(self):
return list(self.get_previews())
def get_previews(self):
"""Exclude promo graphics."""
return self.previews.exclude(position=-1)
@property
def app_categories(self):
app_cats = {}
categories = sorted_groupby(
sorted(self.all_categories, key=attrgetter('weight', 'name')),
key=lambda x: amo.APP_IDS.get(x.application))
for app, cats in categories:
app_cats[app] = list(cats)
return app_cats
def remove_locale(self, locale):
"""NULLify strings in this locale for the add-on and versions."""
for o in itertools.chain([self], self.versions.all()):
Translation.objects.remove_for(o, locale)
def get_localepicker(self):
"""For language packs, gets the contents of localepicker."""
if (self.type == amo.ADDON_LPAPP and
self.status == amo.STATUS_PUBLIC and
self.current_version):
files = (self.current_version.files
.filter(platform=amo.PLATFORM_ANDROID.id))
try:
return unicode(files[0].get_localepicker(), 'utf-8')
except IndexError:
pass
return ''
def can_review(self, user):
return not(user and self.has_author(user))
@property
def all_dependencies(self):
"""Return all the (valid) add-ons this add-on depends on."""
return list(self.dependencies.valid().all()[:3])
def check_ownership(self, request, require_owner, require_author,
ignore_disabled, admin):
"""
Used by acl.check_ownership to see if request.user has permissions for
the addon.
"""
if require_author:
require_owner = False
ignore_disabled = True
admin = False
return acl.check_addon_ownership(request, self, admin=admin,
viewer=(not require_owner),
ignore_disabled=ignore_disabled)
@property
def feature_compatibility(self):
try:
feature_compatibility = self.addonfeaturecompatibility
except AddonFeatureCompatibility.DoesNotExist:
# If it does not exist, return a blank one, no need to create. It's
            # the caller's responsibility to create one when needed to avoid
# unexpected database writes.
feature_compatibility = AddonFeatureCompatibility()
return feature_compatibility
def should_show_permissions(self, version=None):
version = version or self.current_version
return (self.type == amo.ADDON_EXTENSION and
version and version.all_files[0] and
(not version.all_files[0].is_webextension or
version.all_files[0].webext_permissions))
dbsignals.pre_save.connect(save_signal, sender=Addon,
dispatch_uid='addon_translations')
@receiver(signals.version_changed, dispatch_uid='version_changed')
def version_changed(sender, **kw):
from . import tasks
tasks.version_changed.delay(sender.id)
@receiver(dbsignals.post_save, sender=Addon,
dispatch_uid='addons.search.index')
def update_search_index(sender, instance, **kw):
from . import tasks
if not kw.get('raw'):
tasks.index_addons.delay([instance.id])
@Addon.on_change
def watch_status(old_attr=None, new_attr=None, instance=None,
sender=None, **kwargs):
"""
Set nomination date if the addon is new in queue or updating.
The nomination date cannot be reset, say, when a developer cancels
their request for review and re-requests review.
If a version is rejected after nomination, the developer has
to upload a new version.
"""
if old_attr is None:
old_attr = {}
if new_attr is None:
new_attr = {}
new_status = new_attr.get('status')
old_status = old_attr.get('status')
latest_version = instance.find_latest_version(
channel=amo.RELEASE_CHANNEL_LISTED)
if (new_status not in amo.VALID_ADDON_STATUSES or
not new_status or not latest_version):
return
if old_status not in amo.UNREVIEWED_ADDON_STATUSES:
# New: will (re)set nomination only if it's None.
latest_version.reset_nomination_time()
elif latest_version.has_files:
# Updating: inherit nomination from last nominated version.
# Calls `inherit_nomination` manually given that signals are
# deactivated to avoid circular calls.
inherit_nomination(None, latest_version)
@Addon.on_change
def watch_disabled(old_attr=None, new_attr=None, instance=None, sender=None,
**kwargs):
if old_attr is None:
old_attr = {}
if new_attr is None:
new_attr = {}
attrs = dict((k, v) for k, v in old_attr.items()
if k in ('disabled_by_user', 'status'))
if Addon(**attrs).is_disabled and not instance.is_disabled:
for f in File.objects.filter(version__addon=instance.id):
f.unhide_disabled_file()
if instance.is_disabled and not Addon(**attrs).is_disabled:
for f in File.objects.filter(version__addon=instance.id):
f.hide_disabled_file()
@Addon.on_change
def watch_developer_notes(old_attr=None, new_attr=None, instance=None,
sender=None, **kwargs):
if old_attr is None:
old_attr = {}
if new_attr is None:
new_attr = {}
whiteboard_changed = (
new_attr.get('whiteboard') and
old_attr.get('whiteboard') != new_attr.get('whiteboard'))
developer_comments_changed = (new_attr.get('_developer_comments_cache') and
old_attr.get('_developer_comments_cache') !=
new_attr.get('_developer_comments_cache'))
if whiteboard_changed or developer_comments_changed:
instance.versions.update(has_info_request=False)
def attach_translations(addons):
"""Put all translations into a translations dict."""
attach_trans_dict(Addon, addons)
def attach_tags(addons):
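    """Attach a tag_list of tag text strings to each add-on in addons."""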
addon_dict = dict((a.id, a) for a in addons)
qs = (Tag.objects.not_denied().filter(addons__in=addon_dict)
.values_list('addons__id', 'tag_text'))
for addon, tags in sorted_groupby(qs, lambda x: x[0]):
addon_dict[addon].tag_list = [t[1] for t in tags]
class Persona(caching.CachingMixin, models.Model):
"""Personas-specific additions to the add-on model."""
STATUS_CHOICES = amo.STATUS_CHOICES_PERSONA
addon = models.OneToOneField(Addon, null=True)
persona_id = models.PositiveIntegerField(db_index=True)
# name: deprecated in favor of Addon model's name field
# description: deprecated, ditto
header = models.CharField(max_length=64, null=True)
footer = models.CharField(max_length=64, null=True)
accentcolor = models.CharField(max_length=10, null=True)
textcolor = models.CharField(max_length=10, null=True)
author = models.CharField(max_length=255, null=True)
display_username = models.CharField(max_length=255, null=True)
submit = models.DateTimeField(null=True)
approve = models.DateTimeField(null=True)
movers = models.FloatField(null=True, db_index=True)
popularity = models.IntegerField(null=False, default=0, db_index=True)
license = models.PositiveIntegerField(
choices=amo.PERSONA_LICENSES_CHOICES, null=True, blank=True)
# To spot duplicate submissions.
checksum = models.CharField(max_length=64, blank=True, default='')
dupe_persona = models.ForeignKey('self', null=True)
objects = caching.CachingManager()
class Meta:
db_table = 'personas'
def __unicode__(self):
return unicode(self.addon.name)
def is_new(self):
return self.persona_id == 0
def _image_url(self, filename):
host = helpers.user_media_url('addons')
image_url = posixpath.join(host, str(self.addon.id), filename or '')
# TODO: Bust the cache on the hash of the image contents or something.
if self.addon.modified is not None:
modified = int(time.mktime(self.addon.modified.timetuple()))
else:
modified = 0
return '%s?%s' % (image_url, modified)
def _image_path(self, filename):
return os.path.join(helpers.user_media_path('addons'),
str(self.addon.id), filename)
@cached_property
def thumb_url(self):
"""
Handles deprecated GetPersonas URL.
In days of yore, preview.jpg used to be a separate image.
In modern days, we use the same image for big preview + thumb.
"""
if self.is_new():
return self._image_url('preview.png')
else:
return self._image_url('preview.jpg')
@cached_property
def thumb_path(self):
"""
Handles deprecated GetPersonas path.
In days of yore, preview.jpg used to be a separate image.
In modern days, we use the same image for big preview + thumb.
"""
if self.is_new():
return self._image_path('preview.png')
else:
return self._image_path('preview.jpg')
@cached_property
def icon_url(self):
"""URL to personas square preview."""
if self.is_new():
return self._image_url('icon.png')
else:
return self._image_url('preview_small.jpg')
@cached_property
def icon_path(self):
"""Path to personas square preview."""
if self.is_new():
return self._image_path('icon.png')
else:
return self._image_path('preview_small.jpg')
@cached_property
def preview_url(self):
"""URL to Persona's big, 680px, preview."""
if self.is_new():
return self._image_url('preview.png')
else:
return self._image_url('preview_large.jpg')
@cached_property
def preview_path(self):
"""Path to Persona's big, 680px, preview."""
if self.is_new():
return self._image_path('preview.png')
else:
return self._image_path('preview_large.jpg')
@cached_property
def header_url(self):
return self._image_url(self.header)
@cached_property
def footer_url(self):
return self.footer and self._image_url(self.footer) or ''
@cached_property
def header_path(self):
return self._image_path(self.header)
@cached_property
def footer_path(self):
return self.footer and self._image_path(self.footer) or ''
@cached_property
def update_url(self):
locale = settings.LANGUAGE_URL_MAP.get(trans_real.get_language())
return settings.NEW_PERSONAS_UPDATE_URL % {
'locale': locale or settings.LANGUAGE_CODE,
'id': self.addon.id
}
@cached_property
def theme_data(self):
"""Theme JSON Data for Browser/extension preview."""
def hexcolor(color):
return '#%s' % color
addon = self.addon
return {
'id': unicode(self.addon.id), # Personas dislikes ints
'name': unicode(addon.name),
'accentcolor': hexcolor(self.accentcolor),
'textcolor': hexcolor(self.textcolor),
'category': (unicode(addon.all_categories[0].name) if
addon.all_categories else ''),
# TODO: Change this to be `addons_users.user.display_name`.
'author': self.display_username,
'description': unicode(addon.description),
'header': self.header_url,
'footer': self.footer_url or '',
'headerURL': self.header_url,
'footerURL': self.footer_url or '',
'previewURL': self.preview_url,
'iconURL': self.icon_url,
'updateURL': self.update_url,
'detailURL': helpers.absolutify(self.addon.get_url_path()),
'version': '1.0'
}
@property
def json_data(self):
"""Persona JSON Data for Browser/extension preview."""
return json.dumps(self.theme_data,
separators=(',', ':'), cls=AMOJSONEncoder)
def authors_other_addons(self, app=None):
"""
Return other addons by the author(s) of this addon,
optionally takes an app.
"""
qs = (Addon.objects.valid()
.exclude(id=self.addon.id)
.filter(type=amo.ADDON_PERSONA))
return (qs.filter(addonuser__listed=True,
authors__in=self.addon.listed_authors)
.distinct())
@cached_property
def listed_authors(self):
return self.addon.listed_authors
class AddonCategory(caching.CachingMixin, models.Model):
addon = models.ForeignKey(Addon, on_delete=models.CASCADE)
category = models.ForeignKey('Category')
feature = models.BooleanField(default=False)
feature_locales = models.CharField(max_length=255, default='', null=True)
objects = caching.CachingManager()
class Meta:
db_table = 'addons_categories'
unique_together = ('addon', 'category')
@classmethod
def creatured_random(cls, category, lang):
return get_creatured_ids(category, lang)
class AddonUser(caching.CachingMixin, models.Model):
addon = models.ForeignKey(Addon, on_delete=models.CASCADE)
user = UserForeignKey()
role = models.SmallIntegerField(default=amo.AUTHOR_ROLE_OWNER,
choices=amo.AUTHOR_CHOICES)
listed = models.BooleanField(_(u'Listed'), default=True)
position = models.IntegerField(default=0)
objects = caching.CachingManager()
def __init__(self, *args, **kwargs):
super(AddonUser, self).__init__(*args, **kwargs)
self._original_role = self.role
self._original_user_id = self.user_id
class Meta:
db_table = 'addons_users'
class AddonDependency(models.Model):
addon = models.ForeignKey(Addon, related_name='addons_dependencies')
dependent_addon = models.ForeignKey(Addon, related_name='dependent_on')
class Meta:
db_table = 'addons_dependencies'
unique_together = ('addon', 'dependent_addon')
class AddonFeatureCompatibility(ModelBase):
addon = models.OneToOneField(
Addon, primary_key=True, on_delete=models.CASCADE)
e10s = models.PositiveSmallIntegerField(
choices=amo.E10S_COMPATIBILITY_CHOICES, default=amo.E10S_UNKNOWN)
def __unicode__(self):
return unicode(self.addon) if self.pk else u""
def get_e10s_classname(self):
return amo.E10S_COMPATIBILITY_CHOICES_API[self.e10s]
class AddonApprovalsCounter(ModelBase):
"""Model holding a counter of the number of times a listed version
    belonging to an add-on has been approved by a human. Reset every time a
listed version is auto-approved for this add-on."""
addon = models.OneToOneField(
Addon, primary_key=True, on_delete=models.CASCADE)
counter = models.PositiveIntegerField(default=0)
last_human_review = models.DateTimeField(null=True)
def __unicode__(self):
return u'%s: %d' % (unicode(self.pk), self.counter) if self.pk else u''
@classmethod
def increment_for_addon(cls, addon):
"""
Increment approval counter for the specified addon, setting the last
human review date to now. If an AddonApprovalsCounter already exists,
it updates it, otherwise it creates and saves a new instance.
"""
data = {
'counter': 1,
'last_human_review': datetime.now(),
}
obj, created = cls.objects.get_or_create(
addon=addon, defaults=data)
if not created:
data['counter'] = F('counter') + 1
obj.update(**data)
return obj
@classmethod
def reset_for_addon(cls, addon):
"""
Reset the approval counter for the specified addon.
"""
obj, created = cls.objects.update_or_create(
addon=addon, defaults={'counter': 0})
return obj
class DeniedGuid(ModelBase):
guid = models.CharField(max_length=255, unique=True)
comments = models.TextField(default='', blank=True)
class Meta:
db_table = 'denied_guids'
def __unicode__(self):
return self.guid
class Category(OnChangeMixin, ModelBase):
# Old name translations, we now have constants translated via gettext, but
# this is for backwards-compatibility, for categories which have a weird
# type/application/slug combo that is not in the constants.
db_name = TranslatedField(db_column='name')
slug = SlugField(max_length=50,
help_text='Used in Category URLs.')
type = models.PositiveIntegerField(db_column='addontype_id',
choices=do_dictsort(amo.ADDON_TYPE))
application = models.PositiveIntegerField(choices=amo.APPS_CHOICES,
null=True, blank=True,
db_column='application_id')
count = models.IntegerField('Addon count', default=0)
weight = models.IntegerField(
default=0, help_text='Category weight used in sort ordering')
misc = models.BooleanField(default=False)
addons = models.ManyToManyField(Addon, through='AddonCategory')
class Meta:
db_table = 'categories'
verbose_name_plural = 'Categories'
@property
def name(self):
try:
value = CATEGORIES[self.application][self.type][self.slug].name
except KeyError:
# If we can't find the category in the constants dict, fall back
# to the db field.
value = self.db_name
return unicode(value)
def __unicode__(self):
return unicode(self.name)
def get_url_path(self):
try:
type = amo.ADDON_SLUGS[self.type]
except KeyError:
type = amo.ADDON_SLUGS[amo.ADDON_EXTENSION]
return reverse('browse.%s' % type, args=[self.slug])
def to_static_category(self):
"""Return the corresponding StaticCategory instance from a Category."""
try:
staticcategory = CATEGORIES[self.application][self.type][self.slug]
except KeyError:
staticcategory = None
return staticcategory
@classmethod
def from_static_category(cls, static_category):
"""Return a Category instance created from a StaticCategory.
Does not save it into the database. Useful in tests."""
return cls(**static_category.__dict__)
dbsignals.pre_save.connect(save_signal, sender=Category,
dispatch_uid='category_translations')
class Preview(ModelBase):
addon = models.ForeignKey(Addon, related_name='previews')
caption = TranslatedField()
position = models.IntegerField(default=0)
sizes = JSONField(max_length=25, default={})
class Meta:
db_table = 'previews'
ordering = ('position', 'created')
def _image_url(self, url_template):
if self.modified is not None:
modified = int(time.mktime(self.modified.timetuple()))
else:
modified = 0
args = [self.id / 1000, self.id, modified]
return url_template % tuple(args)
def _image_path(self, url_template):
args = [self.id / 1000, self.id]
return url_template % tuple(args)
def as_dict(self, src=None):
d = {'full': urlparams(self.image_url, src=src),
'thumbnail': urlparams(self.thumbnail_url, src=src),
'caption': unicode(self.caption)}
return d
@property
def thumbnail_url(self):
template = (
helpers.user_media_url('previews') +
'thumbs/%s/%d.png?modified=%s')
return self._image_url(template)
@property
def image_url(self):
template = (
helpers.user_media_url('previews') +
'full/%s/%d.png?modified=%s')
return self._image_url(template)
@property
def thumbnail_path(self):
template = os.path.join(
helpers.user_media_path('previews'),
'thumbs',
'%s',
'%d.png'
)
return self._image_path(template)
@property
def image_path(self):
template = os.path.join(
helpers.user_media_path('previews'),
'full',
'%s',
'%d.png'
)
return self._image_path(template)
@property
def thumbnail_size(self):
return self.sizes.get('thumbnail', []) if self.sizes else []
@property
def image_size(self):
return self.sizes.get('image', []) if self.sizes else []
dbsignals.pre_save.connect(save_signal, sender=Preview,
dispatch_uid='preview_translations')
def delete_preview_files(sender, instance, **kw):
"""On delete of the Preview object from the database, unlink the image
and thumb on the file system """
for filename in [instance.image_path, instance.thumbnail_path]:
if storage.exists(filename):
log.info('Removing filename: %s for preview: %s'
% (filename, instance.pk))
storage.delete(filename)
models.signals.post_delete.connect(delete_preview_files,
sender=Preview,
dispatch_uid='delete_preview_files')
class AppSupport(ModelBase):
"""Cache to tell us if an add-on's current version supports an app."""
addon = models.ForeignKey(Addon, on_delete=models.CASCADE)
app = models.PositiveIntegerField(choices=amo.APPS_CHOICES,
db_column='app_id')
min = models.BigIntegerField("Minimum app version", null=True)
max = models.BigIntegerField("Maximum app version", null=True)
class Meta:
db_table = 'appsupport'
unique_together = ('addon', 'app')
class Charity(ModelBase):
name = models.CharField(max_length=255)
url = models.URLField()
paypal = models.CharField(max_length=255)
class Meta:
db_table = 'charities'
@property
def outgoing_url(self):
if self.pk == amo.FOUNDATION_ORG:
return self.url
return get_outgoing_url(unicode(self.url))
class DeniedSlug(ModelBase):
name = models.CharField(max_length=255, unique=True, default='')
class Meta:
db_table = 'addons_denied_slug'
def __unicode__(self):
return self.name
@classmethod
def blocked(cls, slug):
return slug.isdigit() or cls.objects.filter(name=slug).exists()
class FrozenAddon(models.Model):
"""Add-ons in this table never get a hotness score."""
addon = models.ForeignKey(Addon)
class Meta:
db_table = 'frozen_addons'
def __unicode__(self):
return 'Frozen: %s' % self.addon_id
@receiver(dbsignals.post_save, sender=FrozenAddon)
def freezer(sender, instance, **kw):
# Adjust the hotness of the FrozenAddon.
if instance.addon_id:
Addon.objects.get(id=instance.addon_id).update(hotness=0)
class CompatOverride(ModelBase):
"""Helps manage compat info for add-ons not hosted on AMO."""
name = models.CharField(max_length=255, blank=True, null=True)
guid = models.CharField(max_length=255, unique=True)
addon = models.ForeignKey(Addon, blank=True, null=True,
help_text='Fill this out to link an override '
'to a hosted add-on')
class Meta:
db_table = 'compat_override'
unique_together = ('addon', 'guid')
def save(self, *args, **kw):
if not self.addon:
qs = Addon.objects.filter(guid=self.guid)
if qs:
self.addon = qs[0]
return super(CompatOverride, self).save(*args, **kw)
def __unicode__(self):
if self.addon:
return unicode(self.addon)
elif self.name:
return '%s (%s)' % (self.name, self.guid)
else:
return self.guid
def is_hosted(self):
"""Am I talking about an add-on on AMO?"""
return bool(self.addon_id)
@staticmethod
def transformer(overrides):
if not overrides:
return
id_map = dict((o.id, o) for o in overrides)
qs = CompatOverrideRange.objects.filter(compat__in=id_map)
for compat_id, ranges in sorted_groupby(qs, 'compat_id'):
id_map[compat_id].compat_ranges = list(ranges)
# May be filled in by a transformer for performance.
@cached_property
def compat_ranges(self):
return list(self._compat_ranges.all())
def collapsed_ranges(self):
"""Collapse identical version ranges into one entity."""
Range = collections.namedtuple('Range', 'type min max apps')
AppRange = collections.namedtuple('AppRange', 'app min max')
rv = []
def sort_key(x):
return (x.min_version, x.max_version, x.type)
for key, compats in sorted_groupby(self.compat_ranges, key=sort_key):
compats = list(compats)
first = compats[0]
item = Range(first.override_type(), first.min_version,
first.max_version, [])
for compat in compats:
app = AppRange(amo.APPS_ALL[compat.app],
compat.min_app_version, compat.max_app_version)
item.apps.append(app)
rv.append(item)
return rv
OVERRIDE_TYPES = (
(0, 'Compatible (not supported)'),
(1, 'Incompatible'),
)
class CompatOverrideRange(ModelBase):
"""App compatibility for a certain version range of a RemoteAddon."""
compat = models.ForeignKey(CompatOverride, related_name='_compat_ranges')
type = models.SmallIntegerField(choices=OVERRIDE_TYPES, default=1)
min_version = models.CharField(
max_length=255, default='0',
help_text=u'If not "0", version is required to exist for the override'
u' to take effect.')
max_version = models.CharField(
max_length=255, default='*',
help_text=u'If not "*", version is required to exist for the override'
u' to take effect.')
app = models.PositiveIntegerField(choices=amo.APPS_CHOICES,
db_column='app_id')
min_app_version = models.CharField(max_length=255, default='0')
max_app_version = models.CharField(max_length=255, default='*')
class Meta:
db_table = 'compat_override_range'
def override_type(self):
"""This is what Firefox wants to see in the XML output."""
return {0: 'compatible', 1: 'incompatible'}[self.type]
class IncompatibleVersions(ModelBase):
"""
Denormalized table to join against for fast compat override filtering.
This was created to be able to join against a specific version record since
the CompatOverrideRange can be wildcarded (e.g. 0 to *, or 1.0 to 1.*), and
addon versioning isn't as consistent as Firefox versioning to trust
`version_int` in all cases. So extra logic needed to be provided for when
a particular version falls within the range of a compatibility override.
"""
version = models.ForeignKey(Version, related_name='+')
app = models.PositiveIntegerField(choices=amo.APPS_CHOICES,
db_column='app_id')
min_app_version = models.CharField(max_length=255, blank=True, default='0')
max_app_version = models.CharField(max_length=255, blank=True, default='*')
min_app_version_int = models.BigIntegerField(blank=True, null=True,
editable=False, db_index=True)
max_app_version_int = models.BigIntegerField(blank=True, null=True,
editable=False, db_index=True)
class Meta:
db_table = 'incompatible_versions'
def __unicode__(self):
return u'<IncompatibleVersion V:%s A:%s %s-%s>' % (
self.version.id, self.app.id, self.min_app_version,
self.max_app_version)
def save(self, *args, **kw):
self.min_app_version_int = version_int(self.min_app_version)
self.max_app_version_int = version_int(self.max_app_version)
return super(IncompatibleVersions, self).save(*args, **kw)
def update_incompatible_versions(sender, instance, **kw):
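    """Recompute IncompatibleVersions rows for an extension's versions when
    one of its compat override ranges is saved or deleted."""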
if not instance.compat.addon_id:
return
if not instance.compat.addon.type == amo.ADDON_EXTENSION:
return
from . import tasks
versions = instance.compat.addon.versions.values_list('id', flat=True)
for chunk in chunked(versions, 50):
tasks.update_incompatible_appversions.delay(chunk)
models.signals.post_save.connect(update_incompatible_versions,
sender=CompatOverrideRange,
dispatch_uid='cor_update_incompatible')
models.signals.post_delete.connect(update_incompatible_versions,
sender=CompatOverrideRange,
dispatch_uid='cor_update_incompatible')
def track_new_status(sender, instance, *args, **kw):
if kw.get('raw'):
        # The addon is being loaded from a fixture.
return
if kw.get('created'):
track_addon_status_change(instance)
models.signals.post_save.connect(track_new_status,
sender=Addon,
dispatch_uid='track_new_addon_status')
@Addon.on_change
def track_status_change(old_attr=None, new_attr=None, **kw):
if old_attr is None:
old_attr = {}
if new_attr is None:
new_attr = {}
new_status = new_attr.get('status')
old_status = old_attr.get('status')
if new_status != old_status:
track_addon_status_change(kw['instance'])
def track_addon_status_change(addon):
statsd.incr('addon_status_change.all.status_{}'
.format(addon.status))
| bsd-3-clause | -8,762,713,141,264,077,000 | 36.610726 | 79 | 0.601608 | false |
Telestream/telestream-cloud-python-sdk | telestream_cloud_qc_sdk/telestream_cloud_qc/models/sps_pps_test.py | 1 | 4039 | # coding: utf-8
"""
Qc API
Qc API # noqa: E501
The version of the OpenAPI document: 3.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from telestream_cloud_qc.configuration import Configuration
class SpsPpsTest(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'reject_on_error': 'bool',
'checked': 'bool'
}
attribute_map = {
'reject_on_error': 'reject_on_error',
'checked': 'checked'
}
def __init__(self, reject_on_error=None, checked=None, local_vars_configuration=None): # noqa: E501
"""SpsPpsTest - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._reject_on_error = None
self._checked = None
self.discriminator = None
if reject_on_error is not None:
self.reject_on_error = reject_on_error
if checked is not None:
self.checked = checked
@property
def reject_on_error(self):
"""Gets the reject_on_error of this SpsPpsTest. # noqa: E501
:return: The reject_on_error of this SpsPpsTest. # noqa: E501
:rtype: bool
"""
return self._reject_on_error
@reject_on_error.setter
def reject_on_error(self, reject_on_error):
"""Sets the reject_on_error of this SpsPpsTest.
:param reject_on_error: The reject_on_error of this SpsPpsTest. # noqa: E501
:type: bool
"""
self._reject_on_error = reject_on_error
@property
def checked(self):
"""Gets the checked of this SpsPpsTest. # noqa: E501
:return: The checked of this SpsPpsTest. # noqa: E501
:rtype: bool
"""
return self._checked
@checked.setter
def checked(self, checked):
"""Sets the checked of this SpsPpsTest.
:param checked: The checked of this SpsPpsTest. # noqa: E501
:type: bool
"""
self._checked = checked
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SpsPpsTest):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, SpsPpsTest):
return True
return self.to_dict() != other.to_dict()
| mit | -5,409,016,393,252,897,000 | 26.47619 | 104 | 0.559544 | false |
darkfeline/dantalian | src/dantalian/base.py | 1 | 5280 | # Copyright (C) 2015 Allen Li
#
# This file is part of Dantalian.
#
# Dantalian is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Dantalian is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Dantalian. If not, see <http://www.gnu.org/licenses/>.
"""This module defines basic interaction with a semantic filesystem.
This module primarily extends link(), unlink(), and rename() to work as though
they support directory linking. The rest of the functions exist as
implementation details to manage directory linking with symlinks and dtags.
"""
from itertools import chain
import os
import posixpath
from dantalian import dtags
from dantalian import oserrors
from dantalian import pathlib
from dantalian import tagnames
def link(rootpath, src, dst):
"""Link src to dst.
Args:
rootpath: Path for tagname conversions.
src: Source path.
dst: Destination path.
"""
if posixpath.isdir(src):
src = pathlib.readlink(src)
os.symlink(posixpath.abspath(src), dst)
dtags.add_tag(src, tagnames.path2tag(rootpath, dst))
else:
os.link(src, dst)
def unlink(rootpath, path):
"""Unlink given path.
If the target is a directory without any other links, raise OSError.
"""
target = path
# We unlink the target. However, if it is a directory, we want to swap it
# out for one of its symlinks, then unlink the symlink. If the directory
# doesn't have any tags, then we fail.
if posixpath.isdir(target):
if not posixpath.islink(target):
tags = dtags.list_tags(target)
if not tags:
raise oserrors.is_a_directory(target)
swap_candidate = tagnames.tag2path(rootpath, tags[0])
swap_dir(rootpath, swap_candidate)
assert posixpath.islink(target)
dtags.remove_tag(target, tagnames.path2tag(rootpath, target))
os.unlink(target)
def rename(rootpath, src, dst):
"""Rename src to dst and fix tags for directories.
Doesn't overwrite an existing file at dst.
Args:
rootpath: Rootpath for tagname conversions.
src: Source path.
dst: Destination path.
"""
link(rootpath, src, dst)
unlink(rootpath, src)
def swap_dir(rootpath, path):
"""Swap a symlink with its target directory.
Args:
rootpath: Rootpath for tag conversions.
path: Path of target symlink.
"""
target = path
if posixpath.islink(target) and posixpath.isdir(target):
here = target
there = pathlib.readlink(target)
# here is the symlink
# there is the dir
here_tag = tagnames.path2tag(rootpath, here)
there_tag = tagnames.path2tag(rootpath, there)
dtags.remove_tag(here, here_tag)
dtags.add_tag(here, there_tag)
os.unlink(here)
# here is now nothing
# there is now the dir
os.rename(there, here)
# here is now the dir
# there is now nothing
os.symlink(here, there)
else:
raise ValueError('{} is not a symlink to a directory'.format(target))
def list_links(top, path):
"""List all links to the target file.
Args:
top: Path to top of directory tree to search.
path: Path of file.
Returns:
Generator yielding paths.
"""
target = path
for (dirpath, dirnames, filenames) in os.walk(top):
for name in chain(dirnames, filenames):
filepath = posixpath.join(dirpath, name)
if posixpath.samefile(target, filepath):
yield filepath
def save_dtags(rootpath, top, dirpath):
"""Save symlinks to a directory's dtags, overwriting it.
Args:
rootpath: Path for tag conversions.
top: Path of directory in which to search.
dirpath: Path of directory whose dtags to update.
"""
dirpath = pathlib.readlink(dirpath)
tags = [tagnames.path2tag(rootpath, path)
for path in list_links(top, dirpath)]
dir_tagname = tagnames.path2tag(rootpath, dirpath)
tags = [tagname
for tagname in tags
if tagname != dir_tagname]
dtags.set_tags(dirpath, tags)
def load_dtags(rootpath, dirpath):
"""Create symlinks for a directory using its dtags."""
tags = dtags.list_tags(dirpath)
dirpath = pathlib.readlink(dirpath)
target = posixpath.abspath(dirpath)
for tagname in tags:
dstpath = tagnames.tag2path(rootpath, tagname)
os.symlink(target, dstpath)
def unload_dtags(rootpath, dirpath):
"""Remove symlinks using a directory's dtags."""
tags = dtags.list_tags(dirpath)
dirpath = pathlib.readlink(dirpath)
for tagname in tags:
tagpath = tagnames.tag2path(rootpath, tagname)
if posixpath.samefile(dirpath, tagpath):
os.unlink(tagpath)
| gpl-3.0 | 4,279,770,812,570,900,500 | 29.697674 | 78 | 0.659848 | false |
onitake/Uranium | UM/Scene/ToolHandle.py | 1 | 3897 | # Copyright (c) 2015 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
from . import SceneNode
from UM.Resources import Resources
from UM.Application import Application
from UM.Math.Color import Color
from UM.Math.Vector import Vector
from UM.Scene.Selection import Selection
from UM.View.GL.OpenGL import OpenGL
from UM.View.RenderBatch import RenderBatch
## A tool handle is an object in the scene that gives cues for what the tool it is
# 'paired' with can do. ToolHandles are used for translation, rotation & scale handles.
# They can also be used as actual objects to interact with (in the case of translation,
# pressing one arrow of the toolhandle locks the translation in that direction)
class ToolHandle(SceneNode.SceneNode):
NoAxis = 1
XAxis = 2
YAxis = 3
ZAxis = 4
AllAxis = 5
DisabledColor = Color(0.5, 0.5, 0.5, 1.0)
XAxisColor = Color(1.0, 0.0, 0.0, 1.0)
YAxisColor = Color(0.0, 0.0, 1.0, 1.0)
ZAxisColor = Color(0.0, 1.0, 0.0, 1.0)
AllAxisColor = Color(1.0, 1.0, 1.0, 1.0)
def __init__(self, parent = None):
super().__init__(parent)
self._scene = Application.getInstance().getController().getScene()
self._solid_mesh = None
self._line_mesh = None
self._selection_mesh = None
self._shader = None
self._previous_dist = None
self._active_axis = None
self._auto_scale = True
self.setCalculateBoundingBox(False)
Selection.selectionCenterChanged.connect(self._onSelectionCenterChanged)
def getLineMesh(self):
return self._line_mesh
def setLineMesh(self, mesh):
self._line_mesh = mesh
self.meshDataChanged.emit(self)
def getSolidMesh(self):
return self._solid_mesh
def setSolidMesh(self, mesh):
self._solid_mesh = mesh
self.meshDataChanged.emit(self)
def getSelectionMesh(self):
return self._selection_mesh
def setSelectionMesh(self, mesh):
self._selection_mesh = mesh
self.meshDataChanged.emit(self)
def getMaterial(self):
return self._shader
def render(self, renderer):
if not self._shader:
self._shader = OpenGL.getInstance().createShaderProgram(Resources.getPath(Resources.Shaders, "toolhandle.shader"))
if self._auto_scale:
camera_position = self._scene.getActiveCamera().getWorldPosition()
dist = (camera_position - self.getWorldPosition()).length()
scale = dist / 400
self.setScale(Vector(scale, scale, scale))
if self._line_mesh:
renderer.queueNode(self, mesh = self._line_mesh, mode = RenderBatch.RenderMode.Lines, overlay = True, shader = self._shader)
if self._solid_mesh:
renderer.queueNode(self, mesh = self._solid_mesh, overlay = True, shader = self._shader)
return True
def getSelectionMap(self):
return {
self.XAxisColor: self.XAxis,
self.YAxisColor: self.YAxis,
self.ZAxisColor: self.ZAxis,
self.AllAxisColor: self.AllAxis
}
def setActiveAxis(self, axis):
if axis == self._active_axis or not self._shader:
return
if axis:
self._shader.setUniformValue("u_activeColor", self._axisColorMap[axis])
else:
self._shader.setUniformValue("u_activeColor", self.DisabledColor)
self._active_axis = axis
self._scene.sceneChanged.emit(self)
@classmethod
def isAxis(cls, value):
return value in cls._axisColorMap
_axisColorMap = {
NoAxis: DisabledColor,
XAxis: XAxisColor,
YAxis: YAxisColor,
ZAxis: ZAxisColor,
AllAxis: AllAxisColor
}
def _onSelectionCenterChanged(self):
self.setPosition(Selection.getSelectionCenter())
| agpl-3.0 | -6,628,176,053,176,245,000 | 30.176 | 136 | 0.641519 | false |
AngryDevelopersLLC/res-scheduler | setup.py | 1 | 3088 | """
Resystem Scheduling Service.
Released under New BSD License.
Copyright © 2015, Vadim Markovtsev :: Angry Developers LLC
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Angry Developers LLC nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL VADIM MARKOVTSEV BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from setuptools import setup
import os
def parse_requirements():
path = os.path.join(os.path.dirname(__file__), "requirements.txt")
reqs = []
with open(path, "r") as fin:
for r in fin.read().split("\n"):
r = r.strip()
if r.startswith("#") or not r:
continue
if r.startswith("git+"):
print("Warning: git dependencies cannot be used in setuptools "
"(%s)" % r)
continue
if not r.startswith("-r"):
reqs.append(r)
return reqs
setup(
name="res-scheduling",
description="Resystem Scheduling Service",
version="1.0.2",
license="New BSD",
author="Vadim Markovtsev",
author_email="[email protected]",
url="https://github.com/AngryDevelopersLLC/res-scheduler",
download_url='https://github.com/AngryDevelopersLLC/res-scheduler',
packages=["res.scheduling"],
install_requires=parse_requirements(),
package_data={"": [
'res/scheduling/requirements/base.txt',
'res/scheduling/res_scheduling.service',
'res/scheduling/run.sh']},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: POSIX",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
]
)
| bsd-3-clause | -7,938,679,896,831,706,000 | 40.716216 | 79 | 0.688371 | false |
cloudmesh/vagrant | cloudmesh_vagrant/cm_vbox.py | 1 | 5612 | from __future__ import print_function
from docopt import docopt
import cloudmesh_vagrant as vagrant
from cloudmesh_client.common.dotdict import dotdict
from pprint import pprint
from cloudmesh_client.common.Printer import Printer
from cloudmesh_client.common.Shell import Shell
import sys
import os
from cloudmesh_vagrant.version import __version__
# pprint (vagrant.vm.list())
# vagrant.vm.execute("w2", "uname")
# pprint (vagrant.image.list())
def defaults():
"""
default values
    :return: default values for memory, image, port, and script
:rtype: dotdict
"""
d = dotdict()
d.memory = 1024
# d.image = "ubuntu/xenial64"
d.image = "ubuntu/trusty64"
d.port = 8080
d.script = None
return d
def _convert(lst, id="name"):
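    """Turn a list of dicts into a dict keyed by the given attribute (default: "name")."""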
d = {}
for entry in lst:
d[entry[id]] = entry
return d
def _LIST_PRINT(l, output, order=None):
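    """Print the given list in the requested output format (table, csv, json, yaml or dict)."""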
if output in ["yaml", "dict", "json"]:
l = _convert(l)
result = Printer.write(l,
order=order,
output=output)
if output in ["table", "yaml", "json", "csv"]:
print(result)
else:
pprint(result)
def do_vbox(argv):
"""
::
Usage:
vbox version [--format=FORMAT]
vbox image list [--format=FORMAT]
vbox image find NAME
vbox image add NAME
vbox vm list [--format=FORMAT] [-v]
vbox vm delete NAME
vbox vm config NAME
vbox vm ip NAME [--all]
vbox create NAME ([--memory=MEMORY]
[--image=IMAGE]
[--script=SCRIPT] | list)
vbox vm boot NAME ([--memory=MEMORY]
[--image=IMAGE]
[--port=PORT]
[--script=SCRIPT] | list)
vbox vm ssh NAME [-e COMMAND]
"""
arg = dotdict(docopt(do_vbox.__doc__, argv))
arg.format = arg["--format"] or "table"
arg.verbose = arg["-v"]
arg.all = arg["--all"]
if arg.version:
versions = {
"vagrant": {
"attribute": "Vagrant Version",
"version": vagrant.version(),
},
"cloudmesh-vbox": {
"attribute":"cloudmesh vbox Version",
"version": __version__
}
}
_LIST_PRINT(versions, arg.format)
elif arg.image and arg.list:
l = vagrant.image.list(verbose=arg.verbose)
_LIST_PRINT(l, arg.format, order=["name", "provider", "date"])
elif arg.image and arg.add:
l = vagrant.image.add(arg.NAME)
print(l)
elif arg.image and arg.find:
l = vagrant.image.find(arg.NAME)
print(l)
elif arg.vm and arg.list:
l = vagrant.vm.list()
_LIST_PRINT(l,
arg.format,
order=["name", "state", "id", "provider", "directory"])
elif arg.create and arg.list:
result = Shell.cat("{NAME}/Vagrantfile".format(**arg))
print (result)
elif arg.create:
d = defaults()
arg.memory = arg["--memory"] or d.memory
arg.image = arg["--image"] or d.image
arg.script = arg["--script"] or d.script
vagrant.vm.create(
name=arg.NAME,
memory=arg.memory,
image=arg.image,
script=arg.script)
elif arg.config:
# arg.NAME
d = vagrant.vm.info(name=arg.NAME)
result = Printer.attribute(d, output=arg.format)
print (result)
elif arg.ip:
data = []
result = vagrant.vm.execute(arg.NAME, "ifconfig")
if result is not None:
lines = result.splitlines()[:-1]
for line in lines:
if "inet addr" in line:
line = line.replace("inet addr", "ip")
line = ' '.join(line.split())
_adresses = line.split(" ")
address = {}
for element in _adresses:
attribute, value = element.split(":")
address[attribute] = value
data.append(address)
if arg.all:
d = {}
i = 0
for e in data:
d[str(i)] = e
i = i + 1
result = Printer.attribute(d, output=arg.format)
print(result)
else:
for element in data:
ip = element['ip']
if ip == "127.0.0.1" or ip.startswith("10."):
pass
else:
print (element['ip'])
elif arg.boot:
d = defaults()
arg.memory = arg["--memory"] or d.memory
arg.image = arg["--image"] or d.image
arg.script = arg["--script"] or d.script
arg.port = arg["--port"] or d.port
vagrant.vm.boot(
name=arg.NAME,
memory=arg.memory,
image=arg.image,
script=arg.script,
port=arg.port)
elif arg.delete:
result = vagrant.vm.delete(name=arg.NAME)
print(result)
elif arg.ssh:
if arg.COMMAND is None:
os.system("cd {NAME}; vagrant ssh {NAME}".format(**arg))
else:
result = vagrant.vm.execute(arg.NAME, arg.COMMAND)
if result is not None:
lines = result.splitlines()[:-1]
for line in lines:
print (line)
else:
print ("use help")
def main():
args = sys.argv[1:]
do_vbox(args)
if __name__ == '__main__':
main()
| apache-2.0 | -8,535,315,272,027,744,000 | 25.224299 | 74 | 0.498396 | false |
rdev-hackaton/GitHubTimeTracker | tests/frontends/cli.py | 1 | 2340 | import click
import pytest
from click.testing import CliRunner
from tests.core.mock_source import MockSource
from time_tracker.frontends.cli.tracker import print_time_tracking_info
from time_tracker.frontends.cli.options import DependentOption
@pytest.fixture(autouse=True)
def monkeypatch_config(monkeypatch):
monkeypatch.setattr('time_tracker.config.Config.__init__', lambda *a: None)
monkeypatch.setattr('time_tracker.config.Config.get_backend',
lambda *a: MockSource)
def test_cli():
runner = CliRunner()
result = runner.invoke(
print_time_tracking_info,
['--token', 'dummy', '--repo', 'dummy']
)
assert not result.exception
def test_cli_total():
runner = CliRunner()
result = runner.invoke(
print_time_tracking_info,
['--token', 'dummy', '--repo', 'dummy', '--total']
)
assert not result.exception
def test_cli_issue():
runner = CliRunner()
result = runner.invoke(
print_time_tracking_info,
['--token', 'dummy', '--repo', 'dummy', '--issue', '1']
)
assert not result.exception
def test_cli_milestone():
runner = CliRunner()
result = runner.invoke(
print_time_tracking_info,
['--token', 'dummy', '--repo', 'dummy', '--milestone', 'dummy']
)
assert not result.exception
def test_cli_committer():
runner = CliRunner()
result = runner.invoke(
print_time_tracking_info,
['--token', 'dummy', '--repo', 'dummy', '--committer', 'dummy']
)
assert not result.exception
def test_cli_fail():
runner = CliRunner()
result = runner.invoke(
print_time_tracking_info,
['--token', 'dummy', '--repo', 'dummy', '--issue', 'a']
)
assert result.exception
# Options tests
def test_dependent_option_prompt():
@click.command()
@click.option('--option', cls=DependentOption, prompt=True,
prompt_depends_on=('another', False))
def test_comm(option):
pass
runner = CliRunner()
result = runner.invoke(
test_comm,
input='a'
)
assert not result.exception
@pytest.mark.parametrize('value', [1, 'a', [], {}])
def test_dependent_option_create_fail(value):
with pytest.raises(TypeError):
DependentOption('--option', prompt_depends_on=value)
| mit | 3,904,383,696,815,917,600 | 24.16129 | 79 | 0.620085 | false |
balazssimon/ml-playground | udemy/lazyprogrammer/reinforcement-learning-python/approx_mc_prediction.py | 1 | 2661 | import numpy as np
import matplotlib.pyplot as plt
from grid_world import standard_grid, negative_grid
from iterative_policy_evaluation import print_values, print_policy
# NOTE: this is only policy evaluation, not optimization
# we'll try to obtain the same result as our other MC script
from monte_carlo_random import random_action, play_game, SMALL_ENOUGH, GAMMA, ALL_POSSIBLE_ACTIONS
LEARNING_RATE = 0.001
if __name__ == '__main__':
# use the standard grid again (0 for every step) so that we can compare
# to iterative policy evaluation
grid = standard_grid()
# print rewards
print("rewards:")
print_values(grid.rewards, grid)
# state -> action
# found by policy_iteration_random on standard_grid
# MC method won't get exactly this, but should be close
# values:
# ---------------------------
# 0.43| 0.56| 0.72| 0.00|
# ---------------------------
# 0.33| 0.00| 0.21| 0.00|
# ---------------------------
# 0.25| 0.18| 0.11| -0.17|
# policy:
# ---------------------------
# R | R | R | |
# ---------------------------
# U | | U | |
# ---------------------------
# U | L | U | L |
policy = {
(2, 0): 'U',
(1, 0): 'U',
(0, 0): 'R',
(0, 1): 'R',
(0, 2): 'R',
(1, 2): 'U',
(2, 1): 'L',
(2, 2): 'U',
(2, 3): 'L',
}
# initialize theta
# our model is V_hat = theta.dot(x)
# where x = [row, col, row*col, 1] - 1 for bias term
theta = np.random.randn(4) / 2
def s2x(s):
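    # Feature map: state (row, col) -> [row, col, row*col, 1]; features are
    # shifted so they are roughly centered, and the trailing 1 is the bias term.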
return np.array([s[0] - 1, s[1] - 1.5, s[0]*s[1] - 3, 1])
# repeat until convergence
deltas = []
t = 1.0
for it in range(20000):
if it % 100 == 0:
t += 0.01
alpha = LEARNING_RATE/t
# generate an episode using pi
biggest_change = 0
states_and_returns = play_game(grid, policy)
seen_states = set()
for s, G in states_and_returns:
# check if we have already seen s
# called "first-visit" MC policy evaluation
if s not in seen_states:
old_theta = theta.copy()
x = s2x(s)
V_hat = theta.dot(x)
# grad(V_hat) wrt theta = x
theta += alpha*(G - V_hat)*x
biggest_change = max(biggest_change, np.abs(old_theta - theta).sum())
seen_states.add(s)
deltas.append(biggest_change)
plt.plot(deltas)
plt.show()
# obtain predicted values
V = {}
states = grid.all_states()
for s in states:
if s in grid.actions:
V[s] = theta.dot(s2x(s))
else:
# terminal state or state we can't otherwise get to
V[s] = 0
print("values:")
print_values(V, grid)
print("policy:")
print_policy(policy, grid)
| apache-2.0 | -1,145,871,072,479,624,100 | 26.153061 | 98 | 0.534386 | false |
joebos/django-allauth | test_settings.py | 1 | 2450 | # -*- coding: utf-8 -*-
SECRET_KEY = 'psst'
SITE_ID = 1
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
ROOT_URLCONF = 'allauth.urls'
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.request",
"django.contrib.messages.context_processors.messages",
"allauth.account.context_processors.account",
"allauth.socialaccount.context_processors.socialaccount",
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.amazon',
'allauth.socialaccount.providers.angellist',
'allauth.socialaccount.providers.bitbucket',
'allauth.socialaccount.providers.feedly',
'allauth.socialaccount.providers.dropbox',
'allauth.socialaccount.providers.facebook',
'allauth.socialaccount.providers.flickr',
'allauth.socialaccount.providers.foursquare',
'allauth.socialaccount.providers.google',
'allauth.socialaccount.providers.github',
'allauth.socialaccount.providers.instagram',
'allauth.socialaccount.providers.linkedin',
'allauth.socialaccount.providers.linkedin_oauth2',
'allauth.socialaccount.providers.openid',
'allauth.socialaccount.providers.paypal',
'allauth.socialaccount.providers.persona',
'allauth.socialaccount.providers.soundcloud',
'allauth.socialaccount.providers.stackexchange',
'allauth.socialaccount.providers.tumblr',
'allauth.socialaccount.providers.twitch',
'allauth.socialaccount.providers.twitter',
'allauth.socialaccount.providers.vimeo',
'allauth.socialaccount.providers.weibo',
'allauth.socialaccount.providers.bitly',
'allauth.socialaccount.providers.vk',
)
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
STATIC_ROOT = '/tmp/' # Dummy
STATIC_URL = '/static/'
| mit | -6,332,023,860,715,804,000 | 30.818182 | 61 | 0.706122 | false |
petrundiy2/arithmetic_dragons | enemies.py | 1 | 3433 | __author__ = 'student'
# coding: utf-8
# license: GPLv3
from gameunit import *
from random import randint, choice
class Enemy(Attacker):
pass
def generate_random_enemy():
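    """Return a single enemy of a randomly chosen type."""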
RandomEnemyType = choice(enemy_types)
enemy = RandomEnemyType()
return enemy
def generate_dragon_list(enemy_number):
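    """Return a list of enemy_number randomly generated enemies (dragons and trolls alike)."""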
enemy_list = [generate_random_enemy() for i in range(enemy_number)]
return enemy_list
class Dragon(Enemy):
def set_answer(self, answer):
self.__answer = answer
def check_answer(self, answer):
return answer == self.__answer
class Troll(Enemy):
def set_answer(self,answer):
self.__answer = answer
def check_answer(self, answer):
return answer == self.__answer
class GreenDragon(Dragon):
def __init__(self):
self._health = 200
self._attack = 10
        self._color = 'green dragon'
def question(self):
x = randint(1,100)
y = randint(1,100)
self.__quest = str(x) + '+' + str(y)
self.set_answer(x + y)
return self.__quest
class RedDragon(Dragon):
def __init__(self):
self._health = 170
self._attack = 15
        self._color = 'red dragon'
def question(self):
x = randint(1,100)
y = randint(1,100)
self.__quest = str(x) + '-' + str(y)
self.set_answer(x - y)
return self.__quest
class BlackDragon(Dragon):
def __init__(self):
self._health = 250
self._attack = 9
        self._color = 'black dragon'
def question(self):
x = randint(1,100)
y = randint(1,100)
self.__quest = str(x) + '*' + str(y)
self.set_answer(x * y)
return self.__quest
class CleverTroll1(Troll):
def __init__(self):
        self._health = 300
        self._attack = 20
        self._color = 'fat green troll'
def question(self):
x = randint(1,5)
        self.__quest = 'Guess a number from 1 to 5!'
self.set_answer(x)
return self.__quest
class CleverTroll2(Troll):
def __init__(self):
        self._health = 280
        self._attack = 25
        self._color = 'thin blue troll'
def question(self):
x = randint(1,1000)
        self.__quest = ('Is the number ' + str(x) + ' prime? '
                        'Prime-1, Composite-0, Neither prime nor composite-00')
        # Count all divisors of x, including 1 and x itself.
        n = 0
        for y in range(1, x + 1):
            if x % y == 0:
                n += 1
        if n == 2:
            # Exactly two divisors (1 and x): prime.
            self.set_answer(1)
        elif n > 2:
            # More than two divisors: composite.
            self.set_answer(0)
        else:
            # x == 1 is neither prime nor composite (the literal 00 is just 0).
            self.set_answer(0)
return self.__quest
class CleverTroll3(Troll):
def __init__(self):
        self._health = 350
        self._attack = 20
        self._color = 'huge grey troll'
def question(self):
x = randint(1,100)
        self.__quest = ('Write out all divisors of ' + str(x) + ' in ascending order, '
                        'with no separators! The number itself is included!')
        divisors = []
        for y in range(1, x + 1):
            if x % y == 0:
                divisors.append(y)
        answer = int(''.join(str(d) for d in divisors))
        self.set_answer(answer)
return self.__quest
enemy_types = [GreenDragon, RedDragon, BlackDragon, CleverTroll1, CleverTroll2, CleverTroll3]
| gpl-3.0 | -4,596,253,404,611,616,300 | 25.733333 | 116 | 0.551434 | false |
michael-dev2rights/ansible | lib/ansible/modules/cloud/amazon/ec2_asg.py | 1 | 59333 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = """
---
module: ec2_asg
short_description: Create or delete AWS Autoscaling Groups
description:
- Can create or delete AWS Autoscaling Groups
- Works with the ec2_lc module to manage Launch Configurations
version_added: "1.6"
author: "Gareth Rushgrove (@garethr)"
options:
state:
description:
- register or deregister the instance
required: false
choices: ['present', 'absent']
default: present
name:
description:
- Unique name for group to be created or deleted
required: true
load_balancers:
description:
- List of ELB names to use for the group
required: false
target_group_arns:
description:
- List of target group ARNs to use for the group
version_added: "2.4"
availability_zones:
description:
- List of availability zone names in which to create the group. Defaults to all the availability zones in the region if vpc_zone_identifier is not set.
required: false
launch_config_name:
description:
- Name of the Launch configuration to use for the group. See the ec2_lc module for managing these.
If unspecified then the current group value will be used.
required: true
min_size:
description:
- Minimum number of instances in group, if unspecified then the current group value will be used.
required: false
max_size:
description:
- Maximum number of instances in group, if unspecified then the current group value will be used.
required: false
placement_group:
description:
- Physical location of your cluster placement group created in Amazon EC2.
required: false
version_added: "2.3"
default: None
desired_capacity:
description:
- Desired number of instances in group, if unspecified then the current group value will be used.
required: false
replace_all_instances:
description:
- In a rolling fashion, replace all instances with an old launch configuration with one from the current launch configuration.
required: false
version_added: "1.8"
default: False
replace_batch_size:
description:
- Number of instances you'd like to replace at a time. Used with replace_all_instances.
required: false
version_added: "1.8"
default: 1
replace_instances:
description:
- List of instance_ids belonging to the named ASG that you would like to terminate and be replaced with instances matching the current launch
configuration.
required: false
version_added: "1.8"
default: None
lc_check:
description:
- Check to make sure instances that are being replaced with replace_instances do not already have the current launch_config.
required: false
version_added: "1.8"
default: True
vpc_zone_identifier:
description:
- List of VPC subnets to use
required: false
default: None
tags:
description:
- A list of tags to add to the Auto Scale Group. Optional key is 'propagate_at_launch', which defaults to true.
required: false
default: None
version_added: "1.7"
health_check_period:
description:
- Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health.
required: false
default: 500 seconds
version_added: "1.7"
health_check_type:
description:
- The service you want the health status from, Amazon EC2 or Elastic Load Balancer.
required: false
default: EC2
version_added: "1.7"
choices: ['EC2', 'ELB']
default_cooldown:
description:
- The number of seconds after a scaling activity completes before another can begin.
required: false
default: 300 seconds
version_added: "2.0"
wait_timeout:
description:
      - How long to wait for instances to become viable when replaced. Used in conjunction with instance_ids option.
default: 300
version_added: "1.8"
wait_for_instances:
description:
- Wait for the ASG instances to be in a ready state before exiting. If instances are behind an ELB, it will wait until the ELB determines all
instances have a lifecycle_state of "InService" and a health_status of "Healthy".
version_added: "1.9"
default: yes
required: False
termination_policies:
description:
- An ordered list of criteria used for selecting instances to be removed from the Auto Scaling group when reducing capacity.
      - For 'Default', when used to create a new autoscaling group, the "Default" value is used. When used to change an existing autoscaling group, the
current termination policies are maintained.
required: false
default: Default
choices: ['OldestInstance', 'NewestInstance', 'OldestLaunchConfiguration', 'ClosestToNextInstanceHour', 'Default']
version_added: "2.0"
notification_topic:
description:
- A SNS topic ARN to send auto scaling notifications to.
default: None
required: false
version_added: "2.2"
notification_types:
description:
- A list of auto scaling events to trigger notifications on.
default:
- 'autoscaling:EC2_INSTANCE_LAUNCH'
- 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR'
- 'autoscaling:EC2_INSTANCE_TERMINATE'
- 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR'
required: false
version_added: "2.2"
suspend_processes:
description:
- A list of scaling processes to suspend.
required: False
default: []
choices: ['Launch', 'Terminate', 'HealthCheck', 'ReplaceUnhealthy', 'AZRebalance', 'AlarmNotification', 'ScheduledActions', 'AddToLoadBalancer']
version_added: "2.3"
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = '''
# Basic configuration
- ec2_asg:
name: special
load_balancers: [ 'lb1', 'lb2' ]
availability_zones: [ 'eu-west-1a', 'eu-west-1b' ]
launch_config_name: 'lc-1'
min_size: 1
max_size: 10
desired_capacity: 5
vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ]
tags:
- environment: production
propagate_at_launch: no
# Rolling ASG Updates
# Below is an example of how to assign a new launch config to an ASG and terminate old instances.
#
# All instances in "myasg" that do not have the launch configuration named "my_new_lc" will be terminated in
# a rolling fashion with instances using the current launch configuration, "my_new_lc".
#
# This could also be considered a rolling deploy of a pre-baked AMI.
#
# If this is a newly created group, the instances will not be replaced since all instances
# will have the current launch configuration.
- name: create launch config
ec2_lc:
name: my_new_lc
image_id: ami-lkajsf
key_name: mykey
region: us-east-1
security_groups: sg-23423
instance_type: m1.small
assign_public_ip: yes
- ec2_asg:
name: myasg
launch_config_name: my_new_lc
health_check_period: 60
health_check_type: ELB
replace_all_instances: yes
min_size: 5
max_size: 5
desired_capacity: 5
region: us-east-1
# To only replace a couple of instances instead of all of them, supply a list
# to "replace_instances":
- ec2_asg:
name: myasg
launch_config_name: my_new_lc
health_check_period: 60
health_check_type: ELB
replace_instances:
- i-b345231
- i-24c2931
min_size: 5
max_size: 5
desired_capacity: 5
region: us-east-1
'''
RETURN = '''
---
default_cooldown:
description: The default cooldown time in seconds.
returned: success
type: int
sample: 300
desired_capacity:
description: The number of EC2 instances that should be running in this group.
returned: success
type: int
sample: 3
healthcheck_period:
description: Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health.
returned: success
type: int
sample: 30
healthcheck_type:
description: The service you want the health status from, one of "EC2" or "ELB".
returned: success
type: str
sample: "ELB"
healthy_instances:
description: Number of instances in a healthy state
returned: success
type: int
sample: 5
in_service_instances:
description: Number of instances in service
returned: success
type: int
sample: 3
instance_facts:
description: Dictionary of EC2 instances and their status as it relates to the ASG.
returned: success
type: dict
sample: {
"i-0123456789012": {
"health_status": "Healthy",
"launch_config_name": "public-webapp-production-1",
"lifecycle_state": "InService"
}
}
instances:
  description: List of instance IDs in the ASG
returned: success
type: list
sample: [
"i-0123456789012"
]
launch_config_name:
description: >
    Name of the launch configuration associated with the ASG. Same as launch_configuration_name,
    provided for compatibility with the ec2_asg module.
returned: success
type: str
sample: "public-webapp-production-1"
load_balancers:
  description: List of load balancer names attached to the ASG.
returned: success
type: list
sample: ["elb-webapp-prod"]
max_size:
description: Maximum size of group
returned: success
type: int
sample: 3
min_size:
description: Minimum size of group
returned: success
type: int
sample: 1
pending_instances:
description: Number of instances in pending state
returned: success
type: int
sample: 1
tags:
description: List of tags for the ASG, and whether or not each tag propagates to instances at launch.
returned: success
type: list
sample: [
{
"key": "Name",
"value": "public-webapp-production-1",
"resource_id": "public-webapp-production-1",
"resource_type": "auto-scaling-group",
"propagate_at_launch": "true"
},
{
"key": "env",
"value": "production",
"resource_id": "public-webapp-production-1",
"resource_type": "auto-scaling-group",
"propagate_at_launch": "true"
}
]
target_group_arns:
description: List of ARNs of the target groups that the ASG populates
returned: success
type: list
sample: [
"arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-host-hello/1a2b3c4d5e6f1a2b",
"arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-path-world/abcd1234abcd1234"
]
target_group_names:
description: List of names of the target groups that the ASG populates
returned: success
type: list
sample: [
"target-group-host-hello",
"target-group-path-world"
]
termination_policies:
description: A list of termination policies for the group.
returned: success
  type: list
sample: ["Default"]
unhealthy_instances:
description: Number of instances in an unhealthy state
returned: success
type: int
sample: 0
viable_instances:
description: Number of instances in a viable state
returned: success
type: int
sample: 1
'''
import time
import logging as log
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, HAS_BOTO3, camel_dict_to_snake_dict, get_aws_connection_info, AWSRetry
try:
import botocore
except ImportError:
pass # will be detected by imported HAS_BOTO3
# log.basicConfig(filename='/tmp/ansible_ec2_asg.log', level=log.DEBUG, format='%(asctime)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
ASG_ATTRIBUTES = ('AvailabilityZones', 'DefaultCooldown', 'DesiredCapacity',
'HealthCheckGracePeriod', 'HealthCheckType', 'LaunchConfigurationName',
'LoadBalancerNames', 'MaxSize', 'MinSize', 'AutoScalingGroupName', 'PlacementGroup',
'TerminationPolicies', 'VPCZoneIdentifier')
INSTANCE_ATTRIBUTES = ('instance_id', 'health_status', 'lifecycle_state', 'launch_config_name')
backoff_params = dict(tries=10, delay=3, backoff=1.5)
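# Thin wrappers around the boto3 autoscaling/ELB calls used below; each one is
# decorated with AWSRetry.backoff(**backoff_params) so transient errors and API
# throttling are retried automatically (10 tries, 3s initial delay, 1.5x backoff).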
@AWSRetry.backoff(**backoff_params)
def describe_autoscaling_groups(connection, group_name):
pg = connection.get_paginator('describe_auto_scaling_groups')
return pg.paginate(AutoScalingGroupNames=[group_name]).build_full_result().get('AutoScalingGroups', [])
@AWSRetry.backoff(**backoff_params)
def deregister_lb_instances(connection, lb_name, instance_id):
connection.deregister_instances_from_load_balancer(LoadBalancerName=lb_name, Instances=[dict(InstanceId=instance_id)])
@AWSRetry.backoff(**backoff_params)
def describe_instance_health(connection, lb_name, instances):
params = dict(LoadBalancerName=lb_name)
if instances:
params.update(Instances=instances)
return connection.describe_instance_health(**params)
@AWSRetry.backoff(**backoff_params)
def describe_target_health(connection, target_group_arn, instances):
return connection.describe_target_health(TargetGroupArn=target_group_arn, Targets=instances)
@AWSRetry.backoff(**backoff_params)
def suspend_asg_processes(connection, asg_name, processes):
connection.suspend_processes(AutoScalingGroupName=asg_name, ScalingProcesses=processes)
@AWSRetry.backoff(**backoff_params)
def resume_asg_processes(connection, asg_name, processes):
connection.resume_processes(AutoScalingGroupName=asg_name, ScalingProcesses=processes)
@AWSRetry.backoff(**backoff_params)
def describe_launch_configurations(connection, launch_config_name):
pg = connection.get_paginator('describe_launch_configurations')
return pg.paginate(LaunchConfigurationNames=[launch_config_name]).build_full_result()
@AWSRetry.backoff(**backoff_params)
def create_asg(connection, **params):
connection.create_auto_scaling_group(**params)
@AWSRetry.backoff(**backoff_params)
def put_notification_config(connection, asg_name, topic_arn, notification_types):
connection.put_notification_configuration(
AutoScalingGroupName=asg_name,
TopicARN=topic_arn,
NotificationTypes=notification_types
)
@AWSRetry.backoff(**backoff_params)
def del_notification_config(connection, asg_name, topic_arn):
connection.delete_notification_configuration(
AutoScalingGroupName=asg_name,
TopicARN=topic_arn
)
@AWSRetry.backoff(**backoff_params)
def attach_load_balancers(connection, asg_name, load_balancers):
connection.attach_load_balancers(AutoScalingGroupName=asg_name, LoadBalancerNames=load_balancers)
@AWSRetry.backoff(**backoff_params)
def detach_load_balancers(connection, asg_name, load_balancers):
connection.detach_load_balancers(AutoScalingGroupName=asg_name, LoadBalancerNames=load_balancers)
@AWSRetry.backoff(**backoff_params)
def attach_lb_target_groups(connection, asg_name, target_group_arns):
connection.attach_load_balancer_target_groups(AutoScalingGroupName=asg_name, TargetGroupARNs=target_group_arns)
@AWSRetry.backoff(**backoff_params)
def detach_lb_target_groups(connection, asg_name, target_group_arns):
connection.detach_load_balancer_target_groups(AutoScalingGroupName=asg_name, TargetGroupARNs=target_group_arns)
@AWSRetry.backoff(**backoff_params)
def update_asg(connection, **params):
connection.update_auto_scaling_group(**params)
@AWSRetry.backoff(**backoff_params)
def delete_asg(connection, asg_name, force_delete):
connection.delete_auto_scaling_group(AutoScalingGroupName=asg_name, ForceDelete=force_delete)
@AWSRetry.backoff(**backoff_params)
def terminate_asg_instance(connection, instance_id, decrement_capacity):
connection.terminate_instance_in_auto_scaling_group(InstanceId=instance_id,
ShouldDecrementDesiredCapacity=decrement_capacity)
def enforce_required_arguments(module):
    ''' Since many arguments are not required for autoscale group deletion,
they cannot be mandatory arguments for the module, so we enforce
them here '''
missing_args = []
for arg in ('min_size', 'max_size', 'launch_config_name'):
if module.params[arg] is None:
missing_args.append(arg)
if missing_args:
module.fail_json(msg="Missing required arguments for autoscaling group create/update: %s" % ",".join(missing_args))
def get_properties(autoscaling_group, module):
properties = dict()
properties['healthy_instances'] = 0
properties['in_service_instances'] = 0
properties['unhealthy_instances'] = 0
properties['pending_instances'] = 0
properties['viable_instances'] = 0
properties['terminating_instances'] = 0
instance_facts = dict()
autoscaling_group_instances = autoscaling_group.get('Instances')
if autoscaling_group_instances:
properties['instances'] = [i['InstanceId'] for i in autoscaling_group_instances]
for i in autoscaling_group_instances:
instance_facts[i['InstanceId']] = {'health_status': i['HealthStatus'],
'lifecycle_state': i['LifecycleState'],
'launch_config_name': i.get('LaunchConfigurationName')}
if i['HealthStatus'] == 'Healthy' and i['LifecycleState'] == 'InService':
properties['viable_instances'] += 1
if i['HealthStatus'] == 'Healthy':
properties['healthy_instances'] += 1
else:
properties['unhealthy_instances'] += 1
if i['LifecycleState'] == 'InService':
properties['in_service_instances'] += 1
if i['LifecycleState'] == 'Terminating':
properties['terminating_instances'] += 1
if i['LifecycleState'] == 'Pending':
properties['pending_instances'] += 1
else:
properties['instances'] = []
properties['instance_facts'] = instance_facts
properties['load_balancers'] = autoscaling_group.get('LoadBalancerNames')
properties['launch_config_name'] = autoscaling_group.get('LaunchConfigurationName')
properties['tags'] = autoscaling_group.get('Tags')
properties['min_size'] = autoscaling_group.get('MinSize')
properties['max_size'] = autoscaling_group.get('MaxSize')
properties['desired_capacity'] = autoscaling_group.get('DesiredCapacity')
properties['default_cooldown'] = autoscaling_group.get('DefaultCooldown')
properties['healthcheck_grace_period'] = autoscaling_group.get('HealthCheckGracePeriod')
properties['healthcheck_type'] = autoscaling_group.get('HealthCheckType')
properties['termination_policies'] = autoscaling_group.get('TerminationPolicies')
properties['target_group_arns'] = autoscaling_group.get('TargetGroupARNs')
if properties['target_group_arns']:
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
elbv2_connection = boto3_conn(module,
conn_type='client',
resource='elbv2',
region=region,
endpoint=ec2_url,
**aws_connect_params)
tg_paginator = elbv2_connection.get_paginator('describe_target_groups')
tg_result = tg_paginator.paginate(TargetGroupArns=properties['target_group_arns']).build_full_result()
target_groups = tg_result['TargetGroups']
else:
target_groups = []
properties['target_group_names'] = [tg['TargetGroupName'] for tg in target_groups]
return properties
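# Deregister an instance from every ELB attached to the ASG and wait until the
# ELBs stop reporting it as InService (only relevant when health_check_type is ELB).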
def elb_dreg(asg_connection, module, group_name, instance_id):
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
wait_timeout = module.params.get('wait_timeout')
count = 1
if as_group['LoadBalancerNames'] and as_group['HealthCheckType'] == 'ELB':
elb_connection = boto3_conn(module,
conn_type='client',
resource='elb',
region=region,
endpoint=ec2_url,
**aws_connect_params)
else:
return
for lb in as_group['LoadBalancerNames']:
deregister_lb_instances(elb_connection, lb, instance_id)
log.debug("De-registering {0} from ELB {1}".format(instance_id, lb))
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and count > 0:
count = 0
for lb in as_group['LoadBalancerNames']:
lb_instances = describe_instance_health(elb_connection, lb, [])
for i in lb_instances['InstanceStates']:
if i['InstanceId'] == instance_id and i['State'] == "InService":
count += 1
log.debug("{0}: {1}, {2}".format(i['InstanceId'], i['State'], i['Description']))
time.sleep(10)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg="Waited too long for instance to deregister. {0}".format(time.asctime()))
def elb_healthy(asg_connection, elb_connection, module, group_name):
healthy_instances = set()
as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
props = get_properties(as_group, module)
# get healthy, inservice instances from ASG
instances = []
for instance, settings in props['instance_facts'].items():
if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy':
instances.append(dict(InstanceId=instance))
log.debug("ASG considers the following instances InService and Healthy: {0}".format(instances))
log.debug("ELB instance status:")
lb_instances = list()
for lb in as_group.get('LoadBalancerNames'):
# we catch a race condition that sometimes happens if the instance exists in the ASG
        # but has not yet shown up in the ELB
try:
lb_instances = describe_instance_health(elb_connection, lb, instances)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == 'InvalidInstance':
return None
module.fail_json(msg="Failed to get load balancer.",
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except botocore.exceptions.BotoCoreError as e:
module.fail_json(msg="Failed to get load balancer.",
exception=traceback.format_exc())
for i in lb_instances.get('InstanceStates'):
if i['State'] == "InService":
healthy_instances.add(i['InstanceId'])
log.debug("ELB Health State {0}: {1}".format(i['InstanceId'], i['State']))
return len(healthy_instances)
def tg_healthy(asg_connection, elbv2_connection, module, group_name):
healthy_instances = set()
as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
props = get_properties(as_group, module)
# get healthy, inservice instances from ASG
instances = []
for instance, settings in props['instance_facts'].items():
if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy':
instances.append(dict(Id=instance))
log.debug("ASG considers the following instances InService and Healthy: {0}".format(instances))
log.debug("Target Group instance status:")
tg_instances = list()
for tg in as_group.get('TargetGroupARNs'):
# we catch a race condition that sometimes happens if the instance exists in the ASG
        # but has not yet shown up in the target group
try:
tg_instances = describe_target_health(elbv2_connection, tg, instances)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == 'InvalidInstance':
return None
module.fail_json(msg="Failed to get target group.",
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except botocore.exceptions.BotoCoreError as e:
module.fail_json(msg="Failed to get target group.",
exception=traceback.format_exc())
for i in tg_instances.get('TargetHealthDescriptions'):
if i['TargetHealth']['State'] == "healthy":
healthy_instances.add(i['Target']['Id'])
log.debug("Target Group Health State {0}: {1}".format(i['Target']['Id'], i['TargetHealth']['State']))
return len(healthy_instances)
def wait_for_elb(asg_connection, module, group_name):
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
wait_timeout = module.params.get('wait_timeout')
# if the health_check_type is ELB, we want to query the ELBs directly for instance
    # status so as to avoid the health_check_grace period granted to ASG instances
as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
if as_group.get('LoadBalancerNames') and as_group.get('HealthCheckType') == 'ELB':
log.debug("Waiting for ELB to consider instances healthy.")
elb_connection = boto3_conn(module,
conn_type='client',
resource='elb',
region=region,
endpoint=ec2_url,
**aws_connect_params)
wait_timeout = time.time() + wait_timeout
healthy_instances = elb_healthy(asg_connection, elb_connection, module, group_name)
while healthy_instances < as_group.get('MinSize') and wait_timeout > time.time():
healthy_instances = elb_healthy(asg_connection, elb_connection, module, group_name)
log.debug("ELB thinks {0} instances are healthy.".format(healthy_instances))
time.sleep(10)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg="Waited too long for ELB instances to be healthy. %s" % time.asctime())
log.debug("Waiting complete. ELB thinks {0} instances are healthy.".format(healthy_instances))
def wait_for_target_group(asg_connection, module, group_name):
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
wait_timeout = module.params.get('wait_timeout')
    # if the health_check_type is ELB, we want to query the target groups directly for instance
    # status so as to avoid the health_check_grace period granted to ASG instances
as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
if as_group.get('TargetGroupARNs') and as_group.get('HealthCheckType') == 'ELB':
log.debug("Waiting for Target Group to consider instances healthy.")
elbv2_connection = boto3_conn(module,
conn_type='client',
resource='elbv2',
region=region,
endpoint=ec2_url,
**aws_connect_params)
wait_timeout = time.time() + wait_timeout
healthy_instances = tg_healthy(asg_connection, elbv2_connection, module, group_name)
while healthy_instances < as_group.get('MinSize') and wait_timeout > time.time():
healthy_instances = tg_healthy(asg_connection, elbv2_connection, module, group_name)
log.debug("Target Group thinks {0} instances are healthy.".format(healthy_instances))
time.sleep(10)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg="Waited too long for ELB instances to be healthy. %s" % time.asctime())
log.debug("Waiting complete. Target Group thinks {0} instances are healthy.".format(healthy_instances))
def suspend_processes(ec2_connection, as_group, module):
suspend_processes = set(module.params.get('suspend_processes'))
try:
suspended_processes = set([p['ProcessName'] for p in as_group['SuspendedProcesses']])
except AttributeError:
# New ASG being created, no suspended_processes defined yet
suspended_processes = set()
if suspend_processes == suspended_processes:
return False
resume_processes = list(suspended_processes - suspend_processes)
if resume_processes:
resume_asg_processes(ec2_connection, module.params.get('name'), resume_processes)
if suspend_processes:
suspend_asg_processes(ec2_connection, module.params.get('name'), list(suspend_processes))
return True
@AWSRetry.backoff(tries=3, delay=0.1)
def create_autoscaling_group(connection, module):
group_name = module.params.get('name')
load_balancers = module.params['load_balancers']
target_group_arns = module.params['target_group_arns']
availability_zones = module.params['availability_zones']
launch_config_name = module.params.get('launch_config_name')
min_size = module.params['min_size']
max_size = module.params['max_size']
placement_group = module.params.get('placement_group')
desired_capacity = module.params.get('desired_capacity')
vpc_zone_identifier = module.params.get('vpc_zone_identifier')
set_tags = module.params.get('tags')
health_check_period = module.params.get('health_check_period')
health_check_type = module.params.get('health_check_type')
default_cooldown = module.params.get('default_cooldown')
wait_for_instances = module.params.get('wait_for_instances')
as_groups = connection.describe_auto_scaling_groups(AutoScalingGroupNames=[group_name])
wait_timeout = module.params.get('wait_timeout')
termination_policies = module.params.get('termination_policies')
notification_topic = module.params.get('notification_topic')
notification_types = module.params.get('notification_types')
if not vpc_zone_identifier and not availability_zones:
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
ec2_connection = boto3_conn(module,
conn_type='client',
resource='ec2',
region=region,
endpoint=ec2_url,
**aws_connect_params)
elif vpc_zone_identifier:
vpc_zone_identifier = ','.join(vpc_zone_identifier)
asg_tags = []
for tag in set_tags:
for k, v in tag.items():
if k != 'propagate_at_launch':
asg_tags.append(dict(Key=k,
Value=v,
PropagateAtLaunch=bool(tag.get('propagate_at_launch', True)),
ResourceType='auto-scaling-group',
ResourceId=group_name))
if not as_groups.get('AutoScalingGroups'):
if not vpc_zone_identifier and not availability_zones:
availability_zones = module.params['availability_zones'] = [zone['ZoneName'] for
zone in ec2_connection.describe_availability_zones()['AvailabilityZones']]
enforce_required_arguments(module)
launch_configs = describe_launch_configurations(connection, launch_config_name)
if len(launch_configs['LaunchConfigurations']) == 0:
module.fail_json(msg="No launch config found with name %s" % launch_config_name)
ag = dict(
AutoScalingGroupName=group_name,
LaunchConfigurationName=launch_configs['LaunchConfigurations'][0]['LaunchConfigurationName'],
MinSize=min_size,
MaxSize=max_size,
DesiredCapacity=desired_capacity,
Tags=asg_tags,
HealthCheckGracePeriod=health_check_period,
HealthCheckType=health_check_type,
DefaultCooldown=default_cooldown,
TerminationPolicies=termination_policies)
if vpc_zone_identifier:
ag['VPCZoneIdentifier'] = vpc_zone_identifier
if availability_zones:
ag['AvailabilityZones'] = availability_zones
if placement_group:
ag['PlacementGroup'] = placement_group
if load_balancers:
ag['LoadBalancerNames'] = load_balancers
if target_group_arns:
ag['TargetGroupARNs'] = target_group_arns
try:
create_asg(connection, **ag)
all_ag = describe_autoscaling_groups(connection, group_name)
if len(all_ag) == 0:
module.fail_json(msg="No auto scaling group found with the name %s" % group_name)
as_group = all_ag[0]
suspend_processes(connection, as_group, module)
if wait_for_instances:
wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
if load_balancers:
wait_for_elb(connection, module, group_name)
            # Wait for target group health if target group(s) are defined
if target_group_arns:
wait_for_target_group(connection, module, group_name)
if notification_topic:
put_notification_config(connection, group_name, notification_topic, notification_types)
as_group = describe_autoscaling_groups(connection, group_name)[0]
asg_properties = get_properties(as_group, module)
changed = True
return changed, asg_properties
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Failed to create Autoscaling Group.",
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except botocore.exceptions.BotoCoreError as e:
module.fail_json(msg="Failed to create Autoscaling Group.",
exception=traceback.format_exc())
else:
as_group = as_groups['AutoScalingGroups'][0]
initial_asg_properties = get_properties(as_group, module)
changed = False
if suspend_processes(connection, as_group, module):
changed = True
# process tag changes
if len(set_tags) > 0:
have_tags = as_group.get('Tags')
want_tags = asg_tags
dead_tags = []
have_tag_keyvals = [x['Key'] for x in have_tags]
want_tag_keyvals = [x['Key'] for x in want_tags]
for dead_tag in set(have_tag_keyvals).difference(want_tag_keyvals):
changed = True
dead_tags.append(dict(ResourceId=as_group['AutoScalingGroupName'],
ResourceType='auto-scaling-group', Key=dead_tag))
have_tags = [have_tag for have_tag in have_tags if have_tag['Key'] != dead_tag]
if dead_tags:
connection.delete_tags(Tags=dead_tags)
zipped = zip(have_tags, want_tags)
if len(have_tags) != len(want_tags) or not all(x == y for x, y in zipped):
changed = True
connection.create_or_update_tags(Tags=asg_tags)
# Handle load balancer attachments/detachments
# Attach load balancers if they are specified but none currently exist
if load_balancers and not as_group['LoadBalancerNames']:
changed = True
try:
attach_load_balancers(connection, group_name, load_balancers)
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Failed to update Autoscaling Group.",
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except botocore.exceptions.BotoCoreError as e:
module.fail_json(msg="Failed to update Autoscaling Group.",
exception=traceback.format_exc())
# Update load balancers if they are specified and one or more already exists
elif as_group['LoadBalancerNames']:
change_load_balancers = load_balancers is not None
# Get differences
if not load_balancers:
load_balancers = list()
wanted_elbs = set(load_balancers)
has_elbs = set(as_group['LoadBalancerNames'])
# check if all requested are already existing
if has_elbs - wanted_elbs and change_load_balancers:
# if wanted contains less than existing, then we need to delete some
elbs_to_detach = has_elbs.difference(wanted_elbs)
if elbs_to_detach:
changed = True
detach_load_balancers(connection, group_name, list(elbs_to_detach))
if wanted_elbs - has_elbs:
# if has contains less than wanted, then we need to add some
elbs_to_attach = wanted_elbs.difference(has_elbs)
if elbs_to_attach:
changed = True
attach_load_balancers(connection, group_name, list(elbs_to_attach))
# Handle target group attachments/detachments
# Attach target groups if they are specified but none currently exist
if target_group_arns and not as_group['TargetGroupARNs']:
changed = True
try:
attach_lb_target_groups(connection, group_name, target_group_arns)
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Failed to update Autoscaling Group.",
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except botocore.exceptions.BotoCoreError as e:
module.fail_json(msg="Failed to update Autoscaling Group.",
exception=traceback.format_exc())
# Update target groups if they are specified and one or more already exists
elif target_group_arns is not None and as_group['TargetGroupARNs']:
# Get differences
wanted_tgs = set(target_group_arns)
has_tgs = set(as_group['TargetGroupARNs'])
# check if all requested are already existing
if has_tgs.issuperset(wanted_tgs):
# if wanted contains less than existing, then we need to delete some
tgs_to_detach = has_tgs.difference(wanted_tgs)
if tgs_to_detach:
changed = True
detach_lb_target_groups(connection, group_name, list(tgs_to_detach))
if wanted_tgs.issuperset(has_tgs):
# if has contains less than wanted, then we need to add some
tgs_to_attach = wanted_tgs.difference(has_tgs)
if tgs_to_attach:
changed = True
attach_lb_target_groups(connection, group_name, list(tgs_to_attach))
# check for attributes that aren't required for updating an existing ASG
desired_capacity = desired_capacity if desired_capacity is not None else as_group['DesiredCapacity']
min_size = min_size if min_size is not None else as_group['MinSize']
max_size = max_size if max_size is not None else as_group['MaxSize']
launch_config_name = launch_config_name or as_group['LaunchConfigurationName']
launch_configs = describe_launch_configurations(connection, launch_config_name)
if len(launch_configs['LaunchConfigurations']) == 0:
module.fail_json(msg="No launch config found with name %s" % launch_config_name)
ag = dict(
AutoScalingGroupName=group_name,
LaunchConfigurationName=launch_configs['LaunchConfigurations'][0]['LaunchConfigurationName'],
MinSize=min_size,
MaxSize=max_size,
DesiredCapacity=desired_capacity,
HealthCheckGracePeriod=health_check_period,
HealthCheckType=health_check_type,
DefaultCooldown=default_cooldown,
TerminationPolicies=termination_policies)
if availability_zones:
ag['AvailabilityZones'] = availability_zones
if vpc_zone_identifier:
ag['VPCZoneIdentifier'] = vpc_zone_identifier
update_asg(connection, **ag)
if notification_topic:
try:
put_notification_config(connection, group_name, notification_topic, notification_types)
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Failed to update Autoscaling Group notifications.",
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except botocore.exceptions.BotoCoreError as e:
module.fail_json(msg="Failed to update Autoscaling Group notifications.",
exception=traceback.format_exc())
if wait_for_instances:
wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
        # Wait for ELB health if ELB(s) are defined
if load_balancers:
log.debug('\tWAITING FOR ELB HEALTH')
wait_for_elb(connection, module, group_name)
        # Wait for target group health if target group(s) are defined
if target_group_arns:
log.debug('\tWAITING FOR TG HEALTH')
wait_for_target_group(connection, module, group_name)
try:
as_group = describe_autoscaling_groups(connection, group_name)[0]
asg_properties = get_properties(as_group, module)
if asg_properties != initial_asg_properties:
changed = True
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Failed to read existing Autoscaling Groups.",
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except botocore.exceptions.BotoCoreError as e:
module.fail_json(msg="Failed to read existing Autoscaling Groups.",
exception=traceback.format_exc())
return changed, asg_properties
def delete_autoscaling_group(connection, module):
group_name = module.params.get('name')
notification_topic = module.params.get('notification_topic')
wait_for_instances = module.params.get('wait_for_instances')
wait_timeout = module.params.get('wait_timeout')
if notification_topic:
del_notification_config(connection, group_name, notification_topic)
groups = describe_autoscaling_groups(connection, group_name)
if groups:
if not wait_for_instances:
delete_asg(connection, group_name, force_delete=True)
return True
wait_timeout = time.time() + wait_timeout
updated_params = dict(AutoScalingGroupName=group_name, MinSize=0, MaxSize=0, DesiredCapacity=0)
update_asg(connection, **updated_params)
instances = True
while instances and wait_for_instances and wait_timeout >= time.time():
tmp_groups = describe_autoscaling_groups(connection, group_name)
if tmp_groups:
tmp_group = tmp_groups[0]
if not tmp_group.get('Instances'):
instances = False
time.sleep(10)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg="Waited too long for old instances to terminate. %s" % time.asctime())
delete_asg(connection, group_name, force_delete=False)
while describe_autoscaling_groups(connection, group_name):
time.sleep(5)
return True
return False
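# Yield successive n-sized chunks of list l; used to replace instances in batches.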
def get_chunks(l, n):
for i in range(0, len(l), n):
yield l[i:i + n]
def update_size(connection, group, max_size, min_size, dc):
log.debug("setting ASG sizes")
log.debug("minimum size: {0}, desired_capacity: {1}, max size: {2}".format(min_size, dc, max_size))
updated_group = dict()
updated_group['AutoScalingGroupName'] = group['AutoScalingGroupName']
updated_group['MinSize'] = min_size
updated_group['MaxSize'] = max_size
updated_group['DesiredCapacity'] = dc
update_asg(connection, **updated_group)
def replace(connection, module):
batch_size = module.params.get('replace_batch_size')
wait_timeout = module.params.get('wait_timeout')
group_name = module.params.get('name')
max_size = module.params.get('max_size')
min_size = module.params.get('min_size')
desired_capacity = module.params.get('desired_capacity')
lc_check = module.params.get('lc_check')
replace_instances = module.params.get('replace_instances')
as_group = describe_autoscaling_groups(connection, group_name)[0]
wait_for_new_inst(module, connection, group_name, wait_timeout, as_group['MinSize'], 'viable_instances')
props = get_properties(as_group, module)
instances = props['instances']
if replace_instances:
instances = replace_instances
# check to see if instances are replaceable if checking launch configs
new_instances, old_instances = get_instances_by_lc(props, lc_check, instances)
num_new_inst_needed = desired_capacity - len(new_instances)
if lc_check:
if num_new_inst_needed == 0 and old_instances:
log.debug("No new instances needed, but old instances are present. Removing old instances")
terminate_batch(connection, module, old_instances, instances, True)
as_group = describe_autoscaling_groups(connection, group_name)[0]
props = get_properties(as_group, module)
changed = True
return(changed, props)
# we don't want to spin up extra instances if not necessary
if num_new_inst_needed < batch_size:
log.debug("Overriding batch size to {0}".format(num_new_inst_needed))
batch_size = num_new_inst_needed
if not old_instances:
changed = False
return(changed, props)
# check if min_size/max_size/desired capacity have been specified and if not use ASG values
if min_size is None:
min_size = as_group['MinSize']
if max_size is None:
max_size = as_group['MaxSize']
if desired_capacity is None:
desired_capacity = as_group['DesiredCapacity']
# set temporary settings and wait for them to be reached
# This should get overwritten if the number of instances left is less than the batch size.
as_group = describe_autoscaling_groups(connection, group_name)[0]
update_size(connection, as_group, max_size + batch_size, min_size + batch_size, desired_capacity + batch_size)
wait_for_new_inst(module, connection, group_name, wait_timeout, as_group['MinSize'], 'viable_instances')
wait_for_elb(connection, module, group_name)
wait_for_target_group(connection, module, group_name)
as_group = describe_autoscaling_groups(connection, group_name)[0]
props = get_properties(as_group, module)
instances = props['instances']
if replace_instances:
instances = replace_instances
log.debug("beginning main loop")
for i in get_chunks(instances, batch_size):
# break out of this loop if we have enough new instances
break_early, desired_size, term_instances = terminate_batch(connection, module, i, instances, False)
wait_for_term_inst(connection, module, term_instances)
wait_for_new_inst(module, connection, group_name, wait_timeout, desired_size, 'viable_instances')
wait_for_elb(connection, module, group_name)
wait_for_target_group(connection, module, group_name)
as_group = describe_autoscaling_groups(connection, group_name)[0]
if break_early:
log.debug("breaking loop")
break
update_size(connection, as_group, max_size, min_size, desired_capacity)
as_group = describe_autoscaling_groups(connection, group_name)[0]
asg_properties = get_properties(as_group, module)
log.debug("Rolling update complete.")
changed = True
return(changed, asg_properties)
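# Split the ASG's current instances into "new" and "old": by launch config name
# when lc_check is set, otherwise by comparing against the initial instance list.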
def get_instances_by_lc(props, lc_check, initial_instances):
new_instances = []
old_instances = []
# old instances are those that have the old launch config
if lc_check:
for i in props['instances']:
if props['instance_facts'][i]['launch_config_name'] == props['launch_config_name']:
new_instances.append(i)
else:
old_instances.append(i)
else:
log.debug("Comparing initial instances with current: {0}".format(initial_instances))
for i in props['instances']:
if i not in initial_instances:
new_instances.append(i)
else:
old_instances.append(i)
log.debug("New instances: {0}, {1}".format(len(new_instances), new_instances))
log.debug("Old instances: {0}, {1}".format(len(old_instances), old_instances))
return new_instances, old_instances
def list_purgeable_instances(props, lc_check, replace_instances, initial_instances):
instances_to_terminate = []
instances = (inst_id for inst_id in replace_instances if inst_id in props['instances'])
# check to make sure instances given are actually in the given ASG
# and they have a non-current launch config
if lc_check:
for i in instances:
if props['instance_facts'][i]['launch_config_name'] != props['launch_config_name']:
instances_to_terminate.append(i)
else:
for i in instances:
if i in initial_instances:
instances_to_terminate.append(i)
return instances_to_terminate
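# Terminate one batch of replaceable instances (deregistering each from its ELBs
# first) and report whether the rolling-replace loop can stop early.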
def terminate_batch(connection, module, replace_instances, initial_instances, leftovers=False):
batch_size = module.params.get('replace_batch_size')
min_size = module.params.get('min_size')
desired_capacity = module.params.get('desired_capacity')
group_name = module.params.get('name')
lc_check = module.params.get('lc_check')
decrement_capacity = False
break_loop = False
as_group = describe_autoscaling_groups(connection, group_name)[0]
props = get_properties(as_group, module)
desired_size = as_group['MinSize']
new_instances, old_instances = get_instances_by_lc(props, lc_check, initial_instances)
num_new_inst_needed = desired_capacity - len(new_instances)
# check to make sure instances given are actually in the given ASG
# and they have a non-current launch config
instances_to_terminate = list_purgeable_instances(props, lc_check, replace_instances, initial_instances)
log.debug("new instances needed: {0}".format(num_new_inst_needed))
log.debug("new instances: {0}".format(new_instances))
log.debug("old instances: {0}".format(old_instances))
log.debug("batch instances: {0}".format(",".join(instances_to_terminate)))
if num_new_inst_needed == 0:
decrement_capacity = True
if as_group['MinSize'] != min_size:
updated_params = dict(AutoScalingGroupName=as_group['AutoScalingGroupName'], MinSize=min_size)
update_asg(connection, **updated_params)
log.debug("Updating minimum size back to original of {0}".format(min_size))
        # if there are some leftover old instances, but we are already at capacity with new ones
# we don't want to decrement capacity
if leftovers:
decrement_capacity = False
break_loop = True
instances_to_terminate = old_instances
desired_size = min_size
log.debug("No new instances needed")
if num_new_inst_needed < batch_size and num_new_inst_needed != 0:
instances_to_terminate = instances_to_terminate[:num_new_inst_needed]
decrement_capacity = False
break_loop = False
log.debug("{0} new instances needed".format(num_new_inst_needed))
log.debug("decrementing capacity: {0}".format(decrement_capacity))
for instance_id in instances_to_terminate:
elb_dreg(connection, module, group_name, instance_id)
log.debug("terminating instance: {0}".format(instance_id))
terminate_asg_instance(connection, instance_id, decrement_capacity)
# we wait to make sure the machines we marked as Unhealthy are
# no longer in the list
return break_loop, desired_size, instances_to_terminate
def wait_for_term_inst(connection, module, term_instances):
wait_timeout = module.params.get('wait_timeout')
group_name = module.params.get('name')
as_group = describe_autoscaling_groups(connection, group_name)[0]
props = get_properties(as_group, module)
count = 1
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and count > 0:
log.debug("waiting for instances to terminate")
count = 0
as_group = describe_autoscaling_groups(connection, group_name)[0]
props = get_properties(as_group, module)
instance_facts = props['instance_facts']
instances = (i for i in instance_facts if i in term_instances)
for i in instances:
lifecycle = instance_facts[i]['lifecycle_state']
health = instance_facts[i]['health_status']
log.debug("Instance {0} has state of {1},{2}".format(i, lifecycle, health))
if lifecycle == 'Terminating' or health == 'Unhealthy':
count += 1
time.sleep(10)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg="Waited too long for old instances to terminate. %s" % time.asctime())
def wait_for_new_inst(module, connection, group_name, wait_timeout, desired_size, prop):
# make sure we have the latest stats after that last loop.
as_group = describe_autoscaling_groups(connection, group_name)[0]
props = get_properties(as_group, module)
log.debug("Waiting for {0} = {1}, currently {2}".format(prop, desired_size, props[prop]))
# now we make sure that we have enough instances in a viable state
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and desired_size > props[prop]:
log.debug("Waiting for {0} = {1}, currently {2}".format(prop, desired_size, props[prop]))
time.sleep(10)
as_group = describe_autoscaling_groups(connection, group_name)[0]
props = get_properties(as_group, module)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg="Waited too long for new instances to become viable. %s" % time.asctime())
log.debug("Reached {0}: {1}".format(prop, desired_size))
return props
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True, type='str'),
load_balancers=dict(type='list'),
target_group_arns=dict(type='list'),
availability_zones=dict(type='list'),
launch_config_name=dict(type='str'),
min_size=dict(type='int'),
max_size=dict(type='int'),
placement_group=dict(type='str'),
desired_capacity=dict(type='int'),
vpc_zone_identifier=dict(type='list'),
replace_batch_size=dict(type='int', default=1),
replace_all_instances=dict(type='bool', default=False),
replace_instances=dict(type='list', default=[]),
lc_check=dict(type='bool', default=True),
wait_timeout=dict(type='int', default=300),
state=dict(default='present', choices=['present', 'absent']),
tags=dict(type='list', default=[]),
health_check_period=dict(type='int', default=300),
health_check_type=dict(default='EC2', choices=['EC2', 'ELB']),
default_cooldown=dict(type='int', default=300),
wait_for_instances=dict(type='bool', default=True),
termination_policies=dict(type='list', default='Default'),
notification_topic=dict(type='str', default=None),
notification_types=dict(type='list', default=[
'autoscaling:EC2_INSTANCE_LAUNCH',
'autoscaling:EC2_INSTANCE_LAUNCH_ERROR',
'autoscaling:EC2_INSTANCE_TERMINATE',
'autoscaling:EC2_INSTANCE_TERMINATE_ERROR'
]),
suspend_processes=dict(type='list', default=[])
),
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[['replace_all_instances', 'replace_instances']]
)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
state = module.params.get('state')
replace_instances = module.params.get('replace_instances')
replace_all_instances = module.params.get('replace_all_instances')
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
try:
connection = boto3_conn(module,
conn_type='client',
resource='autoscaling',
region=region,
endpoint=ec2_url,
**aws_connect_params)
except (botocore.exceptions.NoCredentialsError, botocore.exceptions.ProfileNotFound) as e:
module.fail_json(msg="Can't authorize connection. Check your credentials and profile.",
exceptions=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
changed = create_changed = replace_changed = False
if state == 'present':
create_changed, asg_properties = create_autoscaling_group(connection, module)
elif state == 'absent':
changed = delete_autoscaling_group(connection, module)
module.exit_json(changed=changed)
if replace_all_instances or replace_instances:
replace_changed, asg_properties = replace(connection, module)
if create_changed or replace_changed:
changed = True
module.exit_json(changed=changed, **asg_properties)
if __name__ == '__main__':
main()
| gpl-3.0 | 7,692,306,080,514,071,000 | 42.372076 | 158 | 0.645728 | false |
zhixingchou/Adminset_Zabbix | adminset/views.py | 1 | 11356 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from django.shortcuts import redirect
from jsonrpc import jsonrpc_method
from jsonrpc.proxy import ServiceProxy
from . import zabbix_api
import util
import os, json
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
import datetime
from zabbix_api import *
from zabbix_Graph_api import *
def index(request):
return redirect('/navi/')
# zabbix api -- by zhouzx
def listapi(request):
method = request.GET.get('method')
# zbhost_allhost_getlist = method + ".getlist"
# s = ServiceProxy('http://192.168.2.159:8400/json/')
# data = s.zbhost_allhost.getlist()
# print data
print method
if method == "zbhost_allhost":
s = ServiceProxy('http://192.168.2.159:8000/json/')
data = s.zbhost_allhost.getlist()
print json.dumps(data)
print type(json.dumps(data))
# print ServiceProxy('http://192.168.2.159:8400/json/').zbhost_allhost.getlist()
return HttpResponse(json.dumps(data), content_type='application/json; charset=utf-8')
elif method == "zabbix_gettem":
s = ServiceProxy('http://192.168.2.159:8000/json/')
tem = s.zabbix_gettem.getlist()
return HttpResponse(json.dumps(tem), content_type='application/json; charset=utf-8')
elif method == "zabbix_tem":
s = ServiceProxy('http://192.168.2.159:8000/json/')
host_tem = s.zabbix_tem.getlist()
return HttpResponse(json.dumps(host_tem), content_type='application/json; charset=utf-8')
elif method == "zbhost":
s = ServiceProxy('http://192.168.2.159:8000/json/')
zbhost = s.zbhost.getlist()
return HttpResponse(json.dumps(zbhost), content_type='application/json; charset=utf-8')
elif method == "zabbix":
s = ServiceProxy('http://192.168.2.159:8000/json/')
zabbixhost = s.zabbix.getlist()
return HttpResponse(json.dumps(zabbixhost), content_type='application/json; charset=utf-8')
else:
data = {
u'result': u'{"code": 1, "result": [{"host": "Zabbix server", "hostid": "10084"}, {"host": "192.168.2.159", "hostid": "10107"}]}',
u'jsonrpc': u'1.0', u'id': u'48e8787a-ad68-11e7-be94-000c29a6a1c8', u'error': None}
data1 = json.dumps(data)
type(data1)
return HttpResponse(json.dumps(data))
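# Forward a '<method>.get' JSON-RPC call (currently only 'graph') built from the
# request's query string to the backend service.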
def getapi(request):
method = request.GET.get('method')
print method
data = {}
data['method'] = method + '.get'
data['params'] = {
"m_table":request.GET.get('m_table',None),
'field': request.GET.get('field', None),
's_table': request.GET.get('s_table', None),
'where': {'id': int(request.GET.get('id'))}
}
if method == "graph":
s = ServiceProxy('http://192.168.2.159:8000/json/')
zbx_graph = s.graph.get(data)
return HttpResponse(json.dumps(zbx_graph))
@csrf_exempt
def zabbixapi(request):
method = request.POST.get('method')
hostids = request.POST.get('hostids')
groupid = request.POST.get('groupid')
data = {}
data['method'] = 'zabbix.' + method
data['params'] = {
"hostids": hostids,
"groupid": groupid
}
# method = request.POST.get('method')
print data['method']
if data['method'] == "zabbix.link_tem":
s = ServiceProxy('http://192.168.2.159:8000/json/')
link_tem = s.zabbix.link_tem(data)
return HttpResponse(json.dumps(link_tem))
elif data['method'] == "zabbix.add":
s = ServiceProxy('http://192.168.2.159:8000/json/')
create_zbx_host = s.zabbix.add(data)
return HttpResponse(json.dumps(create_zbx_host))
@csrf_exempt
def zabbix_template(request):
method = request.POST.get('method')
hostid = request.POST.get('hostid')
templateid = request.POST.get('templateid')
data = {}
data['method'] = 'zabbix_template.' + method
data['params'] = {
"hostids": hostid,
"templateid": templateid
}
# method = request.POST.get('method')
print data['method']
if data['method'] == "zabbix_template.unlink_tem":
s = ServiceProxy('http://192.168.2.159:8000/json/')
unlink_tem = s.zabbix_template.unlink_tem(data)
return HttpResponse(json.dumps(unlink_tem))
@jsonrpc_method('graph.get')
def graph_get(request, arg1):
ret = []
stime = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
data_where = {}
# add centos7 "Network traffic on ens33"
monitor_name = ["CPU load", "CPU utilization", "Memory usage", "Disk space usage /", "Network traffic on eth0",
"Network traffic on em1", "Network traffic on ens33"]
output = ['id', 'hostid']
data = arg1['params']
# util.write_log('api').debug('data %s' % data)
# {u'field': None, u'm_table': None, u'where': {u'id': 33}, u's_table': None}
    fields = data.get('output', output)  # dict.get() returns the value for the given key, or the default if the key is absent
where = data.get('where', None)
data_where['cmdb_hostid'] = where['id']
if not where:
return json.dumps({'code': 1, 'errmsg': 'must need a condition'})
# util.graph_file(app.config['zabbix_img_url'])
util.graph_file(os.path.join(os.path.dirname(os.path.realpath(__file__)),'../static/zabbix'))
result = db.Cursor( util.get_config(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'service.conf'), 'api')).get_one_result('zbhost', fields,
data_where) # SELECT id,hostid FROM zbhost WHERE cmdb_hostid=33
# util.write_log('api').debug('result is: %s' % result)
grapsh_id = zabbix_api.Zabbix( util.get_config(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'service.conf'), 'zabbix')).get_graphid(str(result['hostid']))
# grapsh_id = app.config['zabbix'].get_graphid("10107")
for i in grapsh_id:
if i['name'] in monitor_name:
# util.write_log('api').debug('stime: %s' % stime)
values = {"graphid": str(i['graphid']), "stime": stime, "period": 3600, "width": 800, "height": 200}
graph = ZabbixGraph("http://192.168.2.22:6080/index.php", "Admin",
"zabbix")
ret_data = graph.GetGraph("http://192.168.2.22:6080/chart2.php", values, os.path.join(os.path.dirname(os.path.realpath(__file__)),'../static/zabbix'))
ret.append(ret_data)
img_url = util.graph_img(os.path.join(os.path.dirname(os.path.realpath(__file__)),'../static/zabbix'))
return json.dumps({'code': 0, 'result': img_url})
@jsonrpc_method('zabbix.add')
def zabbix_add(request, arg1):
data = arg1['params']
hosts = data['hostids'].split(",")
result = create_zabbix_host(hosts, data['groupid'])
return json.dumps({'code': 0, 'result': 'create zabbix host %s scucess' % result[0]['hostids']})
@jsonrpc_method('zabbix.getlist')
def zabbix_select(request):
hostgroup = zabbix_api.Zabbix(
util.get_config(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'service.conf'),'zabbix')).get_hostgroup()
return json.dumps({'code': 0, 'result': hostgroup})
@jsonrpc_method('zbhost.getlist')
def zbhost_select(request):
datadict = {}
ret = []
    # join the zbhost table with the cmdb_host table (by zhoux)
init()
    # update by zhouzx (delete the 'host' field)
fields = ['id', 'cmdb_hostid', 'hostid', 'host', 'ip']
zabbix_hosts = db.Cursor(util.get_config(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'service.conf'),'api')).get_results('zbhost', fields)
hostid = [str(zb["cmdb_hostid"]) for zb in zabbix_hosts]
server_hosts = db.Cursor(util.get_config(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'service.conf'),'api')).get_results('cmdb_host', ["id"])
for i in server_hosts:
if str(i["id"]) not in hostid:
datadict["id"] = i["id"]
# all_host = app.config['cursor'].get_results('cmdb_host',["ip"],datadict)
get_ip = db.Cursor(util.get_config(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'service.conf'),'api')).get_where_results('cmdb_host', ["id", "ip"], datadict)
ret.append(get_ip[0])
return json.dumps({'code': 0, 'result': ret})
@jsonrpc_method('zabbix_template.unlink_tem')
def zabbix_unlink_tem(request, arg1):
work_dir = os.path.dirname(os.path.realpath(__file__))
service_conf = os.path.join(work_dir, 'service.conf')
zabbix_config = util.get_config(service_conf, 'zabbix')
result = []
data = arg1['params']
print data
data_host = data['hostids'].split(',')
for i in data_host:
result.append(zabbix_api.Zabbix(zabbix_config).unlink_template(int(i), data['templateid']))
return json.dumps({'code': 0, 'result': result})
@jsonrpc_method('zabbix.link_tem')
def zabbix_link_tem(request, arg1):
work_dir = os.path.dirname(os.path.realpath(__file__))
service_conf = os.path.join(work_dir, 'service.conf')
zabbix_config = util.get_config(service_conf, 'zabbix')
result = []
tem = []
template = {}
data = arg1['params']
print data
# {u'hostids': [u'10157,10158'], u'groupid': u'10001'}
data_host = data['hostids'].split(',')
print data_host
for i in data_host:
if len(zabbix_api.Zabbix(zabbix_config).hostid_get_template(i)[0]['parentTemplates']) == 0:
result.append(zabbix_api.Zabbix(zabbix_config).link_template(int(i), data['groupid']))
else:
template['templateid'] = data['groupid']
data_mu = zabbix_api.Zabbix(zabbix_config).hostid_get_template(i)[0]['parentTemplates']
data_mu.append(template)
result.append(zabbix_api.Zabbix(zabbix_config).link_template(int(i), data_mu))
return json.dumps({'code': 0, 'result': result})
@jsonrpc_method('zbhost_allhost.getlist')
def zbhost_allhost_select(request):
work_dir = os.path.dirname(os.path.realpath(__file__))
service_conf = os.path.join(work_dir, 'service.conf')
zabbix_config = util.get_config(service_conf, 'zabbix')
data = zabbix_api.Zabbix(zabbix_config).get_hosts()
print json.dumps({'code': 0, 'result': data})
return json.dumps({'code': 0, 'result': data})
@jsonrpc_method('zabbix_gettem.getlist')
def zabbix_gettem_select(request):
work_dir = os.path.dirname(os.path.realpath(__file__))
service_conf = os.path.join(work_dir, 'service.conf')
zabbix_config = util.get_config(service_conf, 'zabbix')
tem = zabbix_api.Zabbix(zabbix_config).get_template()
print json.dumps({'code': 0, 'result': tem})
return json.dumps({'code': 0, 'result': tem})
@jsonrpc_method('zabbix_tem.getlist')
def zabbix_gettem_select(request):
work_dir = os.path.dirname(os.path.realpath(__file__))
service_conf = os.path.join(work_dir, 'service.conf')
zabbix_config = util.get_config(service_conf, 'zabbix')
tem = zabbix_api.Zabbix(zabbix_config).get_host_tem()
print json.dumps({'code': 0, 'result': tem})
return json.dumps({'code': 0, 'result': tem})
@jsonrpc_method('myapp.sayHello')
def whats_the_time(request, name='Lester'):
return "Hello %s" % name
@jsonrpc_method('myapp.gimmeThat', authenticated=True)
def something_special(request, secret_data):
return {'sauce': ['authenticated', 'sauce']}
| apache-2.0 | -4,084,114,825,781,418,500 | 39.192171 | 183 | 0.62573 | false |
ric2b/Vivaldi-browser | chromium/third_party/blink/renderer/devtools/scripts/build/build_debug_applications.py | 1 | 2057 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Builds applications in debug mode:
- Copies the module directories into their destinations.
- Copies app.html as-is.
"""
from os import path
from os.path import join
import os
import shutil
import sys
import modular_build
def main(argv):
try:
input_path_flag_index = argv.index('--input_path')
input_path = argv[input_path_flag_index + 1]
output_path_flag_index = argv.index('--output_path')
output_path = argv[output_path_flag_index + 1]
except:
print('Usage: %s app_1 app_2 ... app_N --input_path <input_path> --output_path <output_path>' % argv[0])
raise
symlink_dir_or_copy(input_path, output_path)
def symlink_dir_or_copy(src, dest):
if hasattr(os, 'symlink'):
if path.exists(dest):
if os.path.islink(dest):
os.unlink(dest)
else:
shutil.rmtree(dest)
os.symlink(join(os.getcwd(), src), dest)
else:
for filename in os.listdir(src):
new_src = join(os.getcwd(), src, filename)
if os.path.isdir(new_src):
copy_dir(new_src, join(dest, filename))
else:
copy_file(new_src, join(dest, filename), safe=True)
def copy_file(src, dest, safe=False):
if safe and path.exists(dest):
os.remove(dest)
shutil.copy(src, dest)
def copy_dir(src, dest):
if path.exists(dest):
shutil.rmtree(dest)
for src_dir, dirs, files in os.walk(src):
subpath = path.relpath(src_dir, src)
dest_dir = path.normpath(join(dest, subpath))
os.mkdir(dest_dir)
for name in files:
src_name = join(os.getcwd(), src_dir, name)
dest_name = join(dest_dir, name)
copy_file(src_name, dest_name)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| bsd-3-clause | -6,639,254,433,883,959,000 | 27.444444 | 112 | 0.599121 | false |
jabbalaci/jabbapylib | demos/browser_automation/splinter_2.py | 1 | 1090 | #!/usr/bin/env python
"""
splinter
http://splinter.cobrateam.info
"""
from time import sleep
from splinter.browser import Browser
#url = 'http://simile.mit.edu/crowbar/test.html'
#url = 'http://dl.dropbox.com/u/144888/hello_js.html'
url = 'http://www.ncbi.nlm.nih.gov/nuccore/CP002059.1'
#url = 'http://translate.google.com/#en|fr|game'
def main():
#browser = Browser('zope.testbrowser')
#browser = Browser('webdriver.chrome')
browser = Browser()
browser.visit(url)
#browser.execute_script("var win = window.open(); win.document.write('<html><head><title>Generated HTML of ' + location.href + '</title></head><pre>' + document.documentElement.innerHTML.replace(/&/g, '&').replace(/</g, '<') + '</pre></html>'); win.document.close(); void 0;")
while 'ORIGIN' not in browser.html:
sleep(5)
f = open("/tmp/source.html", "w")
print >>f, browser.html
f.close()
browser.quit()
print '__END__'
#############################################################################
if __name__ == "__main__":
main() | gpl-3.0 | -3,163,800,249,660,252,700 | 26.974359 | 288 | 0.577064 | false |
OpusVL/odoo | openerp/addons/base/module/wizard/base_module_upgrade.py | 1 | 5096 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import openerp
from openerp.osv import osv, fields
from openerp.tools.translate import _
class base_module_upgrade(osv.osv_memory):
""" Module Upgrade """
_name = "base.module.upgrade"
_description = "Module Upgrade"
_columns = {
'module_info': fields.text('Modules to Update',readonly=True),
}
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
res = super(base_module_upgrade, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar,submenu=False)
if view_type != 'form':
return res
context = {} if context is None else context
record_id = context and context.get('active_id', False) or False
active_model = context.get('active_model')
if (not record_id) or (not active_model):
return res
ids = self.get_module_list(cr, uid, context=context)
if not ids:
res['arch'] = '''<form string="Upgrade Completed" version="7.0">
<separator string="Upgrade Completed" colspan="4"/>
<footer>
<button name="config" string="Start Configuration" type="object" class="oe_highlight"/> or
<button special="cancel" string="Close" class="oe_link"/>
</footer>
</form>'''
return res
def get_module_list(self, cr, uid, context=None):
mod_obj = self.pool.get('ir.module.module')
ids = mod_obj.search(cr, uid, [
('state', 'in', ['to upgrade', 'to remove', 'to install'])])
return ids
def default_get(self, cr, uid, fields, context=None):
mod_obj = self.pool.get('ir.module.module')
ids = self.get_module_list(cr, uid, context=context)
res = mod_obj.read(cr, uid, ids, ['name','state'], context)
return {'module_info': '\n'.join(map(lambda x: x['name']+' : '+x['state'], res))}
def upgrade_module_cancel(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.module.module')
to_installed_ids = mod_obj.search(cr, uid, [
('state', 'in', ['to upgrade', 'to remove'])])
if to_installed_ids:
mod_obj.write(cr, uid, to_installed_ids, {'state': 'installed'}, context=context)
to_uninstalled_ids = mod_obj.search(cr, uid, [
('state', '=', 'to install')])
if to_uninstalled_ids:
mod_obj.write(cr, uid, to_uninstalled_ids, {'state': 'uninstalled'}, context=context)
return {'type': 'ir.actions.act_window_close'}
def upgrade_module(self, cr, uid, ids, context=None):
ir_module = self.pool.get('ir.module.module')
# install/upgrade: double-check preconditions
ids = ir_module.search(cr, uid, [('state', 'in', ['to upgrade', 'to install'])])
if ids:
cr.execute("""SELECT d.name FROM ir_module_module m
JOIN ir_module_module_dependency d ON (m.id = d.module_id)
LEFT JOIN ir_module_module m2 ON (d.name = m2.name)
WHERE m.id in %s and (m2.state IS NULL or m2.state IN %s)""",
(tuple(ids), ('uninstalled',)))
unmet_packages = [x[0] for x in cr.fetchall()]
if unmet_packages:
raise osv.except_osv(_('Unmet Dependency!'),
_('Following modules are not installed or unknown: %s') % ('\n\n' + '\n'.join(unmet_packages)))
ir_module.download(cr, uid, ids, context=context)
cr.commit() # save before re-creating cursor below
openerp.api.Environment.reset()
openerp.modules.registry.RegistryManager.new(cr.dbname, update_module=True)
return {'type': 'ir.actions.act_window_close'}
def config(self, cr, uid, ids, context=None):
return self.pool.get('res.config').next(cr, uid, [], context=context)
| agpl-3.0 | -3,829,129,742,672,154,600 | 45.327273 | 157 | 0.566719 | false |
hydratk/hydratk-lib-network | src/hydratk/lib/network/email/client.py | 1 | 1118 | # -*- coding: utf-8 -*-
"""Generic EMAIL client factory
.. module:: network.email.client
:platform: Unix
:synopsis: Generic EMAIL client factory
.. moduleauthor:: Petr Rašek <[email protected]>
"""
from hydratk.core.masterhead import MasterHead
from importlib import import_module
protocols = {
'SMTP': 'smtp_client',
'POP': 'pop_client',
'IMAP': 'imap_client'
}
def EmailClient(protocol, *args, **kwargs):
"""Email client factory method
Args:
protocol (str): Email protocol, SMTP|POP|IMAP
args (args): arguments
kwargs (kwargs): key value arguments
Returns:
obj: EmailClient
Raises:
error: NotImplementedError
"""
protocol = protocol.upper()
if (protocol in protocols):
mh = MasterHead.get_head()
mod = import_module(
'hydratk.lib.network.email.{0}'.format(protocols[protocol]))
mh.find_module('hydratk.lib.network.email.client', None)
return mod.EmailClient(*args, **kwargs)
else:
raise NotImplementedError('Unknown protocol:{0}'.format(protocol))
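
# A minimal usage sketch for the factory above. The protocol string picks the
# concrete client module from the `protocols` mapping; any extra positional or
# keyword arguments are forwarded unchanged to that module's EmailClient class,
# so none are assumed here (their exact signatures depend on the protocol).
def _example_email_client_usage():
    smtp = EmailClient('smtp')    # case-insensitive, resolves to smtp_client
    imap = EmailClient('IMAP')    # resolves to imap_client
    return smtp, imap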
| bsd-3-clause | 3,173,219,289,923,795,000 | 23.822222 | 74 | 0.633841 | false |
cmtm/networkx | networkx/algorithms/tests/test_dag.py | 1 | 12697 | from itertools import combinations
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_in
from nose.tools import assert_raises
from nose.tools import assert_true
from nose.tools import ok_
import networkx as nx
from networkx.testing.utils import assert_edges_equal
from networkx.utils import consume
class TestDagLongestPath(object):
"""Unit tests for computing the longest path in a directed acyclic
graph.
"""
def test_unweighted(self):
edges = [(1, 2), (2, 3), (2, 4), (3, 5), (5, 6), (5, 7)]
G = nx.DiGraph(edges)
assert_equal(nx.dag_longest_path(G), [1, 2, 3, 5, 6])
edges = [(1, 2), (2, 3), (3, 4), (4, 5), (1, 3), (1, 5), (3, 5)]
G = nx.DiGraph(edges)
assert_equal(nx.dag_longest_path(G), [1, 2, 3, 4, 5])
def test_weighted(self):
G = nx.DiGraph()
edges = [(1, 2, -5), (2, 3, 1), (3, 4, 1), (4, 5, 0), (3, 5, 4),
(1, 6, 2)]
G.add_weighted_edges_from(edges)
assert_equal(nx.dag_longest_path(G), [2, 3, 5])
def test_undirected_not_implemented(self):
G = nx.Graph()
assert_raises(nx.NetworkXNotImplemented, nx.dag_longest_path, G)
def test_unorderable_nodes(self):
"""Tests that computing the longest path does not depend on
nodes being orderable.
For more information, see issue #1989.
"""
# TODO In Python 3, instances of the `object` class are
# unorderable by default, so we wouldn't need to define our own
# class here, we could just instantiate an instance of the
# `object` class. However, we still support Python 2; when
# support for Python 2 is dropped, this test can be simplified
# by replacing `Unorderable()` by `object()`.
class Unorderable(object):
def __lt__(self, other):
error_msg = "< not supported between instances of " \
"{} and {}".format(type(self).__name__, type(other).__name__)
raise TypeError(error_msg)
# Create the directed path graph on four nodes in a diamond shape,
# with nodes represented as (unorderable) Python objects.
nodes = [Unorderable() for n in range(4)]
G = nx.DiGraph()
G.add_edge(nodes[0], nodes[1])
G.add_edge(nodes[0], nodes[2])
G.add_edge(nodes[2], nodes[3])
G.add_edge(nodes[1], nodes[3])
# this will raise NotImplementedError when nodes need to be ordered
nx.dag_longest_path(G)
class TestDagLongestPathLength(object):
"""Unit tests for computing the length of a longest path in a
directed acyclic graph.
"""
def test_unweighted(self):
edges = [(1, 2), (2, 3), (2, 4), (3, 5), (5, 6), (5, 7)]
G = nx.DiGraph(edges)
assert_equal(nx.dag_longest_path_length(G), 4)
edges = [(1, 2), (2, 3), (3, 4), (4, 5), (1, 3), (1, 5), (3, 5)]
G = nx.DiGraph(edges)
assert_equal(nx.dag_longest_path_length(G), 4)
# test degenerate graphs
G = nx.DiGraph()
G.add_node(1)
assert_equal(nx.dag_longest_path_length(G), 0)
def test_undirected_not_implemented(self):
G = nx.Graph()
assert_raises(nx.NetworkXNotImplemented, nx.dag_longest_path_length, G)
def test_weighted(self):
edges = [(1, 2, -5), (2, 3, 1), (3, 4, 1), (4, 5, 0), (3, 5, 4),
(1, 6, 2)]
G = nx.DiGraph()
G.add_weighted_edges_from(edges)
assert_equal(nx.dag_longest_path_length(G), 5)
class TestDAG:
def setUp(self):
pass
def test_topological_sort1(self):
DG = nx.DiGraph([(1, 2), (1, 3), (2, 3)])
for algorithm in [nx.topological_sort,
nx.lexicographical_topological_sort]:
assert_equal(tuple(algorithm(DG)), (1, 2, 3))
DG.add_edge(3, 2)
for algorithm in [nx.topological_sort,
nx.lexicographical_topological_sort]:
assert_raises(nx.NetworkXUnfeasible, consume, algorithm(DG))
DG.remove_edge(2, 3)
for algorithm in [nx.topological_sort,
nx.lexicographical_topological_sort]:
assert_equal(tuple(algorithm(DG)), (1, 3, 2))
DG.remove_edge(3, 2)
assert_in(tuple(nx.topological_sort(DG)), {(1, 2, 3), (1, 3, 2)})
assert_equal(tuple(nx.lexicographical_topological_sort(DG)), (1, 2, 3))
def test_is_directed_acyclic_graph(self):
G = nx.generators.complete_graph(2)
assert_false(nx.is_directed_acyclic_graph(G))
assert_false(nx.is_directed_acyclic_graph(G.to_directed()))
assert_false(nx.is_directed_acyclic_graph(nx.Graph([(3, 4), (4, 5)])))
assert_true(nx.is_directed_acyclic_graph(nx.DiGraph([(3, 4), (4, 5)])))
def test_topological_sort2(self):
DG = nx.DiGraph({1: [2], 2: [3], 3: [4],
4: [5], 5: [1], 11: [12],
12: [13], 13: [14], 14: [15]})
assert_raises(nx.NetworkXUnfeasible, consume, nx.topological_sort(DG))
assert_false(nx.is_directed_acyclic_graph(DG))
DG.remove_edge(1, 2)
consume(nx.topological_sort(DG))
assert_true(nx.is_directed_acyclic_graph(DG))
def test_topological_sort3(self):
DG = nx.DiGraph()
DG.add_edges_from([(1, i) for i in range(2, 5)])
DG.add_edges_from([(2, i) for i in range(5, 9)])
DG.add_edges_from([(6, i) for i in range(9, 12)])
DG.add_edges_from([(4, i) for i in range(12, 15)])
def validate(order):
ok_(isinstance(order, list))
assert_equal(set(order), set(DG))
for u, v in combinations(order, 2):
assert_false(nx.has_path(DG, v, u))
validate(list(nx.topological_sort(DG)))
DG.add_edge(14, 1)
assert_raises(nx.NetworkXUnfeasible, consume, nx.topological_sort(DG))
def test_topological_sort4(self):
G = nx.Graph()
G.add_edge(1, 2)
# Only directed graphs can be topologically sorted.
assert_raises(nx.NetworkXError, consume, nx.topological_sort(G))
def test_topological_sort5(self):
G = nx.DiGraph()
G.add_edge(0, 1)
assert_equal(list(nx.topological_sort(G)), [0, 1])
def test_topological_sort6(self):
for algorithm in [nx.topological_sort,
nx.lexicographical_topological_sort]:
def runtime_error():
DG = nx.DiGraph([(1, 2), (2, 3), (3, 4)])
first = True
for x in algorithm(DG):
if first:
first = False
DG.add_edge(5 - x, 5)
def unfeasible_error():
DG = nx.DiGraph([(1, 2), (2, 3), (3, 4)])
first = True
for x in algorithm(DG):
if first:
first = False
DG.remove_node(4)
def runtime_error2():
DG = nx.DiGraph([(1, 2), (2, 3), (3, 4)])
first = True
for x in algorithm(DG):
if first:
first = False
DG.remove_node(2)
assert_raises(RuntimeError, runtime_error)
assert_raises(RuntimeError, runtime_error2)
assert_raises(nx.NetworkXUnfeasible, unfeasible_error)
def test_ancestors(self):
G = nx.DiGraph()
ancestors = nx.algorithms.dag.ancestors
G.add_edges_from([
(1, 2), (1, 3), (4, 2), (4, 3), (4, 5), (2, 6), (5, 6)])
assert_equal(ancestors(G, 6), set([1, 2, 4, 5]))
assert_equal(ancestors(G, 3), set([1, 4]))
assert_equal(ancestors(G, 1), set())
assert_raises(nx.NetworkXError, ancestors, G, 8)
def test_descendants(self):
G = nx.DiGraph()
descendants = nx.algorithms.dag.descendants
G.add_edges_from([
(1, 2), (1, 3), (4, 2), (4, 3), (4, 5), (2, 6), (5, 6)])
assert_equal(descendants(G, 1), set([2, 3, 6]))
assert_equal(descendants(G, 4), set([2, 3, 5, 6]))
assert_equal(descendants(G, 3), set())
assert_raises(nx.NetworkXError, descendants, G, 8)
def test_transitive_closure(self):
G = nx.DiGraph([(1, 2), (2, 3), (3, 4)])
transitive_closure = nx.algorithms.dag.transitive_closure
solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)]
assert_edges_equal(transitive_closure(G).edges(), solution)
G = nx.DiGraph([(1, 2), (2, 3), (2, 4)])
solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4)]
assert_edges_equal(transitive_closure(G).edges(), solution)
G = nx.Graph([(1, 2), (2, 3), (3, 4)])
assert_raises(nx.NetworkXNotImplemented, transitive_closure, G)
def _check_antichains(self, solution, result):
sol = [frozenset(a) for a in solution]
res = [frozenset(a) for a in result]
assert_true(set(sol) == set(res))
def test_antichains(self):
antichains = nx.algorithms.dag.antichains
G = nx.DiGraph([(1, 2), (2, 3), (3, 4)])
solution = [[], [4], [3], [2], [1]]
self._check_antichains(list(antichains(G)), solution)
G = nx.DiGraph([(1, 2), (2, 3), (2, 4), (3, 5), (5, 6), (5, 7)])
solution = [[], [4], [7], [7, 4], [6], [6, 4], [6, 7], [6, 7, 4],
[5], [5, 4], [3], [3, 4], [2], [1]]
self._check_antichains(list(antichains(G)), solution)
G = nx.DiGraph([(1, 2), (1, 3), (3, 4), (3, 5), (5, 6)])
solution = [[], [6], [5], [4], [4, 6], [4, 5], [3], [2], [2, 6],
[2, 5], [2, 4], [2, 4, 6], [2, 4, 5], [2, 3], [1]]
self._check_antichains(list(antichains(G)), solution)
G = nx.DiGraph({0: [1, 2], 1: [4], 2: [3], 3: [4]})
solution = [[], [4], [3], [2], [1], [1, 3], [1, 2], [0]]
self._check_antichains(list(antichains(G)), solution)
G = nx.DiGraph()
self._check_antichains(list(antichains(G)), [[]])
G = nx.DiGraph()
G.add_nodes_from([0, 1, 2])
solution = [[], [0], [1], [1, 0], [2], [2, 0], [2, 1], [2, 1, 0]]
self._check_antichains(list(antichains(G)), solution)
f = lambda x: list(antichains(x))
G = nx.Graph([(1, 2), (2, 3), (3, 4)])
assert_raises(nx.NetworkXNotImplemented, f, G)
G = nx.DiGraph([(1, 2), (2, 3), (3, 1)])
assert_raises(nx.NetworkXUnfeasible, f, G)
def test_lexicographical_topological_sort(self):
G = nx.DiGraph([(1,2), (2,3), (1,4), (1,5), (2,6)])
assert_equal(list(nx.lexicographical_topological_sort(G)),
[1, 2, 3, 4, 5, 6])
assert_equal(list(nx.lexicographical_topological_sort(
G, key=lambda x: x)),
[1, 2, 3, 4, 5, 6])
assert_equal(list(nx.lexicographical_topological_sort(
G, key=lambda x: -x)),
[1, 5, 4, 2, 6, 3])
def test_is_aperiodic_cycle():
G = nx.DiGraph()
nx.add_cycle(G, [1, 2, 3, 4])
assert_false(nx.is_aperiodic(G))
def test_is_aperiodic_cycle2():
G = nx.DiGraph()
nx.add_cycle(G, [1, 2, 3, 4])
nx.add_cycle(G, [3, 4, 5, 6, 7])
assert_true(nx.is_aperiodic(G))
def test_is_aperiodic_cycle3():
G = nx.DiGraph()
nx.add_cycle(G, [1, 2, 3, 4])
nx.add_cycle(G, [3, 4, 5, 6])
assert_false(nx.is_aperiodic(G))
def test_is_aperiodic_cycle4():
G = nx.DiGraph()
nx.add_cycle(G, [1, 2, 3, 4])
G.add_edge(1, 3)
assert_true(nx.is_aperiodic(G))
def test_is_aperiodic_selfloop():
G = nx.DiGraph()
nx.add_cycle(G, [1, 2, 3, 4])
G.add_edge(1, 1)
assert_true(nx.is_aperiodic(G))
def test_is_aperiodic_raise():
G = nx.Graph()
assert_raises(nx.NetworkXError,
nx.is_aperiodic,
G)
def test_is_aperiodic_bipartite():
# Bipartite graph
G = nx.DiGraph(nx.davis_southern_women_graph())
assert_false(nx.is_aperiodic(G))
def test_is_aperiodic_rary_tree():
G = nx.full_rary_tree(3, 27, create_using=nx.DiGraph())
assert_false(nx.is_aperiodic(G))
def test_is_aperiodic_disconnected():
# disconnected graph
G = nx.DiGraph()
nx.add_cycle(G, [1, 2, 3, 4])
nx.add_cycle(G, [5, 6, 7, 8])
assert_false(nx.is_aperiodic(G))
G.add_edge(1, 3)
G.add_edge(5, 7)
assert_true(nx.is_aperiodic(G))
def test_is_aperiodic_disconnected2():
G = nx.DiGraph()
nx.add_cycle(G, [0, 1, 2])
G.add_edge(3, 3)
assert_false(nx.is_aperiodic(G))
| bsd-3-clause | 2,591,598,625,069,971,500 | 34.66573 | 79 | 0.535087 | false |
maqnius/compscie-mc | particlesim/api.py | 1 | 14109 | # particlesim
# Copyright (C) 2017 Mark Niehues, Stefaan Hessmann, Jaap Pedersen,
# Simon Treu, Hanna Wulkow, Thomas Hadler
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
#
from .total_potential import *
class SystemConfiguration(object):
r"""
Parameters
----------
xyz : ndarray(n,3), float
Position of n particles in x,y,z coordinates.
sigmas : ndarray(n) or float value
Sigma coefficient of lennard jones potential for each particle;
if not array but float value, assigned to all particles.
Default = 1.0 --> assigned to all particles
epsilons : ndarray(n) or float value
Epsilon coefficient of lennard jones potential for each particle;
if not array but float value, assigned to all particles.
Default = 1.0 --> assigned to all particles
charges : ndarray(n) or float value
Charges coefficient of lennard jones potential for each particle;
if not array but float value, assigned to all particles.
Default = 0.0 --> assigned to all particles
box_size : float
Boxsize for cubic simulation box; positive number.
        Default = 12.0
epsilon_r : float,
Relative permittivity constant of system.
Default = 1.0 --> for vacuum by definition
labels : array-like of string
Additional information about the particles.
p_error : int
Max. error for the total ewald summation.
Error = e^-p_error
Default = 10
r_cutoff : float
Cutoff-radius for shortrange Ewald summation
Default = None --> Optimal cutoff is calculated automatically
k_cutoff : float
Cutoff-radius for longrange Ewald summation in reciprocal space.
Dafault = None --> Optimal cutoff is calculated automatically
neighbouring : bool
True: Use neighbouring list for calculation of shortrange energies.
False: Calculate neighbouring with fast_distances function in cython.
Notes
-----
arithmetic mean for sigma and geometric mean for epsilon
arithmetic = (a+b)/2; geometric : sqrt(a*b)
Lorentz Berthelot Rule
lj_cutoff = 2.5 * sigma
"""
def __init__(self, xyz, sigmas= 1.0, epsilons = 1.0, charges=0.0, box_size=12.0, epsilon_r=1.0, labels = [],
p_error=10, r_cutoff = None, k_cutoff = None, neighbouring = False):
if not np.all((xyz>=0)*(xyz<box_size)):
raise ValueError("xyz must be in range of zero to %d" %box_size)
if isinstance(sigmas, (float,int)):
sigmas = np.asarray([float(sigmas)] * len(xyz))
elif not len(xyz) == len(sigmas):
raise TypeError('sigmas must have the same length as particle numbers')
if isinstance(epsilons, (float,int)):
epsilons = np.asarray([float(epsilons)] * len(xyz))
elif not len(xyz) == len(epsilons):
raise TypeError('epsilons must have the same length as particle numbers')
if isinstance(charges, (float,int)):
charges = np.asarray([float(charges)] * len(xyz))
elif not len(xyz) == len(charges):
raise TypeError('charges must have the same length as particle numbers')
self.box_size = box_size
self._volume = box_size ** 3
self.epsilon_r = epsilon_r
self.xyz = xyz * 1.
self.charges = charges
self.sigmas = sigmas
self.epsilons = epsilons
self.labels = labels
self.r_cutoff = r_cutoff
self.k_cutoff = k_cutoff
self._create_lj_mean_parameters()
self._create_lennard_jones_cutoff()
self._neighbouring = neighbouring
self.p_error = p_error
self._total_potential = TotalPotential(self)
if self.box_size <= 2 * max(self.lj_cutoff_matrix.max(),self._total_potential.r_cutoff):
raise ValueError('Box_size to small. Box_size has to be twice the cutoff radius '
'of the Lennard Jones potential.\n'
'box_size = %f\n lj_max = %f, coulomb_cutoff(r_cutoff) = %f \n'
'set box_size to be larger than %f \n '
% (self.box_size, self.lj_cutoff_matrix.max(), self._total_potential.r_cutoff, 2 * max(self.lj_cutoff_matrix.max(),self._total_potential.r_cutoff))
)
@property
def p_error(self):
return self._p_error
@p_error.setter
def p_error(self, value):
if value <= 0:
raise ValueError('p_error must be bigger than zero')
self._p_error = value
@property
def neighbouring(self):
return self._neighbouring
@neighbouring.setter
def neighbouring(self, value):
if not isinstance(value,bool):
raise TypeError
self._total_potential.shortrange.neighbouring = value
self._neighbouring = value
pass
@property
def xyz(self):
return self._xyz
@xyz.setter
def xyz(self, value):
xyz = np.asarray(value,dtype=float)
if not (issubclass(xyz.dtype.type, np.float) or issubclass(xyz.dtype.type, np.integer)):
raise TypeError("values in xyz must be of type float or int")
if xyz.ndim != 2 or xyz.shape[0] < 2 or xyz.shape[1] != 3:
raise ValueError("xyz must be of shape=(n_particles, dim) with n_particles > 1 and dim = 3")
self._xyz = xyz
@property
def volume(self):
return self._volume
@property
def box_size(self):
return self._box_size
@box_size.setter
def box_size(self, value):
if not isinstance(value, (float, int)) or value <= 0.0:
raise ValueError("box_size must be a positive number or None")
self._box_size = float(value)
@property
def charges(self):
return self._charges
@charges.setter
def charges(self, value):
charges = np.asarray(value)
if not (issubclass(charges.dtype.type, np.float) or issubclass(charges.dtype.type, np.integer)):
raise TypeError("values of charges must be of type float or int")
if charges.ndim != 1:
raise ValueError("charges must be a 1 dim array")
self._charges = np.asarray(value,dtype=np.float)
@property
def sigmas(self):
return self._sigmas
@sigmas.setter
def sigmas(self, value):
sigmas = np.asarray(value, dtype=np.float)
if not np.all(sigmas >= 0):
raise ValueError("sigmas must be positive float")
self._sigmas = sigmas
@property
def epsilons(self):
return self._epsilons
@epsilons.setter
def epsilons(self, value):
epsilons = np.asarray(value,dtype=np.float)
if not np.all(epsilons >= 0):
raise ValueError("epsilons must be positive float")
self._epsilons = epsilons
def potential(self,xyz_trial, lennard_jones, coulomb):
        if not (type(lennard_jones) == bool and type(coulomb) == bool):
raise TypeError('lennard_jones and coulomb must be booleans')
xyz_trial = np.asarray(xyz_trial,dtype=float)
if not (issubclass(xyz_trial.dtype.type, np.float) or issubclass(xyz_trial.dtype.type, np.integer)):
raise TypeError("values in xyz must be of type float or int")
if xyz_trial.ndim != 2 or xyz_trial.shape[0] < 2 or xyz_trial.shape[1] != 3:
raise ValueError("xyz must be of shape=(n_particles, dim) with n_particles > 1 and dim = 3")
return self._total_potential.potential(xyz_trial, lennard_jones, coulomb)
def _create_lj_mean_parameters(self):
self._create_lennard_jones_epsilons()
self._create_lennard_jones_sigmas()
def _create_lennard_jones_epsilons(self):
self.lj_epsilon_matrix = np.sqrt(np.array([self.epsilons]).transpose()*np.array([self.epsilons]))
def _create_lennard_jones_sigmas(self):
self.lj_sigma_matrix = (np.array([self.sigmas]).transpose() + np.array([self.sigmas]))/2
def _create_lennard_jones_cutoff(self):
self.lj_cutoff_matrix = 2.5 * self.lj_sigma_matrix
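
# A minimal construction sketch for SystemConfiguration. Positions, charges and
# Lennard-Jones parameters below are made-up illustrative values; the point is
# the Lorentz-Berthelot mixing done in _create_lj_mean_parameters above
# (arithmetic mean for sigma, geometric mean for epsilon), which yields the
# pairwise lj_sigma_matrix / lj_epsilon_matrix used by the potential.
def _example_system_configuration():
    xyz = np.array([[1.0, 1.0, 1.0],
                    [2.0, 2.0, 2.0],
                    [3.0, 1.5, 2.5]])
    config = SystemConfiguration(xyz, sigmas=0.5, epsilons=1.0,
                                 charges=[1.0, -1.0, 0.0], box_size=12.0)
    # config.lj_sigma_matrix[i, j] == (sigma_i + sigma_j) / 2
    # config.lj_epsilon_matrix[i, j] == sqrt(epsilon_i * epsilon_j)
    return config
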
class Sampler(object):
r"""A sampler class for system configuration.
Parameters
----------
system_configuration : :obj:
Instance of an SystemConfiguration Object that holds essential parameters
previously set by the user.
"""
def __init__(self, system_configuration, lennard_jones=True, coulomb=True):
if len(system_configuration.xyz) == 0:
raise ValueError("no particle in system configuration")
self.system_configuration = system_configuration
self.lennard_jones = lennard_jones
self.coulomb = coulomb
def _update(self, xyz, pot, step, beta):
xyz_trial = (xyz + 2.0 * self.system_configuration.box_size * step
* (np.random.rand(*xyz.shape)- 0.5))%self.system_configuration.box_size
pot_trial = self.system_configuration.potential(xyz_trial, lennard_jones=self.lennard_jones, coulomb=self.coulomb)
if pot_trial <= pot or np.random.rand() < np.exp(beta * (pot - pot_trial)):
return xyz_trial, pot_trial
return xyz, pot
def metropolis(self, iteration_number, step=0.1, beta=1.0):
r"""
Perform a Metropolis MC sampling procedure.
Parameters
----------
iteration_number : int
Number of Metropolis update steps.
step : float, optional, default=0.1
Maximal size of an update move in each coordinate.
beta : float, optional, default=1.0
Inverse temperature factor (1/kT).
Returns
-------
numpy.ndarray of float
Configuration trajectory.
numpy.ndarray of float
Total interaction and external potential trajectory.
"""
# check input data
if not isinstance(iteration_number,int) or iteration_number <= 0:
raise ValueError("To sample you need at least one iteration step...\n"
"iteration_numer has to be a positive integer")
if not isinstance(step,(float,int)) or step <= 0:
raise ValueError("stepsize has to be a postive number")
if not isinstance(beta,(float,int)) or beta <= 0:
raise ValueError("beta has to be a postive number")
# create copy of instance and work with copy, so initial configuration is unchanged
xyz_traj = [self.system_configuration.xyz]
pot_traj = [self.system_configuration.potential(self.system_configuration.xyz, lennard_jones=self.lennard_jones,
coulomb=self.coulomb)]
# perform metropolis
for i in range(iteration_number):
xyz, pot = self._update(
xyz_traj[-1]
, pot_traj[-1],
step=step, beta=beta)
xyz_traj.append(xyz)
pot_traj.append(pot)
return np.asarray(xyz_traj, dtype=np.float64), np.asarray(pot_traj, dtype=np.float64)
def metropolis_sa(self, iteration_number, step=0.1, beta=1.0):
r"""
Perform a Metropolis-based simulated annealing procedure.
Parameters
----------
iteration_number : int
Number of Metropolis update steps.
step : float, optional, default=0.1
Maximal size of an update move in each coordinate.
beta : float, optional, default=1.0
Initial inverse temperature factor (1/kT).
Returns
-------
numpy.ndarray of float
Configuration trajectory.
numpy.ndarray of float
Total interaction and external potential trajectory.
"""
if isinstance(beta, (float, int)):
# beta determines maximum
beta_values = 1.0 / np.linspace(1.0E-15, 1.0 / beta, iteration_number)[::-1]
else:
try:
if len(beta) == iteration_number:
# Accept beta values
beta_values = beta
elif len(beta) == 2:
# beta contains min and max value for beta
beta_values = 1.0 / np.linspace(1.0 / beta[1], 1.0 / beta[0], iteration_number)[::-1]
else:
raise ValueError(
"beta must be float|int, touple with len 2 or touple with len equal to iteration number")
except TypeError:
print("beta must be float|int, touple with len 2 or touple with len equal to iteration number")
exit(1)
xyz_traj = [self.system_configuration.xyz]
pot_traj = [self.system_configuration.potential(self.system_configuration.xyz, lennard_jones=self.lennard_jones,
coulomb=self.coulomb)]
for i in range(iteration_number):
xyz, pot = self._update(xyz_traj[-1], pot_traj[-1],
step=step, beta=beta_values[i])
xyz_traj.append(xyz)
pot_traj.append(pot)
return np.asarray(xyz_traj, dtype=np.float64), np.asarray(pot_traj, dtype=np.float64)
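
# A short end-to-end sketch tying the two classes above together. It reuses the
# illustrative configuration sketched after SystemConfiguration; iteration
# count, step and beta are arbitrary example numbers.
def _example_metropolis_run():
    config = _example_system_configuration()
    sampler = Sampler(config, lennard_jones=True, coulomb=True)
    xyz_traj, pot_traj = sampler.metropolis(iteration_number=100, step=0.1, beta=1.0)
    return xyz_traj, pot_traj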
| gpl-3.0 | 3,311,756,994,876,722,000 | 38.743662 | 176 | 0.607626 | false |
neothemachine/crowfood | crowfood/cli.py | 1 | 5675 | from __future__ import absolute_import, print_function
import sys
import os
import argparse
from warnings import warn
import crowfood.engine
from crowfood.utils import is_subdir
description = '''
See sfood for output format.
'''
def getParser():
parser = argparse.ArgumentParser(prog='cfood',
description=description,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('path', help='file or directory to scan (directory becomes a hierarchy root)',
nargs='+',
)
parser.add_argument('--quotetypes', help=
'Select for parsing the files included by strip quotes or angle brackets:\n'
'both - the default, parse all headers\n'
'angle - include only "system" headers included by anglebrackets (<>)\n'
'quote - include only "user" headers included by strip quotes ("")',
default='both', choices=['both', 'angle', 'quote'])
parser.add_argument('--ext', help='an additional extension for files to be scanned\n'
'default: c, h, cc, cpp, cxx, hpp, hxx',
action='append', default=[], dest='additional_exts',
)
parser.add_argument('--merge', help='file - the default, treats each file as separate\n'
'module - merges .c/.cc/.cpp/.cxx and .h/.hpp/.hxx pairs',
default='file', choices=['file', 'module'])
parser.add_argument('-i','--ignore', help='directory to ignore',
dest='ignore_paths', metavar='IGNORE',
action='append', default=[],
)
parser.add_argument('-I','--include', help=
'additional include search path (for external dependencies\n'
'or when directory to scan does not correspond to #include path)',
dest='include_paths', metavar='INCLUDE',
action='append', default=[],
)
parser.add_argument('--no-include-current', help=
'Do not search for includes in the folder of the '
'currently scanned file',
dest='no_include_current',
action='store_true',
)
parser.add_argument('--fuzzy', help=
'Try to locate all non-found includes by matching '
'with file name only. Note that this may lead to '
'wrong dependencies.',
dest='fuzzy',
action='store_true',
)
parser.add_argument('-E','--external-root', help=
'root directory to use for additional -I include paths for external dependencies'
'if not given, then the -I directories become the roots instead',
dest='external_roots', metavar='ROOT',
action='append', default=[],
)
parser.add_argument('--print-roots', help='Only print the roots, useful for testing',
dest='print_roots',
action='store_true',
)
parser.add_argument('-v', '--verbose', help='be more verbose',
dest='verbose',
action='store_true',
)
return parser
def parseargs(argv):
parser = getParser()
if not argv:
parser.print_help()
sys.exit(1)
args = parser.parse_args(argv)
for path in args.include_paths:
if not os.path.isdir(path):
parser.error('{} is not a directory'.format(path))
for path in args.ignore_paths:
if not os.path.isdir(path):
            warn('{} is not a directory'.format(path))
for path in args.path:
if not os.path.exists(path):
parser.error('{} does not exist'.format(path))
for ext_root in args.external_roots:
if not os.path.isdir(ext_root):
parser.error('{} is not a directory'.format(ext_root))
if not any(is_subdir(include_path, ext_root) for include_path in args.include_paths):
            parser.error('The external root {} must have at least '
                         'one matching -I subdirectory'.format(ext_root))
args.include_paths = list(map(os.path.abspath, args.include_paths))
args.external_roots = list(map(os.path.abspath, args.external_roots))
args.ignore_paths = list(map(os.path.abspath, args.ignore_paths))
args.path = list(map(os.path.abspath, args.path))
return args
def main():
args = parseargs(sys.argv[1:])
if args.print_roots:
input_roots, input_include_paths, external_roots, external_include_paths =\
crowfood.engine.get_roots_and_include_paths(args)
print('input roots:')
print(input_roots)
print('input roots search paths:')
print(list(input_include_paths.values()))
print('external roots:')
print(external_roots)
print('external roots search paths:')
print(list(external_include_paths.values()))
sys.exit()
deps = crowfood.engine.run(args)
for dep in deps:
print(dep)
if __name__ == '__main__':
main()
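
# Typical invocations of the parser defined above (paths are placeholders):
#
#   cfood src/ --merge module --ext cu
#   cfood src/ -I third_party/include -E third_party --quotetypes quote
#
# The first call merges .c/.h pairs into modules and additionally scans *.cu
# files; the second adds an external include search path rooted at third_party
# and only follows #include "..." style includes.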
| mit | -2,339,957,922,401,396,000 | 38.971831 | 105 | 0.52141 | false |
jirenz/CS229_Project | hearthbreaker/replay.py | 1 | 21593 | import re
import json
import hearthbreaker
from hearthbreaker.cards.heroes import hero_from_name
import hearthbreaker.constants
from hearthbreaker.engine import Game, card_lookup, Deck
import hearthbreaker.game_objects
import hearthbreaker.cards
import hearthbreaker.proxies
from hearthbreaker.serialization.move import Move, AttackMove, PowerMove, TurnEndMove, \
TurnStartMove, ConcedeMove, PlayMove, GameEndMove
from pprint import pprint
__doc__ = """
Responsible for reading and writing replays in either the compact or complete replay format (see the `replay format
<https://github.com/danielyule/hearthbreaker/blob/master/replay_format.md>`_ for details).
Recording a game
~~~~~~~~~~~~~~~~
Recording a game is a matter of creating a game, calling :meth:record on that game, playing the game, and then saving
the replay. For example: ::
game = create_a_game() # Create a game somehow
replay = record(game) # Create a replay that will track the game's moves
game.start() # Play the game
replay.write_json("my_replay.hsreplay") # Save the replay to a file
Playing back a game
~~~~~~~~~~~~~~~~~~~
Playing back a game is a matter of loading the replay, getting a game for playing it back, and then starting the game
For example: ::
replay = Replay() # create a new replay object
replay.read_json("my_replay.hsreplay") # load the replay (this can be combined with the previous line)
game = playback(replay) # create a game associated with the replay
game.start() # play the recorded game
"""
class Replay:
"""
Encapsulates the data stored in a replay, along with functions to read and write replays. The data
stored in this class can be used for either recording or playing back replays.
"""
def __init__(self, filename=None):
"""
Create a new Replay. This replay can be used for recording or playing back a game.
If the `filename` string is present, then this will also load the file located at `filename` for playback
:param string filename: A string representing a filename for a replay file to load or None (the default).
If present, it will load the selected replay and prepare it for playback.
The replay file must be in the complete format
"""
self._moves = []
self.__next_target = None
self.__next_index = -1
self.decks = []
self.keeps = []
self.random = []
schema_file = open("replay.schema.json", "r")
self.schema = json.load(schema_file)
schema_file.close()
if filename is not None:
self.read_json(filename)
def _save_decks(self, deck1, deck2):
"""
Save the decks specified by the parameters
:param hearthbreaker.game_objects.Deck deck1: The deck for player 1
:param hearthbreaker.game_objects.Deck deck2: The deck for player 2
"""
self.decks = [deck1, deck2]
def _record_random(self, result):
"""
Record a random number that has been generated by the system.
This random number will be added to the header if the game hasn't started, or top the most recent
move if it has.
"""
if len(self._moves) > 0:
if self._moves[-1].__class__.__name__ != 'GameEndMove':
self._moves[-1].random_numbers.append(result)
else:
self._moves[-2].random_numbers.append(result)
else:
self.random.append(result)
def _record_card_played(self, card, index):
"""
Record that a card has been played. This will add a new PlayMove to the moves array
"""
self._moves.append(PlayMove(hearthbreaker.proxies.ProxyCard(index), target=card.target))
if self.__next_index >= 0:
self._moves[-1].index = self.__next_index
self.__next_index = -1
def _record_option_chosen(self, option):
"""
Record that an option was chosen. This will update whichever is the most recent move
"""
self._moves[-1].card.set_option(option)
def _record_attack(self, attacker, target):
"""
Record that an attack occurred. This will create a new AttackMove in the moves array
"""
self._moves.append(AttackMove(attacker, target))
self.__next_target = None
def _record_power(self):
"""
Record that the current played used their hero power
"""
self._moves.append(PowerMove(self.__next_target))
self.__next_target = None
def _record_target(self, target):
"""
Record that a target was chosen. This affects PlayMoves and PowerMoves. AttackMoves have
their target passed in as an argument
"""
self.__next_target = target
def _record_index(self, index):
"""
Records the index that a minion is played at. Will update the most recent move with this index
"""
self.__next_index = index
def _record_kept_index(self, cards, card_index):
"""
Records the index of the cards that a player kept.
"""
k_arr = []
for index in range(0, len(cards)):
if card_index[index]:
k_arr.append(index)
self.keeps.append(k_arr)
def _record_game_end(self, winner):
"""
Record the end of the game
"""
self._moves.append(GameEndMove(winner))
def __shorten_deck(self, cards):
"""
Mostly for testing, this function will check if the deck is made up of a repeating pattern and if so, shorten
the output, since the parser will generate the pattern from a shorter sample
:param cards: The deck of cards to replace
:return: an array of cards that represents the deck if repeated until 30 cards are found
"""
for pattern_length in range(1, 15):
matched = True
for index in range(pattern_length, 30):
if not isinstance(cards[index % pattern_length], type(cards[index])):
matched = False
break
if matched:
return cards[0:pattern_length]
return cards
def write(self, file):
"""
Write a replay in the compact format. This format is a series of directives, and isn't as flexible
or well structured as the json format (in :meth:write_json). For more info, see the
`replay format <https://github.com/danielyule/hearthbreaker/blob/master/replay_format.md>`_
:param file: Either a string or an IO object. If a string, then it is assumed to be a filename describing
where a replay file is to be written. If an IO object, then the IO object should be opened for
writing.
:type file: :class:`str` or :class:`io.TextIOBase`
"""
if 'write' not in dir(file):
was_filename = True
writer = open(file, 'w')
else:
was_filename = False
writer = file
for deck in self.decks:
writer.write("deck(")
writer.write(deck.hero.short_name)
writer.write(",")
writer.write(",".join([card.name for card in self.__shorten_deck(deck.cards)]))
writer.write(")\n")
found_random = False
if self.random.count(0) == len(self.random):
for move in self._moves:
if move.random_numbers.count(0) != len(move.random_numbers):
found_random = True
break
else:
found_random = True
if not found_random:
writer.write("random()\n")
else:
writer.write("random(")
writer.write(",".join([str(num) for num in self.random]))
writer.write(")\n")
for keep in self.keeps:
writer.write("keep(")
writer.write(",".join([str(k) for k in keep]))
writer.write(")\n")
for move in self._moves:
writer.write(move.to_output_string() + "\n")
if len(move.random_numbers) > 0:
writer.write("random(")
writer.write(",".join([str(num) for num in move.random_numbers]))
writer.write(")\n")
if was_filename:
writer.close()
def write_json(self, file):
"""
Write a replay in the complete json format. This format is compatible with the netplay format, and is
also designed to be more future proof. For more info, see the
`replay format <https://github.com/danielyule/hearthbreaker/blob/master/replay_format.md>`_
:param file: Either a string or an IO object. If a string, then it is assumed to be a filename describing
where a replay file should be written. If an IO object, then the IO object should be opened for
writing.
:type file: :class:`str` or :class:`io.TextIOBase`
"""
was_filename = False
if 'write' not in dir(file):
was_filename = True
writer = open(file, 'w')
else:
writer = file
header_cards = [{"cards": [card.name for card in self.__shorten_deck(deck.cards)],
"hero": deck.hero.short_name} for deck in self.decks]
header = {
'decks': header_cards,
'keep': self.keeps,
'random': self.random,
}
json.dump({'header': header, 'moves': self._moves}, writer, default=lambda o: o.__to_json__(), indent=2,
sort_keys=True)
if was_filename:
writer.close()
def read_json(self, file):
"""
Read a replay in the complete json format. This format is compatible with the netplay format, and is
also designed to be more future proof. For more info, see the
`replay format <https://github.com/danielyule/hearthbreaker/blob/master/replay_format.md>`_
:param file: Either a string or an IO object. If a string, then it is assumed to be a filename describing
where a replay file is found. If an IO object, then the IO object should be opened for
reading.
:type file: :class:`str` or :class:`io.TextIOBase`
"""
from jsonschema import validate
was_filename = False
if 'read' not in dir(file):
was_filename = True
file = open(file, 'r')
jd = json.load(file)
validate(jd, self.schema)
self.decks = []
for deck in jd['header']['decks']:
deck_size = len(deck['cards'])
cards = [card_lookup(deck['cards'][index % deck_size]) for index in range(0, 30)]
self.decks.append(
Deck(cards, hero_from_name(deck['hero'])))
self.random = jd['header']['random']
self.keeps = jd['header']['keep']
if len(self.keeps) == 0:
self.keeps = [[0, 1, 2], [0, 1, 2, 3]]
self._moves = [Move.from_json(**js) for js in jd['moves']]
if was_filename:
file.close()
def read(self, file):
"""
Read a replay in the compact format. This format is a series of directives, and isn't as flexible
or well structured as the json format (in :meth:write_json). For more info, see the
`replay format <https://github.com/danielyule/hearthbreaker/blob/master/replay_format.md>`_
:param file: Either a string or an IO object. If a string, then it is assumed to be a filename describing
where a replay file is to be found. If an IO object, then the IO object should be opened for
reading.
:type file: :class:`str` or :class:`io.TextIOBase`
"""
was_filename = False
if 'read' not in dir(file):
was_filename = True
file = open(file, 'r')
line_pattern = re.compile("\s*(\w*)\s*\(([^)]*)\)\s*(;.*)?$")
for line in file:
(move, args) = line_pattern.match(line).group(1, 2)
args = [arg.strip() for arg in args.split(",")]
if move == 'play':
card = args[0]
if len(args) > 1:
target = args[1]
else:
target = None
self._moves.append(PlayMove(hearthbreaker.proxies.ProxyCard(card), target=target))
elif move == 'summon':
card = args[0]
index = int(args[1])
if len(args) > 2:
target = args[2]
else:
target = None
self._moves.append(PlayMove(hearthbreaker.proxies.ProxyCard(card), index, target))
elif move == 'attack':
self._moves.append(AttackMove(args[0], args[1]))
elif move == 'power':
if len(args) > 0 and args[0] != '':
self._moves.append(PowerMove(args[0]))
else:
self._moves.append(PowerMove())
elif move == 'end':
self._moves.append(TurnEndMove())
elif move == 'start':
self._moves.append(TurnStartMove())
elif move == 'random':
if len(self._moves) == 0:
if len(args[0]) > 0:
for num in args:
self.random.append(int(num))
else:
for num in args:
if num.isdigit():
self._moves[-1].random_numbers.append(int(num))
else:
self._moves[-1].random_numbers.append(hearthbreaker.proxies.ProxyCharacter(num))
elif move == 'deck':
if len(self.decks) > 1:
raise Exception("Maximum of two decks per file")
deck_size = len(args) - 1
cards = [card_lookup(args[1 + index % deck_size]) for index in range(0, 30)]
self.decks.append(
Deck(cards, hero_from_name(args[0])))
elif move == 'keep':
if len(self.keeps) > 1:
raise Exception("Maximum of two keep directives per file")
self.keeps.append([int(a) for a in args])
elif move == 'concede':
self._moves.append(ConcedeMove())
elif move == 'game_end':
pass # currently we are not putting in game end because it will end anyways
if was_filename:
file.close()
        if len(self.keeps) == 0:
self.keeps = [[0, 1, 2], [0, 1, 2, 3]]
def record(game):
"""
Ready a game for recording. This function must be called before the game is played.
Several methods of the game and its agents are modified. These modifications will not affect the operation
of the game or its agents, although any further modifications to these methods will not be recorded.
:param game: A game which has not been started
:type game: :class:`Game <hearthbreaker.game_objects.Game>`
:return: A replay that will track the actions of the game as it is played. Once the game is complete,
this replay can be written to a file to remember the state of this game.
:rtype: :class:`Replay`
"""
class RecordingAgent:
__slots__ = ['agent']
def __init__(self, proxied_agent):
object.__setattr__(self, "agent", proxied_agent)
def choose_index(self, card, player):
index = self.agent.choose_index(card, player)
replay._record_index(index)
return index
def choose_target(self, targets):
target = self.agent.choose_target(targets)
replay._record_target(target)
return target
def choose_option(self, options, player):
option = self.agent.choose_option(options, player)
replay._record_option_chosen(options.index(option))
return option
def __getattr__(self, item):
return self.agent.__getattribute__(item)
def __setattr__(self, key, value):
setattr(self.__getattribute__("agent"), key, value)
replay = hearthbreaker.replay.Replay()
replay.random.append(game.first_player)
game.players[0].agent = RecordingAgent(game.players[0].agent)
game.players[1].agent = RecordingAgent(game.players[1].agent)
if game.first_player == 0:
replay._save_decks(game.players[0].deck, game.players[1].deck)
else:
replay._save_decks(game.players[1].deck, game.players[0].deck)
game.bind("kept_cards", replay._record_kept_index)
game.bind("game_ended", replay._record_game_end)
for player in game.players:
player.bind("used_power", replay._record_power)
player.hero.bind("found_power_target", replay._record_target)
player.bind("card_played", replay._record_card_played)
player.bind("character_attack", replay._record_attack)
_old_random_choice = game.random_choice
_old_generate_random_between = game._generate_random_between
_old_start_turn = game._start_turn
_old_end_turn = game._end_turn
def random_choice(choice):
result = _old_random_choice(choice)
if isinstance(result, hearthbreaker.game_objects.Character):
replay._moves[-1].random_numbers[-1] = hearthbreaker.proxies.ProxyCharacter(result)
return result
def _generate_random_between(lowest, highest):
result = _old_generate_random_between(lowest, highest)
replay._record_random(result)
return result
def _end_turn():
replay._moves.append(TurnEndMove())
_old_end_turn()
def _start_turn():
replay._moves.append(TurnStartMove())
_old_start_turn()
game.random_choice = random_choice
game._generate_random_between = _generate_random_between
game._end_turn = _end_turn
game._start_turn = _start_turn
return replay
def playback(replay):
"""
Create a game which can be replayed back out of a replay.
:param replay: The replay to load the game out of
:type replay: :class:`Replay`
:return: A game which when played will perform all of the actions in the replay.
:rtype: :class:`Game <hearthbreaker.game_objects.Game>`
"""
move_index = -1
k_index = 0
random_index = 0
game = None
class ReplayAgent:
def __init__(self):
self.next_target = None
self.next_index = -1
self.next_option = None
def do_card_check(self, cards):
nonlocal k_index
keep_arr = [False] * len(cards)
for index in replay.keeps[k_index]:
keep_arr[int(index)] = True
k_index += 1
return keep_arr
def do_turn(self, player):
nonlocal move_index, random_index
while move_index < len(replay._moves) and not player.hero.dead and type(
replay._moves[move_index]) is not hearthbreaker.serialization.move.TurnEndMove:
random_index = 0
print(replay._moves[move_index].to_output_string())
replay._moves[move_index].play(game)
move_index += 1
if move_index == len(replay._moves):
player.game.game_ended = True
def set_game(self, game):
pass
def choose_target(self, targets):
return self.next_target
def choose_index(self, card, player):
return self.next_index
def choose_option(self, options, player):
return options[self.next_option]
game = Game.__new__(Game)
_old_random_choice = game.random_choice
_old_start_turn = game._start_turn
_old_end_turn = game._end_turn
_old_pre_game = game.pre_game
def _generate_random_between(lowest, highest):
nonlocal random_index
if len(replay.random) == 0:
return 0
else:
random_index += 1
if move_index == -1:
return replay.random[random_index - 1]
return replay._moves[move_index].random_numbers[random_index - 1]
def random_choice(choice):
nonlocal move_index, random_index
if isinstance(replay._moves[move_index].random_numbers[random_index], hearthbreaker.proxies.ProxyCharacter):
result = replay._moves[move_index].random_numbers[random_index].resolve(game)
random_index += 1
return result
return _old_random_choice(choice)
def _start_turn():
nonlocal move_index, random_index
random_index = 0
_old_start_turn()
move_index += 1
def _end_turn():
nonlocal move_index, random_index
random_index = 0
_old_end_turn()
move_index += 1
def pre_game():
nonlocal move_index
_old_pre_game()
move_index = 0
game.random_choice = random_choice
game._generate_random_between = _generate_random_between
game._end_turn = _end_turn
game._start_turn = _start_turn
game.pre_game = pre_game
game.__init__(replay.decks, [ReplayAgent(), ReplayAgent()])
return game
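
# A compact sketch of the record -> save -> load -> playback cycle described in
# the module docstring. The unstarted `game` argument must come from the wider
# hearthbreaker package (decks and agents are not constructed here).
def _example_replay_roundtrip(game):
    rec = record(game)                       # instrument the game before it starts
    game.start()                             # play it out; moves are captured in rec
    rec.write_json("my_replay.hsreplay")     # persist in the complete json format

    loaded = Replay("my_replay.hsreplay")    # the constructor reads the json replay
    replayed_game = playback(loaded)         # build a game wired to the replay
    replayed_game.start()                    # re-run the recorded moves
    return replayed_game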
| mit | -7,057,704,691,699,661,000 | 37.150177 | 118 | 0.574538 | false |
Iconoclasteinc/tgit | test/ui/file_dialogs/test_file_dialog.py | 1 | 1231 | # -*- coding: utf-8 -*-
import pytest
from PyQt5.QtWidgets import QFileDialog
from hamcrest import ends_with, assert_that, equal_to
from cute.widgets import QFileDialogDriver, window
from test.ui import show_, close_
from tgit.ui import locations
from tgit.ui.dialogs.file_dialogs import make_file_dialog, name_filter
pytestmark = pytest.mark.ui
@pytest.yield_fixture()
def driver(prober, automaton):
dialog_driver = QFileDialogDriver(window(QFileDialog), prober, automaton)
yield dialog_driver
close_(dialog_driver)
def show_dialog(name_filters="", file_mode=QFileDialog.ExistingFile, directory="", parent=None):
dialog = make_file_dialog(name_filters, file_mode, directory, parent, False)
show_(dialog)
return dialog
def test_shows_name_filters(driver):
_ = show_dialog("PNG Images (*.png)")
driver.filter_files_of_type("PNG Images (*.png)")
def test_initially_starts_in_directory(driver):
_ = show_dialog(directory=locations.Documents)
driver.has_current_directory(ends_with("Documents"))
def test_builds_name_filters():
assert_that(name_filter(["type1", "type2"], "caption"), equal_to("caption (*.type1 *.type2)"), "The name filters") | gpl-3.0 | -7,738,104,564,986,402,000 | 30.447368 | 118 | 0.70593 | false |
Skeletrox/usb-backend-pinut | file_upload/fileupload/views.py | 1 | 15896 | import json, os, subprocess, getpass, shutil
import logging
from .USBFinder import attemptMount,transfer_file, get_usb_name
from hashlib import sha1
from django.http import HttpResponse,HttpResponseRedirect, JsonResponse
from django.views.decorators.csrf import ensure_csrf_cookie
from django.template import Context, loader
from django.shortcuts import render,get_object_or_404
from django.views.generic import CreateView, DeleteView, ListView
from .models import EkFile, Content
from django.contrib.auth.models import User
from django.contrib.auth import logout
from .response import JSONResponse, response_mimetype
from .serialize import serialize
from django.urls import reverse
from .extract import extractit
from .deleteExtract import deleteit
from distutils.dir_util import copy_tree
from django.conf import settings
staticFileLocRoot = None
old_files = []
files = []
total_amount = 0
total_done = 0
count = 0
is_auth = True
optional_flag = False
percentage_done = 0
perm_dict = None
user = None
telemetry = None
local_files = []
allowed_exts = settings.ACCEPTED_EXTNS
class User_Permissions:
def __init__(self, user):
self.permissions = user.permission.get_permissions()
def get_permissions(self):
return self.permissions
class NoFilesError(ValueError):
def __init__ (self, arg = None):
self.strerror = arg
self.args = {arg}
def user_logout(request):
logout(request)
return HttpResponseRedirect('../../upload/')
def index(request):
return render(request,'fileupload/LOGIN.html')
'''
<<<<<<< HEAD
=======
Dev's code that is not actually called in the program, can be ignored, kept for future references if needed
>>>>>>> refs/remotes/origin/master
@ensure_csrf_cookie
def upload(request):
if request.method=='POST':
instance=EkFile(file=request.FILES['files'])
obj=instance.save();
print (instance)
values=serialize(instance)
data={"files":values}
response=json.dumps(data)
print (response)
if instance.type_of_file=="ecar":
print instance.path_of_file
files=extractit(instance.path_of_file)
instance=Content(ekfile=instance,folder_file=files,json_file=files+".json")
instance.save()
return HttpResponse(response,content_type="application/json")
@ensure_csrf_cookie
def list_the_files(request):
values=[serialize(instance) for instance in EkFile.objects.all()]
data={"files":values}
response=json.dumps(data)
print (response)
return HttpResponse(response,content_type="application/json")
@ensure_csrf_cookie
def delete_files(request):
print ("Delete this file: "+request.POST['id'])
instance=EkFile.objects.get(id=request.POST['id'])
print (instance)
if instance.type_of_file=="ecar":
obj=Content.objects.get(ekfile=instance.id)
deleteit({'folder_file':obj.folder_file,'json_file':obj.json_file})
obj.delete()
instance.delete()
return HttpResponse(json.dumps({"id":4}),content_type="application/json")
<<<<<<< HEAD
'''
def verify(request, optional=False):
flag='INIT'
global optional_flag
optional_flag = False
global is_auth, user, password, telemetry
if optional:
optional_flag = True
return HttpResponseRedirect('../new')
try:
user=User.objects.get(username=request.POST['email'])
logger = logging.getLogger(__name__)
password=request.POST.get('password', '')
#_,salt,hashpw=user.password.split('$')
logger.error(request.POST.get('email', '')+","+request.POST.get('password', '')+" \n")
logger.error(user.password+", username is "+user.username)
flag='REAL'
except User.DoesNotExist:
flag = 'FAKE'
if(flag == 'REAL' and user.check_password(password)):
global perm_dict
perm_dict = User_Permissions(user)
is_auth = True
############################################################
# Load values from res.json file #
############################################################
staticFileLocRoot = settings.MEDIA_ROOT
telemetry = settings.TELEMETRY
return HttpResponseRedirect('new/')
else:
return render(request,'fileupload/LOGIN.html',{'invalid':'not a valid username or password',})
#=======
config_json_dir = settings.CONFIG_JSON_DIR
class EkFileCreateView(CreateView):
model = EkFile
fields = "__all__"
def form_valid(self, form):
self.object = form.save()
print "self Object: "
print unicode(self.object)
self.object.file_upload = self.object.slug
files = [serialize(self.object)]
data = {'files': files}
response = JSONResponse(data, mimetype=response_mimetype(self.request))
response['Content-Disposition'] = 'inline; filename=files.json'
print 'Before you send post request'
print self.object.path_of_file
print '-'*10 + 'WE GON EXTRACT IT YO' + '-'*10
#print self.object.slug
if(self.object.path_of_file.endswith(".json")):
if not os.path.exists(config_json_dir):
os.makedirs(config_json_dir)
shutil.copy2(self.object.path_of_file, config_json_dir)
else:
if(settings.ACTIVE_PROFILE == "ekstep"):
files = extractit(self.object.path_of_file)
for f in files:
obj=Content(ekfile=self.object,filename=f)
obj.save()
return response
def form_invalid(self, form):
data = json.dumps(form.errors)
print data + ' omg fail '
return HttpResponse(content=data, status=400, content_type='application/json')
class EkFileDeleteView(DeleteView):
model = EkFile
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
print 'Attempting to delete ' + unicode(self.object)
if(self.object.path_of_file.endswith(".json")):
json_file = unicode(self.object.file_upload)
file_name = config_json_dir+json_file
os.remove(file_name)
else:
if(settings.ACTIVE_PROFILE == "ekstep"):
files = Content.objects.filter(ekfile = self.object.id)
filename = []
for f in files:
filename.append(f.filename)
f.delete()
deleteit(filename)
self.object.delete()
response = JSONResponse(True, mimetype=response_mimetype(request))
response['Content-Disposition'] = 'inline; filename=files.json'
return response
class EkFileListView(ListView):
model = EkFile
def render_to_response(self, context, **response_kwargs):
files = [ serialize(p) for p in self.get_queryset() ]
data = {'files': files}
response = JSONResponse(data, mimetype=response_mimetype(self.request))
response['Content-Disposition'] = 'inline; filename=files.json'
return response
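
# The three class-based views above are wired up elsewhere via urls.py; a rough,
# illustrative mapping (route names and prefixes are assumptions, not taken from
# this project's actual urls.py) would look like:
#
#   from django.conf.urls import url
#   from .views import EkFileCreateView, EkFileDeleteView, EkFileListView
#
#   urlpatterns = [
#       url(r'^new/$', EkFileCreateView.as_view(), name='upload-new'),
#       url(r'^delete/(?P<pk>\d+)$', EkFileDeleteView.as_view(), name='upload-delete'),
#       url(r'^view/$', EkFileListView.as_view(), name='upload-view'),
#   ]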
def verify_USB(request):
value = attemptMount()
response_data = 'disabled'
if value is not None:
response_data = 'active '
return JsonResponse({'data':response_data})
def serve_extensions(requests):
global allowed_exts
return JSONResponse({"exts":allowed_exts})
def download_to_USBx(request):
usb_name = get_usb_name()
if usb_name is not None:
local_files_dir = '/' + getpass.getuser() + '/FILES/'
if os.geteuid() != 0: #If not root, user location is /home/user/files
local_files_dir = '/home/' + getpass.getuser() + '/FILES/'
print local_files_dir
local_files = []
for root, folders, files in os.walk(local_files_dir):
for file in files:
if (not os.path.isdir(file)) and file.endswith(".json"):
local_files.append(os.path.join(root, file))
print local_files
actual_index = local_files[0].split('/').index('FILES') + 1
for file in local_files:
os.chdir('/media/' + getpass.getuser() + '/' + usb_name)
split_list = file.split('/')
for i in range (actual_index, len(split_list) - 1):
if not os.path.exists(split_list[i]):
os.makedirs(split_list[i])
os.chdir(split_list[i])
command = 'cp "' + file + '" "' + os.getcwd() + '"'
t = subprocess.Popen(command, shell=True)
t.communicate()[0]
result = t.returncode
if result != 0:
return JsonResponse ({'res': 'Copy aborted! [USB Unplugged/Insufficient Space?]'})
return JsonResponse({'res': 'Copy successful'})
return JsonResponse({'res':'Reinsert USB'})
'''
def download_to_USB(request):
print request.method
usb_name = get_usb_name()
val = request.POST.get("counter", None)
print "HAI " + str(val)
if val is None:
return HttpResponseRedirect('/upload/new/')
if val == 'INIT':
global local_files
if usb_name is None:
return HttpResponseRedirect('/upload/new/')
local_files = []
for root, folders, files in os.walk(telemetry):
for file in files:
if not os.path.isdir(file):
local_files.append(os.path.join(root, file))
return JsonResponse({'value': '-1', 'length' : str(len(local_files))})
else:
try:
current = int(val)
# global local_files
curr_file = local_files[current]
file_localized_name = curr_file[curr_file.find("telemetry") + len("telemetry/"):]
shutil.copy2(curr_file, usb_name + file_localized_name)
return JsonResponse({'value', str(current+1)})
except ValueError:
return JsonResponse({'res': 'Use an integer for manual inputs!'})
except IndexError:
return JsonResponse({'res': 'Files have been successfully copied!'})
except OSError:
return JsonResponse({'res': 'Copy error! USB unplugged/insufficient storage space?'})
'''
def split_dirs(text): #Splits the entire path to get the file name
splitty = text.split('/')
value = splitty[len(splitty) - 1]
return value
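#transfer() below drives the USB import flow: a GET enumerates files present on
#the mounted USB that are not yet recorded as EkFile rows, and each subsequent
#POST copies one file (indexed by the posted "file_descriptor") and saves it to
#the database.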
def transfer(request):
try:
if not is_auth:
return HttpResponse("Please access this URL properly")
elif request.method == 'GET' or request.method == 'POST':
global percentage_done
global total_amount, total_done, count, files, old_files
files_existing = []
if request.method == 'GET':
new_files = attemptMount()
if new_files is None:
print "new_files none"
return HttpResponseRedirect('../new')
old_files = [fModel.file_upload for fModel in EkFile.objects.all()]
files = [thing for thing in new_files if split_dirs(thing) not in old_files]
total_done = 0
total_amount = len(files)
fileCount = 0
else:
fileCount = request.POST.get("file_descriptor", "")
download_more = True
file_to_transfer = None
if len(files) > 0:
temp_value = 0
for file in files:
try:
#Runs each time. Can be optimized further to handle JSON requests and responses
value = split_dirs(file)
x = EkFile.objects.get(file_upload=str(value))
except EkFile.DoesNotExist:
file_size = os.stat(file).st_size
value = split_dirs(file)
fModel = EkFile(id = temp_value+1, file_upload = str(value))
temp_value += 1
if fModel not in files_existing:
files_existing.append(fModel)
try:
if len(files_existing) == 0:
raise NoFilesError('No Files')
file_to_transfer = files[int(fileCount)]
return_code = transfer_file(file_to_transfer)
if return_code != 0:
print 'USB unexpectedly removed!'
removeCorruptFile(file_to_transfer)
except NoFilesError as error:
global optional_flag #If a simple refresh occurs without a change in USB attached
if optional_flag:
return HttpResponseRedirect('../new')
template = loader.get_template('fileupload/downloadFiles.html')
total_files_in_db = EkFile.objects.all()
context = {
'files_existing' : None,
'show_output' : False,
'percentage_done' : 0,
'current_count' : 0,
'btn_check_flag' : 'disabled',
'download_more' : False,
}
return HttpResponse(template.render(context, request))
count += 1
total_done += 1
percentage_done = int(total_done*100/total_amount)
#Code below updates the file transferred list
if file_to_transfer is not None:
print "file_to_transfer " + file_to_transfer
value = split_dirs(file_to_transfer)
file_to_save = EkFile(id = count, file_upload = value)
file_to_save.save()
if(value.endswith(".json")):
if not os.path.exists(config_json_dir):
os.makedirs(config_json_dir)
shutil.copy2(file_to_save.path_of_file, config_json_dir)
else:
if(settings.ACTIVE_PROFILE == "ekstep"):
files2 = extractit(file_to_save.path_of_file)
for f in files2:
obj=Content(ekfile=file_to_save,filename=f)
obj.save()
print '[Z]Saved ' + value
#list_of_files.append(file_to_save)
#files.remove(file_to_transfer)
#=======
#extractit(file_to_save.path_of_file)
#Code above updates the file transferred list
if (total_done <= total_amount - 1 or len(files_existing) == 0):
#We still have files to download
template = loader.get_template('fileupload/downloadFiles.html')
context = {
'files_existing' : files_existing,
'show_output' : True,
'percentage_done' : percentage_done,
'current_count' : total_done,
'btn_check_flag' : 'disabled',
'download_more' : True,
}
return HttpResponse(template.render(context, request))
#Code below is for final condition
if total_done == total_amount and len(files_existing) > 0:
optional_flag = True #Any further refreshes will not attempt to show "no new files available"
download_more = None
return HttpResponseRedirect('../new')
#Code above is for final condition
return JsonResponse({'null':'null'}) #For testing only, report if thrown anytime!
except OSError:
return HttpResponseRedirect('../new/');
def removeCorruptFile(file):
global staticFileLocRoot
    delete_from_db_file = EkFile.objects.get(file_upload=split_dirs(file))
delete_from_db_file.delete()
sendString = "rm " + staticFileLocRoot + file
    t = subprocess.Popen(sendString, shell=True)
t.communicate()[0]
| apache-2.0 | 7,018,819,054,383,964,000 | 37.865526 | 109 | 0.57417 | false |
vodkina/GlobaLeaks | backend/globaleaks/models/validators.py | 1 | 5111 | # -*- coding: UTF-8
#
# validator
# *********
#
# Utilities to validate data recorded in the ORM
import re
from globaleaks import LANGUAGES_SUPPORTED_CODES
from globaleaks.settings import GLSettings
from globaleaks.rest import errors
from globaleaks.utils.utility import log
def natnum_v(self, attr, value):
"""
Validates that the passed value is a natural number (in Z+)
"""
if not isinstance(value, int) or value < 0:
raise errors.InvalidModelInput("natnum_v: expected val to be in Z+ (%s:%d)" % (attr, value))
return value
class range_v(object):
def __call__(self, model_obj, attr, value):
if not isinstance(value, int):
raise errors.InvalidModelInput("range_v: expected int (%s)" % attr)
if value < self.start or value > self.stop:
m = "range_v(%d, %d): value outside of acceptable range (%d)" % (self.start, self.stop, value)
raise errors.InvalidModelInput(m)
return value
def __init__(self, start, stop):
self.start = start
self.stop = stop
def shorttext_v(self, attr, value):
if isinstance(value, str):
value = unicode(value)
if not isinstance(value, unicode):
raise errors.InvalidModelInput("shorttext_v: expected unicode (%s:%s)" % (attr, value))
if GLSettings.enable_input_length_checks and len(value) > GLSettings.memory_copy.maximum_namesize:
raise errors.InvalidModelInput("shorttext_v: length need to be < of %d"
% GLSettings.memory_copy.maximum_namesize)
return value
def longtext_v(self, attr, value):
"""
"""
if not attr:
return value
if isinstance(value, str):
value = unicode(value)
if not isinstance(value, unicode):
raise errors.InvalidModelInput("longtext_v: expected unicode (%s:%s)" %
(attr, value))
if GLSettings.enable_input_length_checks and len(value) > GLSettings.memory_copy.maximum_textsize:
raise errors.InvalidModelInput("longtext_v: unicode text in %s " \
"overcomes length " \
"limit %d" % (attr, GLSettings.memory_copy.maximum_textsize))
return value
def dict_v(self, attr, value):
if not value:
return {}
if not isinstance(value, dict):
raise errors.InvalidModelInput("dict_v: expected dict (%s)" % attr)
for key, subvalue in value.iteritems():
if isinstance(subvalue, str):
subvalue = unicode(subvalue)
if isinstance(subvalue, unicode):
if GLSettings.enable_input_length_checks and len(subvalue) > GLSettings.memory_copy.maximum_textsize:
raise errors.InvalidModelInput("dict_v: text for key %s in %s " \
"overcomes length limit of %d" % (key, attr,
GLSettings.memory_copy.maximum_textsize))
if isinstance(subvalue, dict):
dict_v(self, attr, subvalue)
return value
def shortlocal_v(self, attr, value):
dict_v(None, attr, value)
if not value:
return value
    # If a language does not exist, it does not mean that a malicious input has been provided;
    # this condition may in fact happen when a language is removed from the package,
    # so the proper way to handle it is simply to log the issue and discard the input.
# https://github.com/globaleaks/GlobaLeaks/issues/879
remove = [lang for lang in value if lang not in LANGUAGES_SUPPORTED_CODES]
for k in remove:
try:
del value[unicode(k)]
except KeyError:
pass
log.debug("shortlocal_v: (%s) Invalid language code in %s, skipped" %
(k, attr))
for lang, text in value.iteritems():
shorttext_v(None, None, text)
return value
def longlocal_v(self, attr, value):
dict_v(None, attr, value)
if not value:
return value
    # If a language does not exist, it does not mean that a malicious input has been provided;
    # this condition may in fact happen when a language is removed from the package,
    # so the proper way to handle it is simply to log the issue and discard the input.
# https://github.com/globaleaks/GlobaLeaks/issues/879
remove = [lang for lang in value if lang not in LANGUAGES_SUPPORTED_CODES]
for k in remove:
try:
del value[unicode(k)]
except KeyError:
pass
log.debug("longlocal_v: (%s) Invalid language code in %s, skipped" %
(k, attr))
for lang, text in value.iteritems():
longtext_v(None, attr, text)
return value
def shorturl_v(self, attr, value):
if not re.match(r'^(/s/[a-z0-9]{1,30})$', value):
raise errors.InvalidModelInput("invalid shorturl")
return value
def longurl_v(self, attr, value):
if not re.match(r'^(/[a-z0-9#=_&?/-]{1,255})$', value):
raise errors.InvalidModelInput("invalid longurl")
return value
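# Illustrative direct calls (the first argument is the model instance and is
# not used by these validators, so None is passed in these made-up examples):
#     shorttext_v(None, 'name', u'Alice')         # returns u'Alice' or raises
#     shorturl_v(None, 'shorturl', u'/s/abc123')  # returns the value if it matches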
| agpl-3.0 | -8,436,190,481,681,134,000 | 31.974194 | 123 | 0.6126 | false |
vapor-ware/synse-server | synse_server/backoff.py | 1 | 1845 | """Retry backoff strategies."""
import random
import time
from typing import Union
__all__ = ['ExponentialBackoff']
class ExponentialBackoff:
"""An implementation of the exponential backoff strategy.
This is useful for getting an exponentially backed-off delay for
reconnection or retry actions.
Each call to ``delay`` will return the next exponentially backed-off
value, in seconds, to use for waiting. The backoff will continue for
each call, up to a maximum of 2^10 * base.
Args:
base: The base delay, in seconds. This is the starting point for
the returned exponentially backed off time values.
cap: The cap on the exponent, after which the backoff will not
grow exponentially. This is 9 by default (2^9 = 512 ~= 8.5 minutes)
"""
def __init__(self, base: int = 1, cap: int = 9) -> None:
self._base = base
self._exp = 0
self._max = cap
self._reset_time = base * 2 ** (self._max + 1)
self._last_invocation = time.monotonic()
self.rand = random.Random()
self.rand.seed()
def delay(self) -> Union[int, float]:
"""Get the next exponentially backed off time delay, in seconds.
The delay value is incremented exponentially with every call, up
to the defined max. If a period of time greater than 2^(max+1) * base
has passed, the backoff is reset.
Returns:
The time, in seconds, to be used as the next delay interval.
"""
invocation = time.monotonic()
interval = invocation - self._last_invocation
self._last_invocation = invocation
if interval > self._reset_time:
self._exp = 0
self._exp = min(self._exp + 1, self._max)
return self.rand.uniform(0, self._base * 2 ** self._exp)
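# Illustrative usage sketch for pacing reconnection attempts; connect() is a
# hypothetical placeholder for the operation being retried:
#
#     backoff = ExponentialBackoff(base=1, cap=9)
#     while not connect():
#         time.sleep(backoff.delay())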
| gpl-3.0 | 5,880,698,989,741,114,000 | 31.946429 | 79 | 0.625474 | false |
ecreall/lagendacommun | urlshortener/main.py | 1 | 5007 | # -*- coding: utf-8 -*-
# Copyright (c) 2016 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# Source: https://github.com/narenaryan/Pyster
# licence: AGPL
# author: Amen Souissi
import sqlite3
import string
from flask import Flask, request, render_template, redirect, jsonify
from flask.ext.cors import CORS, cross_origin
from sqlite3 import OperationalError
from urllib.parse import urlparse
#host = 'http://localhost:5000/'
host = 'http://6li.eu/'
BASE = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
BASE.extend(list(string.ascii_lowercase))
BASE.extend(list(string.ascii_uppercase))
BASE_LEN = len(BASE)
#Assuming urls.db is in your app root folder
app = Flask(__name__)
cors = CORS(app, resources={r"/": {"origins": "*"}})
def get_base_next(char):
if char == '':
return False, '0'
char_index = BASE.index(char)
char_index += 1
return (False, BASE[char_index]) if \
char_index < BASE_LEN else (True, '0')
def next_id(id_=None):
new_id = id_
if id_ is None:
new_id = '0'
else:
index = -1
to_inc = new_id[index]
final, next = get_base_next(to_inc)
new_id = new_id[:index] + next
index -= 1
len_id = len(new_id)
while index+6 >= 0 and final:
if index+len_id >= 0:
to_inc = new_id[index]
final, next = get_base_next(to_inc)
new_id = new_id[:index] + next + new_id[index+1:]
else:
to_inc = ''
final, next = get_base_next(to_inc)
new_id = next + new_id[index+1:]
index -= 1
return new_id
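# Illustrative values produced by the base-62 counter above (BASE runs through
# digits, then lowercase, then uppercase):
#   next_id() -> '0', next_id('0') -> '1', next_id('9') -> 'a',
#   next_id('z') -> 'A', next_id('Z') -> '00'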
def table_check():
create_table = """
CREATE TABLE WEB_URL(
ID INTEGER PRIMARY KEY AUTOINCREMENT,
NUM TEXT NOT NULL UNIQUE,
URL TEXT NOT NULL UNIQUE
);
"""
with sqlite3.connect('var/urls.db') as conn:
cursor = conn.cursor()
try:
cursor.execute(create_table)
except OperationalError:
pass
@app.route('/', methods=['GET', 'POST'])
# @cross_origin(origin='localhost',headers=['Content- Type','Authorization'])
def home():
    method = request.method
    original_url = None
with sqlite3.connect('var/urls.db') as conn:
try:
cursor = conn.cursor()
rows_query = """
SELECT NUM, max(ID) FROM WEB_URL"""
result_cursor = cursor.execute(rows_query)
result_fetch = result_cursor.fetchone()
last_num = result_fetch[0]
number_of_rows = result_fetch[1]
number_of_rows = 0 if number_of_rows is None else number_of_rows
if method == 'POST':
original_url = request.form.get('url')
if original_url:
if urlparse(original_url).scheme == '':
original_url = 'http://' + original_url
exist_row = """
SELECT NUM FROM WEB_URL
WHERE URL='{url}'
""".format(url=original_url)
result_cursor = cursor.execute(exist_row)
result_fetch = result_cursor.fetchone()
if result_fetch:
new_num = result_fetch[0]
else:
new_num = next_id(last_num)
insert_row = """
INSERT INTO WEB_URL (URL, NUM)
VALUES ('{url}', '{num}')
""".format(url=original_url, num=new_num)
cursor.execute(insert_row)
number_of_rows += 1
encoded_string = new_num
return jsonify(**{'short_url': host + encoded_string,
'code': 'SUCCESS',
'original_url': original_url})
return render_template('home.html', number_of_rows=number_of_rows)
except Exception as error:
return jsonify(**{'code': 'ERROR',
'error': str(error),
'original_url': original_url
})
@app.route('/<short_url>')
def redirect_short_url(short_url):
decoded_string = short_url
with sqlite3.connect('var/urls.db') as conn:
cursor = conn.cursor()
select_row = """
SELECT URL FROM WEB_URL
WHERE NUM='{num}'
""".format(num=decoded_string)
result_cursor = cursor.execute(select_row)
try:
return redirect(result_cursor.fetchone()[0])
except Exception:
pass
return render_template(
'home.html',
error=True)
if __name__ == '__main__':
# This code checks whether database table is created or not
table_check()
# app.run(debug=True)
app.run(host='0.0.0.0')
| agpl-3.0 | 2,148,260,794,711,410,000 | 31.72549 | 78 | 0.505892 | false |
liverliu/netmusichacker | python/hacker/hacker.py | 1 | 7384 | import logging
import web
import config
import requests
import json
import hashlib
import random
logger = logging.getLogger('route')
logger.setLevel(logging.INFO)
formatter = logging.Formatter('[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s','%m-%d %H:%M:%S')
#console
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
ch.setFormatter(formatter)
logger.addHandler(ch)
#file
fh = logging.FileHandler('hacker.log')
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
logger.addHandler(fh)
urls = (
'/.*', 'route',
'/api/.*', 'route',
'/eapi/.*', 'route',
)
valid_header = ['HTTP_ORIGIN', 'HTTP_COOKIE', 'HTTP_ACCEPT', 'HTTP_CONNECTION', 'HTTP_USER_AGENT',
'HTTP_ACCEPT_LANGUAGE', 'HTTP_ACCEPT_ENCODING', 'CONTENT_LENGTH', 'CONTENT_TYPE',
'HTTP_BATCH_METHOD', 'HTTP_REFERER']
new_header = {'HTTP_ORIGIN':'Origin', 'HTTP_COOKIE':'Cookie', 'HTTP_ACCEPT':'Accept',
'HTTP_CONNECTION':'Connection', 'HTTP_USER_AGENT':'User-Agent', 'HTTP_HOST':'Host',
'HTTP_ACCEPT_LANGUAGE':'Accept-Language', 'HTTP_ACCEPT_ENCODING':'Accept-Encoding',
'CONTENT_LENGTH':'Content-Length', 'CONTENT_TYPE':'Content-Type', 'HTTP_BATCH_METHOD':'Batch-Method',
'HTTP_REFERER':'Referer'}
response_headers = ['Content-Type', 'Connection', 'Pragrma', 'Cache-Control', 'Expires', 'Vary', 'Server', 'Date']
class MyApplication(web.application):
def run(self, host='127.0.0.1', port=8080, *middleware):
return web.httpserver.runsimple(self.wsgifunc(*middleware), (host, port))
class route:
def GET(self):
return handle()
def POST(self):
return handle()
def handle():
logger.info('-------------------')
logger.info(web.ctx.path)
try:
headers={}
for k,v in web.ctx.env.items():
if(k.upper()=='HTTP_HOST'):
headers[new_header[k]]='music.163.com'
continue
if(k.upper() in valid_header):
headers[new_header[k]] = v
if web.ctx.env['REQUEST_METHOD'].upper() == 'POST':
response = requests.post(config.host+web.ctx.path, data=web.data(), headers=headers)
elif web.ctx.env['REQUEST_METHOD'].upper() == 'GET':
response = requests.get(config.host+web.ctx.env['REQUEST_URI'], headers=headers)
else:
return None
for k,v in response.headers.items():
if k in response_headers:
web.header(k, v)
return modify(response.content)
except Exception, ex:
logger.error(ex)
return None
def modify(message):
try:
result = json.loads(message)
if web.ctx.path.startswith('/eapi/v1/album/'):
logger.info('modify album info')
if result['songs']:
for song in result['songs']:
modify_privilege(song['privilege'])
if song['fee'] and song['fee']>0:
song['fee']=0
elif web.ctx.path=='/eapi/v3/song/detail/':
logger.info('modify songs privileges')
map(modify_privilege, result['privileges']) if result['privileges'] else None
elif web.ctx.path=='/eapi/v3/playlist/detail':
logger.info('modify songs info')
map(modify_privilege, result['privileges']) if result['privileges'] else None
elif web.ctx.path=='/eapi/song/enhance/player/url':
data = result['data'][0]
if data['code'] != 200:
logger.info('try to generate url: {}'.format(data['id']))
song = music_detail(data['id'])
music = get_music(song)
data['code']=200
data['type']='mp3'
data['url']=gen_url(song)
data['gain']=music['volumeDelta']
data['br']=music['bitrate']
data['size']=music['size']
data['md5']=music['dfsId']
logger.info(result)
elif web.ctx.path=='/eapi/batch':
logger.info('modify search result')
search = result['/api/cloudsearch/pc']
[modify_privilege(song['privilege']) for song in search['result']['songs']] if search['code']==200 else None
elif web.ctx.path=='/eapi/cloudsearch/pc':
logger.info('modify search result')
[modify_privilege(song['privilege']) for song in result['result']['songs']] if result['code']==200 else None
elif web.ctx.path.startswith('/eapi/v1/artist'):
logger.info('modify singer info')
[modify_privilege(hot_song['privilege']) for hot_song in result['hotSongs']]
elif web.ctx.path.startswith('/eapi/song/enhance/download/url'):
logger.info(message)
return json.dumps(result)
except Exception, ex:
logger.info(ex)
return message
def get_music(song):
if(song['hMusic']):
return song['hMusic']
elif song['mMusic']:
return song['mMusic']
elif song['lMusic']:
return song['lMusic']
elif song['bMusic']:
return song['bMusic']
else:
return song['audition']
def modify_privilege(privilege):
if privilege:
if privilege['st'] and privilege['st']<0:
privilege['st']=0
privilege['cs']=False
privilege['subp']=1
privilege['fl']=privilege['maxbr']
privilege['dl']=privilege['maxbr']
privilege['pl']=privilege['maxbr']
privilege['sp']=7
privilege['cp']=1
if privilege['fee'] and privilege['fee']>0:
privilege['fee']=0
privilege['st']=0
privilege['cs']=False
privilege['subp']=1
privilege['fl']=privilege['maxbr']
privilege['dl']=privilege['maxbr']
privilege['pl']=privilege['maxbr']
privilege['sp']=7
privilege['cp']=1
def music_detail(id):
url = '{}/api/song/detail?ids=[{}]'.format(config.host, id)
response = requests.get(url, headers=config.headers).text.encode('utf-8')
song = json.loads(response)['songs'][0]
if not song['hMusic'] and not song['mMusic'] and not song['lMusic'] and not song['bMusic']:
album = album_detail(song)
for song1 in album['songs']:
if song1['id'] == song['id']:
return song1
return song
def album_detail(song):
url = '{}/api/album/{}'.format(config.host, song['album']['id'])
response = requests.get(url, headers=config.headers).text.encode('utf-8')
return json.loads(response)['album']
def gen_url(song):
music = get_music(song)
song_id = music['dfsId']
enc_id = encrypt(song_id)
return 'http://m{}.music.126.net/{}/{}.mp3'.format(random.randint(1,2), enc_id, song_id)
def encrypt(id):
magic = bytearray('3go8&$8*3*3h0k(2)2')
song_id = bytearray(str(id))
magic_len = len(magic)
for i in xrange(len(song_id)):
song_id[i] = song_id[i] ^ magic[i % magic_len]
m = hashlib.md5(song_id)
result = m.digest().encode('base64')[:-1]
result = result.replace('/', '_')
result = result.replace('+', '-')
return result
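# Note on the scheme implemented by encrypt(): the decimal dfsId is XOR-ed
# byte-by-byte with the repeating magic key, the result is MD5-hashed, the
# digest is base64-encoded, and '/'/'+' are replaced with '_'/'-' so the value
# can be embedded in the CDN URL built by gen_url().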
app = MyApplication(urls, globals())
application = app.wsgifunc()
if __name__ == '__main__':
app.run(host=config.server_host, port=config.server_port) | apache-2.0 | 2,123,856,222,579,568,600 | 35.20098 | 120 | 0.574485 | false |
crappycrypto/wincrypto | wincrypto/constants.py | 1 | 1062 | from collections import namedtuple
import struct
PUBLICKEYSTRUC = namedtuple('PUBLICKEYSTRUC', 'bType bVersion aiKeyAlg') # reserved is skipped when unpacking
PUBLICKEYSTRUC_s = struct.Struct('<bb2xI')
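# '<bb2xI' unpacks little-endian: bType (BYTE), bVersion (BYTE), two reserved
# padding bytes (skipped), and aiKeyAlg (ALG_ID stored as a DWORD).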
PRIVATEKEYBLOB = namedtuple('PRIVATEKEYBLOB', 'modulus prime1 prime2 exponent1 exponent2 coefficient privateExponent')
RSAPUBKEY = namedtuple('RSAPUBKEY', 'magic bitlen pubexp')
RSAPUBKEY_s = struct.Struct('<4sII')
RSAPUBKEY_MAGIC = b'RSA1'
PRIVATEKEYBLOB_MAGIC = b'RSA2'
# bType
bType_SIMPLEBLOB = 1
bType_PUBLICKEYBLOB = 6
bType_PRIVATEKEYBLOB = 7
bType_PLAINTEXTKEYBLOB = 8
# CALG
CALG_RC2 = 0x6602
CALG_AES_128 = 0x660e
CALG_AES_192 = 0x660f
CALG_AES_256 = 0x6610
CALG_RC4 = 0x6801
CALG_MD5 = 0x8003
CALG_SHA = 0x8004
CALG_SHA1 = 0x8004
CALG_SHA_256 = 0x800c
CALG_RSA_KEYX = 0xa400
# Hash params
HP_ALGID = 0x0001
HP_HASHVAL = 0x0002
HP_HASHSIZE = 0x0004
# key params
KP_ALGID = 7
KP_BLOCKLEN = 8
KP_KEYLEN = 9
CRYPT_EXPORTABLE = 1
CUR_BLOB_VERSION = 2
ALG_CLASS_DATA_ENCRYPT = 3 << 13
ALG_CLASS_HASH = 4 << 13
ALG_CLASS_KEY_EXCHANGE = 5 << 13
| mit | 5,415,822,493,171,555,000 | 20.673469 | 118 | 0.746704 | false |
repotvsupertuga/tvsupertuga.repository | script.module.streamlink.base/resources/lib/streamlink/plugin/api/validate.py | 1 | 12873 | """This module provides an API to validate and to some extent
manipulate data structures, such as JSON and XML parsing results.
Example usage:
>>> validate(int, 5)
5
>>> validate({text: int}, {'foo': '1'})
ValueError: Type of '1' should be 'int' but is 'str'
>>> validate({'foo': transform(int)}, {'foo': '1'})
{'foo': 1}
"""
from xml.etree import ElementTree as ET
from copy import copy as copy_obj
try:
from functools import singledispatch
except ImportError:
from streamlink.utils.singledispatch import singledispatch
from ...compat import is_py2, urlparse
from ...exceptions import PluginError
__all__ = [
"any", "all", "filter", "get", "getattr", "hasattr", "length", "optional",
"transform", "text", "union", "url", "startswith", "endswith", "contains",
"xml_element", "xml_find", "xml_findall", "xml_findtext",
"validate", "Schema", "SchemaContainer"
]
#: Alias for text type on each Python version
text = is_py2 and basestring or str
# References to original functions that we override in this module
_all = all
_getattr = getattr
_hasattr = hasattr
_filter = filter
_map = map
_re_match_attr = ("group", "groups", "groupdict", "re")
def _is_re_match(value):
return _all(_hasattr(value, a) for a in _re_match_attr)
class any(tuple):
"""At least one of the schemas must be valid."""
def __new__(cls, *args):
return super(any, cls).__new__(cls, args)
class all(tuple):
"""All schemas must be valid."""
def __new__(cls, *args):
return super(all, cls).__new__(cls, args)
class SchemaContainer(object):
def __init__(self, schema):
self.schema = schema
class transform(object):
"""Applies function to value to transform it."""
def __init__(self, func):
# text is an alias for basestring on Python 2, which cannot be
# instantiated and therefore can't be used to transform the value,
# so we force to unicode instead.
if is_py2 and func == text:
func = unicode
self.func = func
class optional(object):
"""An optional key used in a dict or union-dict."""
def __init__(self, key):
self.key = key
class union(SchemaContainer):
"""Extracts multiple validations based on the same value."""
class attr(SchemaContainer):
"""Validates an object's attributes."""
class xml_element(object):
"""A XML element."""
def __init__(self, tag=None, text=None, attrib=None):
self.tag = tag
self.text = text
self.attrib = attrib
def length(length):
"""Checks value for minimum length using len()."""
def min_len(value):
if not len(value) >= length:
raise ValueError(
"Minimum length is {0} but value is {1}".format(length, len(value))
)
return True
return min_len
def startswith(string):
"""Checks if the string value starts with another string."""
def starts_with(value):
validate(text, value)
if not value.startswith(string):
raise ValueError("'{0}' does not start with '{1}'".format(value, string))
return True
return starts_with
def endswith(string):
"""Checks if the string value ends with another string."""
def ends_with(value):
validate(text, value)
if not value.endswith(string):
raise ValueError("'{0}' does not end with '{1}'".format(value, string))
return True
return ends_with
def contains(string):
"""Checks if the string value contains another string."""
def contains_str(value):
validate(text, value)
if string not in value:
raise ValueError("'{0}' does not contain '{1}'".format(value, string))
return True
return contains_str
def get(item, default=None):
"""Get item from value (value[item]).
If the item is not found, return the default.
Handles XML elements, regex matches and anything that has __getitem__.
"""
def getter(value):
if ET.iselement(value):
value = value.attrib
try:
# Use .group() if this is a regex match object
if _is_re_match(value):
return value.group(item)
else:
return value[item]
except (KeyError, IndexError):
return default
except (TypeError, AttributeError) as err:
raise ValueError(err)
return transform(getter)
def getattr(attr, default=None):
"""Get a named attribute from an object.
When a default argument is given, it is returned when the attribute
doesn't exist.
"""
def getter(value):
return _getattr(value, attr, default)
return transform(getter)
def hasattr(attr):
"""Verifies that the object has an attribute with the given name."""
def has_attr(value):
return _hasattr(value, attr)
return has_attr
def filter(func):
"""Filters out unwanted items using the specified function.
Supports both dicts and sequences, key/value pairs are
expanded when applied to a dict.
"""
def expand_kv(kv):
return func(*kv)
def filter_values(value):
cls = type(value)
if isinstance(value, dict):
return cls(_filter(expand_kv, value.items()))
else:
return cls(_filter(func, value))
return transform(filter_values)
def map(func):
"""Apply function to each value inside the sequence or dict.
Supports both dicts and sequences, key/value pairs are
expanded when applied to a dict.
"""
# text is an alias for basestring on Python 2, which cannot be
# instantiated and therefore can't be used to transform the value,
# so we force to unicode instead.
if is_py2 and text == func:
func = unicode
def expand_kv(kv):
return func(*kv)
def map_values(value):
cls = type(value)
if isinstance(value, dict):
return cls(_map(expand_kv, value.items()))
else:
return cls(_map(func, value))
return transform(map_values)
def url(**attributes):
"""Parses an URL and validates its attributes."""
def check_url(value):
validate(text, value)
parsed = urlparse(value)
if not parsed.netloc:
raise ValueError("'{0}' is not a valid URL".format(value))
for name, schema in attributes.items():
if not _hasattr(parsed, name):
raise ValueError("Invalid URL attribute '{0}'".format(name))
try:
validate(schema, _getattr(parsed, name))
except ValueError as err:
raise ValueError(
"Unable to validate URL attribute '{0}': {1}".format(
name, err
)
)
return True
# Convert "http" to be either any("http", "https") for convenience
if attributes.get("scheme") == "http":
attributes["scheme"] = any("http", "https")
return check_url
def xml_find(xpath):
"""Find a XML element via xpath."""
def xpath_find(value):
validate(ET.iselement, value)
value = value.find(xpath)
if value is None:
raise ValueError("XPath '{0}' did not return an element".format(xpath))
return validate(ET.iselement, value)
return transform(xpath_find)
def xml_findall(xpath):
"""Find a list of XML elements via xpath."""
def xpath_findall(value):
validate(ET.iselement, value)
return value.findall(xpath)
return transform(xpath_findall)
def xml_findtext(xpath):
"""Find a XML element via xpath and extract its text."""
return all(
xml_find(xpath),
getattr("text"),
)
@singledispatch
def validate(schema, value):
if callable(schema):
if schema(value):
return value
else:
raise ValueError("{0}({1!r}) is not true".format(schema.__name__, value))
if schema == value:
return value
else:
raise ValueError("{0!r} does not equal {1!r}".format(value, schema))
@validate.register(any)
def validate_any(schema, value):
errors = []
for subschema in schema:
try:
return validate(subschema, value)
except ValueError as err:
errors.append(err)
else:
err = " or ".join(_map(str, errors))
raise ValueError(err)
@validate.register(all)
def validate_all(schemas, value):
for schema in schemas:
value = validate(schema, value)
return value
@validate.register(transform)
def validate_transform(schema, value):
validate(callable, schema.func)
return schema.func(value)
@validate.register(list)
@validate.register(tuple)
@validate.register(set)
@validate.register(frozenset)
def validate_sequence(schema, value):
validate(type(schema), value)
return type(schema)(validate(any(*schema), v) for v in value)
@validate.register(dict)
def validate_dict(schema, value):
validate(type(schema), value)
new = type(schema)()
for key, subschema in schema.items():
if isinstance(key, optional):
if key.key not in value:
continue
key = key.key
if type(key) in (type, transform, any, all, union):
for subkey, subvalue in value.items():
new[validate(key, subkey)] = validate(subschema, subvalue)
break
else:
if key not in value:
raise ValueError("Key '{0}' not found in {1!r}".format(key, value))
try:
new[key] = validate(subschema, value[key])
except ValueError as err:
raise ValueError("Unable to validate key '{0}': {1}".format(key, err))
return new
@validate.register(type)
def validate_type(schema, value):
if isinstance(value, schema):
return value
else:
raise ValueError(
"Type of {0!r} should be '{1}' but is '{2}'".format(
value, schema.__name__, type(value).__name__
)
)
@validate.register(xml_element)
def validate_xml_element(schema, value):
validate(ET.iselement, value)
new = ET.Element(value.tag, attrib=value.attrib)
if schema.attrib is not None:
try:
new.attrib = validate(schema.attrib, value.attrib)
except ValueError as err:
raise ValueError("Unable to validate XML attributes: {0}".format(err))
if schema.tag is not None:
try:
new.tag = validate(schema.tag, value.tag)
except ValueError as err:
raise ValueError("Unable to validate XML tag: {0}".format(err))
if schema.text is not None:
try:
new.text = validate(schema.text, value.text)
except ValueError as err:
raise ValueError("Unable to validate XML text: {0}".format(err))
for child in value:
new.append(child)
return new
@validate.register(attr)
def validate_attr(schema, value):
new = copy_obj(value)
for attr, schema in schema.schema.items():
if not _hasattr(value, attr):
raise ValueError("Attribute '{0}' not found on object '{1}'".format(
attr, value
))
setattr(new, attr, validate(schema, _getattr(value, attr)))
return new
@singledispatch
def validate_union(schema, value):
raise ValueError("Invalid union type: {0}".format(type(schema).__name__))
@validate_union.register(dict)
def validate_union_dict(schema, value):
new = type(schema)()
for key, schema in schema.items():
optional_ = isinstance(key, optional)
if optional_:
key = key.key
try:
new[key] = validate(schema, value)
except ValueError as err:
if optional_:
continue
raise ValueError("Unable to validate union '{0}': {1}".format(key, err))
return new
@validate_union.register(list)
@validate_union.register(tuple)
@validate_union.register(set)
@validate_union.register(frozenset)
def validate_union_sequence(schemas, value):
return type(schemas)(validate(schema, value) for schema in schemas)
@validate.register(union)
def validate_unions(schema, value):
return validate_union(schema.schema, value)
class Schema(object):
"""Wraps a validator schema into a object."""
def __init__(self, *schemas):
self.schema = all(*schemas)
def validate(self, value, name="result", exception=PluginError):
try:
return validate(self.schema, value)
except ValueError as err:
raise exception("Unable to validate {0}: {1}".format(name, err))
@validate.register(Schema)
def validate_schema(schema, value):
return schema.validate(value, exception=ValueError)
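# Illustrative usage sketch; the schema and input below are made-up examples:
#
#     _schema = Schema(
#         {"data": {"title": text, "count": int}},
#         get("data"),
#         get("title"),
#     )
#     _schema.validate({"data": {"title": "clip", "count": 3}})  # -> "clip"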
| gpl-2.0 | 4,050,243,957,748,727,000 | 25.325153 | 86 | 0.61027 | false |
doc-cloud/ds | string/string-rotate/rotate.py | 1 | 1026 | # -*- coding: utf-8 -*-
import copy
def simple_rotate(s, m):
if m < 0:
raise Exception('m is less than 0')
m %= len(s)
t = copy.copy(s)
del s[:]
s += t[m:] + t[:m]
def left_shift_m(s, m):
if m < 0:
raise Exception('m is less than 0')
length = len(s)
m %= length
for i in xrange(m):
t = s[0]
for j in xrange(1, length):
s[j - 1] = s[j]
s[length - 1] = t
def reverse(s, b, e):
n = e - b + 1;
for i in xrange(n / 2):
s[b + i], s[e - i] = s[e - i], s[b + i]
def rotate_reverse(s, m):
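    # Three-reversal rotation: reverse the first m elements, reverse the rest,
    # then reverse the whole list; O(n) time and O(1) extra space.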
if m < 0:
raise Exception('m is less than 0')
length = len(s)
m %= length
reverse(s, 0, m - 1)
reverse(s, m, length - 1)
reverse(s, 0, length - 1)
if __name__ == '__main__':
s = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
s0 = copy.copy(s)
simple_rotate(s0, 3)
print s0
s1 = copy.copy(s)
left_shift_m(s1, 3)
print s1
s2 = copy.copy(s)
rotate_reverse(s2, 4)
print s2
| gpl-2.0 | 102,612,010,791,808,480 | 19.52 | 47 | 0.457115 | false |
elfnor/sverchok | nodes/transforms/scale_mk2.py | 1 | 2683 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
from mathutils import Vector
import bpy
from bpy.props import FloatProperty, BoolProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode
from sverchok.utils.sv_recursive import sv_recursive_transformations
class SvScaleNodeMK2(bpy.types.Node, SverchCustomTreeNode):
''' Scale MK2 '''
bl_idname = 'SvScaleNodeMK2'
bl_label = 'Scale'
bl_icon = 'MAN_SCALE'
factor_ = FloatProperty(name='multiplyer', description='scaling factor',
default=1.0,
options={'ANIMATABLE'}, update=updateNode)
separate = BoolProperty(name='separate', description='Separate UV coords',
default=False,
update=updateNode)
def sv_init(self, context):
self.inputs.new('VerticesSocket', "vertices", "vertices")
self.inputs.new('VerticesSocket', "centers", "centers")
self.inputs.new('StringsSocket', "multiplier", "multiplier").prop_name = "factor_"
self.outputs.new('VerticesSocket', "vertices", "vertices")
def draw_buttons(self, context, layout):
layout.prop(self, 'separate')
def process(self):
# inputs
vers = self.inputs['vertices'].sv_get()
vecs = self.inputs['centers'].sv_get(default=[[[0.0, 0.0, 0.0]]])
mult = self.inputs['multiplier'].sv_get()
# outputs
if self.outputs[0].is_linked:
sca = sv_recursive_transformations(self.scaling,vers,vecs,mult,self.separate)
self.outputs['vertices'].sv_set(sca)
def scaling(self, v, c, m):
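        # Scales vertex v about center c by factor m: c + m * (v - c).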
print(c,v,m)
return [(Vector(c) + m * (Vector(v) - Vector(c)))[:]]
def register():
bpy.utils.register_class(SvScaleNodeMK2)
def unregister():
bpy.utils.unregister_class(SvScaleNodeMK2)
if __name__ == '__main__':
register() | gpl-3.0 | -1,494,444,262,516,776,700 | 34.315789 | 90 | 0.656355 | false |
lucidfrontier45/flasktest | test/test.py | 1 | 2489 | from flask_app import application
import unittest
import logging
import json
import uuid
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
def make_logger(name):
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
logger.addHandler(ch)
return logger
class FlaskTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self.app = application.app
self.app.config["TESTING"] = True
self.logger = make_logger(self.__class__.__name__)
def setUp(self):
self.client = self.app.test_client()
def test_index(self):
r = self.client.get("/")
self.logger.debug("code={0}, data={1}".format(r.status_code, r.data))
assert r.status_code == 200
def test_user_model(self):
model = application.model
u = model.User(name="Tom", data="hoge")
assert str(u) == "User[None,Tom,hoge]"
def test_put_user(self):
r = self.client.put("/user/")
assert r.status_code == 200
data = json.loads(r.data)
assert data["code"] == 400
name = str(uuid.uuid4())
r = self.client.put("/user/{0}/".format(name), data={"data":"fuga2"})
assert r.status_code == 200
data = json.loads(r.data)
assert data["code"] == 200
assert data["result"]["data"] == "fuga2"
r = self.client.put("/user/{0}/".format(name), data={"data":"fuga2"})
self.logger.info(r.status_code)
assert r.status_code == 200
data = json.loads(r.data)
assert data["code"] == 200
assert data["result"]["data"] == "fuga2"
def test_get_user(self):
r = self.client.get("/user/")
assert r.status_code == 200
data = json.loads(r.data)
assert data["code"] == 200
assert isinstance(data["result"], list)
self.client.put("/user/tom/", data={"data":"test"})
r = self.client.get("/user/tom/")
self.logger.info(r.status_code)
assert r.status_code == 200
data = json.loads(r.data)
self.logger.info(data)
assert data["code"] == 200
r = self.client.get("/user/tom2/")
assert r.status_code == 200
data = json.loads(r.data)
assert data["code"] == 404
if __name__ == "__main__":
unittest.main() | mit | -5,974,267,527,050,064,000 | 29 | 85 | 0.575733 | false |
awni/tensorflow | tensorflow/python/training/training.py | 1 | 7130 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=wildcard-import,unused-import,g-bad-import-order,line-too-long
"""This library provides a set of classes and functions that helps train models.
## Optimizers
The Optimizer base class provides methods to compute gradients for a loss and
apply gradients to variables. A collection of subclasses implement classic
optimization algorithms such as GradientDescent and Adagrad.
You never instantiate the Optimizer class itself, but instead instantiate one
of the subclasses.
@@Optimizer
@@GradientDescentOptimizer
@@AdadeltaOptimizer
@@AdagradOptimizer
@@MomentumOptimizer
@@AdamOptimizer
@@FtrlOptimizer
@@RMSPropOptimizer
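A minimal usage sketch (illustrative; `loss` is assumed to be a scalar loss
tensor defined elsewhere in your graph):
    opt = tf.train.GradientDescentOptimizer(learning_rate=0.1)
    train_op = opt.minimize(loss)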
## Gradient Computation
TensorFlow provides functions to compute the derivatives for a given
TensorFlow computation graph, adding operations to the graph. The
optimizer classes automatically compute derivatives on your graph, but
creators of new Optimizers or expert users can call the lower-level
functions below.
@@gradients
@@AggregationMethod
@@stop_gradient
## Gradient Clipping
TensorFlow provides several operations that you can use to add clipping
functions to your graph. You can use these functions to perform general data
clipping, but they're particularly useful for handling exploding or vanishing
gradients.
@@clip_by_value
@@clip_by_norm
@@clip_by_average_norm
@@clip_by_global_norm
@@global_norm
## Decaying the learning rate
@@exponential_decay
## Moving Averages
Some training algorithms, such as GradientDescent and Momentum often benefit
from maintaining a moving average of variables during optimization. Using the
moving averages for evaluations often improve results significantly.
@@ExponentialMovingAverage
## Coordinator and QueueRunner
See [Threading and Queues](../../how_tos/threading_and_queues/index.md)
for how to use threads and queues. For documentation on the Queue API,
see [Queues](../../api_docs/python/io_ops.md#queues).
@@Coordinator
@@QueueRunner
@@add_queue_runner
@@start_queue_runners
## Summary Operations
The following ops output
[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
protocol buffers as serialized string tensors.
You can fetch the output of a summary op in a session, and pass it to
a [SummaryWriter](../../api_docs/python/train.md#SummaryWriter) to append it
to an event file. Event files contain
[`Event`](https://www.tensorflow.org/code/tensorflow/core/util/event.proto)
protos that can contain `Summary` protos along with the timestamp and
step. You can then use TensorBoard to visualize the contents of the
event files. See [TensorBoard and
Summaries](../../how_tos/summaries_and_tensorboard/index.md) for more
details.
@@scalar_summary
@@image_summary
@@histogram_summary
@@zero_fraction
@@merge_summary
@@merge_all_summaries
## Adding Summaries to Event Files
See [Summaries and
TensorBoard](../../how_tos/summaries_and_tensorboard/index.md) for an
overview of summaries, event files, and visualization in TensorBoard.
@@SummaryWriter
@@summary_iterator
## Training utilities
@@global_step
@@write_graph
"""
# Optimizers.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from tensorflow.python.ops import gradients
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training.adadelta import AdadeltaOptimizer
from tensorflow.python.training.adagrad import AdagradOptimizer
from tensorflow.python.training.adam import AdamOptimizer
from tensorflow.python.training.ftrl import FtrlOptimizer
from tensorflow.python.training.momentum import MomentumOptimizer
from tensorflow.python.training.moving_averages import ExponentialMovingAverage
from tensorflow.python.training.optimizer import Optimizer
from tensorflow.python.training.rmsprop import RMSPropOptimizer
from tensorflow.python.training.gradient_descent import GradientDescentOptimizer
from tensorflow.python.training.sync_replicas_optimizer import SyncReplicasOptimizer
# Utility classes for training.
from tensorflow.python.training.coordinator import Coordinator
from tensorflow.python.training.coordinator import LooperThread
from tensorflow.python.training.queue_runner import *
# For the module level doc.
from tensorflow.python.training import input as _input
from tensorflow.python.training.input import *
from tensorflow.python.training.device_setter import replica_device_setter
from tensorflow.python.training.saver import generate_checkpoint_state_proto
from tensorflow.python.training.saver import get_checkpoint_state
from tensorflow.python.training.saver import latest_checkpoint
from tensorflow.python.training.saver import Saver
from tensorflow.python.training.saver import update_checkpoint_state
from tensorflow.python.training.saver import export_meta_graph
from tensorflow.python.training.saver import import_meta_graph
from tensorflow.python.training.session_manager import SessionManager
from tensorflow.python.training.summary_io import summary_iterator
from tensorflow.python.training.summary_io import SummaryWriter
from tensorflow.python.training.supervisor import Supervisor
from tensorflow.python.training.training_util import write_graph
from tensorflow.python.training.training_util import global_step
from tensorflow.python.pywrap_tensorflow import NewCheckpointReader
# Training data protos.
from tensorflow.core.example.example_pb2 import *
from tensorflow.core.example.feature_pb2 import *
from tensorflow.core.protobuf.saver_pb2 import *
# Utility op. Open Source. TODO(touts): move to nn?
from tensorflow.python.training.learning_rate_decay import exponential_decay
from tensorflow.python.util.all_util import make_all
# Include extra modules for docstrings because:
# * Input methods in tf.train are documented in io_ops.
# * Saver methods in tf.train are documented in state_ops.
__all__ = make_all(__name__, [sys.modules[__name__], io_ops, state_ops])
# Symbols whitelisted for export without documentation.
# TODO(cwhipkey): review these and move to contrib or expose through
# documentation.
__all__.extend([
"BytesList",
"Example",
"Feature",
"FeatureList",
"FeatureLists",
"Features",
"FloatList",
"InferenceExample",
"Int64List",
"LooperThread",
"SaverDef",
"SequenceExample",
"export_meta_graph",
"generate_checkpoint_state_proto",
"import_meta_graph",
"queue_runner",
])
| apache-2.0 | 2,207,461,269,452,418,600 | 32.952381 | 84 | 0.78892 | false |
pacoqueen/bbinn | formularios/utils_almacen.py | 1 | 5346 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (C) 2005-2008 Francisco José Rodríguez Bogado, #
# Diego Muñoz Escalante. #
# ([email protected], [email protected]) #
# #
# This file is part of GeotexInn. #
# #
# GeotexInn is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
# GeotexInn is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with GeotexInn; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA #
###############################################################################
###################################################################
## utils_almacen.py - Utilities for the warehouse module.
###################################################################
## NOTES:
##
## ----------------------------------------------------------------
##
###################################################################
## Changelog:
## October 4, 2005 -> Started.
## January 19, 2006 -> Forked to v02.
###################################################################
import sys
import time
sys.path.append('../framework')
import pclases, sqlobject, time
def id_propia_empresa_proveedor():
"""
    Returns the id of our own company in the suppliers (proveedores) table.
"""
try:
empresa = pclases.DatosDeLaEmpresa.select()[0]
except IndexError:
print "ERROR: No hay datos de la empresa."
return 0
try:
empresa = pclases.Proveedor.select(pclases.Proveedor.q.nombre==empresa.nombre)[0]
except: #IndexError? SQLObjectNotFound?
print "ERROR: La empresa no está en la tabla de de proveedores."
return 0
return empresa.id
def id_propia_empresa():
"""
    Returns the id of our own company in the clients (clientes) table.
"""
try:
empresa = pclases.DatosDeLaEmpresa.select()[0]
except IndexError:
print "ERROR: No hay datos de la empresa."
return 0
try:
empresa = pclases.Cliente.select(pclases.Cliente.q.nombre==empresa.nombre)[0]
except: #IndexError? SQLObjectNotFound?
print "ERROR: La empresa no está en la tabla de clientes."
return 0
return empresa.id
def ultimo_pedido_de_compra_mas_uno():
"""
    Returns the last valid purchase order number plus one (i.e. the next
    free number), or 0 if there are no purchase orders.
    The order number is returned as a number (even though it is actually
    stored as a str in the DB).
    Orders whose order number cannot be interpreted as a number are ignored,
    and only orders from the current year are taken into account.
    The criterion used to determine the last order number is the date.
    If the number following that of the last order by date is already taken
    (it should not be), it keeps adding 1 until a free order number is
    reached.
"""
import mx
from mx.DateTime import localtime as ahora
strnumspedido = pclases.PedidoCompra._connection.queryAll("SELECT numpedido FROM pedido_compra WHERE date_part('year', fecha) = %d ORDER BY fecha, numpedido;" % (ahora().year))
intnumspedido = []
for numpedido in strnumspedido:
try:
intnumspedido.append(int(numpedido[0]))
except (ValueError, IndexError):
pass
if len(intnumspedido) == 0:
ultimo = 0
else:
ultimo = intnumspedido[-1]
while pclases.PedidoCompra.select(pclases.PedidoCompra.q.numpedido == str(ultimo)).count() != 0:
ultimo += 1
return ultimo
def ultimo_numalbaran(venta, interno):
"""
    Returns the last delivery note (albarán) number that satisfies the
    venta==True/False and interno==True/False conditions, or 0 if there
    is none.
"""
albs = pclases.Albaran.select(sqlobject.AND(pclases.Albaran.q.venta == venta,
pclases.Albaran.q.interno == interno),
orderBy="-numalbaran")
if albs.count() == 0:
return 0
return albs[0].numalbaran
def productosConFicha():
"""
    Returns a list of identifiers of products that have a production sheet.
"""
fichas = pclases.FichaDeProduccion.select()
return [f.idproducto.id for f in fichas]
| gpl-2.0 | 4,870,805,919,252,435,000 | 39.5 | 181 | 0.532548 | false |
FederatedAI/FATE | python/federatedml/param/encrypted_mode_calculation_param.py | 1 | 1888 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from federatedml.param.base_param import BaseParam
class EncryptedModeCalculatorParam(BaseParam):
"""
    Define the encrypted_mode_calculator parameters.
    Parameters
    ----------
    mode: str, supported values are 'strict', 'fast', 'balance', 'confusion_opt'
        and 'confusion_opt_balance' only, default: strict
    re_encrypted_rate: float or int, numeric value in [0, 1], used when mode is
        'balance' or 'confusion_opt_balance', default: 1
"""
def __init__(self, mode="strict", re_encrypted_rate=1):
self.mode = mode
self.re_encrypted_rate = re_encrypted_rate
def check(self):
descr = "encrypted_mode_calculator param"
self.mode = self.check_and_change_lower(self.mode,
["strict", "fast", "balance", "confusion_opt", "confusion_opt_balance"],
descr)
if self.mode in ["balance", "confusion_opt_balance"]:
if type(self.re_encrypted_rate).__name__ not in ["int", "long", "float"]:
raise ValueError("re_encrypted_rate should be a numeric number")
if not 0.0 <= self.re_encrypted_rate <= 1:
raise ValueError("re_encrypted_rate should in [0, 1]")
return True
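# Illustrative usage sketch (parameter values are arbitrary examples):
#     param = EncryptedModeCalculatorParam(mode="balance", re_encrypted_rate=0.3)
#     param.check()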
| apache-2.0 | -2,847,998,658,587,991,000 | 36.019608 | 120 | 0.630826 | false |
pnwairfire/bluesky | test/unit/bluesky/test_datautils.py | 1 | 18223 | """Unit tests for bluesky.datetimeutils"""
__author__ = "Joel Dubowy"
from py.test import raises
from bluesky import datautils
from bluesky.models import activity
from bluesky.models.fires import Fire
# TODO: moke Fire class
class MockFiresManager(object):
def __init__(self, fires):
self.fires = [Fire(f) for f in fires]
@property
def fire_failure_handler(self):
class klass(object):
def __init__(self, fire):
pass
def __enter__(self):
pass
def __exit__(self, e_type, value, tb):
pass
return klass
def summarize(self, **summary):
self.summary = summary
class TestSummarizeAllLevels(object):
def test_no_fires(self):
fm = MockFiresManager([])
datautils.summarize_all_levels(fm, 'emissions')
assert fm.fires == []
assert fm.summary == {"emissions": {'summary': {'total': 0.0}}}
def test_no_active_areas(self):
fm = MockFiresManager([
{
"id": "SF11C14225236095807750"
}
])
datautils.summarize_all_levels(fm, 'emissions')
assert fm.fires == [{
"id": "SF11C14225236095807750",
'fuel_type': 'natural',
'type': 'wildfire',
"emissions": {'summary': {'total': 0.0}}
}]
assert fm.summary == {"emissions": {'summary': {'total': 0.0}}}
def test_empty_active_areas(self):
fm = MockFiresManager([
{
"id": "SF11C14225236095807750",
"activity_areas": []
}
])
datautils.summarize_all_levels(fm, 'emissions')
assert fm.fires == [{
"id": "SF11C14225236095807750",
'fuel_type': 'natural',
'type': 'wildfire',
"activity_areas": [],
"emissions": {'summary': {'total': 0.0}}
}]
assert fm.summary == {"emissions": {'summary': {'total': 0.0}}}
def test_no_locations(self):
fm = MockFiresManager([
{
"id": "SF11C14225236095807750",
"activity": [{
"active_areas": [{
"start": "2014-05-25T17:00:00",
"end": "2014-05-26T17:00:00",
}]
}]
}
])
with raises(ValueError) as e_info:
datautils.summarize_all_levels(fm, 'emissions')
assert e_info.value.args[0] == activity.ActiveArea.MISSING_LOCATION_INFO_MSG
def test_no_fuelbeds(self):
fm = MockFiresManager([
{
"id": "SF11C14225236095807750",
"activity": [{
"active_areas": [{
"start": "2014-05-25T17:00:00",
"end": "2014-05-26T17:00:00",
'specified_points': [
{'area': 34, 'lat': 45.0, 'lng': -120.0}
]
}]
}]
}
])
datautils.summarize_all_levels(fm, 'emissions')
assert fm.fires == [{
"id": "SF11C14225236095807750",
'fuel_type': 'natural',
'type': 'wildfire',
"activity": [{
"active_areas": [{
"start": "2014-05-25T17:00:00",
"end": "2014-05-26T17:00:00",
'specified_points': [
{
'area': 34, 'lat': 45.0, 'lng': -120.0,
"emissions": {'summary': {'total': 0.0}}
}
],
"emissions": {'summary': {'total': 0.0}}
}],
"emissions": {'summary': {'total': 0.0}}
}],
"emissions": {'summary': {'total': 0.0}}
}]
assert fm.summary == {"emissions": {'summary': {'total': 0.0}}}
def test_one_fire_one_fuelbed_no_emissions(self):
fm = MockFiresManager([
{
"id": "SF11C14225236095807750",
"activity": [{
"active_areas": [{
"start": "2014-05-25T17:00:00",
"end": "2014-05-26T17:00:00",
'specified_points': [
{
'area': 34, 'lat': 45.0, 'lng': -120.0,
"fuelbeds": {}
}
]
}]
}]
}
])
datautils.summarize_all_levels(fm, 'emissions')
assert fm.fires == [{
"id": "SF11C14225236095807750",
'fuel_type': 'natural',
'type': 'wildfire',
"activity": [{
"active_areas": [{
"start": "2014-05-25T17:00:00",
"end": "2014-05-26T17:00:00",
'specified_points': [
{
'area': 34, 'lat': 45.0, 'lng': -120.0,
"fuelbeds": {},
"emissions": {'summary': {'total': 0.0}}
}
],
"emissions": {'summary': {'total': 0.0}}
}],
"emissions": {'summary': {'total': 0.0}}
}],
"emissions": {'summary': {'total': 0.0}}
}]
assert fm.summary == {"emissions": {'summary': {'total': 0.0}}}
def test_one_fire_one_fuelbed_with_emissions(self):
fm = MockFiresManager([
{
"id": "SF11C14225236095807750",
"activity": [{
"active_areas": [{
"start": "2014-05-25T17:00:00",
"end": "2014-05-26T17:00:00",
'specified_points': [
{
'area': 34, 'lat': 45.0, 'lng': -120.0,
"fuelbeds": [{
"emissions": {
"flaming": {"PM2.5": [10]},
"smoldering":{"PM2.5": [7]},
"total": {"PM2.5": [22]} # incorrect, but should be ignored
}
}]
}
]
}]
}]
}
])
datautils.summarize_all_levels(fm, 'emissions')
expected_fires = [{
"id": "SF11C14225236095807750",
'fuel_type': 'natural',
'type': 'wildfire',
"activity": [{
"active_areas": [{
"start": "2014-05-25T17:00:00",
"end": "2014-05-26T17:00:00",
'specified_points': [
{
'area': 34, 'lat': 45.0, 'lng': -120.0,
"fuelbeds": [{
"emissions": {
"flaming": {"PM2.5": [10]},
"smoldering":{"PM2.5": [7]},
"total": {"PM2.5": [22]} # incorrect, but should be ignored
}
}],
"emissions": {'summary': {'PM2.5': 17.0, 'total': 17.0}}
}
],
"emissions": {'summary': {'PM2.5': 17.0, 'total': 17.0}}
}],
"emissions": {'summary': {'PM2.5': 17.0, 'total': 17.0}}
}],
"emissions": {'summary': {'PM2.5': 17.0, 'total': 17.0}}
}]
assert fm.fires == expected_fires
assert fm.summary == {
"emissions": {
"flaming": {"PM2.5": [10]},
"smoldering": {"PM2.5": [7]},
'summary': {'PM2.5': 17.0, 'total': 17.0}
}
}
def test_one_fire_one_location_two_fuelbed(self):
fm = MockFiresManager([
{
"id": "SF11C14225236095807750",
"activity": [{
"active_areas": [{
"start": "2014-05-25T17:00:00",
"end": "2014-05-26T17:00:00",
'specified_points': [
{
'area': 34, 'lat': 45.0, 'lng': -120.0,
"fuelbeds": [
{
"emissions": {
"flaming": {"PM2.5": [10]},
"smoldering":{"PM2.5": [7]},
"total": {"PM2.5": [22]}, # incorrect, but should be ignored
}
},
{
"emissions": {
"flaming": {"PM2.5": [42]},
"residual":{"PM2.5": [1],"CO": [123]},
"total": {"PM2.5": [22]}, # incorrect, but should be ignored
}
}
]
}
]
}]
}]
}
])
datautils.summarize_all_levels(fm, 'emissions')
expected_fires = [{
"id": "SF11C14225236095807750",
'fuel_type': 'natural',
'type': 'wildfire',
"activity": [{
"active_areas": [{
"start": "2014-05-25T17:00:00",
"end": "2014-05-26T17:00:00",
'specified_points': [
{
'area': 34, 'lat': 45.0, 'lng': -120.0,
"fuelbeds": [
{
"emissions": {
"flaming": {"PM2.5": [10]},
"smoldering":{"PM2.5": [7]},
"total": {"PM2.5": [22]} # incorrect, but should be ignored
}
},
{
"emissions": {
"flaming": {"PM2.5": [42]},
"residual":{"PM2.5": [1],"CO": [123]},
"total": {"PM2.5": [22]} # incorrect, but should be ignored
}
}
],
"emissions": {'summary': {'PM2.5': 60.0, 'CO': 123.0, 'total': 183.0}}
}
],
"emissions": {'summary': {'PM2.5': 60.0, 'CO': 123.0, 'total': 183.0}}
}],
"emissions": {'summary': {'PM2.5': 60.0, 'CO': 123.0, 'total': 183.0}}
}],
"emissions": {'summary': {'PM2.5': 60.0, 'CO': 123.0, 'total': 183.0}}
}]
assert fm.fires == expected_fires
expected_summary = {
"emissions": {
"flaming": {"PM2.5": [52.0]},
"residual": {"PM2.5": [1.0], "CO": [123.0]},
"smoldering": {"PM2.5": [7.0]},
'summary': {'PM2.5': 60.0, "CO":123.0, 'total': 183.0}
}
}
assert fm.summary == expected_summary
def test_one_fire_one_ac_two_aa_two_locations_two_fuelbed(self):
pass
def test_one_fire_two_ac_two_aa_two_locations_two_fuelbed(self):
pass
def test_two_fires_two_ac_two_aa_two_locations_two_fuelbed(self):
fm = MockFiresManager([
{
"id": "SF11C14225236095807750",
"activity": [{
"active_areas": [{
"start": "2014-05-25T17:00:00",
"end": "2014-05-26T17:00:00",
'specified_points': [
{
'area': 34, 'lat': 45.0, 'lng': -120.0,
"fuelbeds": [
{
"emissions": {
"flaming": {"PM2.5": [10]},
"smoldering":{"PM2.5": [7]},
"total": {"PM2.5": [22]}, # incorrect, but should be ignored
}
}
]
}
]
}]
}]
},
{
"id": "sfkjfsdlksdflkjdf",
"activity": [{
"active_areas": [{
"start": "2014-05-25T17:00:00",
"end": "2014-05-26T17:00:00",
'specified_points': [
{
'area': 34, 'lat': 45.0, 'lng': -120.0,
"fuelbeds": [
{
"emissions": {
"flaming": {"PM2.5": [42]},
"residual":{"PM2.5": [1],"CO": [123]},
"total": {"PM2.5": [22]}, # incorrect, but should be ignored
}
}
]
}
]
}]
}]
}
])
datautils.summarize_all_levels(fm, 'emissions')
expected_fires = [
{
"id": "SF11C14225236095807750",
'fuel_type': 'natural',
'type': 'wildfire',
"activity": [{
"active_areas": [{
"start": "2014-05-25T17:00:00",
"end": "2014-05-26T17:00:00",
'specified_points': [
{
'area': 34, 'lat': 45.0, 'lng': -120.0,
"fuelbeds": [
{
"emissions": {
"flaming": {
"PM2.5": [10]
},
"smoldering":{
"PM2.5": [7]
},
"total": {
"PM2.5": [22] # incorrect, but should be ignored
},
}
}
],
"emissions": {'summary': {'PM2.5': 17.0, 'total': 17.0}}
}
],
"emissions": {'summary': {'PM2.5': 17.0, 'total': 17.0}}
}],
"emissions": {'summary': {'PM2.5': 17.0, 'total': 17.0}}
}],
"emissions": {'summary': {'PM2.5': 17.0, 'total': 17.0}}
},
{
"id": "sfkjfsdlksdflkjdf",
'fuel_type': 'natural',
'type': 'wildfire',
"activity": [{
"active_areas": [{
"start": "2014-05-25T17:00:00",
"end": "2014-05-26T17:00:00",
'specified_points': [
{
'area': 34, 'lat': 45.0, 'lng': -120.0,
"fuelbeds": [
{
"emissions": {
"flaming": {
"PM2.5": [42]
},
"residual":{
"PM2.5": [1],
"CO": [123]
},
"total": {
"PM2.5": [22] # incorrect, but should be ignored
},
}
}
],
"emissions": {'summary': {'PM2.5': 43.0, 'CO': 123.0, 'total': 166.0}}
}
],
"emissions": {'summary': {'PM2.5': 43.0, 'CO': 123.0, 'total': 166.0}}
}],
"emissions": {'summary': {'PM2.5': 43.0, 'CO': 123.0, 'total': 166.0}}
}],
"emissions": {'summary': {'PM2.5': 43.0, 'CO': 123.0, 'total': 166.0}}
}
]
assert fm.fires == expected_fires
expected_summary = {
"emissions": {
"flaming": {"PM2.5": [52.0]},
"residual": {"PM2.5": [1.0], "CO": [123.0]},
"smoldering": {"PM2.5": [7.0]},
'summary': {'PM2.5': 60.0, "CO":123.0, 'total': 183.0}
}
}
assert fm.summary == expected_summary
def test_multi(self):
pass
| gpl-3.0 | 92,842,793,735,075,460 | 38.78821 | 104 | 0.305493 | false |
appleseedhq/cortex | python/IECoreMaya/Collapsible.py | 5 | 8941 | ##########################################################################
#
# Copyright (c) 2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import maya.cmds
import maya.OpenMaya
import IECoreMaya
## In Maya 2011 and 2012, the collapsible frameLayout became rather ugly,
# and stopped indenting the arrow with the label. This made complex UIs
# consisting of lots of ClassVectorParameters and ClassParameters somewhat
# unreadable. So we introduce this class to get back some control. Aside
# from spelling collapsible properly and being prettier, this class also
# has the advantage of supporting annotations which are displayed on the label.
# As with the maya frameLayout, the preExpandCommand, expandCommand and
# collapseCommand are only called as a result of user action, and never as
# a result of a call to setCollapsed or getCollapsed. There are separate
# implementations for maya before qt and maya after qt.
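#
# A minimal usage sketch (not part of the original file; it assumes Collapsible
# is exposed from the IECoreMaya package, that a Maya session is running, and
# that the window/control names are illustrative only) :
#
#	window = maya.cmds.window( title="Collapsible example" )
#	maya.cmds.columnLayout( adjustableColumn=True )
#	section = IECoreMaya.Collapsible( label="Options", collapsed=False, annotation="Example section" )
#	maya.cmds.setParent( section.frameLayout() )
#	maya.cmds.button( label="A control inside the collapsible section" )
#	maya.cmds.showWindow( window )
#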
class _CollapsibleMotif( IECoreMaya.UIElement ) :
def __init__( self,
label="",
labelVisible=True,
labelIndent=0,
labelFont = "boldLabelFont",
annotation="",
collapsed = True,
preExpandCommand = None,
expandCommand = None,
collapseCommand = None,
) :
kw = {}
if preExpandCommand is not None :
kw["preExpandCommand"] = preExpandCommand
if expandCommand is not None :
kw["expandCommand"] = expandCommand
if collapseCommand is not None :
kw["collapseCommand"] = collapseCommand
# implementation for motif is pretty simple - just a frame layout
IECoreMaya.UIElement.__init__( self,
maya.cmds.frameLayout(
label = label,
labelVisible = labelVisible,
labelIndent = labelIndent,
labelAlign = "center",
font = labelFont,
borderVisible = False,
collapsable = True,
collapse = collapsed,
marginWidth = 0,
**kw
)
)
# can't display it but at least we can store it
self.__annotation = annotation
self.__frameLayout = self._topLevelUI()
## The maya frameLayout whose collapsibility is controlled by this
# class. Add children by editing the contents of this layout.
def frameLayout( self ) :
return self._topLevelUI()
def setLabel( self, label ) :
maya.cmds.frameLayout( self.frameLayout(), edit=True, label = label )
def getLabel( self ) :
return maya.cmds.frameLayout( self.frameLayout(), query=True, label = True )
def setAnnotation( self, annotation ) :
self.__annotation = annotation
def getAnnotation( self ) :
return self.__annotation
def getCollapsed( self ) :
return maya.cmds.frameLayout( self.frameLayout(), query=True, collapse=True )
def setCollapsed( self, collapsed ) :
maya.cmds.frameLayout( self.frameLayout(), edit=True, collapse=collapsed )
class _CollapsibleQt( IECoreMaya.UIElement ) :
def __init__( self,
label="",
labelVisible=True,
labelIndent=0,
labelFont = "boldLabelFont",
annotation="",
collapsed = True,
preExpandCommand = None,
expandCommand = None,
collapseCommand = None,
) :
IECoreMaya.UIElement.__init__( self, maya.cmds.formLayout() )
attachForm = []
attachControl = []
# make the layout to put things in. this is actually a frameLayout, just
# with the ugly header bit we don't like hidden.
########################################################################
self.__frameLayout = maya.cmds.frameLayout(
labelVisible = False,
borderVisible = False,
collapsable = True,
collapse = collapsed,
marginWidth = 0,
)
# passing borderVisible=False to the constructor does bugger all so we have to do it with
# an edit
maya.cmds.frameLayout( self.__frameLayout, edit=True, borderVisible=False, marginWidth=0 )
attachForm.append( ( self.__frameLayout, "left", 0 ) )
attachForm.append( ( self.__frameLayout, "right", 0 ) )
attachForm.append( ( self.__frameLayout, "bottom", 0 ) )
# optional header, with the triangle for expanding and collapsing
########################################################################
self.__collapsibleIcon = None
self.__labelControl = None
if labelVisible :
# have to make one button for the icon and one for the label
# because otherwise the icon size changes when we toggle
# the image, and the text moves.
self.__collapsibleIcon = maya.cmds.iconTextButton(
parent = self._topLevelUI(),
height = 20,
width = 15,
image = "arrowRight.xpm",
command = self.__toggle,
annotation = annotation,
)
self.__labelControl = maya.cmds.iconTextButton(
parent = self._topLevelUI(),
height = 20,
label = label,
# the font flag appears to do nothing, but maybe it will
# miraculously be supported in the future?
font = labelFont,
style = "textOnly",
command = self.__toggle,
annotation = annotation,
)
attachForm.append( ( self.__collapsibleIcon, "left", labelIndent ) )
attachForm.append( ( self.__collapsibleIcon, "top", 0 ) )
attachForm.append( ( self.__labelControl, "top", 0 ) )
attachControl.append( ( self.__labelControl, "left", 0, self.__collapsibleIcon ) )
attachControl.append( ( self.__frameLayout, "top", 0, self.__labelControl ) )
else :
attachForm.append( ( self.__frameLayout, "top", 0 ) )
maya.cmds.formLayout(
self._topLevelUI(),
edit = True,
attachForm = attachForm,
attachControl = attachControl,
)
maya.cmds.setParent( self.__frameLayout )
self.__annotation = annotation
self.__labelText = label
self.__preExpandCommand = preExpandCommand
self.__expandCommand = expandCommand
self.__collapseCommand = collapseCommand
## The maya frameLayout whose collapsibility is controlled by this
# class. Add children by editing the contents of this layout.
def frameLayout( self ) :
return self.__frameLayout
def setLabel( self, label ) :
self.__labelText = label
if self.__labelControl is not None :
maya.cmds.iconTextButton( self.__labelControl, edit=True, label=label )
def getLabel( self ) :
return self.__labelText
def setAnnotation( self, annotation ) :
self.__annotation = annotation
if self.__labelControl is not None :
maya.cmds.iconTextButton( self.__labelControl, edit=True, annotation=annotation )
maya.cmds.iconTextButton( self.__collapsibleIcon, edit=True, annotation=annotation )
def getAnnotation( self ) :
return self.__annotation
def getCollapsed( self ) :
return maya.cmds.frameLayout( self.__frameLayout, query=True, collapse=True )
def setCollapsed( self, collapsed ) :
maya.cmds.frameLayout( self.__frameLayout, edit=True, collapse=collapsed )
if self.__collapsibleIcon is not None :
maya.cmds.iconTextButton(
self.__collapsibleIcon,
edit = True,
image = "arrowRight.xpm" if collapsed else "arrowDown.xpm",
)
def __toggle( self ) :
collapsed = not self.getCollapsed()
if not collapsed and self.__preExpandCommand is not None :
self.__preExpandCommand()
self.setCollapsed( not self.getCollapsed() )
if collapsed :
if self.__collapseCommand is not None :
self.__collapseCommand()
else :
if self.__expandCommand is not None :
self.__expandCommand()
# choose the right implementation based on the current maya version
if maya.OpenMaya.MGlobal.apiVersion() >= 201100 :
Collapsible = _CollapsibleQt
else :
Collapsible = _CollapsibleMotif
| bsd-3-clause | -3,500,785,268,951,882,000 | 29.831034 | 92 | 0.690527 | false |
lefnire/tensorforce | tensorforce/models/q_naf_model.py | 1 | 8828 | # Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange
import tensorflow as tf
from tensorforce import util, TensorForceError
from tensorforce.models import QModel
from tensorforce.core.networks import Linear
class QNAFModel(QModel):
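    """
    Q-model using Normalized Advantage Functions (NAF): the action distribution
    parameters provide a mean action and the diagonal of a lower-triangular
    matrix L (off-diagonal entries come from an additional linear layer), and
    the Q-value is reconstructed as

        Q(s, a) = V(s) - 0.5 * (a - mean)^T L L^T (a - mean)

    where V(s) is produced by a separate linear state-value layer. Only
    unconstrained float actions are supported.
    """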
def __init__(
self,
states,
actions,
scope,
device,
saver,
summarizer,
execution,
batching_capacity,
variable_noise,
states_preprocessing,
actions_exploration,
reward_preprocessing,
update_mode,
memory,
optimizer,
discount,
network,
distributions,
entropy_regularization,
target_sync_frequency,
target_update_weight,
double_q_model,
huber_loss
):
if any(action['type'] != 'float' or 'min_value' in action or 'max_value' in action for action in actions.values()):
raise TensorForceError("Only unconstrained float actions valid for NAFModel.")
super(QNAFModel, self).__init__(
states=states,
actions=actions,
scope=scope,
device=device,
saver=saver,
summarizer=summarizer,
execution=execution,
batching_capacity=batching_capacity,
variable_noise=variable_noise,
states_preprocessing=states_preprocessing,
actions_exploration=actions_exploration,
reward_preprocessing=reward_preprocessing,
update_mode=update_mode,
memory=memory,
optimizer=optimizer,
discount=discount,
network=network,
distributions=distributions,
entropy_regularization=entropy_regularization,
target_sync_frequency=target_sync_frequency,
target_update_weight=target_update_weight,
double_q_model=double_q_model,
huber_loss=huber_loss
)
def initialize(self, custom_getter):
super(QNAFModel, self).initialize(custom_getter)
self.state_values = dict()
self.l_entries = dict()
for name, action in self.actions_spec.items():
num_action = util.prod(action['shape'])
self.state_values[name] = Linear(size=num_action, scope='state-value')
self.l_entries[name] = Linear(size=(num_action * (num_action - 1) // 2), scope='l-entries')
def tf_q_value(self, embedding, distr_params, action, name):
num_action = util.prod(self.actions_spec[name]['shape'])
mean, stddev, _ = distr_params
flat_mean = tf.reshape(tensor=mean, shape=(-1, num_action))
flat_stddev = tf.reshape(tensor=stddev, shape=(-1, num_action))
# Advantage computation
# Network outputs entries of lower triangular matrix L
if self.l_entries[name] is None:
l_matrix = flat_stddev
l_matrix = tf.exp(l_matrix)
else:
l_matrix = tf.map_fn(fn=tf.diag, elems=flat_stddev)
l_entries = self.l_entries[name].apply(x=embedding)
l_entries = tf.exp(l_entries)
offset = 0
columns = list()
for zeros, size in enumerate(xrange(num_action - 1, -1, -1), 1):
column = tf.pad(tensor=l_entries[:, offset: offset + size], paddings=((0, 0), (zeros, 0)))
columns.append(column)
offset += size
l_matrix += tf.stack(values=columns, axis=1)
# P = LL^T
p_matrix = tf.matmul(a=l_matrix, b=tf.transpose(a=l_matrix, perm=(0, 2, 1)))
# A = -0.5 (a - mean)P(a - mean)
flat_action = tf.reshape(tensor=action, shape=(-1, num_action))
difference = flat_action - flat_mean
advantage = tf.matmul(a=p_matrix, b=tf.expand_dims(input=difference, axis=2))
advantage = tf.matmul(a=tf.expand_dims(input=difference, axis=1), b=advantage)
advantage = tf.squeeze(input=(-advantage / 2.0), axis=2)
# Q = A + V
# State-value function
state_value = self.state_values[name].apply(x=embedding)
q_value = state_value + advantage
return tf.reshape(tensor=q_value, shape=((-1,) + self.actions_spec[name]['shape']))
def tf_loss_per_instance(self, states, internals, actions, terminal, reward, next_states, next_internals, update, reference=None):
# Michael: doubling this function because NAF needs V'(s) not Q'(s), see comment below
embedding = self.network.apply(x=states, internals=internals, update=update)
# Both networks can use the same internals, could that be a problem?
# Otherwise need to handle internals indices correctly everywhere
target_embedding = self.target_network.apply(
x=next_states,
internals=next_internals,
update=update
)
deltas = list()
for name, distribution in self.distributions.items():
target_distribution = self.target_distributions[name]
distr_params = distribution.parameterize(x=embedding)
target_distr_params = target_distribution.parameterize(x=target_embedding)
q_value = self.tf_q_value(embedding=embedding, distr_params=distr_params, action=actions[name], name=name)
# Notice, this is V', not Q' because NAF outputs V(s) separately
next_state_value = target_distribution.state_value(distr_params=target_distr_params)
delta = self.tf_q_delta(q_value=q_value, next_q_value=next_state_value, terminal=terminal, reward=reward)
collapsed_size = util.prod(util.shape(delta)[1:])
delta = tf.reshape(tensor=delta, shape=(-1, collapsed_size))
deltas.append(delta)
# Surrogate loss as the mean squared error between actual observed rewards and expected rewards
loss_per_instance = tf.reduce_mean(input_tensor=tf.concat(values=deltas, axis=1), axis=1)
if self.huber_loss is not None and self.huber_loss > 0.0:
return tf.where(
condition=(tf.abs(x=loss_per_instance) <= self.huber_loss),
x=(0.5 * tf.square(x=loss_per_instance)),
y=(self.huber_loss * (tf.abs(x=loss_per_instance) - 0.5 * self.huber_loss))
)
else:
return tf.square(x=loss_per_instance)
def tf_regularization_losses(self, states, internals, update):
losses = super(QNAFModel, self).tf_regularization_losses(
states=states,
internals=internals,
update=update
)
for state_value in self.state_values.values():
regularization_loss = state_value.regularization_loss()
if regularization_loss is not None:
if 'state-values' in losses:
losses['state-values'] += regularization_loss
else:
losses['state-values'] = regularization_loss
for l_entries in self.l_entries.values():
regularization_loss = l_entries.regularization_loss()
if regularization_loss is not None:
if 'l-entries' in losses:
losses['l-entries'] += regularization_loss
else:
losses['l-entries'] = regularization_loss
return losses
def get_variables(self, include_submodules=False, include_nontrainable=False):
model_variables = super(QNAFModel, self).get_variables(
include_submodules=include_submodules,
include_nontrainable=include_nontrainable
)
state_values_variables = [
variable for name in sorted(self.state_values)
for variable in self.state_values[name].get_variables()
]
model_variables += state_values_variables
l_entries_variables = [
variable for name in sorted(self.l_entries)
for variable in self.l_entries[name].get_variables()
]
model_variables += l_entries_variables
return model_variables
| apache-2.0 | 6,982,727,479,257,415,000 | 38.410714 | 134 | 0.609538 | false |
victorianorton/SimpleRPGGame | src/game/Creature.py | 1 | 2202 |
from src.common.Observable import *
from src.game.AttackSpell import *
from src.game.AttackWeapon import *
from src.game.Attributes import *
from src.game.visit import *
from src.game.Heroes import *
#from src.game.AttackInventory import*
from random import *
class Creatures(Observable, GameAttributes, Visitor):
def __init__(self):
super(Creatures, self).__init__()
self._attackSpell = AttackSpell()
self._attackWeapon = AttackWeapon()
self._name = ''
self.gameAttributes = GameAttributes()
self.health = 1
self.healthMax = 1
def doDamage(self, monster):
self.damage = min(
max(randint(0, 2) - randint(0, monster.health), 0),
monster.health)
monster.health -= self.damage
if self.damage == 0:
print ("%s avoids heros's attack." % monster)
else:
print ("hero injures %s!" % monster)
return monster.health <= 0
def setAttStr(self, strength, con, dex, intt):
self.Strength = strength
self.Constitution = con
self.Dexterity = dex
self.Intelligence = intt
def setAttackSpell(self, attackSpell):
self.attackSpell = attackSpell
def setAttackWeapon(self, attackWeapon):
self.attackWeapon = attackWeapon
def AttackSpell(self):
self.attackSpell()
def AttackWeapon(self):
self.attackWeapon.attackWeapon()
def planMove(self):
self.ready = True
def roam(self):
print ("%s is roaming around the castle" % self._name)
self.notifyObservers()
def act(self):
if self.ready:
self.roam()
self.ready = False
def north(self):
print ("%s is moving in the direction north" % self._name)
self.roam()
self.notifyObservers()
def south(self):
print ("%s is moving in the direction south" % self._name)
self.roam()
def east(self):
print ("%s is moving in the direction east" % self._name)
self.roam()
def west(self):
print ("%s is moving in the direction west" % self._name)
self.roam()
def Display(self):
pass | mit | 8,173,256,232,380,542,000 | 26.197531 | 66 | 0.601726 | false |
maulik13/django-user-registration | user_registration/backends/default.py | 1 | 2007 | from django.contrib.sites.models import Site
from django.contrib.sites.models import RequestSite
from user_registration.models import UserRegistration
class DefaultRegistrationBackend(object):
"""
    This backend defines the registration and activation processes.
    @register: what to do after valid registration form data is received
    @activate: activation process for a user based on registration data
    @is_registration_open: defines whether registration is open
"""
def register(self, request, **kwargs):
"""
Registration process is defined in this method. This should do the following:
1. Store the appropriate data based on your logic
        2. Send an email / SMS or perform any other action required by the registration process
        'kwargs' should contain all the required parameters to create a user;
        this can be confirmed against the User model's REQUIRED_FIELDS list plus USERNAME_FIELD
"""
# create the user and registration data for this user
new_user, reg_data = UserRegistration.objects.register_user(**kwargs)
# Send an email
if Site._meta.installed:
site = Site.objects.get_current()
else:
site = RequestSite(request)
reg_data.send_activation_email(site)
return new_user
def activate(self, request, activation_key):
"""
        Activation process should be defined here. By default, it only checks the
        activation key when the user accesses this URL.
        You could also check a secret code provided by the user in addition to
        the key; such a code can be sent to the user during the registration
        process by email, SMS, etc.
"""
activated = UserRegistration.objects.activate_user(activation_key)
return activated
def is_registration_open(self):
"""
Override this method to add logic for deciding when registration is allowed
"""
return True
| mit | -1,950,066,381,843,887,600 | 38.352941 | 92 | 0.683109 | false |
eric-stanley/NewsBlur | apps/social/models.py | 1 | 137635 | import datetime
import time
import zlib
import hashlib
import redis
import re
import mongoengine as mongo
import random
import requests
import HTMLParser
from collections import defaultdict
from pprint import pprint
from BeautifulSoup import BeautifulSoup
from mongoengine.queryset import Q
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from django.template.defaultfilters import slugify
from django.core.mail import EmailMultiAlternatives
from apps.reader.models import UserSubscription, RUserStory
from apps.analyzer.models import MClassifierFeed, MClassifierAuthor, MClassifierTag, MClassifierTitle
from apps.analyzer.models import apply_classifier_titles, apply_classifier_feeds, apply_classifier_authors, apply_classifier_tags
from apps.rss_feeds.models import Feed, MStory
from apps.rss_feeds.text_importer import TextImporter
from apps.profile.models import Profile, MSentEmail
from vendor import facebook
from vendor import tweepy
from vendor import appdotnet
from vendor import pynliner
from utils import log as logging
from utils import json_functions as json
from utils.feed_functions import relative_timesince, chunks
from utils.story_functions import truncate_chars, strip_tags, linkify, image_size
from utils.scrubber import SelectiveScriptScrubber
from utils import s3_utils
from StringIO import StringIO
RECOMMENDATIONS_LIMIT = 5
IGNORE_IMAGE_SOURCES = [
"http://feeds.feedburner.com"
]
class MRequestInvite(mongo.Document):
email = mongo.EmailField()
request_date = mongo.DateTimeField(default=datetime.datetime.now)
invite_sent = mongo.BooleanField(default=False)
invite_sent_date = mongo.DateTimeField()
meta = {
'collection': 'social_invites',
'allow_inheritance': False,
}
def __unicode__(self):
return "%s%s" % (self.email, '*' if self.invite_sent else '')
@classmethod
def blast(cls):
        invites = cls.objects.filter(invite_sent=False)
print ' ---> Found %s invites...' % invites.count()
for invite in invites:
try:
invite.send_email()
except:
                print ' ***> Could not send invite to: %s. Deleting.' % invite.email
invite.delete()
def send_email(self):
        user = User.objects.filter(username__iexact=self.email)
        if not user:
            user = User.objects.filter(email__iexact=self.email)
        if user:
            user = user[0]
            email = user.email or self.email
        else:
            user = {
                'username': self.email,
                'profile': {
                    'autologin_url': '/',
                }
            }
            email = self.email
params = {
'user': user,
}
text = render_to_string('mail/email_social_beta.txt', params)
html = render_to_string('mail/email_social_beta.xhtml', params)
subject = "Psst, you're in..."
msg = EmailMultiAlternatives(subject, text,
from_email='NewsBlur <%s>' % settings.HELLO_EMAIL,
to=['<%s>' % (email)])
msg.attach_alternative(html, "text/html")
msg.send()
        self.invite_sent = True
        self.invite_sent_date = datetime.datetime.now()
        self.save()
        logging.debug(" ---> ~BB~FM~SBSending email for social beta: %s" % self.email)
class MSocialProfile(mongo.Document):
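    """
    Social profile for a NewsBlur user: mirrors the username/email of the
    Django User, stores blurblog settings and cached statistics, and keeps
    follower/following relationships both in Mongo (the *_user_ids lists) and
    in Redis sets ("F:<user_id>:F" for following, "F:<user_id>:f" for followers).
    """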
user_id = mongo.IntField(unique=True)
username = mongo.StringField(max_length=30, unique=True)
email = mongo.StringField()
bio = mongo.StringField(max_length=160)
blurblog_title = mongo.StringField(max_length=256)
custom_bgcolor = mongo.StringField(max_length=50)
custom_css = mongo.StringField()
photo_url = mongo.StringField()
photo_service = mongo.StringField()
location = mongo.StringField(max_length=40)
website = mongo.StringField(max_length=200)
bb_permalink_direct = mongo.BooleanField()
subscription_count = mongo.IntField(default=0)
shared_stories_count = mongo.IntField(default=0)
following_count = mongo.IntField(default=0)
follower_count = mongo.IntField(default=0)
following_user_ids = mongo.ListField(mongo.IntField())
follower_user_ids = mongo.ListField(mongo.IntField())
unfollowed_user_ids = mongo.ListField(mongo.IntField())
requested_follow_user_ids = mongo.ListField(mongo.IntField())
popular_publishers = mongo.StringField()
stories_last_month = mongo.IntField(default=0)
average_stories_per_month = mongo.IntField(default=0)
story_count_history = mongo.ListField()
feed_classifier_counts = mongo.DictField()
favicon_color = mongo.StringField(max_length=6)
protected = mongo.BooleanField()
private = mongo.BooleanField()
meta = {
'collection': 'social_profile',
'indexes': ['user_id', 'following_user_ids', 'follower_user_ids', 'unfollowed_user_ids', 'requested_follow_user_ids'],
'allow_inheritance': False,
'index_drop_dups': True,
}
def __unicode__(self):
return "%s [%s] following %s/%s, shared %s" % (self.username, self.user_id,
self.following_count, self.follower_count, self.shared_stories_count)
@classmethod
def get_user(cls, user_id):
profile, created = cls.objects.get_or_create(user_id=user_id)
if created:
profile.save()
return profile
def save(self, *args, **kwargs):
if not self.username:
self.import_user_fields()
if not self.subscription_count:
self.count_follows(skip_save=True)
if self.bio and len(self.bio) > MSocialProfile.bio.max_length:
self.bio = self.bio[:80]
if self.bio:
self.bio = strip_tags(self.bio)
if self.website:
self.website = strip_tags(self.website)
if self.location:
self.location = strip_tags(self.location)
if self.custom_css:
self.custom_css = strip_tags(self.custom_css)
super(MSocialProfile, self).save(*args, **kwargs)
if self.user_id not in self.following_user_ids:
self.follow_user(self.user_id, force=True)
self.count_follows()
return self
@property
def blurblog_url(self):
return "http://%s.%s" % (
self.username_slug,
Site.objects.get_current().domain.replace('www.', ''))
@property
def blurblog_rss(self):
return "%s%s" % (self.blurblog_url, reverse('shared-stories-rss-feed',
kwargs={'user_id': self.user_id,
'username': self.username_slug}))
def find_stories(self, query, offset=0, limit=25):
stories_db = MSharedStory.objects(
Q(user_id=self.user_id) &
(Q(story_title__icontains=query) |
Q(story_author_name__icontains=query) |
Q(story_tags__icontains=query))
).order_by('-shared_date')[offset:offset+limit]
stories = Feed.format_stories(stories_db)
return stories
def recommended_users(self):
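        """
        Suggest up to RECOMMENDATIONS_LIMIT profiles to follow: start with
        NewsBlur accounts matched from the user's Twitter/Facebook friends,
        drop anyone already followed (Redis sdiff against "F:<user_id>:F"),
        pad with random picks from the accounts the 'popular' user follows,
        and return the result sorted by shared story count.
        """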
r = redis.Redis(connection_pool=settings.REDIS_POOL)
following_key = "F:%s:F" % (self.user_id)
social_follow_key = "FF:%s:F" % (self.user_id)
profile_user_ids = []
# Find potential twitter/fb friends
services = MSocialServices.objects.get(user_id=self.user_id)
facebook_user_ids = [u.user_id for u in
MSocialServices.objects.filter(facebook_uid__in=services.facebook_friend_ids).only('user_id')]
twitter_user_ids = [u.user_id for u in
MSocialServices.objects.filter(twitter_uid__in=services.twitter_friend_ids).only('user_id')]
social_user_ids = facebook_user_ids + twitter_user_ids
# Find users not currently followed by this user
r.delete(social_follow_key)
nonfriend_user_ids = []
if social_user_ids:
r.sadd(social_follow_key, *social_user_ids)
nonfriend_user_ids = r.sdiff(social_follow_key, following_key)
profile_user_ids = [int(f) for f in nonfriend_user_ids]
r.delete(social_follow_key)
# Not enough? Grab popular users.
if len(nonfriend_user_ids) < RECOMMENDATIONS_LIMIT:
homepage_user = User.objects.get(username='popular')
suggested_users_list = r.sdiff("F:%s:F" % homepage_user.pk, following_key)
suggested_users_list = [int(f) for f in suggested_users_list]
suggested_user_ids = []
slots_left = min(len(suggested_users_list), RECOMMENDATIONS_LIMIT - len(nonfriend_user_ids))
for slot in range(slots_left):
suggested_user_ids.append(random.choice(suggested_users_list))
profile_user_ids.extend(suggested_user_ids)
# Sort by shared story count
profiles = MSocialProfile.profiles(profile_user_ids).order_by('-shared_stories_count')[:RECOMMENDATIONS_LIMIT]
return profiles
@property
def username_slug(self):
return slugify(self.username)
def count_stories(self):
# Popular Publishers
self.save_popular_publishers()
def save_popular_publishers(self, feed_publishers=None):
if not feed_publishers:
publishers = defaultdict(int)
for story in MSharedStory.objects(user_id=self.user_id).only('story_feed_id')[:500]:
publishers[story.story_feed_id] += 1
feed_titles = dict((f.id, f.feed_title)
for f in Feed.objects.filter(pk__in=publishers.keys()).only('id', 'feed_title'))
feed_publishers = sorted([{'id': k, 'feed_title': feed_titles[k], 'story_count': v}
for k, v in publishers.items()
if k in feed_titles],
key=lambda f: f['story_count'],
reverse=True)[:20]
popular_publishers = json.encode(feed_publishers)
if len(popular_publishers) < 1023:
self.popular_publishers = popular_publishers
self.save()
return
if len(popular_publishers) > 1:
self.save_popular_publishers(feed_publishers=feed_publishers[:-1])
@classmethod
def profile(cls, user_id, include_follows=True):
profile = cls.get_user(user_id)
return profile.canonical(include_follows=True)
@classmethod
def profiles(cls, user_ids):
profiles = cls.objects.filter(user_id__in=user_ids)
return profiles
@classmethod
def profile_feeds(cls, user_ids):
profiles = cls.objects.filter(user_id__in=user_ids)
profiles = dict((p.user_id, p.feed()) for p in profiles)
return profiles
@classmethod
def sync_all_redis(cls):
for profile in cls.objects.all():
profile.sync_redis(force=True)
def sync_redis(self, force=False):
self.following_user_ids = list(set(self.following_user_ids))
self.save()
for user_id in self.following_user_ids:
self.follow_user(user_id, force=force)
self.follow_user(self.user_id)
@property
def title(self):
return self.blurblog_title if self.blurblog_title else self.username + "'s blurblog"
def feed(self):
params = self.canonical(compact=True)
params.update({
'feed_title': self.title,
'page_url': reverse('load-social-page', kwargs={'user_id': self.user_id, 'username': self.username_slug}),
'shared_stories_count': self.shared_stories_count,
})
return params
def page(self):
params = self.canonical(include_follows=True)
params.update({
'feed_title': self.title,
'custom_css': self.custom_css,
})
return params
@property
def profile_photo_url(self):
if self.photo_url:
return self.photo_url
return settings.MEDIA_URL + 'img/reader/default_profile_photo.png'
@property
def large_photo_url(self):
photo_url = self.email_photo_url
if 'graph.facebook.com' in photo_url:
return photo_url + '?type=large'
elif 'twimg' in photo_url:
return photo_url.replace('_normal', '')
elif '/avatars/' in photo_url:
return photo_url.replace('thumbnail_', 'large_')
return photo_url
@property
def email_photo_url(self):
if self.photo_url:
if self.photo_url.startswith('//'):
self.photo_url = 'http:' + self.photo_url
return self.photo_url
domain = Site.objects.get_current().domain
return 'http://' + domain + settings.MEDIA_URL + 'img/reader/default_profile_photo.png'
def canonical(self, compact=False, include_follows=False, common_follows_with_user=None,
include_settings=False, include_following_user=None):
domain = Site.objects.get_current().domain
params = {
'id': 'social:%s' % self.user_id,
'user_id': self.user_id,
'username': self.username,
'photo_url': self.email_photo_url,
'large_photo_url': self.large_photo_url,
'location': self.location,
'num_subscribers': self.follower_count,
'feed_title': self.title,
'feed_address': "http://%s%s" % (domain, reverse('shared-stories-rss-feed',
kwargs={'user_id': self.user_id, 'username': self.username_slug})),
'feed_link': self.blurblog_url,
'protected': self.protected,
'private': self.private,
}
if not compact:
params.update({
'large_photo_url': self.large_photo_url,
'bio': self.bio,
'website': self.website,
'shared_stories_count': self.shared_stories_count,
'following_count': self.following_count,
'follower_count': self.follower_count,
'popular_publishers': json.decode(self.popular_publishers),
'stories_last_month': self.stories_last_month,
'average_stories_per_month': self.average_stories_per_month,
})
if include_settings:
params.update({
'custom_css': self.custom_css,
'custom_bgcolor': self.custom_bgcolor,
'bb_permalink_direct': self.bb_permalink_direct,
})
if include_follows:
params.update({
'photo_service': self.photo_service,
'following_user_ids': self.following_user_ids_without_self[:48],
'follower_user_ids': self.follower_user_ids_without_self[:48],
})
if common_follows_with_user:
FOLLOWERS_LIMIT = 128
with_user = MSocialProfile.get_user(common_follows_with_user)
followers_youknow, followers_everybody = with_user.common_follows(self.user_id, direction='followers')
following_youknow, following_everybody = with_user.common_follows(self.user_id, direction='following')
params['followers_youknow'] = followers_youknow[:FOLLOWERS_LIMIT]
params['followers_everybody'] = followers_everybody[:FOLLOWERS_LIMIT]
params['following_youknow'] = following_youknow[:FOLLOWERS_LIMIT]
params['following_everybody'] = following_everybody[:FOLLOWERS_LIMIT]
params['requested_follow'] = common_follows_with_user in self.requested_follow_user_ids
if include_following_user or common_follows_with_user:
if not include_following_user:
include_following_user = common_follows_with_user
if include_following_user != self.user_id:
params['followed_by_you'] = bool(self.is_followed_by_user(include_following_user))
params['following_you'] = self.is_following_user(include_following_user)
return params
@property
def following_user_ids_without_self(self):
if self.user_id in self.following_user_ids:
return [u for u in self.following_user_ids if u != self.user_id]
return self.following_user_ids
@property
def follower_user_ids_without_self(self):
if self.user_id in self.follower_user_ids:
return [u for u in self.follower_user_ids if u != self.user_id]
return self.follower_user_ids
def import_user_fields(self, skip_save=False):
user = User.objects.get(pk=self.user_id)
self.username = user.username
self.email = user.email
def count_follows(self, skip_save=False):
self.subscription_count = UserSubscription.objects.filter(user__pk=self.user_id).count()
self.shared_stories_count = MSharedStory.objects.filter(user_id=self.user_id).count()
self.following_count = len(self.following_user_ids_without_self)
self.follower_count = len(self.follower_user_ids_without_self)
if not skip_save:
self.save()
def follow_user(self, user_id, check_unfollowed=False, force=False):
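        """
        Follow another user. For protected accounts (unless force=True) this
        only records a follow request and emails the followee; otherwise it
        updates both profiles, mirrors the edge into the Redis sets
        "F:<follower>:F" and "F:<followee>:f", creates the MSocialSubscription,
        records interaction/activity entries, and queues a new-follower email.
        """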
r = redis.Redis(connection_pool=settings.REDIS_POOL)
if check_unfollowed and user_id in self.unfollowed_user_ids:
return
if self.user_id == user_id:
followee = self
else:
followee = MSocialProfile.get_user(user_id)
logging.debug(" ---> ~FB~SB%s~SN (%s) following %s" % (self.username, self.user_id, user_id))
if not followee.protected or force:
if user_id not in self.following_user_ids:
self.following_user_ids.append(user_id)
elif not force:
return
if user_id in self.unfollowed_user_ids:
self.unfollowed_user_ids.remove(user_id)
self.count_follows()
self.save()
if followee.protected and user_id != self.user_id and not force:
if self.user_id not in followee.requested_follow_user_ids:
followee.requested_follow_user_ids.append(self.user_id)
MFollowRequest.add(self.user_id, user_id)
elif self.user_id not in followee.follower_user_ids:
followee.follower_user_ids.append(self.user_id)
followee.count_follows()
followee.save()
if followee.protected and user_id != self.user_id and not force:
from apps.social.tasks import EmailFollowRequest
EmailFollowRequest.apply_async(kwargs=dict(follower_user_id=self.user_id,
followee_user_id=user_id),
countdown=settings.SECONDS_TO_DELAY_CELERY_EMAILS)
return
following_key = "F:%s:F" % (self.user_id)
r.sadd(following_key, user_id)
follower_key = "F:%s:f" % (user_id)
r.sadd(follower_key, self.user_id)
if user_id != self.user_id:
MInteraction.new_follow(follower_user_id=self.user_id, followee_user_id=user_id)
MActivity.new_follow(follower_user_id=self.user_id, followee_user_id=user_id)
socialsub, _ = MSocialSubscription.objects.get_or_create(user_id=self.user_id,
subscription_user_id=user_id)
socialsub.needs_unread_recalc = True
socialsub.save()
MFollowRequest.remove(self.user_id, user_id)
if not force:
from apps.social.tasks import EmailNewFollower
EmailNewFollower.apply_async(kwargs=dict(follower_user_id=self.user_id,
followee_user_id=user_id),
countdown=settings.SECONDS_TO_DELAY_CELERY_EMAILS)
return socialsub
def is_following_user(self, user_id):
# XXX TODO: Outsource to redis
return user_id in self.following_user_ids
def is_followed_by_user(self, user_id):
# XXX TODO: Outsource to redis
return user_id in self.follower_user_ids
def unfollow_user(self, user_id):
r = redis.Redis(connection_pool=settings.REDIS_POOL)
if not isinstance(user_id, int):
user_id = int(user_id)
if user_id == self.user_id:
# Only unfollow other people, not yourself.
return
if user_id in self.following_user_ids:
self.following_user_ids.remove(user_id)
if user_id not in self.unfollowed_user_ids:
self.unfollowed_user_ids.append(user_id)
self.count_follows()
self.save()
followee = MSocialProfile.get_user(user_id)
if self.user_id in followee.follower_user_ids:
followee.follower_user_ids.remove(self.user_id)
followee.count_follows()
followee.save()
if self.user_id in followee.requested_follow_user_ids:
followee.requested_follow_user_ids.remove(self.user_id)
followee.count_follows()
followee.save()
MFollowRequest.remove(self.user_id, user_id)
following_key = "F:%s:F" % (self.user_id)
r.srem(following_key, user_id)
follower_key = "F:%s:f" % (user_id)
r.srem(follower_key, self.user_id)
try:
MSocialSubscription.objects.get(user_id=self.user_id, subscription_user_id=user_id).delete()
except MSocialSubscription.DoesNotExist:
return False
def common_follows(self, user_id, direction='followers'):
r = redis.Redis(connection_pool=settings.REDIS_POOL)
my_followers = "F:%s:%s" % (self.user_id, 'F' if direction == 'followers' else 'F')
their_followers = "F:%s:%s" % (user_id, 'f' if direction == 'followers' else 'F')
follows_inter = r.sinter(their_followers, my_followers)
follows_diff = r.sdiff(their_followers, my_followers)
follows_inter = [int(f) for f in follows_inter]
follows_diff = [int(f) for f in follows_diff]
if user_id in follows_inter:
follows_inter.remove(user_id)
if user_id in follows_diff:
follows_diff.remove(user_id)
return follows_inter, follows_diff
def send_email_for_new_follower(self, follower_user_id):
user = User.objects.get(pk=self.user_id)
if follower_user_id not in self.follower_user_ids:
logging.user(user, "~FMNo longer being followed by %s" % follower_user_id)
return
if not user.email:
logging.user(user, "~FMNo email to send to, skipping.")
return
elif not user.profile.send_emails:
logging.user(user, "~FMDisabled emails, skipping.")
return
if self.user_id == follower_user_id:
return
emails_sent = MSentEmail.objects.filter(receiver_user_id=user.pk,
sending_user_id=follower_user_id,
email_type='new_follower')
day_ago = datetime.datetime.now() - datetime.timedelta(days=1)
for email in emails_sent:
if email.date_sent > day_ago:
logging.user(user, "~SK~FMNot sending new follower email, already sent before. NBD.")
return
follower_profile = MSocialProfile.get_user(follower_user_id)
common_followers, _ = self.common_follows(follower_user_id, direction='followers')
common_followings, _ = self.common_follows(follower_user_id, direction='following')
if self.user_id in common_followers:
common_followers.remove(self.user_id)
if self.user_id in common_followings:
common_followings.remove(self.user_id)
common_followers = MSocialProfile.profiles(common_followers)
common_followings = MSocialProfile.profiles(common_followings)
data = {
'user': user,
'follower_profile': follower_profile,
'common_followers': common_followers,
'common_followings': common_followings,
}
text = render_to_string('mail/email_new_follower.txt', data)
html = render_to_string('mail/email_new_follower.xhtml', data)
subject = "%s is now following your Blurblog on NewsBlur!" % follower_profile.username
msg = EmailMultiAlternatives(subject, text,
from_email='NewsBlur <%s>' % settings.HELLO_EMAIL,
to=['%s <%s>' % (user.username, user.email)])
msg.attach_alternative(html, "text/html")
msg.send()
MSentEmail.record(receiver_user_id=user.pk, sending_user_id=follower_user_id,
email_type='new_follower')
logging.user(user, "~BB~FM~SBSending email for new follower: %s" % follower_profile.username)
def send_email_for_follow_request(self, follower_user_id):
user = User.objects.get(pk=self.user_id)
if follower_user_id not in self.requested_follow_user_ids:
logging.user(user, "~FMNo longer being followed by %s" % follower_user_id)
return
if not user.email:
logging.user(user, "~FMNo email to send to, skipping.")
return
elif not user.profile.send_emails:
logging.user(user, "~FMDisabled emails, skipping.")
return
if self.user_id == follower_user_id:
return
emails_sent = MSentEmail.objects.filter(receiver_user_id=user.pk,
sending_user_id=follower_user_id,
email_type='follow_request')
day_ago = datetime.datetime.now() - datetime.timedelta(days=1)
for email in emails_sent:
if email.date_sent > day_ago:
logging.user(user, "~SK~FMNot sending follow request email, already sent before. NBD.")
return
follower_profile = MSocialProfile.get_user(follower_user_id)
common_followers, _ = self.common_follows(follower_user_id, direction='followers')
common_followings, _ = self.common_follows(follower_user_id, direction='following')
if self.user_id in common_followers:
common_followers.remove(self.user_id)
if self.user_id in common_followings:
common_followings.remove(self.user_id)
common_followers = MSocialProfile.profiles(common_followers)
common_followings = MSocialProfile.profiles(common_followings)
data = {
'user': user,
'follower_profile': follower_profile,
'common_followers': common_followers,
'common_followings': common_followings,
}
text = render_to_string('mail/email_follow_request.txt', data)
html = render_to_string('mail/email_follow_request.xhtml', data)
subject = "%s has requested to follow your Blurblog on NewsBlur" % follower_profile.username
msg = EmailMultiAlternatives(subject, text,
from_email='NewsBlur <%s>' % settings.HELLO_EMAIL,
to=['%s <%s>' % (user.username, user.email)])
msg.attach_alternative(html, "text/html")
msg.send()
MSentEmail.record(receiver_user_id=user.pk, sending_user_id=follower_user_id,
email_type='follow_request')
logging.user(user, "~BB~FM~SBSending email for follow request: %s" % follower_profile.username)
def save_feed_story_history_statistics(self):
"""
        Fills in missing months between earlier occurrences and now.
        Save format: [('YYYY-MM', #), ...]
        Example output: [('2010-12', 123), ('2011-01', 146)]
"""
now = datetime.datetime.utcnow()
min_year = now.year
total = 0
month_count = 0
# Count stories, aggregate by year and month. Map Reduce!
map_f = """
function() {
var date = (this.shared_date.getFullYear()) + "-" + (this.shared_date.getMonth()+1);
emit(date, 1);
}
"""
reduce_f = """
function(key, values) {
var total = 0;
for (var i=0; i < values.length; i++) {
total += values[i];
}
return total;
}
"""
dates = {}
res = MSharedStory.objects(user_id=self.user_id).map_reduce(map_f, reduce_f, output='inline')
for r in res:
dates[r.key] = r.value
year = int(re.findall(r"(\d{4})-\d{1,2}", r.key)[0])
if year < min_year:
min_year = year
# Assemble a list with 0's filled in for missing months,
# trimming left and right 0's.
months = []
start = False
for year in range(min_year, now.year+1):
for month in range(1, 12+1):
if datetime.datetime(year, month, 1) < now:
key = u'%s-%s' % (year, month)
if dates.get(key) or start:
start = True
months.append((key, dates.get(key, 0)))
total += dates.get(key, 0)
month_count += 1
self.story_count_history = months
self.average_stories_per_month = total / max(1, month_count)
self.save()
def save_classifier_counts(self):
def calculate_scores(cls, facet):
map_f = """
function() {
emit(this["%s"], {
pos: this.score>0 ? this.score : 0,
neg: this.score<0 ? Math.abs(this.score) : 0
});
}
""" % (facet)
reduce_f = """
function(key, values) {
var result = {pos: 0, neg: 0};
values.forEach(function(value) {
result.pos += value.pos;
result.neg += value.neg;
});
return result;
}
"""
scores = []
res = cls.objects(social_user_id=self.user_id).map_reduce(map_f, reduce_f, output='inline')
for r in res:
facet_values = dict([(k, int(v)) for k,v in r.value.iteritems()])
facet_values[facet] = r.key
scores.append(facet_values)
scores = sorted(scores, key=lambda v: v['neg'] - v['pos'])
return scores
scores = {}
for cls, facet in [(MClassifierTitle, 'title'),
(MClassifierAuthor, 'author'),
(MClassifierTag, 'tag'),
(MClassifierFeed, 'feed_id')]:
scores[facet] = calculate_scores(cls, facet)
if facet == 'feed_id' and scores[facet]:
scores['feed'] = scores[facet]
del scores['feed_id']
elif not scores[facet]:
del scores[facet]
if scores:
self.feed_classifier_counts = scores
self.save()
class MSocialSubscription(mongo.Document):
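    """
    One user's subscription to another user's blurblog. Stores cached unread
    counts, read/mark-read dates, and per-feed stats; the unread story hashes
    themselves are computed on demand from Redis.
    """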
UNREAD_CUTOFF = datetime.datetime.utcnow() - datetime.timedelta(days=settings.DAYS_OF_UNREAD)
user_id = mongo.IntField()
subscription_user_id = mongo.IntField(unique_with='user_id')
follow_date = mongo.DateTimeField(default=datetime.datetime.utcnow())
last_read_date = mongo.DateTimeField(default=UNREAD_CUTOFF)
mark_read_date = mongo.DateTimeField(default=UNREAD_CUTOFF)
unread_count_neutral = mongo.IntField(default=0)
unread_count_positive = mongo.IntField(default=0)
unread_count_negative = mongo.IntField(default=0)
unread_count_updated = mongo.DateTimeField()
oldest_unread_story_date = mongo.DateTimeField()
needs_unread_recalc = mongo.BooleanField(default=False)
feed_opens = mongo.IntField(default=0)
is_trained = mongo.BooleanField(default=False)
meta = {
'collection': 'social_subscription',
'indexes': [('user_id', 'subscription_user_id')],
'allow_inheritance': False,
}
def __unicode__(self):
user = User.objects.get(pk=self.user_id)
subscription_user = User.objects.get(pk=self.subscription_user_id)
return "Socialsub %s:%s" % (user, subscription_user)
@classmethod
def feeds(cls, user_id=None, subscription_user_id=None, calculate_all_scores=False,
update_counts=False, *args, **kwargs):
params = {
'user_id': user_id,
}
if subscription_user_id:
params["subscription_user_id"] = subscription_user_id
social_subs = cls.objects.filter(**params)
social_feeds = []
if social_subs:
if calculate_all_scores:
for s in social_subs: s.calculate_feed_scores()
# Fetch user profiles of subscriptions
social_user_ids = [sub.subscription_user_id for sub in social_subs]
social_profiles = MSocialProfile.profile_feeds(social_user_ids)
for social_sub in social_subs:
user_id = social_sub.subscription_user_id
if social_profiles[user_id]['shared_stories_count'] <= 0:
continue
if update_counts and social_sub.needs_unread_recalc:
social_sub.calculate_feed_scores()
# Combine subscription read counts with feed/user info
feed = dict(social_sub.canonical().items() + social_profiles[user_id].items())
social_feeds.append(feed)
return social_feeds
@classmethod
def feeds_with_updated_counts(cls, user, social_feed_ids=None):
feeds = {}
# Get social subscriptions for user
user_subs = cls.objects.filter(user_id=user.pk)
if social_feed_ids:
social_user_ids = [int(f.replace('social:', '')) for f in social_feed_ids]
user_subs = user_subs.filter(subscription_user_id__in=social_user_ids)
profiles = MSocialProfile.objects.filter(user_id__in=social_user_ids)
profiles = dict((p.user_id, p) for p in profiles)
for i, sub in enumerate(user_subs):
# Count unreads if subscription is stale.
if (sub.needs_unread_recalc or
(sub.unread_count_updated and
sub.unread_count_updated < user.profile.unread_cutoff) or
(sub.oldest_unread_story_date and
sub.oldest_unread_story_date < user.profile.unread_cutoff)):
sub = sub.calculate_feed_scores(force=True, silent=True)
feed_id = "social:%s" % sub.subscription_user_id
feeds[feed_id] = {
'ps': sub.unread_count_positive,
'nt': sub.unread_count_neutral,
'ng': sub.unread_count_negative,
'id': feed_id,
}
if social_feed_ids and sub.subscription_user_id in profiles:
feeds[feed_id]['shared_stories_count'] = profiles[sub.subscription_user_id].shared_stories_count
return feeds
def canonical(self):
return {
'user_id': self.user_id,
'subscription_user_id': self.subscription_user_id,
'nt': self.unread_count_neutral,
'ps': self.unread_count_positive,
'ng': self.unread_count_negative,
'is_trained': self.is_trained,
'feed_opens': self.feed_opens,
}
@classmethod
def subs_for_users(cls, user_id, subscription_user_ids=None, read_filter="unread"):
socialsubs = cls.objects
if read_filter == "unread":
socialsubs = socialsubs.filter(Q(unread_count_neutral__gt=0) |
Q(unread_count_positive__gt=0))
if not subscription_user_ids:
socialsubs = socialsubs.filter(user_id=user_id)\
.only('subscription_user_id', 'mark_read_date', 'is_trained')
else:
socialsubs = socialsubs.filter(user_id=user_id,
subscription_user_id__in=subscription_user_ids)\
.only('subscription_user_id', 'mark_read_date', 'is_trained')
return socialsubs
@classmethod
def story_hashes(cls, user_id, relative_user_id, subscription_user_ids=None, socialsubs=None,
read_filter="unread", order="newest",
include_timestamps=False, group_by_user=True, cutoff_date=None):
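        # For each followed blurblog, diff its shared-story set ("B:<user_id>")
        # against the reader's read-story sets to get unread hashes, then
        # intersect with the date-sorted set ("zB:<user_id>") so the hashes come
        # back ordered (and optionally timestamped). The work is batched through
        # Redis pipelines, 20 blurblogs at a time.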
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
pipeline = r.pipeline()
story_hashes = {} if group_by_user else []
if not socialsubs:
socialsubs = cls.subs_for_users(relative_user_id,
subscription_user_ids=subscription_user_ids,
read_filter=read_filter)
subscription_user_ids = [sub.subscription_user_id for sub in socialsubs]
if not subscription_user_ids:
return story_hashes
read_dates = dict((us.subscription_user_id,
int(us.mark_read_date.strftime('%s'))) for us in socialsubs)
current_time = int(time.time() + 60*60*24)
if not cutoff_date:
cutoff_date = datetime.datetime.now() - datetime.timedelta(days=settings.DAYS_OF_STORY_HASHES)
unread_timestamp = int(time.mktime(cutoff_date.timetuple()))-1000
feed_counter = 0
for sub_user_id_group in chunks(subscription_user_ids, 20):
pipeline = r.pipeline()
for sub_user_id in sub_user_id_group:
stories_key = 'B:%s' % (sub_user_id)
sorted_stories_key = 'zB:%s' % (sub_user_id)
read_stories_key = 'RS:%s' % (user_id)
read_social_stories_key = 'RS:%s:B:%s' % (user_id, sub_user_id)
unread_stories_key = 'UB:%s:%s' % (user_id, sub_user_id)
sorted_stories_key = 'zB:%s' % (sub_user_id)
unread_ranked_stories_key = 'zUB:%s:%s' % (user_id, sub_user_id)
expire_unread_stories_key = False
max_score = current_time
if read_filter == 'unread':
# +1 for the intersection b/w zF and F, which carries an implicit score of 1.
min_score = read_dates[sub_user_id] + 1
pipeline.sdiffstore(unread_stories_key, stories_key, read_stories_key)
pipeline.sdiffstore(unread_stories_key, unread_stories_key, read_social_stories_key)
expire_unread_stories_key = True
else:
min_score = unread_timestamp
unread_stories_key = stories_key
if order == 'oldest':
byscorefunc = pipeline.zrangebyscore
else:
byscorefunc = pipeline.zrevrangebyscore
min_score, max_score = max_score, min_score
pipeline.zinterstore(unread_ranked_stories_key, [sorted_stories_key, unread_stories_key])
byscorefunc(unread_ranked_stories_key, min_score, max_score, withscores=include_timestamps)
pipeline.delete(unread_ranked_stories_key)
if expire_unread_stories_key:
pipeline.delete(unread_stories_key)
results = pipeline.execute()
for hashes in results:
if not isinstance(hashes, list): continue
if group_by_user:
story_hashes[subscription_user_ids[feed_counter]] = hashes
feed_counter += 1
else:
story_hashes.extend(hashes)
return story_hashes
def get_stories(self, offset=0, limit=6, order='newest', read_filter='all',
withscores=False, hashes_only=False, cutoff_date=None,
mark_read_complement=False):
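        # Story hashes for a single blurblog subscription: when read_filter is
        # 'unread', diff the shared-story set against the reader's read sets,
        # then range over the date-sorted intersection between mark_read_date
        # (or cutoff_date) and now.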
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
ignore_user_stories = False
stories_key = 'B:%s' % (self.subscription_user_id)
read_stories_key = 'RS:%s' % (self.user_id)
read_social_stories_key = 'RS:%s:B:%s' % (self.user_id, self.subscription_user_id)
unread_stories_key = 'UB:%s:%s' % (self.user_id, self.subscription_user_id)
if not r.exists(stories_key):
return []
elif read_filter != 'unread' or not r.exists(read_stories_key):
ignore_user_stories = True
unread_stories_key = stories_key
else:
r.sdiffstore(unread_stories_key, stories_key, read_stories_key)
r.sdiffstore(unread_stories_key, unread_stories_key, read_social_stories_key)
sorted_stories_key = 'zB:%s' % (self.subscription_user_id)
unread_ranked_stories_key = 'z%sUB:%s:%s' % ('h' if hashes_only else '',
self.user_id, self.subscription_user_id)
r.zinterstore(unread_ranked_stories_key, [sorted_stories_key, unread_stories_key])
now = datetime.datetime.now()
current_time = int(time.time() + 60*60*24)
mark_read_time = int(time.mktime(self.mark_read_date.timetuple())) + 1
if cutoff_date:
mark_read_time = int(time.mktime(cutoff_date.timetuple())) + 1
if order == 'oldest':
byscorefunc = r.zrangebyscore
min_score = mark_read_time
max_score = current_time
else: # newest
byscorefunc = r.zrevrangebyscore
min_score = current_time
if mark_read_complement:
min_score = mark_read_time
now = datetime.datetime.now()
unread_cutoff = cutoff_date
if not unread_cutoff:
unread_cutoff = now - datetime.timedelta(days=settings.DAYS_OF_UNREAD)
max_score = int(time.mktime(unread_cutoff.timetuple()))-1
story_ids = byscorefunc(unread_ranked_stories_key, min_score,
max_score, start=offset, num=limit,
withscores=withscores)
if withscores:
story_ids = [(s[0], int(s[1])) for s in story_ids]
r.expire(unread_ranked_stories_key, 1*60*60)
if not ignore_user_stories:
r.delete(unread_stories_key)
return story_ids
@classmethod
def feed_stories(cls, user_id, social_user_ids, offset=0, limit=6,
order='newest', read_filter='all', relative_user_id=None, cache=True,
socialsubs=None, cutoff_date=None):
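        # The ranked river-of-news keys are cached in a temporary Redis store
        # for an hour, so paging requests (offset > 0) can reuse the computed
        # union instead of re-reading every followed blurblog.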
rt = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_TEMP_POOL)
if not relative_user_id:
relative_user_id = user_id
if order == 'oldest':
range_func = rt.zrange
else:
range_func = rt.zrevrange
if not isinstance(social_user_ids, list):
social_user_ids = [social_user_ids]
ranked_stories_keys = 'zU:%s:social' % (user_id)
unread_ranked_stories_keys = 'zhU:%s:social' % (user_id)
if (offset and cache and
rt.exists(ranked_stories_keys) and
rt.exists(unread_ranked_stories_keys)):
story_hashes_and_dates = range_func(ranked_stories_keys, offset, limit, withscores=True)
if not story_hashes_and_dates:
return [], [], []
story_hashes, story_dates = zip(*story_hashes_and_dates)
if read_filter == "unread":
unread_story_hashes = story_hashes
else:
unread_story_hashes = range_func(unread_ranked_stories_keys, 0, offset+limit)
return story_hashes, story_dates, unread_story_hashes
else:
rt.delete(ranked_stories_keys)
rt.delete(unread_ranked_stories_keys)
story_hashes = cls.story_hashes(user_id, relative_user_id,
subscription_user_ids=social_user_ids,
read_filter=read_filter, order=order,
include_timestamps=True,
group_by_user=False,
socialsubs=socialsubs,
cutoff_date=cutoff_date)
if not story_hashes:
return [], [], []
pipeline = rt.pipeline()
for story_hash_group in chunks(story_hashes, 100):
pipeline.zadd(ranked_stories_keys, **dict(story_hash_group))
pipeline.execute()
story_hashes_and_dates = range_func(ranked_stories_keys, offset, limit, withscores=True)
if not story_hashes_and_dates:
return [], [], []
story_hashes, story_dates = zip(*story_hashes_and_dates)
if read_filter == "unread":
unread_feed_story_hashes = story_hashes
rt.zunionstore(unread_ranked_stories_keys, [ranked_stories_keys])
else:
unread_story_hashes = cls.story_hashes(user_id, relative_user_id,
subscription_user_ids=social_user_ids,
read_filter="unread", order=order,
include_timestamps=True,
group_by_user=False,
socialsubs=socialsubs,
cutoff_date=cutoff_date)
if unread_story_hashes:
pipeline = rt.pipeline()
for unread_story_hash_group in chunks(unread_story_hashes, 100):
pipeline.zadd(unread_ranked_stories_keys, **dict(unread_story_hash_group))
pipeline.execute()
unread_feed_story_hashes = range_func(unread_ranked_stories_keys, offset, limit)
rt.expire(ranked_stories_keys, 60*60)
rt.expire(unread_ranked_stories_keys, 60*60)
return story_hashes, story_dates, unread_feed_story_hashes
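    # Marks the given story hashes as read for this social subscription, then flags
    # every affected subscription (this one, friends who shared the same story, and
    # the original site subscription) for an unread recount.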
def mark_story_ids_as_read(self, story_hashes, feed_id=None, mark_all_read=False, request=None):
data = dict(code=0, payload=story_hashes)
r = redis.Redis(connection_pool=settings.REDIS_POOL)
if not request:
request = User.objects.get(pk=self.user_id)
if not self.needs_unread_recalc and not mark_all_read:
self.needs_unread_recalc = True
self.save()
sub_username = MSocialProfile.get_user(self.subscription_user_id).username
if len(story_hashes) > 1:
logging.user(request, "~FYRead %s stories in social subscription: %s" % (len(story_hashes), sub_username))
else:
logging.user(request, "~FYRead story in social subscription: %s" % (sub_username))
for story_hash in set(story_hashes):
if feed_id is not None:
story_hash = MStory.ensure_story_hash(story_hash, story_feed_id=feed_id)
if feed_id is None:
feed_id, _ = MStory.split_story_hash(story_hash)
# Find other social feeds with this story to update their counts
friend_key = "F:%s:F" % (self.user_id)
share_key = "S:%s" % (story_hash)
friends_with_shares = [int(f) for f in r.sinter(share_key, friend_key)]
RUserStory.mark_read(self.user_id, feed_id, story_hash, social_user_ids=friends_with_shares,
aggregated=mark_all_read)
if self.user_id in friends_with_shares:
friends_with_shares.remove(self.user_id)
if friends_with_shares:
socialsubs = MSocialSubscription.objects.filter(
user_id=self.user_id,
subscription_user_id__in=friends_with_shares)
for socialsub in socialsubs:
if not socialsub.needs_unread_recalc and not mark_all_read:
socialsub.needs_unread_recalc = True
socialsub.save()
# Also count on original subscription
usersubs = UserSubscription.objects.filter(user=self.user_id, feed=feed_id)
if usersubs:
usersub = usersubs[0]
if not usersub.needs_unread_recalc:
usersub.needs_unread_recalc = True
usersub.save()
return data
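    # Same as mark_story_ids_as_read, but for blurblogs the reader is not actually
    # subscribed to (stories read through the global shared-stories river), so it
    # is keyed off user_id/social_user_id instead of a subscription instance.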
@classmethod
def mark_unsub_story_ids_as_read(cls, user_id, social_user_id, story_ids, feed_id=None,
request=None):
data = dict(code=0, payload=story_ids)
r = redis.Redis(connection_pool=settings.REDIS_POOL)
if not request:
request = User.objects.get(pk=user_id)
if len(story_ids) > 1:
logging.user(request, "~FYRead %s social stories from global" % (len(story_ids)))
else:
logging.user(request, "~FYRead social story from global")
for story_id in set(story_ids):
try:
story = MSharedStory.objects.get(user_id=social_user_id,
story_guid=story_id)
except MSharedStory.DoesNotExist:
continue
# Find other social feeds with this story to update their counts
friend_key = "F:%s:F" % (user_id)
share_key = "S:%s" % (story.story_hash)
friends_with_shares = [int(f) for f in r.sinter(share_key, friend_key)]
RUserStory.mark_read(user_id, story.story_feed_id, story.story_hash,
social_user_ids=friends_with_shares)
# Also count on original subscription
usersubs = UserSubscription.objects.filter(user=user_id, feed=story.story_feed_id)
if usersubs:
usersub = usersubs[0]
if not usersub.needs_unread_recalc:
usersub.needs_unread_recalc = True
usersub.save()
# XXX TODO: Real-time notification, just for this user
return data
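    # Marks the whole social feed as read by moving the read/mark-read cutoff dates
    # forward. Without an explicit cutoff_date, the latest shared story defines the
    # cutoff; when a recount is needed, the remaining unread hashes are also marked
    # read explicitly.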
def mark_feed_read(self, cutoff_date=None):
user_profile = Profile.objects.get(user_id=self.user_id)
recount = True
if cutoff_date:
cutoff_date = cutoff_date + datetime.timedelta(seconds=1)
else:
# Use the latest story to get last read time.
latest_shared_story = MSharedStory.objects(user_id=self.subscription_user_id,
shared_date__gte=user_profile.unread_cutoff
).order_by('-shared_date').only('shared_date').first()
if latest_shared_story:
cutoff_date = latest_shared_story['shared_date'] + datetime.timedelta(seconds=1)
else:
cutoff_date = datetime.datetime.utcnow()
recount = False
self.last_read_date = cutoff_date
self.mark_read_date = cutoff_date
self.oldest_unread_story_date = cutoff_date
if not recount:
self.unread_count_negative = 0
self.unread_count_positive = 0
self.unread_count_neutral = 0
self.unread_count_updated = datetime.datetime.utcnow()
self.needs_unread_recalc = False
else:
self.needs_unread_recalc = True
# Manually mark all shared stories as read.
unread_story_hashes = self.get_stories(read_filter='unread', limit=500, hashes_only=True,
mark_read_complement=True)
self.mark_story_ids_as_read(unread_story_hashes, mark_all_read=True)
self.save()
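    # Recomputes the unread counts for this social subscription, bucketed by
    # intelligence score: each unread shared story is run through the user's
    # feed/author/title/tag classifiers (both the blurblog-specific and the
    # per-feed ones) and tallied as positive, neutral or negative.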
def calculate_feed_scores(self, force=False, silent=False):
if not self.needs_unread_recalc and not force:
return self
now = datetime.datetime.now()
user_profile = Profile.objects.get(user_id=self.user_id)
if user_profile.last_seen_on < user_profile.unread_cutoff:
# if not silent:
# logging.info(' ---> [%s] SKIPPING Computing scores: %s (1 week+)' % (self.user, self.feed))
return self
feed_scores = dict(negative=0, neutral=0, positive=0)
        # Keep whichever cutoff is more recent: the user's unread cutoff or this
        # subscription's mark_read_date; if mark_read_date lags, push it forward.
date_delta = user_profile.unread_cutoff
if date_delta < self.mark_read_date:
date_delta = self.mark_read_date
else:
self.mark_read_date = date_delta
unread_story_hashes = self.get_stories(read_filter='unread', limit=500, hashes_only=True,
cutoff_date=user_profile.unread_cutoff)
stories_db = MSharedStory.objects(user_id=self.subscription_user_id,
story_hash__in=unread_story_hashes)
story_feed_ids = set()
for s in stories_db:
story_feed_ids.add(s['story_feed_id'])
story_feed_ids = list(story_feed_ids)
usersubs = UserSubscription.objects.filter(user__pk=self.user_id, feed__pk__in=story_feed_ids)
usersubs_map = dict((sub.feed_id, sub) for sub in usersubs)
oldest_unread_story_date = now
unread_stories_db = []
for story in stories_db:
if story['story_hash'] not in unread_story_hashes:
continue
feed_id = story.story_feed_id
if usersubs_map.get(feed_id) and story.shared_date < usersubs_map[feed_id].mark_read_date:
continue
unread_stories_db.append(story)
if story.shared_date < oldest_unread_story_date:
oldest_unread_story_date = story.shared_date
stories = Feed.format_stories(unread_stories_db)
classifier_feeds = list(MClassifierFeed.objects(user_id=self.user_id, social_user_id=self.subscription_user_id))
classifier_authors = list(MClassifierAuthor.objects(user_id=self.user_id, social_user_id=self.subscription_user_id))
classifier_titles = list(MClassifierTitle.objects(user_id=self.user_id, social_user_id=self.subscription_user_id))
classifier_tags = list(MClassifierTag.objects(user_id=self.user_id, social_user_id=self.subscription_user_id))
# Merge with feed specific classifiers
if story_feed_ids:
classifier_feeds = classifier_feeds + list(MClassifierFeed.objects(user_id=self.user_id,
feed_id__in=story_feed_ids))
classifier_authors = classifier_authors + list(MClassifierAuthor.objects(user_id=self.user_id,
feed_id__in=story_feed_ids))
classifier_titles = classifier_titles + list(MClassifierTitle.objects(user_id=self.user_id,
feed_id__in=story_feed_ids))
classifier_tags = classifier_tags + list(MClassifierTag.objects(user_id=self.user_id,
feed_id__in=story_feed_ids))
for story in stories:
scores = {
'feed' : apply_classifier_feeds(classifier_feeds, story['story_feed_id'],
social_user_ids=self.subscription_user_id),
'author' : apply_classifier_authors(classifier_authors, story),
'tags' : apply_classifier_tags(classifier_tags, story),
'title' : apply_classifier_titles(classifier_titles, story),
}
max_score = max(scores['author'], scores['tags'], scores['title'])
min_score = min(scores['author'], scores['tags'], scores['title'])
if max_score > 0:
feed_scores['positive'] += 1
elif min_score < 0:
feed_scores['negative'] += 1
else:
if scores['feed'] > 0:
feed_scores['positive'] += 1
elif scores['feed'] < 0:
feed_scores['negative'] += 1
else:
feed_scores['neutral'] += 1
self.unread_count_positive = feed_scores['positive']
self.unread_count_neutral = feed_scores['neutral']
self.unread_count_negative = feed_scores['negative']
self.unread_count_updated = datetime.datetime.now()
self.oldest_unread_story_date = oldest_unread_story_date
self.needs_unread_recalc = False
self.save()
if (self.unread_count_positive == 0 and
self.unread_count_neutral == 0):
self.mark_feed_read()
if not silent:
logging.info(' ---> [%s] Computing social scores: %s (%s/%s/%s)' % (user_profile, self.subscription_user_id, feed_scores['negative'], feed_scores['neutral'], feed_scores['positive']))
return self
@classmethod
def mark_dirty_sharing_story(cls, user_id, story_feed_id, story_guid_hash):
r = redis.Redis(connection_pool=settings.REDIS_POOL)
friends_key = "F:%s:F" % (user_id)
share_key = "S:%s:%s" % (story_feed_id, story_guid_hash)
following_user_ids = r.sinter(friends_key, share_key)
following_user_ids = [int(f) for f in following_user_ids]
if not following_user_ids:
return None
social_subs = cls.objects.filter(user_id=user_id, subscription_user_id__in=following_user_ids)
for social_sub in social_subs:
social_sub.needs_unread_recalc = True
social_sub.save()
return social_subs
class MCommentReply(mongo.EmbeddedDocument):
reply_id = mongo.ObjectIdField()
user_id = mongo.IntField()
publish_date = mongo.DateTimeField()
comments = mongo.StringField()
email_sent = mongo.BooleanField(default=False)
liking_users = mongo.ListField(mongo.IntField())
def canonical(self):
reply = {
'reply_id': self.reply_id,
'user_id': self.user_id,
'publish_date': relative_timesince(self.publish_date),
'date': self.publish_date,
'comments': self.comments,
}
return reply
meta = {
'ordering': ['publish_date'],
'id_field': 'reply_id',
'allow_inheritance': False,
}
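# A story shared to a user's blurblog, along with its comment thread, likes and
# the bookkeeping needed to mirror shares/comments into Redis and to cross-post
# to external services.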
class MSharedStory(mongo.Document):
user_id = mongo.IntField()
shared_date = mongo.DateTimeField()
comments = mongo.StringField()
has_comments = mongo.BooleanField(default=False)
has_replies = mongo.BooleanField(default=False)
replies = mongo.ListField(mongo.EmbeddedDocumentField(MCommentReply))
source_user_id = mongo.IntField()
story_hash = mongo.StringField()
story_feed_id = mongo.IntField()
story_date = mongo.DateTimeField()
story_title = mongo.StringField(max_length=1024)
story_content = mongo.StringField()
story_content_z = mongo.BinaryField()
story_original_content = mongo.StringField()
story_original_content_z = mongo.BinaryField()
original_text_z = mongo.BinaryField()
story_content_type = mongo.StringField(max_length=255)
story_author_name = mongo.StringField()
story_permalink = mongo.StringField()
story_guid = mongo.StringField(unique_with=('user_id',))
story_guid_hash = mongo.StringField(max_length=6)
image_urls = mongo.ListField(mongo.StringField(max_length=1024))
story_tags = mongo.ListField(mongo.StringField(max_length=250))
posted_to_services = mongo.ListField(mongo.StringField(max_length=20))
mute_email_users = mongo.ListField(mongo.IntField())
liking_users = mongo.ListField(mongo.IntField())
emailed_reshare = mongo.BooleanField(default=False)
emailed_replies = mongo.ListField(mongo.ObjectIdField())
image_count = mongo.IntField()
image_sizes = mongo.ListField(mongo.DictField())
meta = {
'collection': 'shared_stories',
'indexes': [('user_id', '-shared_date'), ('user_id', 'story_feed_id'),
'shared_date', 'story_guid', 'story_feed_id', 'story_hash'],
'index_drop_dups': True,
'ordering': ['-shared_date'],
'allow_inheritance': False,
}
def __unicode__(self):
user = User.objects.get(pk=self.user_id)
return "%s: %s (%s)%s%s" % (user.username,
self.decoded_story_title[:20],
self.story_feed_id,
': ' if self.has_comments else '',
self.comments[:20])
@property
def guid_hash(self):
return hashlib.sha1(self.story_guid).hexdigest()[:6]
@property
def feed_guid_hash(self):
return "%s:%s" % (self.story_feed_id or "0", self.guid_hash)
@property
def decoded_story_title(self):
h = HTMLParser.HTMLParser()
return h.unescape(self.story_title)
def canonical(self):
return {
"user_id": self.user_id,
"shared_date": self.shared_date,
"story_title": self.story_title,
"story_content": self.story_content_z and zlib.decompress(self.story_content_z),
"comments": self.comments,
}
def save(self, *args, **kwargs):
scrubber = SelectiveScriptScrubber()
if self.story_content:
self.story_content = scrubber.scrub(self.story_content)
self.story_content_z = zlib.compress(self.story_content)
self.story_content = None
if self.story_original_content:
self.story_original_content_z = zlib.compress(self.story_original_content)
self.story_original_content = None
self.story_guid_hash = hashlib.sha1(self.story_guid).hexdigest()[:6]
self.story_title = strip_tags(self.story_title)
self.story_hash = self.feed_guid_hash
self.comments = linkify(strip_tags(self.comments))
for reply in self.replies:
reply.comments = linkify(strip_tags(reply.comments))
self.shared_date = self.shared_date or datetime.datetime.utcnow()
self.has_replies = bool(len(self.replies))
super(MSharedStory, self).save(*args, **kwargs)
author = MSocialProfile.get_user(self.user_id)
author.count_follows()
self.sync_redis()
MActivity.new_shared_story(user_id=self.user_id, source_user_id=self.source_user_id,
story_title=self.story_title,
comments=self.comments, story_feed_id=self.story_feed_id,
story_id=self.story_guid, share_date=self.shared_date)
return self
def delete(self, *args, **kwargs):
MActivity.remove_shared_story(user_id=self.user_id, story_feed_id=self.story_feed_id,
story_id=self.story_guid)
self.remove_from_redis()
super(MSharedStory, self).delete(*args, **kwargs)
def unshare_story(self):
socialsubs = MSocialSubscription.objects.filter(subscription_user_id=self.user_id,
needs_unread_recalc=False)
for socialsub in socialsubs:
socialsub.needs_unread_recalc = True
socialsub.save()
self.delete()
@classmethod
def feed_quota(cls, user_id, feed_id, days=1, quota=1):
day_ago = datetime.datetime.now()-datetime.timedelta(days=days)
shared_count = cls.objects.filter(shared_date__gte=day_ago, story_feed_id=feed_id).count()
return shared_count >= quota
@classmethod
def count_potential_spammers(cls, days=1):
day_ago = datetime.datetime.now()-datetime.timedelta(days=days)
stories = cls.objects.filter(shared_date__gte=day_ago)
shared = [{'u': s.user_id, 'f': s.story_feed_id} for s in stories]
ddusers = defaultdict(lambda: defaultdict(int))
for story in shared:
ddusers[story['u']][story['f']] += 1
users = {}
for user_id, feeds in ddusers.items():
users[user_id] = dict(feeds)
pprint(users)
return users
@classmethod
def get_shared_stories_from_site(cls, feed_id, user_id, story_url, limit=3):
your_story = cls.objects.filter(story_feed_id=feed_id,
story_permalink=story_url,
user_id=user_id).limit(1).first()
same_stories = cls.objects.filter(story_feed_id=feed_id,
story_permalink=story_url,
user_id__ne=user_id
).order_by('-shared_date')
same_stories = [{
"user_id": story.user_id,
"comments": story.comments,
"relative_date": relative_timesince(story.shared_date),
"blurblog_permalink": story.blurblog_permalink(),
} for story in same_stories]
other_stories = []
if feed_id:
other_stories = cls.objects.filter(story_feed_id=feed_id,
story_permalink__ne=story_url
).order_by('-shared_date').limit(limit)
other_stories = [{
"user_id": story.user_id,
"story_title": story.story_title,
"story_permalink": story.story_permalink,
"comments": story.comments,
"relative_date": relative_timesince(story.shared_date),
"blurblog_permalink": story.blurblog_permalink(),
} for story in other_stories]
return your_story, same_stories, other_stories
def set_source_user_id(self, source_user_id):
if source_user_id == self.user_id:
return
def find_source(source_user_id, seen_user_ids):
parent_shared_story = MSharedStory.objects.filter(user_id=source_user_id,
story_guid=self.story_guid,
story_feed_id=self.story_feed_id).limit(1)
if parent_shared_story and parent_shared_story[0].source_user_id:
user_id = parent_shared_story[0].source_user_id
if user_id in seen_user_ids:
return source_user_id
else:
seen_user_ids.append(user_id)
return find_source(user_id, seen_user_ids)
else:
return source_user_id
if source_user_id:
source_user_id = find_source(source_user_id, [])
if source_user_id == self.user_id:
return
elif not self.source_user_id or source_user_id != self.source_user_id:
self.source_user_id = source_user_id
logging.debug(" ---> Re-share from %s." % source_user_id)
self.save()
MInteraction.new_reshared_story(user_id=self.source_user_id,
reshare_user_id=self.user_id,
comments=self.comments,
story_title=self.story_title,
story_feed_id=self.story_feed_id,
story_id=self.story_guid)
def mute_for_user(self, user_id):
if user_id not in self.mute_email_users:
self.mute_email_users.append(user_id)
self.save()
@classmethod
def switch_feed(cls, original_feed_id, duplicate_feed_id):
shared_stories = cls.objects.filter(story_feed_id=duplicate_feed_id)
logging.info(" ---> %s shared stories" % shared_stories.count())
for story in shared_stories:
story.story_feed_id = original_feed_id
story.save()
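    # Runs a map/reduce over recent shares to find stories shared by at least
    # `cutoff` distinct users, skipping feeds that were already shared recently
    # and titles that are mostly non-Latin characters.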
@classmethod
def collect_popular_stories(cls, cutoff=None, days=None, shared_feed_ids=None):
if not days:
days = 3
if not cutoff:
cutoff = 6
if not shared_feed_ids:
shared_feed_ids = []
# shared_stories_count = sum(json.decode(MStatistics.get('stories_shared')))
# cutoff = cutoff or max(math.floor(.025 * shared_stories_count), 3)
today = datetime.datetime.now() - datetime.timedelta(days=days)
map_f = """
function() {
emit(this.story_hash, {
'story_hash': this.story_hash,
'feed_id': this.story_feed_id,
'title': this.story_title,
'count': 1
});
}
"""
reduce_f = """
function(key, values) {
var r = {'story_hash': key, 'count': 0};
for (var i=0; i < values.length; i++) {
r.feed_id = values[i].feed_id;
r.title = values[i].title;
r.count += values[i].count;
}
return r;
}
"""
finalize_f = """
function(key, value) {
if (value.count >= %(cutoff)s && [%(shared_feed_ids)s].indexOf(value.feed_id) == -1) {
var english_title = value.title.replace(/[^\\062-\\177]/g, "");
if (english_title.length < 5) return;
return value;
}
}
""" % {'cutoff': cutoff, 'shared_feed_ids': ', '.join(shared_feed_ids)}
res = cls.objects(shared_date__gte=today).map_reduce(map_f, reduce_f,
finalize_f=finalize_f,
output='inline')
stories = dict([(r.key, r.value) for r in res if r.value])
return stories, cutoff
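    # Re-shares the stories found by collect_popular_stories under the "popular"
    # account, optionally confirming each one interactively, and cross-posts new
    # shares to Twitter.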
@classmethod
def share_popular_stories(cls, cutoff=None, days=None, interactive=True):
publish_new_stories = False
popular_profile = MSocialProfile.objects.get(username='popular')
popular_user = User.objects.get(pk=popular_profile.user_id)
week_ago = datetime.datetime.now() - datetime.timedelta(days=7)
shared_feed_ids = [str(s.story_feed_id)
for s in MSharedStory.objects(user_id=popular_profile.user_id,
shared_date__gte=week_ago).only('story_feed_id')]
shared_stories_today, cutoff = cls.collect_popular_stories(cutoff=cutoff, days=days,
shared_feed_ids=shared_feed_ids)
shared = 0
for story_hash, story_info in shared_stories_today.items():
story, _ = MStory.find_story(story_info['feed_id'], story_info['story_hash'])
if not story:
logging.user(popular_user, "~FRPopular stories, story not found: %s" % story_info)
continue
if story.story_feed_id in shared_feed_ids:
logging.user(popular_user, "~FRPopular stories, story feed just shared: %s" % story_info)
continue
if interactive:
feed = Feed.get_by_id(story.story_feed_id)
accept_story = raw_input("%s / %s [Y/n]: " % (story.decoded_story_title, feed.title))
if accept_story in ['n', 'N']: continue
story_db = dict([(k, v) for k, v in story._data.items()
if k is not None and v is not None])
story_db.pop('user_id', None)
story_db.pop('id', None)
story_db.pop('comments', None)
story_db.pop('replies', None)
story_db['has_comments'] = False
story_db['has_replies'] = False
story_db['shared_date'] = datetime.datetime.now()
story_values = {
'user_id': popular_profile.user_id,
'story_guid': story_db['story_guid'],
'defaults': story_db,
}
shared_story, created = MSharedStory.objects.get_or_create(**story_values)
if created:
shared_story.post_to_service('twitter')
shared += 1
shared_feed_ids.append(story.story_feed_id)
publish_new_stories = True
logging.user(popular_user, "~FCSharing: ~SB~FM%s (%s shares, %s min)" % (
story.decoded_story_title[:50],
story_info['count'],
cutoff))
if publish_new_stories:
socialsubs = MSocialSubscription.objects.filter(subscription_user_id=popular_user.pk)
for socialsub in socialsubs:
socialsub.needs_unread_recalc = True
socialsub.save()
shared_story.publish_update_to_subscribers()
return shared
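    # Given a list of story hashes, returns the subset this user has shared,
    # checked in one Redis pipeline against the S:<feed_id>:<guid_hash> share sets.
    #   e.g. MSharedStory.check_shared_story_hashes(1, ['42:deadbe'])  # hypothetical ids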
@staticmethod
def check_shared_story_hashes(user_id, story_hashes, r=None):
if not r:
r = redis.Redis(connection_pool=settings.REDIS_POOL)
pipeline = r.pipeline()
for story_hash in story_hashes:
feed_id, guid_hash = MStory.split_story_hash(story_hash)
share_key = "S:%s:%s" % (feed_id, guid_hash)
pipeline.sismember(share_key, user_id)
shared_hashes = pipeline.execute()
return [story_hash for s, story_hash in enumerate(story_hashes) if shared_hashes[s]]
@classmethod
def sync_all_redis(cls, drop=False):
r = redis.Redis(connection_pool=settings.REDIS_POOL)
h = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
# h2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
if drop:
for key_name in ["C", "S"]:
keys = r.keys("%s:*" % key_name)
print " ---> Removing %s keys named %s:*" % (len(keys), key_name)
for key in keys:
r.delete(key)
for story in cls.objects.all():
story.sync_redis_shares(r=r)
story.sync_redis_story(r=h)
def sync_redis(self):
self.sync_redis_shares()
self.sync_redis_story()
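    # Redis mirror of a share: S:<feed_id>:<guid_hash> is the set of sharers,
    # C:<feed_id>:<guid_hash> the set of sharers with comments, and B:<user_id> /
    # zB:<user_id> hold the sharer's blurblog hashes (plain set and sorted by
    # shared date).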
def sync_redis_shares(self, r=None):
if not r:
r = redis.Redis(connection_pool=settings.REDIS_POOL)
share_key = "S:%s:%s" % (self.story_feed_id, self.guid_hash)
comment_key = "C:%s:%s" % (self.story_feed_id, self.guid_hash)
r.sadd(share_key, self.user_id)
if self.has_comments:
r.sadd(comment_key, self.user_id)
else:
r.srem(comment_key, self.user_id)
def sync_redis_story(self, r=None):
if not r:
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
# if not r2:
# r2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
r.sadd('B:%s' % self.user_id, self.feed_guid_hash)
# r2.sadd('B:%s' % self.user_id, self.feed_guid_hash)
r.zadd('zB:%s' % self.user_id, self.feed_guid_hash,
time.mktime(self.shared_date.timetuple()))
# r2.zadd('zB:%s' % self.user_id, self.feed_guid_hash,
# time.mktime(self.shared_date.timetuple()))
r.expire('B:%s' % self.user_id, settings.DAYS_OF_STORY_HASHES*24*60*60)
# r2.expire('B:%s' % self.user_id, settings.DAYS_OF_STORY_HASHES*24*60*60)
r.expire('zB:%s' % self.user_id, settings.DAYS_OF_STORY_HASHES*24*60*60)
# r2.expire('zB:%s' % self.user_id, settings.DAYS_OF_STORY_HASHES*24*60*60)
def remove_from_redis(self):
r = redis.Redis(connection_pool=settings.REDIS_POOL)
share_key = "S:%s:%s" % (self.story_feed_id, self.guid_hash)
r.srem(share_key, self.user_id)
comment_key = "C:%s:%s" % (self.story_feed_id, self.guid_hash)
r.srem(comment_key, self.user_id)
h = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
# h2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
h.srem('B:%s' % self.user_id, self.feed_guid_hash)
# h2.srem('B:%s' % self.user_id, self.feed_guid_hash)
h.zrem('zB:%s' % self.user_id, self.feed_guid_hash)
# h2.zrem('zB:%s' % self.user_id, self.feed_guid_hash)
def publish_update_to_subscribers(self):
try:
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
feed_id = "social:%s" % self.user_id
listeners_count = r.publish(feed_id, 'story:new')
if listeners_count:
logging.debug(" ---> ~FMPublished to %s subscribers" % (listeners_count))
except redis.ConnectionError:
logging.debug(" ***> ~BMRedis is unavailable for real-time.")
def comments_with_author(self):
comments = {
'id': self.id,
'user_id': self.user_id,
'comments': self.comments,
'shared_date': relative_timesince(self.shared_date),
'date': self.shared_date,
'replies': [reply.canonical() for reply in self.replies],
'liking_users': self.liking_users and list(self.liking_users),
'source_user_id': self.source_user_id,
}
return comments
def comment_with_author_and_profiles(self):
comment = self.comments_with_author()
profile_user_ids = set([comment['user_id']])
reply_user_ids = [reply['user_id'] for reply in comment['replies']]
profile_user_ids = profile_user_ids.union(reply_user_ids)
profile_user_ids = profile_user_ids.union(comment['liking_users'])
if comment['source_user_id']:
profile_user_ids.add(comment['source_user_id'])
profiles = MSocialProfile.objects.filter(user_id__in=list(profile_user_ids))
profiles = [profile.canonical(compact=True) for profile in profiles]
return comment, profiles
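    # Decorates a list of formatted stories with their share/comment counts,
    # splits commenters and sharers into friends vs. public (via Redis set
    # intersections against F:<user_id>:F), and collects every profile needed
    # to render them.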
@classmethod
def stories_with_comments_and_profiles(cls, stories, user_id, check_all=False):
r = redis.Redis(connection_pool=settings.REDIS_POOL)
friend_key = "F:%s:F" % (user_id)
profile_user_ids = set()
for story in stories:
story['friend_comments'] = []
story['public_comments'] = []
story['reply_count'] = 0
if check_all or story['comment_count']:
comment_key = "C:%s:%s" % (story['story_feed_id'], story['guid_hash'])
story['comment_count'] = r.scard(comment_key)
friends_with_comments = [int(f) for f in r.sinter(comment_key, friend_key)]
sharer_user_ids = [int(f) for f in r.smembers(comment_key)]
shared_stories = []
if sharer_user_ids:
params = {
'story_hash': story['story_hash'],
'user_id__in': sharer_user_ids,
}
shared_stories = cls.objects.filter(**params)
for shared_story in shared_stories:
comments = shared_story.comments_with_author()
story['reply_count'] += len(comments['replies'])
if shared_story.user_id in friends_with_comments:
story['friend_comments'].append(comments)
else:
story['public_comments'].append(comments)
if comments.get('source_user_id'):
profile_user_ids.add(comments['source_user_id'])
if comments.get('liking_users'):
profile_user_ids = profile_user_ids.union(comments['liking_users'])
all_comments = story['friend_comments'] + story['public_comments']
profile_user_ids = profile_user_ids.union([reply['user_id']
for c in all_comments
for reply in c['replies']])
if story.get('source_user_id'):
profile_user_ids.add(story['source_user_id'])
story['comment_count_friends'] = len(friends_with_comments)
story['comment_count_public'] = story['comment_count'] - len(friends_with_comments)
if check_all or story['share_count']:
share_key = "S:%s:%s" % (story['story_feed_id'], story['guid_hash'])
story['share_count'] = r.scard(share_key)
friends_with_shares = [int(f) for f in r.sinter(share_key, friend_key)]
nonfriend_user_ids = [int(f) for f in r.sdiff(share_key, friend_key)]
profile_user_ids.update(nonfriend_user_ids)
profile_user_ids.update(friends_with_shares)
story['commented_by_public'] = [c['user_id'] for c in story['public_comments']]
story['commented_by_friends'] = [c['user_id'] for c in story['friend_comments']]
story['shared_by_public'] = list(set(nonfriend_user_ids) -
set(story['commented_by_public']))
story['shared_by_friends'] = list(set(friends_with_shares) -
set(story['commented_by_friends']))
story['share_count_public'] = story['share_count'] - len(friends_with_shares)
story['share_count_friends'] = len(friends_with_shares)
story['friend_user_ids'] = list(set(story['commented_by_friends'] + story['shared_by_friends']))
story['public_user_ids'] = list(set(story['commented_by_public'] + story['shared_by_public']))
if not story['share_user_ids']:
story['share_user_ids'] = story['friend_user_ids'] + story['public_user_ids']
if story.get('source_user_id'):
profile_user_ids.add(story['source_user_id'])
profiles = MSocialProfile.objects.filter(user_id__in=list(profile_user_ids))
profiles = [profile.canonical(compact=True) for profile in profiles]
# Toss public comments by private profiles
profiles_dict = dict((profile['user_id'], profile) for profile in profiles)
for story in stories:
commented_by_public = story.get('commented_by_public') or [c['user_id'] for c in story['public_comments']]
for user_id in commented_by_public:
if profiles_dict[user_id]['private']:
story['public_comments'] = [c for c in story['public_comments'] if c['user_id'] != user_id]
story['comment_count_public'] -= 1
return stories, profiles
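    # Replaces the bare user ids collected above (sharers, commenters, repliers,
    # likers) with their compact MSocialProfile dicts so the client gets full
    # user records inline.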
@staticmethod
def attach_users_to_stories(stories, profiles):
profiles = dict([(p['user_id'], p) for p in profiles])
for s, story in enumerate(stories):
for u, user_id in enumerate(story['shared_by_friends']):
if user_id not in profiles: continue
stories[s]['shared_by_friends'][u] = profiles[user_id]
for u, user_id in enumerate(story['shared_by_public']):
if user_id not in profiles: continue
stories[s]['shared_by_public'][u] = profiles[user_id]
for comment_set in ['friend_comments', 'public_comments']:
for c, comment in enumerate(story[comment_set]):
if comment['user_id'] not in profiles: continue
stories[s][comment_set][c]['user'] = profiles[comment['user_id']]
if comment['source_user_id'] and comment['source_user_id'] in profiles:
stories[s][comment_set][c]['source_user'] = profiles[comment['source_user_id']]
for r, reply in enumerate(comment['replies']):
if reply['user_id'] not in profiles: continue
stories[s][comment_set][c]['replies'][r]['user'] = profiles[reply['user_id']]
stories[s][comment_set][c]['liking_user_ids'] = list(comment['liking_users'])
for u, user_id in enumerate(comment['liking_users']):
if user_id not in profiles: continue
stories[s][comment_set][c]['liking_users'][u] = profiles[user_id]
return stories
@staticmethod
def attach_users_to_comment(comment, profiles):
profiles = dict([(p['user_id'], p) for p in profiles])
if comment['user_id'] not in profiles: return comment
comment['user'] = profiles[comment['user_id']]
if comment['source_user_id']:
comment['source_user'] = profiles[comment['source_user_id']]
for r, reply in enumerate(comment['replies']):
if reply['user_id'] not in profiles: continue
comment['replies'][r]['user'] = profiles[reply['user_id']]
comment['liking_user_ids'] = list(comment['liking_users'])
for u, user_id in enumerate(comment['liking_users']):
if user_id not in profiles: continue
comment['liking_users'][u] = profiles[user_id]
return comment
def add_liking_user(self, user_id):
if user_id not in self.liking_users:
self.liking_users.append(user_id)
self.save()
def remove_liking_user(self, user_id):
if user_id in self.liking_users:
self.liking_users.remove(user_id)
self.save()
def blurblog_permalink(self):
profile = MSocialProfile.get_user(self.user_id)
return "%s/story/%s/%s" % (
profile.blurblog_url,
slugify(self.story_title)[:20],
self.guid_hash[:6]
)
def generate_post_to_service_message(self, truncate=None, include_url=True):
message = strip_tags(self.comments)
if not message or len(message) < 1:
message = self.decoded_story_title
if include_url and truncate:
message = truncate_chars(message, truncate - 18 - 30)
feed = Feed.get_by_id(self.story_feed_id)
if feed:
if truncate:
message += " (%s)" % truncate_chars(feed.feed_title, 18)
else:
message += " (%s)" % truncate_chars(feed.feed_title, 30)
if include_url:
message += " " + self.blurblog_permalink()
elif include_url:
if truncate:
message = truncate_chars(message, truncate - 14)
message += " " + self.blurblog_permalink()
return message
def post_to_service(self, service):
user = User.objects.get(pk=self.user_id)
if service in self.posted_to_services:
logging.user(user, "~BM~FRAlready posted to %s." % (service))
return
posted = False
social_service = MSocialServices.objects.get(user_id=self.user_id)
message = self.generate_post_to_service_message()
logging.user(user, "~BM~FGPosting to %s: ~SB%s" % (service, message))
if service == 'twitter':
posted = social_service.post_to_twitter(self)
elif service == 'facebook':
posted = social_service.post_to_facebook(self)
elif service == 'appdotnet':
posted = social_service.post_to_appdotnet(self)
if posted:
self.posted_to_services.append(service)
self.save()
def notify_user_ids(self, include_parent=True):
user_ids = set()
for reply in self.replies:
if reply.user_id not in self.mute_email_users:
user_ids.add(reply.user_id)
if include_parent and self.user_id not in self.mute_email_users:
user_ids.add(self.user_id)
return list(user_ids)
def reply_for_id(self, reply_id):
for reply in self.replies:
if reply.reply_id == reply_id:
return reply
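    # Emails everyone participating in this share's comment thread (minus muted
    # users and the reply's author) about a new reply, and records the reply id
    # in emailed_replies so the same reply is never emailed twice.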
def send_emails_for_new_reply(self, reply_id):
if reply_id in self.emailed_replies:
logging.debug(" ***> Already sent reply email: %s on %s" % (reply_id, self))
return
reply = self.reply_for_id(reply_id)
if not reply:
logging.debug(" ***> Reply doesn't exist: %s on %s" % (reply_id, self))
return
notify_user_ids = self.notify_user_ids()
if reply.user_id in notify_user_ids:
notify_user_ids.remove(reply.user_id)
reply_user = User.objects.get(pk=reply.user_id)
reply_user_profile = MSocialProfile.get_user(reply.user_id)
sent_emails = 0
story_feed = Feed.get_by_id(self.story_feed_id)
comment = self.comments_with_author()
profile_user_ids = set([comment['user_id']])
reply_user_ids = list(r['user_id'] for r in comment['replies'])
profile_user_ids = profile_user_ids.union(reply_user_ids)
if self.source_user_id:
profile_user_ids.add(self.source_user_id)
profiles = MSocialProfile.objects.filter(user_id__in=list(profile_user_ids))
profiles = [profile.canonical(compact=True) for profile in profiles]
comment = MSharedStory.attach_users_to_comment(comment, profiles)
for user_id in notify_user_ids:
user = User.objects.get(pk=user_id)
if not user.email or not user.profile.send_emails:
if not user.email:
logging.user(user, "~FMNo email to send to, skipping.")
elif not user.profile.send_emails:
logging.user(user, "~FMDisabled emails, skipping.")
continue
mute_url = "http://%s%s" % (
Site.objects.get_current().domain,
reverse('social-mute-story', kwargs={
'secret_token': user.profile.secret_token,
'shared_story_id': self.id,
})
)
data = {
'reply_user_profile': reply_user_profile,
'comment': comment,
'shared_story': self,
'story_feed': story_feed,
'mute_url': mute_url,
}
story_title = self.decoded_story_title.replace('\n', ' ')
text = render_to_string('mail/email_reply.txt', data)
html = pynliner.fromString(render_to_string('mail/email_reply.xhtml', data))
subject = "%s replied to you on \"%s\" on NewsBlur" % (reply_user.username, story_title)
msg = EmailMultiAlternatives(subject, text,
from_email='NewsBlur <%s>' % settings.HELLO_EMAIL,
to=['%s <%s>' % (user.username, user.email)])
msg.attach_alternative(html, "text/html")
msg.send()
sent_emails += 1
logging.user(reply_user, "~BB~FM~SBSending %s/%s email%s for new reply: %s" % (
sent_emails, len(notify_user_ids),
'' if len(notify_user_ids) == 1 else 's',
self.decoded_story_title[:30]))
self.emailed_replies.append(reply.reply_id)
self.save()
def send_email_for_reshare(self):
if self.emailed_reshare:
logging.debug(" ***> Already sent reply email: %s" % self)
return
reshare_user = User.objects.get(pk=self.user_id)
reshare_user_profile = MSocialProfile.get_user(self.user_id)
original_user = User.objects.get(pk=self.source_user_id)
original_shared_story = MSharedStory.objects.get(user_id=self.source_user_id,
story_guid=self.story_guid)
if not original_user.email or not original_user.profile.send_emails:
if not original_user.email:
logging.user(original_user, "~FMNo email to send to, skipping.")
elif not original_user.profile.send_emails:
logging.user(original_user, "~FMDisabled emails, skipping.")
return
story_feed = Feed.get_by_id(self.story_feed_id)
comment = self.comments_with_author()
profile_user_ids = set([comment['user_id']])
reply_user_ids = [reply['user_id'] for reply in comment['replies']]
profile_user_ids = profile_user_ids.union(reply_user_ids)
if self.source_user_id:
profile_user_ids.add(self.source_user_id)
profiles = MSocialProfile.objects.filter(user_id__in=list(profile_user_ids))
profiles = [profile.canonical(compact=True) for profile in profiles]
comment = MSharedStory.attach_users_to_comment(comment, profiles)
mute_url = "http://%s%s" % (
Site.objects.get_current().domain,
reverse('social-mute-story', kwargs={
'secret_token': original_user.profile.secret_token,
'shared_story_id': original_shared_story.id,
})
)
data = {
'comment': comment,
'shared_story': self,
'reshare_user_profile': reshare_user_profile,
'original_shared_story': original_shared_story,
'story_feed': story_feed,
'mute_url': mute_url,
}
story_title = self.decoded_story_title.replace('\n', ' ')
text = render_to_string('mail/email_reshare.txt', data)
html = pynliner.fromString(render_to_string('mail/email_reshare.xhtml', data))
subject = "%s re-shared \"%s\" from you on NewsBlur" % (reshare_user.username, story_title)
msg = EmailMultiAlternatives(subject, text,
from_email='NewsBlur <%s>' % settings.HELLO_EMAIL,
to=['%s <%s>' % (original_user.username, original_user.email)])
msg.attach_alternative(html, "text/html")
msg.send()
self.emailed_reshare = True
self.save()
logging.user(reshare_user, "~BB~FM~SBSending %s email for story re-share: %s" % (
original_user.username,
self.decoded_story_title[:30]))
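    # Extracts the <img> sources from the shared story's content, skips known
    # ignorable sources, and probes up to ten of them for dimensions using only
    # the first bytes of each response; tiny images (<= 16px) are dropped and the
    # rest are stored largest-first in image_sizes.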
def calculate_image_sizes(self, force=False):
if not self.story_content_z:
return
if not force and self.image_count:
return self.image_sizes
headers = {
'User-Agent': 'NewsBlur Image Fetcher - %s '
'(Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_1) '
'AppleWebKit/534.48.3 (KHTML, like Gecko) Version/5.1 '
'Safari/534.48.3)' % (
settings.NEWSBLUR_URL
),
}
soup = BeautifulSoup(zlib.decompress(self.story_content_z))
image_sources = [img.get('src') for img in soup.findAll('img')]
image_sizes = []
for image_source in image_sources[:10]:
if any(ignore in image_source for ignore in IGNORE_IMAGE_SOURCES):
continue
req = requests.get(image_source, headers=headers, stream=True)
datastream = StringIO(req.content[:30])
_, width, height = image_size(datastream)
if width <= 16 or height <= 16:
continue
image_sizes.append({'src': image_source, 'size': (width, height)})
if image_sizes:
image_sizes = sorted(image_sizes, key=lambda i: i['size'][0] * i['size'][1],
reverse=True)
self.image_sizes = image_sizes
self.image_count = len(image_sizes)
self.save()
logging.debug(" ---> ~SN~FGFetched image sizes on shared story: ~SB%s images" % self.image_count)
return image_sizes
def fetch_original_text(self, force=False, request=None):
original_text_z = self.original_text_z
feed = Feed.get_by_id(self.story_feed_id)
if not original_text_z or force:
ti = TextImporter(self, feed, request=request)
original_text = ti.fetch()
else:
logging.user(request, "~FYFetching ~FGoriginal~FY story text, ~SBfound.")
original_text = zlib.decompress(original_text_z)
return original_text
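# Per-user connections to external services (Twitter, Facebook, App.net) plus the
# chosen profile photo source; handles friend syncing, auto-following and
# cross-posting of shared stories.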
class MSocialServices(mongo.Document):
user_id = mongo.IntField()
autofollow = mongo.BooleanField(default=True)
twitter_uid = mongo.StringField()
twitter_access_key = mongo.StringField()
twitter_access_secret = mongo.StringField()
twitter_friend_ids = mongo.ListField(mongo.StringField())
twitter_picture_url = mongo.StringField()
twitter_username = mongo.StringField()
twitter_refresh_date = mongo.DateTimeField()
facebook_uid = mongo.StringField()
facebook_access_token = mongo.StringField()
facebook_friend_ids = mongo.ListField(mongo.StringField())
facebook_picture_url = mongo.StringField()
facebook_refresh_date = mongo.DateTimeField()
appdotnet_uid = mongo.StringField()
    appdotnet_access_token = mongo.StringField()
appdotnet_friend_ids = mongo.ListField(mongo.StringField())
appdotnet_picture_url = mongo.StringField()
    appdotnet_refresh_date = mongo.DateTimeField()
upload_picture_url = mongo.StringField()
syncing_twitter = mongo.BooleanField(default=False)
syncing_facebook = mongo.BooleanField(default=False)
syncing_appdotnet = mongo.BooleanField(default=False)
meta = {
'collection': 'social_services',
'indexes': ['user_id', 'twitter_friend_ids', 'facebook_friend_ids', 'twitter_uid', 'facebook_uid', 'appdotnet_uid'],
'allow_inheritance': False,
}
def __unicode__(self):
user = User.objects.get(pk=self.user_id)
return "%s (Twitter: %s, FB: %s, ADN: %s)" % (user.username, self.twitter_uid, self.facebook_uid, self.appdotnet_uid)
def canonical(self):
user = User.objects.get(pk=self.user_id)
return {
'twitter': {
'twitter_username': self.twitter_username,
'twitter_picture_url': self.twitter_picture_url,
'twitter_uid': self.twitter_uid,
'syncing': self.syncing_twitter,
},
'facebook': {
'facebook_uid': self.facebook_uid,
'facebook_picture_url': self.facebook_picture_url,
'syncing': self.syncing_facebook,
},
'appdotnet': {
'appdotnet_uid': self.appdotnet_uid,
'appdotnet_picture_url': self.appdotnet_picture_url,
'syncing': self.syncing_appdotnet,
},
'gravatar': {
'gravatar_picture_url': "https://www.gravatar.com/avatar/" + \
hashlib.md5(user.email.lower()).hexdigest()
},
'upload': {
'upload_picture_url': self.upload_picture_url
}
}
@classmethod
def get_user(cls, user_id):
try:
profile, created = cls.objects.get_or_create(user_id=user_id)
except cls.MultipleObjectsReturned:
dupes = cls.objects.filter(user_id=user_id)
logging.debug(" ---> ~FRDeleting dupe social services. %s found." % dupes.count())
for dupe in dupes[1:]:
dupe.delete()
profile = dupes[0]
created = False
if created:
profile.save()
return profile
@classmethod
def profile(cls, user_id):
profile = cls.get_user(user_id=user_id)
return profile.canonical()
def save_uploaded_photo(self, photo):
photo_body = photo.read()
filename = photo.name
s3 = s3_utils.S3Store()
image_name = s3.save_profile_picture(self.user_id, filename, photo_body)
if image_name:
self.upload_picture_url = "https://s3.amazonaws.com/%s/avatars/%s/thumbnail_%s" % (
settings.S3_AVATARS_BUCKET_NAME,
self.user_id,
image_name,
)
self.save()
return image_name and self.upload_picture_url
def twitter_api(self):
twitter_consumer_key = settings.TWITTER_CONSUMER_KEY
twitter_consumer_secret = settings.TWITTER_CONSUMER_SECRET
auth = tweepy.OAuthHandler(twitter_consumer_key, twitter_consumer_secret)
auth.set_access_token(self.twitter_access_key, self.twitter_access_secret)
api = tweepy.API(auth)
return api
def facebook_api(self):
graph = facebook.GraphAPI(self.facebook_access_token)
return graph
def appdotnet_api(self):
adn_api = appdotnet.Appdotnet(access_token=self.appdotnet_access_token)
return adn_api
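    # Pulls the user's Twitter profile and friend ids via the API, fills in any
    # missing profile fields (location/bio/website/photo), then auto-follows the
    # friends that already have NewsBlur accounts.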
def sync_twitter_friends(self):
user = User.objects.get(pk=self.user_id)
logging.user(user, "~BG~FMTwitter import starting...")
api = self.twitter_api()
if not api:
logging.user(user, "~BG~FMTwitter import ~SBfailed~SN: no api access.")
self.syncing_twitter = False
self.save()
return
twitter_user = api.me()
self.twitter_picture_url = twitter_user.profile_image_url_https
self.twitter_username = twitter_user.screen_name
        self.twitter_refresh_date = datetime.datetime.utcnow()
self.syncing_twitter = False
self.save()
profile = MSocialProfile.get_user(self.user_id)
profile.location = profile.location or twitter_user.location
profile.bio = profile.bio or twitter_user.description
profile.website = profile.website or twitter_user.url
profile.save()
profile.count_follows()
if not profile.photo_url or not profile.photo_service:
self.set_photo('twitter')
try:
friend_ids = list(unicode(friend.id) for friend in tweepy.Cursor(api.friends).items())
except tweepy.TweepError, e:
logging.user(user, "~BG~FMTwitter import ~SBfailed~SN: %s" % e)
return
if not friend_ids:
logging.user(user, "~BG~FMTwitter import ~SBfailed~SN: no friend_ids.")
self.twitter_friend_ids = friend_ids
self.save()
following = self.follow_twitter_friends()
if not following:
logging.user(user, "~BG~FMTwitter import finished.")
def follow_twitter_friends(self):
social_profile = MSocialProfile.get_user(self.user_id)
following = []
followers = 0
if not self.autofollow:
return following
# Follow any friends already on NewsBlur
user_social_services = MSocialServices.objects.filter(twitter_uid__in=self.twitter_friend_ids)
for user_social_service in user_social_services:
followee_user_id = user_social_service.user_id
socialsub = social_profile.follow_user(followee_user_id)
if socialsub:
following.append(followee_user_id)
# Friends already on NewsBlur should follow back
# following_users = MSocialServices.objects.filter(twitter_friend_ids__contains=self.twitter_uid)
# for following_user in following_users:
# if following_user.autofollow:
# following_user_profile = MSocialProfile.get_user(following_user.user_id)
# following_user_profile.follow_user(self.user_id, check_unfollowed=True)
# followers += 1
user = User.objects.get(pk=self.user_id)
logging.user(user, "~BG~FMTwitter import: %s users, now following ~SB%s~SN with ~SB%s~SN follower-backs" % (len(self.twitter_friend_ids), len(following), followers))
return following
def sync_facebook_friends(self):
user = User.objects.get(pk=self.user_id)
logging.user(user, "~BG~FMFacebook import starting...")
graph = self.facebook_api()
if not graph:
logging.user(user, "~BG~FMFacebook import ~SBfailed~SN: no api access.")
self.syncing_facebook = False
self.save()
return
friends = graph.get_connections("me", "friends")
if not friends:
logging.user(user, "~BG~FMFacebook import ~SBfailed~SN: no friend_ids.")
self.syncing_facebook = False
self.save()
return
facebook_friend_ids = [unicode(friend["id"]) for friend in friends["data"]]
self.facebook_friend_ids = facebook_friend_ids
self.facebook_refresh_date = datetime.datetime.utcnow()
self.facebook_picture_url = "//graph.facebook.com/%s/picture" % self.facebook_uid
self.syncing_facebook = False
self.save()
facebook_user = graph.request('me', args={'fields':'website,bio,location'})
profile = MSocialProfile.get_user(self.user_id)
profile.location = profile.location or (facebook_user.get('location') and facebook_user['location']['name'])
profile.bio = profile.bio or facebook_user.get('bio')
if not profile.website and facebook_user.get('website'):
profile.website = facebook_user.get('website').split()[0]
profile.save()
profile.count_follows()
if not profile.photo_url or not profile.photo_service:
self.set_photo('facebook')
self.follow_facebook_friends()
def follow_facebook_friends(self):
social_profile = MSocialProfile.get_user(self.user_id)
following = []
followers = 0
if not self.autofollow:
return following
# Follow any friends already on NewsBlur
user_social_services = MSocialServices.objects.filter(facebook_uid__in=self.facebook_friend_ids)
for user_social_service in user_social_services:
followee_user_id = user_social_service.user_id
socialsub = social_profile.follow_user(followee_user_id)
if socialsub:
following.append(followee_user_id)
# Friends already on NewsBlur should follow back
# following_users = MSocialServices.objects.filter(facebook_friend_ids__contains=self.facebook_uid)
# for following_user in following_users:
# if following_user.autofollow:
# following_user_profile = MSocialProfile.get_user(following_user.user_id)
# following_user_profile.follow_user(self.user_id, check_unfollowed=True)
# followers += 1
user = User.objects.get(pk=self.user_id)
logging.user(user, "~BG~FMFacebook import: %s users, now following ~SB%s~SN with ~SB%s~SN follower-backs" % (len(self.facebook_friend_ids), len(following), followers))
return following
def sync_appdotnet_friends(self):
user = User.objects.get(pk=self.user_id)
logging.user(user, "~BG~FMApp.net import starting...")
api = self.appdotnet_api()
if not api:
logging.user(user, "~BG~FMApp.net import ~SBfailed~SN: no api access.")
self.syncing_appdotnet = False
self.save()
return
friend_ids = []
has_more_friends = True
before_id = None
since_id = None
while has_more_friends:
friends_resp = api.getUserFollowingIds(self.appdotnet_uid,
before_id=before_id,
since_id=since_id)
friends = json.decode(friends_resp)
before_id = friends['meta'].get('min_id')
since_id = friends['meta'].get('max_id')
has_more_friends = friends['meta'].get('more')
friend_ids.extend([fid for fid in friends['data']])
if not friend_ids:
logging.user(user, "~BG~FMApp.net import ~SBfailed~SN: no friend_ids.")
self.syncing_appdotnet = False
self.save()
return
adn_user = json.decode(api.getUser(self.appdotnet_uid))['data']
self.appdotnet_picture_url = adn_user['avatar_image']['url']
self.appdotnet_username = adn_user['username']
self.appdotnet_friend_ids = friend_ids
        self.appdotnet_refresh_date = datetime.datetime.utcnow()
self.syncing_appdotnet = False
self.save()
profile = MSocialProfile.get_user(self.user_id)
profile.bio = profile.bio or adn_user['description']['text']
profile.save()
profile.count_follows()
if not profile.photo_url or not profile.photo_service:
self.set_photo('appdotnet')
self.follow_appdotnet_friends()
def follow_appdotnet_friends(self):
social_profile = MSocialProfile.get_user(self.user_id)
following = []
followers = 0
if not self.autofollow:
return following
# Follow any friends already on NewsBlur
user_social_services = MSocialServices.objects.filter(appdotnet_uid__in=self.appdotnet_friend_ids)
for user_social_service in user_social_services:
followee_user_id = user_social_service.user_id
socialsub = social_profile.follow_user(followee_user_id)
if socialsub:
following.append(followee_user_id)
# Friends already on NewsBlur should follow back
# following_users = MSocialServices.objects.filter(appdotnet_friend_ids__contains=self.appdotnet_uid)
# for following_user in following_users:
# if following_user.autofollow:
# following_user_profile = MSocialProfile.get_user(following_user.user_id)
# following_user_profile.follow_user(self.user_id, check_unfollowed=True)
# followers += 1
user = User.objects.get(pk=self.user_id)
logging.user(user, "~BG~FMApp.net import: %s users, now following ~SB%s~SN with ~SB%s~SN follower-backs" % (len(self.appdotnet_friend_ids), len(following), followers))
return following
def disconnect_twitter(self):
self.twitter_uid = None
self.save()
def disconnect_facebook(self):
self.facebook_uid = None
self.save()
def disconnect_appdotnet(self):
self.appdotnet_uid = None
self.save()
def set_photo(self, service):
profile = MSocialProfile.get_user(self.user_id)
if service == 'nothing':
service = None
profile.photo_service = service
if not service:
profile.photo_url = None
elif service == 'twitter':
profile.photo_url = self.twitter_picture_url
elif service == 'facebook':
profile.photo_url = self.facebook_picture_url
elif service == 'upload':
profile.photo_url = self.upload_picture_url
elif service == 'gravatar':
user = User.objects.get(pk=self.user_id)
profile.photo_url = "https://www.gravatar.com/avatar/" + \
hashlib.md5(user.email).hexdigest()
profile.save()
return profile
@classmethod
def sync_all_twitter_photos(cls, days=14):
week_ago = datetime.datetime.now() - datetime.timedelta(days=days)
shares = MSharedStory.objects.filter(shared_date__gte=week_ago)
sharers = sorted(set([s.user_id for s in shares]))
print " ---> %s sharing user_ids" % len(sorted(sharers))
for user_id in sharers:
profile = MSocialProfile.objects.get(user_id=user_id)
if not profile.photo_service == 'twitter': continue
ss = MSocialServices.objects.get(user_id=user_id)
try:
ss.sync_twitter_photo()
print " ---> Syncing %s" % user_id
except Exception, e:
print " ***> Exception on %s: %s" % (user_id, e)
def sync_twitter_photo(self):
profile = MSocialProfile.get_user(self.user_id)
if profile.photo_service != "twitter":
return
user = User.objects.get(pk=self.user_id)
logging.user(user, "~FCSyncing Twitter profile photo...")
try:
api = self.twitter_api()
me = api.me()
except tweepy.TweepError, e:
logging.user(user, "~FRException (%s): ~FCsetting to blank profile photo" % e)
self.twitter_picture_url = None
self.set_photo("nothing")
return
self.twitter_picture_url = me.profile_image_url_https
self.save()
self.set_photo('twitter')
def post_to_twitter(self, shared_story):
message = shared_story.generate_post_to_service_message(truncate=140)
try:
api = self.twitter_api()
api.update_status(status=message)
except tweepy.TweepError, e:
print e
return
return True
def post_to_facebook(self, shared_story):
message = shared_story.generate_post_to_service_message(include_url=False)
shared_story.calculate_image_sizes()
content = zlib.decompress(shared_story.story_content_z)[:1024]
try:
api = self.facebook_api()
# api.put_wall_post(message=message)
api.put_object('me', '%s:share' % settings.FACEBOOK_NAMESPACE,
link=shared_story.blurblog_permalink(),
type="link",
name=shared_story.decoded_story_title,
description=content,
website=shared_story.blurblog_permalink(),
message=message,
)
except facebook.GraphAPIError, e:
print e
return
return True
def post_to_appdotnet(self, shared_story):
message = shared_story.generate_post_to_service_message(truncate=256)
try:
api = self.appdotnet_api()
api.createPost(text=message, links=[{
'text': shared_story.decoded_story_title,
'url': shared_story.blurblog_permalink()
}])
except Exception, e:
print e
return
return True
class MInteraction(mongo.Document):
user_id = mongo.IntField()
date = mongo.DateTimeField(default=datetime.datetime.now)
category = mongo.StringField()
title = mongo.StringField()
content = mongo.StringField()
with_user_id = mongo.IntField()
feed_id = mongo.DynamicField()
story_feed_id= mongo.IntField()
content_id = mongo.StringField()
meta = {
'collection': 'interactions',
'indexes': [('user_id', '-date'), 'category', 'with_user_id'],
'allow_inheritance': False,
'index_drop_dups': True,
'ordering': ['-date'],
}
def __unicode__(self):
user = User.objects.get(pk=self.user_id)
with_user = self.with_user_id and User.objects.get(pk=self.with_user_id)
return "<%s> %s on %s: %s - %s" % (user.username, with_user and with_user.username, self.date,
self.category, self.content and self.content[:20])
def canonical(self):
return {
'date': self.date,
'category': self.category,
'title': self.title,
'content': self.content,
'with_user_id': self.with_user_id,
'feed_id': self.feed_id,
'story_feed_id': self.story_feed_id,
'content_id': self.content_id,
}
@classmethod
def publish_update_to_subscribers(self, user_id):
user = User.objects.get(pk=user_id)
try:
r = redis.Redis(connection_pool=settings.REDIS_POOL)
listeners_count = r.publish(user.username, 'interaction:new')
if listeners_count:
logging.debug(" ---> ~FMPublished to %s subscribers" % (listeners_count))
except redis.ConnectionError:
logging.debug(" ***> ~BMRedis is unavailable for real-time.")
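    # Returns one page of this user's interactions (follows, replies, likes,
    # reshares), annotated with the other party's profile and an is_new flag
    # relative to the dashboard's last-seen date.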
@classmethod
def user(cls, user_id, page=1, limit=None, categories=None):
user_profile = Profile.objects.get(user=user_id)
dashboard_date = user_profile.dashboard_date or user_profile.last_seen_on
page = max(1, page)
limit = int(limit) if limit else 4
offset = (page-1) * limit
interactions_db = cls.objects.filter(user_id=user_id)
if categories:
interactions_db = interactions_db.filter(category__in=categories)
interactions_db = interactions_db[offset:offset+limit+1]
has_next_page = len(interactions_db) > limit
interactions_db = interactions_db[offset:offset+limit]
with_user_ids = [i.with_user_id for i in interactions_db if i.with_user_id]
social_profiles = dict((p.user_id, p) for p in MSocialProfile.objects.filter(user_id__in=with_user_ids))
interactions = []
for interaction_db in interactions_db:
interaction = interaction_db.canonical()
social_profile = social_profiles.get(interaction_db.with_user_id)
if social_profile:
interaction['photo_url'] = social_profile.profile_photo_url
interaction['with_user'] = social_profiles.get(interaction_db.with_user_id)
interaction['time_since'] = relative_timesince(interaction_db.date)
interaction['date'] = interaction_db.date
interaction['is_new'] = interaction_db.date > dashboard_date
interactions.append(interaction)
return interactions, has_next_page
@classmethod
def user_unread_count(cls, user_id):
user_profile = Profile.objects.get(user=user_id)
dashboard_date = user_profile.dashboard_date or user_profile.last_seen_on
interactions_count = cls.objects.filter(user_id=user_id, date__gte=dashboard_date).count()
return interactions_count
@classmethod
def new_follow(cls, follower_user_id, followee_user_id):
params = {
'user_id': followee_user_id,
'with_user_id': follower_user_id,
'category': 'follow',
}
try:
cls.objects.get_or_create(**params)
except cls.MultipleObjectsReturned:
dupes = cls.objects.filter(**params).order_by('-date')
logging.debug(" ---> ~FRDeleting dupe follow interactions. %s found." % dupes.count())
for dupe in dupes[1:]:
dupe.delete()
cls.publish_update_to_subscribers(followee_user_id)
@classmethod
def new_comment_reply(cls, user_id, reply_user_id, reply_content, story_id, story_feed_id, story_title=None, original_message=None):
params = {
'user_id': user_id,
'with_user_id': reply_user_id,
'category': 'comment_reply',
'content': linkify(strip_tags(reply_content)),
'feed_id': "social:%s" % user_id,
'story_feed_id': story_feed_id,
'title': story_title,
'content_id': story_id,
}
if original_message:
params['content'] = original_message
original = cls.objects.filter(**params).limit(1)
if original:
original = original[0]
original.content = linkify(strip_tags(reply_content))
original.save()
else:
original_message = None
if not original_message:
cls.objects.create(**params)
cls.publish_update_to_subscribers(user_id)
@classmethod
def remove_comment_reply(cls, user_id, reply_user_id, reply_content, story_id, story_feed_id):
params = {
'user_id': user_id,
'with_user_id': reply_user_id,
'category': 'comment_reply',
'content': linkify(strip_tags(reply_content)),
'feed_id': "social:%s" % user_id,
'story_feed_id': story_feed_id,
'content_id': story_id,
}
original = cls.objects.filter(**params)
original.delete()
cls.publish_update_to_subscribers(user_id)
@classmethod
def new_comment_like(cls, liking_user_id, comment_user_id, story_id, story_title, comments):
cls.objects.get_or_create(user_id=comment_user_id,
with_user_id=liking_user_id,
category="comment_like",
feed_id="social:%s" % comment_user_id,
content_id=story_id,
defaults={
"title": story_title,
"content": comments,
})
cls.publish_update_to_subscribers(comment_user_id)
@classmethod
def new_reply_reply(cls, user_id, comment_user_id, reply_user_id, reply_content, story_id, story_feed_id, story_title=None, original_message=None):
params = {
'user_id': user_id,
'with_user_id': reply_user_id,
'category': 'reply_reply',
'content': linkify(strip_tags(reply_content)),
'feed_id': "social:%s" % comment_user_id,
'story_feed_id': story_feed_id,
'title': story_title,
'content_id': story_id,
}
if original_message:
params['content'] = original_message
original = cls.objects.filter(**params).limit(1)
if original:
original = original[0]
original.content = reply_content
original.save()
else:
original_message = None
if not original_message:
cls.objects.create(**params)
cls.publish_update_to_subscribers(user_id)
@classmethod
def remove_reply_reply(cls, user_id, comment_user_id, reply_user_id, reply_content, story_id, story_feed_id):
params = {
'user_id': user_id,
'with_user_id': reply_user_id,
'category': 'reply_reply',
'content': linkify(strip_tags(reply_content)),
'feed_id': "social:%s" % comment_user_id,
'story_feed_id': story_feed_id,
'content_id': story_id,
}
original = cls.objects.filter(**params)
original.delete()
cls.publish_update_to_subscribers(user_id)
@classmethod
def new_reshared_story(cls, user_id, reshare_user_id, comments, story_title, story_feed_id, story_id, original_comments=None):
params = {
'user_id': user_id,
'with_user_id': reshare_user_id,
'category': 'story_reshare',
'content': comments,
'title': story_title,
'feed_id': "social:%s" % reshare_user_id,
'story_feed_id': story_feed_id,
'content_id': story_id,
}
if original_comments:
params['content'] = original_comments
original = cls.objects.filter(**params).limit(1)
if original:
interaction = original[0]
interaction.content = comments
interaction.save()
else:
original_comments = None
if not original_comments:
cls.objects.create(**params)
cls.publish_update_to_subscribers(user_id)
class MActivity(mongo.Document):
user_id = mongo.IntField()
date = mongo.DateTimeField(default=datetime.datetime.now)
category = mongo.StringField()
title = mongo.StringField()
content = mongo.StringField()
with_user_id = mongo.IntField()
feed_id = mongo.DynamicField()
story_feed_id= mongo.IntField()
content_id = mongo.StringField()
meta = {
'collection': 'activities',
'indexes': [('user_id', '-date'), 'category', 'with_user_id'],
'allow_inheritance': False,
'index_drop_dups': True,
'ordering': ['-date'],
}
def __unicode__(self):
user = User.objects.get(pk=self.user_id)
return "<%s> %s - %s" % (user.username, self.category, self.content and self.content[:20])
def canonical(self):
return {
'date': self.date,
'category': self.category,
'title': self.title,
'content': self.content,
'user_id': self.user_id,
'with_user_id': self.with_user_id or self.user_id,
'feed_id': self.feed_id or self.story_feed_id,
'story_feed_id': self.story_feed_id or self.feed_id,
'content_id': self.content_id,
}
@classmethod
def user(cls, user_id, page=1, limit=4, public=False, categories=None):
user_profile = Profile.objects.get(user=user_id)
dashboard_date = user_profile.dashboard_date or user_profile.last_seen_on
page = max(1, page)
limit = int(limit)
offset = (page-1) * limit
activities_db = cls.objects.filter(user_id=user_id)
if categories:
activities_db = activities_db.filter(category__in=categories)
if public:
activities_db = activities_db.filter(category__nin=['star', 'feedsub'])
activities_db = activities_db[offset:offset+limit+1]
has_next_page = len(activities_db) > limit
        # activities_db has already been offset above; just trim to the page size.
        activities_db = activities_db[:limit]
with_user_ids = [a.with_user_id for a in activities_db if a.with_user_id]
social_profiles = dict((p.user_id, p) for p in MSocialProfile.objects.filter(user_id__in=with_user_ids))
activities = []
for activity_db in activities_db:
activity = activity_db.canonical()
activity['date'] = activity_db.date
activity['time_since'] = relative_timesince(activity_db.date)
social_profile = social_profiles.get(activity_db.with_user_id)
if social_profile:
activity['photo_url'] = social_profile.profile_photo_url
activity['is_new'] = activity_db.date > dashboard_date
activity['with_user'] = social_profiles.get(activity_db.with_user_id or activity_db.user_id)
activities.append(activity)
return activities, has_next_page
@classmethod
def new_starred_story(cls, user_id, story_title, story_feed_id, story_id):
cls.objects.get_or_create(user_id=user_id,
category='star',
story_feed_id=story_feed_id,
content_id=story_id,
defaults=dict(content=story_title))
@classmethod
def remove_starred_story(cls, user_id, story_feed_id, story_id):
params = {
'user_id': user_id,
'category': 'star',
'story_feed_id': story_feed_id,
'content_id': story_id,
}
original = cls.objects.filter(**params)
original.delete()
@classmethod
def new_feed_subscription(cls, user_id, feed_id, feed_title):
params = {
"user_id": user_id,
"category": 'feedsub',
"feed_id": feed_id,
}
try:
cls.objects.get_or_create(defaults=dict(content=feed_title), **params)
except cls.MultipleObjectsReturned:
dupes = cls.objects.filter(**params).order_by('-date')
logging.debug(" ---> ~FRDeleting dupe feed subscription activities. %s found." % dupes.count())
for dupe in dupes[1:]:
dupe.delete()
@classmethod
def new_follow(cls, follower_user_id, followee_user_id):
params = {
'user_id': follower_user_id,
'with_user_id': followee_user_id,
'category': 'follow',
}
try:
cls.objects.get_or_create(**params)
except cls.MultipleObjectsReturned:
dupes = cls.objects.filter(**params).order_by('-date')
logging.debug(" ---> ~FRDeleting dupe follow activities. %s found." % dupes.count())
for dupe in dupes[1:]:
dupe.delete()
@classmethod
def new_comment_reply(cls, user_id, comment_user_id, reply_content, story_id, story_feed_id, story_title=None, original_message=None):
params = {
'user_id': user_id,
'with_user_id': comment_user_id,
'category': 'comment_reply',
'content': linkify(strip_tags(reply_content)),
'feed_id': "social:%s" % comment_user_id,
'story_feed_id': story_feed_id,
'title': story_title,
'content_id': story_id,
}
if original_message:
params['content'] = original_message
original = cls.objects.filter(**params).limit(1)
if original:
original = original[0]
original.content = linkify(strip_tags(reply_content))
original.save()
else:
original_message = None
if not original_message:
cls.objects.create(**params)
@classmethod
def remove_comment_reply(cls, user_id, comment_user_id, reply_content, story_id, story_feed_id):
params = {
'user_id': user_id,
'with_user_id': comment_user_id,
'category': 'comment_reply',
'content': linkify(strip_tags(reply_content)),
'feed_id': "social:%s" % comment_user_id,
'story_feed_id': story_feed_id,
'content_id': story_id,
}
original = cls.objects.filter(**params)
original.delete()
@classmethod
def new_comment_like(cls, liking_user_id, comment_user_id, story_id, story_title, comments):
cls.objects.get_or_create(user_id=liking_user_id,
with_user_id=comment_user_id,
category="comment_like",
feed_id="social:%s" % comment_user_id,
content_id=story_id,
defaults={
"title": story_title,
"content": comments,
})
@classmethod
def new_shared_story(cls, user_id, source_user_id, story_title, comments, story_feed_id, story_id, share_date=None):
data = {
"user_id": user_id,
"category": 'sharedstory',
"feed_id": "social:%s" % user_id,
"story_feed_id": story_feed_id,
"content_id": story_id,
}
try:
a, _ = cls.objects.get_or_create(defaults={
'with_user_id': source_user_id,
'title': story_title,
'content': comments,
}, **data)
except cls.MultipleObjectsReturned:
dupes = cls.objects.filter(**data)
logging.debug(" ---> ~FRDeleting dupe shared story activities. %s found." % dupes.count())
a = dupes[0]
for dupe in dupes[1:]:
dupe.delete()
if a.content != comments:
a.content = comments
a.save()
if source_user_id and a.with_user_id != source_user_id:
            # MActivity defines no source_user_id field; the guard above compares
            # with_user_id, so that is the field to update.
            a.with_user_id = source_user_id
a.save()
if share_date:
a.date = share_date
a.save()
@classmethod
def remove_shared_story(cls, user_id, story_feed_id, story_id):
params = dict(user_id=user_id,
category='sharedstory',
feed_id="social:%s" % user_id,
story_feed_id=story_feed_id,
content_id=story_id)
try:
a = cls.objects.get(**params)
except cls.DoesNotExist:
return
except cls.MultipleObjectsReturned:
a = cls.objects.filter(**params)
a.delete()
@classmethod
def new_signup(cls, user_id):
cls.objects.get_or_create(user_id=user_id,
with_user_id=user_id,
category="signup")
class MFollowRequest(mongo.Document):
follower_user_id = mongo.IntField(unique_with='followee_user_id')
followee_user_id = mongo.IntField()
date = mongo.DateTimeField(default=datetime.datetime.now)
meta = {
'collection': 'follow_request',
'indexes': ['follower_user_id', 'followee_user_id'],
'ordering': ['-date'],
'allow_inheritance': False,
'index_drop_dups': True,
}
@classmethod
def add(cls, follower_user_id, followee_user_id):
cls.objects.get_or_create(follower_user_id=follower_user_id,
followee_user_id=followee_user_id)
@classmethod
def remove(cls, follower_user_id, followee_user_id):
cls.objects.filter(follower_user_id=follower_user_id,
followee_user_id=followee_user_id).delete()
| mit | -3,288,628,194,197,878,000 | 42.804901 | 195 | 0.551655 | false |
tsdotca/dmclient | core/hacks.py | 1 | 2964 | # core/hacks.py
# Copyright (C) 2018 Alex Mair. All rights reserved.
# This file is part of dmclient.
#
# dmclient is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2 of the License.
#
# dmclient is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with dmclient. If not, see <http://www.gnu.org/licenses/>.
#
"""This module provides dirty hacks to make PyQt more pleasant to work with.
.. todo::
These should only be around when ``__debug__`` is turned on
"""
from PyQt5.QtCore import QDate
from PyQt5.QtCore import QDateTime
from PyQt5.QtCore import QItemSelection
from PyQt5.QtCore import QModelIndex
from PyQt5.QtCore import QPointF
# If set to true, things like QModelIndexes will show their parents in
# methods such as __repr__
show_recursive_relationships = __debug__  # FIXME Should enable via cmdopt
def _qdate__repr__(qdate):
return "<QDate({}-{}-{})>".format(qdate.year(), qdate.month(), qdate.day())
def _qdatetime__repr__(qdatetime):
date, time = qdatetime.date(), qdatetime.time()
return "<QDateTime({}-{}-{} {}:{}:{})>".format(date.year(),
date.month(),
date.day(),
time.hour(),
time.minute(),
time.second())
def _qitemselection__repr__(qitemselection):
indexes = qitemselection.indexes()
return "<QItemSelection({},{})>".format(len(indexes), indexes)
def _qmodelindex__repr__(index):
if index.isValid():
parent = index.parent()
if show_recursive_relationships:
parent_str = "{}".format(parent)
else:
parent_str = "{}".format(type(parent))
return "<QModelIndex({}, {}, parent={}, model={})>".format(index.row(),
index.column(),
parent_str,
index.model())
else:
return "<QModelIndex(<invalid>, model={})>".format(index.model())
def _qpointf__repr__(qpointf):
return "QPointF({}, {})".format(qpointf.x(), qpointf.y())
def install_qt_reprs():
QDate.__repr__ = _qdate__repr__
QDateTime.__repr__ = _qdatetime__repr__
QItemSelection.__repr__ = _qitemselection__repr__
QModelIndex.__repr__ = _qmodelindex__repr__
QPointF.__repr__ = _qpointf__repr__
def install_hacks():
install_qt_reprs()
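if __name__ == '__main__':
    # Illustrative smoke test: after install_hacks(), Qt value types print
    # readable reprs (requires PyQt5, which this module already imports).
    install_hacks()
    print(repr(QDate(2018, 1, 2)))   # -> <QDate(2018-1-2)>
    print(repr(QPointF(1.0, 2.0)))   # -> QPointF(1.0, 2.0)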
| gpl-2.0 | 6,981,152,763,329,449,000 | 34.285714 | 82 | 0.568826 | false |
gkc1000/pyscf | pyscf/mcscf/test/test_newton_casscf.py | 1 | 2281 | #!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from functools import reduce
import numpy
from pyscf import gto, scf, lib, fci
from pyscf.mcscf import newton_casscf
mol = gto.Mole()
mol.verbose = 0
mol.atom = [
['H', ( 5.,-1. , 1. )],
['H', ( 0.,-5. ,-2. )],
['H', ( 4.,-0.5 ,-3. )],
['H', ( 0.,-4.5 ,-1. )],
['H', ( 3.,-0.5 ,-0. )],
['H', ( 0.,-3. ,-1. )],
['H', ( 2.,-2.5 , 0. )],
['H', ( 1., 1. , 3. )],
]
mol.basis = 'sto-3g'
mol.build()
mf = scf.RHF(mol)
mf.max_cycle = 3
mf.kernel()
mc = newton_casscf.CASSCF(mf, 4, 4)
mc.fcisolver = fci.direct_spin1.FCI(mol)
mc.kernel()
def tearDownModule():
global mol, mf, mc
del mol, mf, mc
class KnownValues(unittest.TestCase):
def test_gen_g_hop(self):
numpy.random.seed(1)
mo = numpy.random.random(mf.mo_coeff.shape)
ci0 = numpy.random.random((6,6))
ci0/= numpy.linalg.norm(ci0)
gall, gop, hop, hdiag = newton_casscf.gen_g_hop(mc, mo, ci0, mc.ao2mo(mo))
self.assertAlmostEqual(lib.finger(gall), 21.288022525148595, 8)
self.assertAlmostEqual(lib.finger(hdiag), -4.6864640132374618, 8)
x = numpy.random.random(gall.size)
u, ci1 = newton_casscf.extract_rotation(mc, x, 1, ci0)
self.assertAlmostEqual(lib.finger(gop(u, ci1)), -412.9441873541524, 8)
self.assertAlmostEqual(lib.finger(hop(x)), 73.358310983341198, 8)
def test_get_grad(self):
self.assertAlmostEqual(mc.e_tot, -3.6268060853430573, 8)
self.assertAlmostEqual(abs(mc.get_grad()).max(), 0, 5)
if __name__ == "__main__":
print("Full Tests for mcscf.addons")
unittest.main()
| apache-2.0 | -3,234,945,309,184,798,000 | 31.585714 | 82 | 0.626041 | false |
F5Networks/f5-common-python | f5/bigip/tm/util/test/unit/test_get_dossier.py | 1 | 1572 | # Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import mock
import pytest
from f5.bigip import ManagementRoot
from f5.bigip.tm.util.get_dossier import Get_Dossier
@pytest.fixture
def FakeGetDossier():
fake_sys = mock.MagicMock()
fake_get_dossier = Get_Dossier(fake_sys)
return fake_get_dossier
@pytest.fixture
def FakeiControl(fakeicontrolsession):
mr = ManagementRoot('host', 'fake_admin', 'fake_admin')
mock_session = mock.MagicMock()
mock_session.post.return_value.json.return_value = {}
mr._meta_data['icr_session'] = mock_session
return mr.tm.util.get_dossier
class TestGetDossierCommand(object):
def test_command_get_dossier(self, FakeiControl):
FakeiControl.exec_cmd('run', utilCmdArgs='-b registration-key')
session = FakeiControl._meta_data['bigip']._meta_data['icr_session']
assert session.post.call_args == mock.call(
'https://host:443/mgmt/tm/util/get-dossier/',
json={'utilCmdArgs': '-b registration-key', 'command': 'run'}
)
| apache-2.0 | 1,810,308,920,897,840,400 | 33.173913 | 76 | 0.713104 | false |
cirosantilli/python-utils | sandbox/elearning/generate_tocs.py | 1 | 2277 | import os.path
from xml.dom.minidom import parse
home_dir = os.path.dirname(os.path.dirname(__file__)) #elearning/
html_path = os.path.join(home_dir,'toc.html') #elearning/toc.html
tocs_root_rel_path = 'sidebars'
tocs_path = os.path.join(home_dir,tocs_root_rel_path) #elearning/tocs partial tocs home
class_name = 'nav_tree'
#takes full tree html and generates partial tocs with breadcrumbs in #elearning/tocs dir
def generate_partial_tocs(html_path,tocs_path):
root = parse(html_path)
remove_whilespace_nodes(root,True) #simpler without beautification blank
lis = root.getElementsByTagName('li')
for li in lis:
anc = li.childNodes[0]
if(anc.nodeType == anc.ELEMENT_NODE and anc.localName == "a"):
id = anc.attributes["href"].value[1:]
print '<ul class="'+class_name+'">' + li_ascendants(root,li) + li.toxml() + '</ul>'
#lists ascendants list link up to root.
def li_ascendants(root,li):
result = ''
print 'NODE:\n\n' + li.toxml() + '\n\n'
li.childNodes[0]
ul = li.parentNode
while(not ul is root):
li = ul.parentNode
        result += li.childNodes[0].toxml() # should add the whole link
ul = li.parentNode
return result
#to simplify tasks
def remove_whilespace_nodes(node, unlink=False):
    """Removes all of the whitespace-only text descendants of a DOM node.
When creating a DOM from an XML source, XML parsers are required to
consider several conditions when deciding whether to include
whitespace-only text nodes. This function ignores all of those
    conditions and removes all whitespace-only text descendants of the
specified node. If the unlink flag is specified, the removed text
nodes are unlinked so that their storage can be reclaimed. If the
specified node is a whitespace-only text node then it is left
unmodified."""
remove_list = []
for child in node.childNodes:
if child.nodeType == child.TEXT_NODE and \
not child.data.strip():
remove_list.append(child)
elif child.hasChildNodes():
remove_whilespace_nodes(child, unlink)
for node in remove_list:
node.parentNode.removeChild(node)
if unlink:
node.unlink()
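# Example of the helper above (illustrative): parsing '<ul>\n  <li>a</li>\n</ul>'
# with xml.dom.minidom leaves whitespace-only text nodes between the tags;
# remove_whilespace_nodes(doc, True) drops them, leaving a tree equivalent to
# '<ul><li>a</li></ul>'.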
if __name__ == '__main__':
generate_partial_tocs(html_path,tocs_path) | mit | 458,288,741,566,199,200 | 37.610169 | 88 | 0.684673 | false |
Autodesk/molecular-design-toolkit | moldesign/compute/remote_procedure_calls.py | 1 | 5016 | from __future__ import print_function, absolute_import, division
from future.builtins import *
from future import standard_library
standard_library.install_aliases()
# Copyright 2017 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import types
import future.utils
from pyccc import python as bpy
import moldesign as mdt
from moldesign import utils
from . import configuration, run_job
from ..helpers import display_log
class RpcWrapper(object):
    """ A wrapper that lets you transparently execute python functions in remote
environments - usually in docker containers.
These wrappers are built to allow a lot of run-time flexibility based on the description
of the package (``self.pkg``) that's being called.
Note:
This ONLY works for pure functions - where you're interested in the
return value only. Side effects - including any object state - will be discarded.
Args:
pkg (mdt.compute.packages.InterfacedPackage): package to run this command with
display (bool): Create a jupyter logging display for the remote job
(default: True in Jupyter notebooks, False otherwise)
jobname (str): Name metadata - defaults to the __name__ of the function
sendsource (bool): if False (default), call this function directly on the remote worker;
if True, send the function's source code (for debugging, mostly)
persist_refs (bool): Persist python object references across the RPC roundtrip
is_imethod (bool): This is an instancemethod
Note: we can't determine this at import-time without going to great lengths ...
- see, e.g., http://stackoverflow.com/questions/2366713/ )
"""
def __init__(self, pkg,
display=True,
jobname=None,
sendsource=False,
is_imethod=False,
persist_refs=False):
self.pkg = pkg
self.display = display
self.sendsource = sendsource
self.jobname = jobname
self.is_imethod = is_imethod
self.persist_refs = persist_refs
def __call__(self, func):
"""
This gets called with the function we wish to wrap
"""
from .compute import get_image_path
assert callable(func)
if self.jobname is None:
self.jobname = func.__name__
assert func.__name__ != 'wrapper' # who wraps the wrappers?
@utils.args_from(func,
wraps=True,
inject_kwargs={'wait': True})
def wrapper(*args, **kwargs):
""" Wraps a python function so that it will be executed remotely using a compute engine
Note:
At runtime, this documentation should be replaced with that of the wrapped function
"""
f = func # keeps a reference to the original function in this closure
wait = kwargs.get('wait', True)
if wait and not self.pkg.force_remote:
return f(*args, **kwargs)
# Bind instance methods to their objects
if self.is_imethod:
f, args = _bind_instance_method(f, args)
# Submit job to remote engine
python_call = bpy.PythonCall(f, *args, **kwargs)
engine = utils.if_not_none(self.pkg.engine, mdt.compute.get_engine())
job = bpy.PythonJob(engine=engine,
image=self.pkg.get_docker_image_path(),
command=python_call,
name=self.jobname,
sendsource=self.sendsource,
interpreter='python', # always run in image's native interpreter
persist_references=self.persist_refs,
submit=False)
return run_job(job, wait=wait, _return_result=True)
wrapper.__name__ = func.__name__
wrapper.__wrapped__ = func
return wrapper
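# Illustrative usage sketch -- `my_pkg` and `count_atoms` are hypothetical names,
# not defined in this module:
#
#     @RpcWrapper(pkg=my_pkg, jobname='count_atoms')
#     def count_atoms(mol):
#         return mol.num_atoms
#
#     count_atoms(mol)              # may run locally or in my_pkg's docker image
#     count_atoms(mol, wait=False)  # submits the job without waiting for the result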
def _bind_instance_method(f, args):
# We can't call this function like normal, because the decorators can't identify
# instance methods. Instead, we'll create another bound copy of the instancemethod (probably
# only need to do this once)
fn_self = args[0]
    if future.utils.PY2:
f = types.MethodType(f, fn_self, fn_self.__class__)
else:
f = types.MethodType(f, fn_self)
args = args[1:]
return f, args
| apache-2.0 | -2,045,807,102,273,718,300 | 38.496063 | 99 | 0.61862 | false |
astagi/chickenfoot | test.py | 1 | 1972 | from chickenfoot import Chickenfoot
import socket
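# Each bundle below is a JSON payload of the form
# {"m": <module, e.g. "M1"/"M2">, "a": <action, e.g. "rl"/"rr"/"fw"/"rw"/"stop">,
#  "p": {...optional parameters...}}; the action semantics are the robot's concern,
# and every test only asserts that the full payload plus a trailing newline was
# written to the socket.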
class TestChickenfootClient():
def setUp(self):
TCP_IP = '192.168.0.6'
TCP_PORT = 5005
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.connect((TCP_IP, TCP_PORT))
def tearDown(self):
self.s.close()
def test_moves(self):
self.left()
self.right()
self.stop_wheel()
self.up()
self.down()
self.stop()
def left(self):
bundle = """
{
"m" : "M1",
"a" : "rl",
"p" : {
"p1name" : "p1",
"p2name": 5
}
}
"""
assert self.__send(bundle)
def right(self):
bundle = """
{
"m" : "M1",
"a" : "rr",
"p" : {
"p1name" : "p1"
}
}
"""
assert self.__send(bundle)
def up(self):
bundle = """
{
"m" : "M2",
"a" : "fw",
"p" : {
"p1name" : "p1",
"p2name" : "p2"
}
}
"""
assert self.__send(bundle)
def down(self):
bundle = """
{
"m" : "M2",
"a" : "rw"
}
"""
assert self.__send(bundle)
def stop(self):
bundle = """
{
"m" : "M1",
"a" : "stop",
"p" : {
"p1name" : "stop"
}
}
"""
assert self.__send(bundle)
def stop_wheel(self):
bundle = """
{
"m" : "M2",
"a" : "stop",
"p" : {
"p1name" : "stop_wheel"
}
}
"""
assert self.__send(bundle)
def __send(self, data):
byte_to_send = len(data) + 1
byte_sent = self.s.send(data + "\n")
return byte_sent == byte_to_send | mit | -5,306,917,132,645,212,000 | 19.340206 | 66 | 0.345842 | false |
rossella/neutron | quantum/api/extensions.py | 1 | 22315 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from abc import ABCMeta
import imp
import os
import routes
import webob.dec
import webob.exc
from quantum.api.v2 import attributes
from quantum.common import constants
from quantum.common import exceptions
import quantum.extensions
from quantum.manager import QuantumManager
from quantum.openstack.common import cfg
from quantum.openstack.common import log as logging
from quantum import wsgi
LOG = logging.getLogger('quantum.api.extensions')
class PluginInterface(object):
__metaclass__ = ABCMeta
@classmethod
def __subclasshook__(cls, klass):
"""
The __subclasshook__ method is a class method
        that will be called every time a class is tested
using issubclass(klass, PluginInterface).
In that case, it will check that every method
marked with the abstractmethod decorator is
provided by the plugin class.
"""
for method in cls.__abstractmethods__:
if any(method in base.__dict__ for base in klass.__mro__):
continue
return NotImplemented
return True
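# Sketch of how the hook above behaves (class names are illustrative only):
#
#     class WidgetPluginInterface(PluginInterface):
#         @abstractmethod
#         def get_widgets(self):
#             pass
#
#     class MyPlugin(object):            # does not inherit from the interface
#         def get_widgets(self):
#             return []
#
#     issubclass(MyPlugin, WidgetPluginInterface)   # True, because every
#     # abstractmethod is found somewhere in MyPlugin.__mro__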
class ExtensionDescriptor(object):
"""Base class that defines the contract for extensions.
Note that you don't have to derive from this class to have a valid
extension; it is purely a convenience.
"""
def get_name(self):
"""The name of the extension.
e.g. 'Fox In Socks'
"""
raise NotImplementedError()
def get_alias(self):
"""The alias for the extension.
e.g. 'FOXNSOX'
"""
raise NotImplementedError()
def get_description(self):
"""Friendly description for the extension.
e.g. 'The Fox In Socks Extension'
"""
raise NotImplementedError()
def get_namespace(self):
"""The XML namespace for the extension.
e.g. 'http://www.fox.in.socks/api/ext/pie/v1.0'
"""
raise NotImplementedError()
def get_updated(self):
"""The timestamp when the extension was last updated.
e.g. '2011-01-22T13:25:27-06:00'
"""
# NOTE(justinsb): Not sure of the purpose of this is, vs the XML NS
raise NotImplementedError()
def get_resources(self):
"""List of extensions.ResourceExtension extension objects.
Resources define new nouns, and are accessible through URLs.
"""
resources = []
return resources
def get_actions(self):
"""List of extensions.ActionExtension extension objects.
Actions are verbs callable from the API.
"""
actions = []
return actions
    def get_request_extensions(self):
        """List of extensions.RequestExtension extension objects.
Request extensions are used to handle custom request data.
"""
request_exts = []
return request_exts
def get_extended_resources(self, version):
"""retrieve extended resources or attributes for core resources.
Extended attributes are implemented by a core plugin similarly
to the attributes defined in the core, and can appear in
request and response messages. Their names are scoped with the
extension's prefix. The core API version is passed to this
function, which must return a
map[<resource_name>][<attribute_name>][<attribute_property>]
specifying the extended resource attribute properties required
by that API version.
Extension can add resources and their attr definitions too.
The returned map can be integrated into RESOURCE_ATTRIBUTE_MAP.
"""
return {}
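    # Illustrative shape of the returned map (names and properties are examples):
    #
    #     {'networks': {'ext_prefix:some_attr': {'allow_post': True,
    #                                            'allow_put': False,
    #                                            'is_visible': True}}}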
def get_plugin_interface(self):
"""
Returns an abstract class which defines contract for the plugin.
        The abstract class should inherit from extensions.PluginInterface,
Methods in this abstract class should be decorated as abstractmethod
"""
return None
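# Minimal extension sketch implementing the contract above (all names and values
# are illustrative; see tests/unit/extensions/foxinsocks.py for a real example):
#
#     class Widgets(ExtensionDescriptor):
#         def get_name(self):        return "Widgets"
#         def get_alias(self):       return "WIDGETS"
#         def get_description(self): return "Adds a widgets resource"
#         def get_namespace(self):   return "http://example.org/ext/widgets/api/v1.0"
#         def get_updated(self):     return "2012-01-01T00:00:00-00:00"
#         def get_resources(self):
#             return [ResourceExtension('widgets', WidgetsController())]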
class ActionExtensionController(wsgi.Controller):
def __init__(self, application):
self.application = application
self.action_handlers = {}
def add_action(self, action_name, handler):
self.action_handlers[action_name] = handler
def action(self, request, id):
input_dict = self._deserialize(request.body,
request.get_content_type())
for action_name, handler in self.action_handlers.iteritems():
if action_name in input_dict:
return handler(input_dict, request, id)
# no action handler found (bump to downstream application)
response = self.application
return response
class RequestExtensionController(wsgi.Controller):
def __init__(self, application):
self.application = application
self.handlers = []
def add_handler(self, handler):
self.handlers.append(handler)
def process(self, request, *args, **kwargs):
res = request.get_response(self.application)
# currently request handlers are un-ordered
for handler in self.handlers:
response = handler(request, res)
return response
class ExtensionController(wsgi.Controller):
def __init__(self, extension_manager):
self.extension_manager = extension_manager
def _translate(self, ext):
ext_data = {}
ext_data['name'] = ext.get_name()
ext_data['alias'] = ext.get_alias()
ext_data['description'] = ext.get_description()
ext_data['namespace'] = ext.get_namespace()
ext_data['updated'] = ext.get_updated()
ext_data['links'] = [] # TODO(dprince): implement extension links
return ext_data
def index(self, request):
extensions = []
for _alias, ext in self.extension_manager.extensions.iteritems():
extensions.append(self._translate(ext))
return dict(extensions=extensions)
def show(self, request, id):
# NOTE(dprince): the extensions alias is used as the 'id' for show
ext = self.extension_manager.extensions.get(id, None)
if not ext:
raise webob.exc.HTTPNotFound(
_("Extension with alias %s does not exist") % id)
return dict(extension=self._translate(ext))
def delete(self, request, id):
raise webob.exc.HTTPNotFound()
def create(self, request):
raise webob.exc.HTTPNotFound()
class ExtensionMiddleware(wsgi.Middleware):
"""Extensions middleware for WSGI."""
def __init__(self, application,
ext_mgr=None):
self.ext_mgr = (ext_mgr
or ExtensionManager(
get_extensions_path()))
mapper = routes.Mapper()
# extended resources
for resource in self.ext_mgr.get_resources():
path_prefix = resource.path_prefix
if resource.parent:
path_prefix = (resource.path_prefix +
"/%s/{%s_id}" %
(resource.parent["collection_name"],
resource.parent["member_name"]))
LOG.debug(_('Extended resource: %s'),
resource.collection)
for action, method in resource.collection_actions.iteritems():
conditions = dict(method=[method])
path = "/%s/%s" % (resource.collection, action)
with mapper.submapper(controller=resource.controller,
action=action,
path_prefix=path_prefix,
conditions=conditions) as submap:
submap.connect(path)
submap.connect("%s.:(format)" % path)
mapper.resource(resource.collection, resource.collection,
controller=resource.controller,
member=resource.member_actions,
parent_resource=resource.parent,
path_prefix=path_prefix)
# extended actions
action_controllers = self._action_ext_controllers(application,
self.ext_mgr, mapper)
for action in self.ext_mgr.get_actions():
LOG.debug(_('Extended action: %s'), action.action_name)
controller = action_controllers[action.collection]
controller.add_action(action.action_name, action.handler)
# extended requests
req_controllers = self._request_ext_controllers(application,
self.ext_mgr, mapper)
for request_ext in self.ext_mgr.get_request_extensions():
LOG.debug(_('Extended request: %s'), request_ext.key)
controller = req_controllers[request_ext.key]
controller.add_handler(request_ext.handler)
self._router = routes.middleware.RoutesMiddleware(self._dispatch,
mapper)
super(ExtensionMiddleware, self).__init__(application)
@classmethod
def factory(cls, global_config, **local_config):
"""Paste factory."""
def _factory(app):
return cls(app, global_config, **local_config)
return _factory
def _action_ext_controllers(self, application, ext_mgr, mapper):
"""Return a dict of ActionExtensionController-s by collection."""
action_controllers = {}
for action in ext_mgr.get_actions():
if action.collection not in action_controllers.keys():
controller = ActionExtensionController(application)
mapper.connect("/%s/:(id)/action.:(format)" %
action.collection,
action='action',
controller=controller,
conditions=dict(method=['POST']))
mapper.connect("/%s/:(id)/action" % action.collection,
action='action',
controller=controller,
conditions=dict(method=['POST']))
action_controllers[action.collection] = controller
return action_controllers
def _request_ext_controllers(self, application, ext_mgr, mapper):
"""Returns a dict of RequestExtensionController-s by collection."""
request_ext_controllers = {}
for req_ext in ext_mgr.get_request_extensions():
if req_ext.key not in request_ext_controllers.keys():
controller = RequestExtensionController(application)
mapper.connect(req_ext.url_route + '.:(format)',
action='process',
controller=controller,
conditions=req_ext.conditions)
mapper.connect(req_ext.url_route,
action='process',
controller=controller,
conditions=req_ext.conditions)
request_ext_controllers[req_ext.key] = controller
return request_ext_controllers
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
"""Route the incoming request with router."""
req.environ['extended.app'] = self.application
return self._router
@staticmethod
@webob.dec.wsgify(RequestClass=wsgi.Request)
def _dispatch(req):
"""Dispatch the request.
Returns the routed WSGI app's response or defers to the extended
application.
"""
match = req.environ['wsgiorg.routing_args'][1]
if not match:
return req.environ['extended.app']
app = match['controller']
return app
def plugin_aware_extension_middleware_factory(global_config, **local_config):
"""Paste factory."""
def _factory(app):
ext_mgr = PluginAwareExtensionManager.get_instance()
return ExtensionMiddleware(app, ext_mgr=ext_mgr)
return _factory
class ExtensionManager(object):
"""Load extensions from the configured extension path.
See tests/unit/extensions/foxinsocks.py for an
example extension implementation.
"""
def __init__(self, path):
LOG.info(_('Initializing extension manager.'))
self.path = path
self.extensions = {}
self._load_all_extensions()
def get_resources(self):
"""Returns a list of ResourceExtension objects."""
resources = []
resources.append(ResourceExtension('extensions',
ExtensionController(self)))
for ext in self.extensions.itervalues():
try:
resources.extend(ext.get_resources())
except AttributeError:
                # NOTE(dprince): Extensions aren't required to have resource
# extensions
pass
return resources
def get_actions(self):
"""Returns a list of ActionExtension objects."""
actions = []
for ext in self.extensions.itervalues():
try:
actions.extend(ext.get_actions())
except AttributeError:
                # NOTE(dprince): Extensions aren't required to have action
# extensions
pass
return actions
def get_request_extensions(self):
"""Returns a list of RequestExtension objects."""
request_exts = []
for ext in self.extensions.itervalues():
try:
request_exts.extend(ext.get_request_extensions())
except AttributeError:
                # NOTE(dprince): Extensions aren't required to have request
# extensions
pass
return request_exts
def extend_resources(self, version, attr_map):
"""Extend resources with additional resources or attributes.
:param: attr_map, the existing mapping from resource name to
attrs definition.
After this function, we will extend the attr_map if an extension
wants to extend this map.
"""
for ext in self.extensions.itervalues():
if not hasattr(ext, 'get_extended_resources'):
continue
try:
extended_attrs = ext.get_extended_resources(version)
for resource, resource_attrs in extended_attrs.iteritems():
if attr_map.get(resource, None):
attr_map[resource].update(resource_attrs)
else:
attr_map[resource] = resource_attrs
if extended_attrs:
attributes.EXT_NSES[ext.get_alias()] = ext.get_namespace()
except AttributeError:
LOG.exception(_("Error fetching extended attributes for "
"extension '%s'"), ext.get_name())
def _check_extension(self, extension):
"""Checks for required methods in extension objects."""
try:
LOG.debug(_('Ext name: %s'), extension.get_name())
LOG.debug(_('Ext alias: %s'), extension.get_alias())
LOG.debug(_('Ext description: %s'), extension.get_description())
LOG.debug(_('Ext namespace: %s'), extension.get_namespace())
LOG.debug(_('Ext updated: %s'), extension.get_updated())
except AttributeError as ex:
LOG.exception(_("Exception loading extension: %s"), unicode(ex))
return False
if hasattr(extension, 'check_env'):
try:
extension.check_env()
except exceptions.InvalidExtenstionEnv as ex:
LOG.warn(_("Exception loading extension: %s"), unicode(ex))
return False
return True
def _load_all_extensions(self):
"""Load extensions from the configured path.
Load extensions from the configured path. The extension name is
constructed from the module_name. If your extension module was named
widgets.py the extension class within that module should be
'Widgets'.
See tests/unit/extensions/foxinsocks.py for an example
extension implementation.
"""
for path in self.path.split(':'):
if os.path.exists(path):
self._load_all_extensions_from_path(path)
else:
LOG.error(_("Extension path '%s' doesn't exist!"), path)
def _load_all_extensions_from_path(self, path):
for f in os.listdir(path):
try:
LOG.info(_('Loading extension file: %s'), f)
mod_name, file_ext = os.path.splitext(os.path.split(f)[-1])
ext_path = os.path.join(path, f)
if file_ext.lower() == '.py' and not mod_name.startswith('_'):
mod = imp.load_source(mod_name, ext_path)
ext_name = mod_name[0].upper() + mod_name[1:]
new_ext_class = getattr(mod, ext_name, None)
if not new_ext_class:
LOG.warn(_('Did not find expected name '
'"%(ext_name)s" in %(file)s'),
{'ext_name': ext_name,
'file': ext_path})
continue
new_ext = new_ext_class()
self.add_extension(new_ext)
except Exception as exception:
LOG.warn(_("Extension file %(f)s wasn't loaded due to "
"%(exception)s"), locals())
def add_extension(self, ext):
# Do nothing if the extension doesn't check out
if not self._check_extension(ext):
return
alias = ext.get_alias()
LOG.info(_('Loaded extension: %s'), alias)
if alias in self.extensions:
raise exceptions.Error(_("Found duplicate extension: %s") %
alias)
self.extensions[alias] = ext
class PluginAwareExtensionManager(ExtensionManager):
_instance = None
def __init__(self, path, plugins):
self.plugins = plugins
super(PluginAwareExtensionManager, self).__init__(path)
def _check_extension(self, extension):
"""Checks if any of plugins supports extension and implements the
extension contract."""
extension_is_valid = super(PluginAwareExtensionManager,
self)._check_extension(extension)
return (extension_is_valid and
self._plugins_support(extension) and
self._plugins_implement_interface(extension))
def _plugins_support(self, extension):
alias = extension.get_alias()
supports_extension = any((hasattr(plugin,
"supported_extension_aliases") and
alias in plugin.supported_extension_aliases)
for plugin in self.plugins.values())
if not supports_extension:
LOG.warn(_("Extension %s not supported by any of loaded plugins"),
alias)
return supports_extension
def _plugins_implement_interface(self, extension):
if(not hasattr(extension, "get_plugin_interface") or
extension.get_plugin_interface() is None):
return True
for plugin in self.plugins.values():
if isinstance(plugin, extension.get_plugin_interface()):
return True
LOG.warn(_("Loaded plugins do not implement extension %s interface"),
extension.get_alias())
return False
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = cls(get_extensions_path(),
QuantumManager.get_service_plugins())
return cls._instance
class RequestExtension(object):
"""Extend requests and responses of core Quantum OpenStack API controllers.
Provide a way to add data to responses and handle custom request data
that is sent to core Quantum OpenStack API controllers.
"""
def __init__(self, method, url_route, handler):
self.url_route = url_route
self.handler = handler
self.conditions = dict(method=[method])
self.key = "%s-%s" % (method, url_route)
class ActionExtension(object):
"""Add custom actions to core Quantum OpenStack API controllers."""
def __init__(self, collection, action_name, handler):
self.collection = collection
self.action_name = action_name
self.handler = handler
class ResourceExtension(object):
"""Add top level resources to the OpenStack API in Quantum."""
def __init__(self, collection, controller, parent=None, path_prefix="",
collection_actions={}, member_actions={}, attr_map={}):
self.collection = collection
self.controller = controller
self.parent = parent
self.collection_actions = collection_actions
self.member_actions = member_actions
self.path_prefix = path_prefix
self.attr_map = attr_map
# Returns the extention paths from a config entry and the __path__
# of quantum.extensions
def get_extensions_path():
paths = ':'.join(quantum.extensions.__path__)
if cfg.CONF.api_extensions_path:
paths = ':'.join([cfg.CONF.api_extensions_path, paths])
return paths
| apache-2.0 | 665,210,720,194,459,000 | 35.581967 | 79 | 0.585122 | false |
ddico/odoo | addons/l10n_id_efaktur/models/account_move.py | 1 | 16354 | # -*- encoding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
import re
from odoo import api, fields, models, _
from odoo.exceptions import UserError, ValidationError
FK_HEAD_LIST = ['FK', 'KD_JENIS_TRANSAKSI', 'FG_PENGGANTI', 'NOMOR_FAKTUR', 'MASA_PAJAK', 'TAHUN_PAJAK', 'TANGGAL_FAKTUR', 'NPWP', 'NAMA', 'ALAMAT_LENGKAP', 'JUMLAH_DPP', 'JUMLAH_PPN', 'JUMLAH_PPNBM', 'ID_KETERANGAN_TAMBAHAN', 'FG_UANG_MUKA', 'UANG_MUKA_DPP', 'UANG_MUKA_PPN', 'UANG_MUKA_PPNBM', 'REFERENSI']
LT_HEAD_LIST = ['LT', 'NPWP', 'NAMA', 'JALAN', 'BLOK', 'NOMOR', 'RT', 'RW', 'KECAMATAN', 'KELURAHAN', 'KABUPATEN', 'PROPINSI', 'KODE_POS', 'NOMOR_TELEPON']
OF_HEAD_LIST = ['OF', 'KODE_OBJEK', 'NAMA', 'HARGA_SATUAN', 'JUMLAH_BARANG', 'HARGA_TOTAL', 'DISKON', 'DPP', 'PPN', 'TARIF_PPNBM', 'PPNBM']
def _csv_row(data, delimiter=',', quote='"'):
return quote + (quote + delimiter + quote).join([str(x).replace(quote, '\\' + quote) for x in data]) + quote + '\n'
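# For instance, _csv_row(['FK', '010', '0'], ',') returns '"FK","010","0"\n':
# each field is quoted, fields are joined with the delimiter, and embedded
# double quotes are escaped with a backslash.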
class AccountMove(models.Model):
_inherit = "account.move"
l10n_id_tax_number = fields.Char(string="Tax Number", copy=False)
l10n_id_replace_invoice_id = fields.Many2one('account.move', string="Replace Invoice", domain="['|', '&', '&', ('state', '=', 'posted'), ('partner_id', '=', partner_id), ('reversal_move_id', '!=', False), ('state', '=', 'cancel')]", copy=False)
l10n_id_attachment_id = fields.Many2one('ir.attachment', readonly=True, copy=False)
l10n_id_csv_created = fields.Boolean('CSV Created', compute='_compute_csv_created', copy=False)
l10n_id_kode_transaksi = fields.Selection([
('01', '01 Kepada Pihak yang Bukan Pemungut PPN (Customer Biasa)'),
('02', '02 Kepada Pemungut Bendaharawan (Dinas Kepemerintahan)'),
('03', '03 Kepada Pemungut Selain Bendaharawan (BUMN)'),
('04', '04 DPP Nilai Lain (PPN 1%)'),
('06', '06 Penyerahan Lainnya (Turis Asing)'),
('07', '07 Penyerahan yang PPN-nya Tidak Dipungut (Kawasan Ekonomi Khusus/ Batam)'),
('08', '08 Penyerahan yang PPN-nya Dibebaskan (Impor Barang Tertentu)'),
('09', '09 Penyerahan Aktiva ( Pasal 16D UU PPN )'),
], string='Kode Transaksi', help='Dua digit pertama nomor pajak',
readonly=True, states={'draft': [('readonly', False)]}, copy=False)
l10n_id_need_kode_transaksi = fields.Boolean(compute='_compute_need_kode_transaksi')
@api.onchange('partner_id')
def _onchange_partner_id(self):
self.l10n_id_kode_transaksi = self.partner_id.l10n_id_kode_transaksi
return super(AccountMove, self)._onchange_partner_id()
@api.onchange('l10n_id_tax_number')
def _onchange_l10n_id_tax_number(self):
for record in self:
if record.l10n_id_tax_number and record.type not in self.get_purchase_types():
                raise UserError(_("You can only change the number manually for Vendor Bills and Credit Notes"))
@api.depends('l10n_id_attachment_id')
def _compute_csv_created(self):
for record in self:
record.l10n_id_csv_created = bool(record.l10n_id_attachment_id)
@api.depends('partner_id')
def _compute_need_kode_transaksi(self):
for move in self:
move.l10n_id_need_kode_transaksi = move.partner_id.l10n_id_pkp and not move.l10n_id_tax_number and move.type == 'out_invoice' and move.country_code == 'ID'
@api.constrains('l10n_id_kode_transaksi', 'line_ids')
def _constraint_kode_ppn(self):
ppn_tag = self.env.ref('l10n_id.ppn_tag')
for move in self.filtered(lambda m: m.l10n_id_kode_transaksi != '08'):
if any(ppn_tag.id in line.tax_tag_ids.ids for line in move.line_ids if line.exclude_from_invoice_tab is False) and any(ppn_tag.id not in line.tax_tag_ids.ids for line in move.line_ids if line.exclude_from_invoice_tab is False):
raise UserError(_('Cannot mix VAT subject and Non-VAT subject items in the same invoice with this kode transaksi.'))
for move in self.filtered(lambda m: m.l10n_id_kode_transaksi == '08'):
if any(ppn_tag.id in line.tax_tag_ids.ids for line in move.line_ids if line.exclude_from_invoice_tab is False):
                raise UserError(_('Kode transaksi 08 is only for non VAT subject items.'))
@api.constrains('l10n_id_tax_number')
def _constrains_l10n_id_tax_number(self):
for record in self.filtered('l10n_id_tax_number'):
if record.l10n_id_tax_number != re.sub(r'\D', '', record.l10n_id_tax_number):
record.l10n_id_tax_number = re.sub(r'\D', '', record.l10n_id_tax_number)
if len(record.l10n_id_tax_number) != 16:
raise UserError(_('A tax number should have 16 digits'))
elif record.l10n_id_tax_number[:2] not in dict(self._fields['l10n_id_kode_transaksi'].selection).keys():
raise UserError(_('A tax number must begin by a valid Kode Transaksi'))
elif record.l10n_id_tax_number[2] not in ('0', '1'):
raise UserError(_('The third digit of a tax number must be 0 or 1'))
def post(self):
"""Set E-Faktur number after validation."""
for move in self:
if move.l10n_id_need_kode_transaksi:
if not move.l10n_id_kode_transaksi:
raise ValidationError(_('You need to put a Kode Transaksi for this partner.'))
if move.l10n_id_replace_invoice_id.l10n_id_tax_number:
if not move.l10n_id_replace_invoice_id.l10n_id_attachment_id:
raise ValidationError(_('Replacement invoice only for invoices on which the e-Faktur is generated. '))
rep_efaktur_str = move.l10n_id_replace_invoice_id.l10n_id_tax_number
move.l10n_id_tax_number = '%s1%s' % (move.l10n_id_kode_transaksi, rep_efaktur_str[3:])
else:
efaktur = self.env['l10n_id_efaktur.efaktur.range'].pop_number(move.company_id.id)
if not efaktur:
raise ValidationError(_('There is no Efaktur number available. Please configure the range you get from the government in the e-Faktur menu. '))
move.l10n_id_tax_number = '%s0%013d' % (str(move.l10n_id_kode_transaksi), efaktur)
return super(AccountMove, self).post()
def reset_efaktur(self):
"""Reset E-Faktur, so it can be use for other invoice."""
for move in self:
if move.l10n_id_attachment_id:
raise UserError(_('You have already generated the tax report for this document: %s', move.name))
self.env['l10n_id_efaktur.efaktur.range'].push_number(move.company_id.id, move.l10n_id_tax_number[3:])
move.message_post(
body='e-Faktur Reset: %s ' % (move.l10n_id_tax_number),
subject="Reset Efaktur")
move.l10n_id_tax_number = False
return True
def download_csv(self):
action = {
'type': 'ir.actions.act_url',
'url': "web/content/?model=ir.attachment&id=" + str(self.l10n_id_attachment_id.id) + "&filename_field=name&field=datas&download=true&name=" + self.l10n_id_attachment_id.name,
'target': 'self'
}
return action
def download_efaktur(self):
"""Collect the data and execute function _generate_efaktur."""
for record in self:
if record.state == 'draft':
raise ValidationError(_('Could not download E-faktur in draft state'))
if record.partner_id.l10n_id_pkp and not record.l10n_id_tax_number:
raise ValidationError(_('Connect ') + record.name + _(' with E-faktur to download this report'))
self._generate_efaktur(',')
return self.download_csv()
def _generate_efaktur_invoice(self, delimiter):
"""Generate E-Faktur for customer invoice."""
# Invoice of Customer
company_id = self.company_id
dp_product_id = self.env['ir.config_parameter'].sudo().get_param('sale.default_deposit_product_id')
output_head = '%s%s%s' % (
_csv_row(FK_HEAD_LIST, delimiter),
_csv_row(LT_HEAD_LIST, delimiter),
_csv_row(OF_HEAD_LIST, delimiter),
)
for move in self.filtered(lambda m: m.state == 'posted'):
eTax = move._prepare_etax()
nik = str(move.partner_id.l10n_id_nik) if not move.partner_id.vat else ''
if move.l10n_id_replace_invoice_id:
number_ref = str(move.l10n_id_replace_invoice_id.name) + " replaced by " + str(move.name) + " " + nik
else:
number_ref = str(move.name) + " " + nik
street = ', '.join([x for x in (move.partner_id.street, move.partner_id.street2) if x])
invoice_npwp = '000000000000000'
            if move.partner_id.vat and len(move.partner_id.vat) >= 12:
                invoice_npwp = move.partner_id.vat
            elif (not move.partner_id.vat or len(move.partner_id.vat) < 12) and move.partner_id.l10n_id_nik:
                # No usable NPWP: fall back to the partner's NIK.
                invoice_npwp = move.partner_id.l10n_id_nik
invoice_npwp = invoice_npwp.replace('.', '').replace('-', '')
# Here all fields or columns based on eTax Invoice Third Party
eTax['KD_JENIS_TRANSAKSI'] = move.l10n_id_tax_number[0:2] or 0
eTax['FG_PENGGANTI'] = move.l10n_id_tax_number[2:3] or 0
eTax['NOMOR_FAKTUR'] = move.l10n_id_tax_number[3:] or 0
eTax['MASA_PAJAK'] = move.invoice_date.month
eTax['TAHUN_PAJAK'] = move.invoice_date.year
eTax['TANGGAL_FAKTUR'] = '{0}/{1}/{2}'.format(move.invoice_date.day, move.invoice_date.month, move.invoice_date.year)
eTax['NPWP'] = invoice_npwp
eTax['NAMA'] = move.partner_id.name if eTax['NPWP'] == '000000000000000' else move.partner_id.l10n_id_tax_name or move.partner_id.name
eTax['ALAMAT_LENGKAP'] = move.partner_id.contact_address.replace('\n', '') if eTax['NPWP'] == '000000000000000' else move.partner_id.l10n_id_tax_address or street
eTax['JUMLAH_DPP'] = int(round(move.amount_untaxed, 0)) # currency rounded to the unit
eTax['JUMLAH_PPN'] = int(round(move.amount_tax, 0))
eTax['ID_KETERANGAN_TAMBAHAN'] = '1' if move.l10n_id_kode_transaksi == '07' else ''
eTax['REFERENSI'] = number_ref
lines = move.line_ids.filtered(lambda x: x.product_id.id == int(dp_product_id) and x.price_unit < 0)
eTax['FG_UANG_MUKA'] = 0
eTax['UANG_MUKA_DPP'] = int(abs(sum(lines.mapped('price_subtotal'))))
eTax['UANG_MUKA_PPN'] = int(abs(sum(lines.mapped(lambda l: l.price_total - l.price_subtotal))))
company_npwp = company_id.partner_id.vat or '000000000000000'
fk_values_list = ['FK'] + [eTax[f] for f in FK_HEAD_LIST[1:]]
eTax['JALAN'] = company_id.partner_id.l10n_id_tax_address or company_id.partner_id.street
eTax['NOMOR_TELEPON'] = company_id.phone or ''
lt_values_list = ['FAPR', company_npwp, company_id.name] + [eTax[f] for f in LT_HEAD_LIST[3:]]
# HOW TO ADD 2 line to 1 line for free product
free, sales = [], []
for line in move.line_ids.filtered(lambda l: not l.exclude_from_invoice_tab):
# *invoice_line_unit_price is price unit use for harga_satuan's column
# *invoice_line_quantity is quantity use for jumlah_barang's column
# *invoice_line_total_price is bruto price use for harga_total's column
# *invoice_line_discount_m2m is discount price use for diskon's column
# *line.price_subtotal is subtotal price use for dpp's column
# *tax_line or free_tax_line is tax price use for ppn's column
free_tax_line = tax_line = bruto_total = total_discount = 0.0
for tax in line.tax_ids:
if tax.amount > 0:
tax_line += line.price_subtotal * (tax.amount / 100.0)
invoice_line_unit_price = line.price_unit
invoice_line_total_price = invoice_line_unit_price * line.quantity
line_dict = {
'KODE_OBJEK': line.product_id.default_code or '',
'NAMA': line.product_id.name or '',
'HARGA_SATUAN': int(invoice_line_unit_price),
'JUMLAH_BARANG': line.quantity,
'HARGA_TOTAL': int(invoice_line_total_price),
'DPP': int(line.price_subtotal),
'product_id': line.product_id.id,
}
if line.price_subtotal < 0:
for tax in line.tax_ids:
free_tax_line += (line.price_subtotal * (tax.amount / 100.0)) * -1.0
line_dict.update({
'DISKON': int(invoice_line_total_price - line.price_subtotal),
'PPN': int(free_tax_line),
})
free.append(line_dict)
elif line.price_subtotal != 0.0:
invoice_line_discount_m2m = invoice_line_total_price - line.price_subtotal
line_dict.update({
'DISKON': int(invoice_line_discount_m2m),
'PPN': int(tax_line),
})
sales.append(line_dict)
sub_total_before_adjustment = sub_total_ppn_before_adjustment = 0.0
# We are finding the product that has affected
# by free product to adjustment the calculation
# of discount and subtotal.
# - the price total of free product will be
# included as a discount to related of product.
for sale in sales:
for f in free:
if f['product_id'] == sale['product_id']:
sale['DISKON'] = sale['DISKON'] - f['DISKON'] + f['PPN']
sale['DPP'] = sale['DPP'] + f['DPP']
tax_line = 0
for tax in line.tax_ids:
if tax.amount > 0:
tax_line += sale['DPP'] * (tax.amount / 100.0)
sale['PPN'] = int(tax_line)
free.remove(f)
sub_total_before_adjustment += sale['DPP']
sub_total_ppn_before_adjustment += sale['PPN']
bruto_total += sale['DISKON']
total_discount += round(sale['DISKON'], 2)
output_head += _csv_row(fk_values_list, delimiter)
output_head += _csv_row(lt_values_list, delimiter)
for sale in sales:
of_values_list = ['OF'] + [str(sale[f]) for f in OF_HEAD_LIST[1:-2]] + ['0', '0']
output_head += _csv_row(of_values_list, delimiter)
return output_head
def _prepare_etax(self):
# These values are never set
return {'JUMLAH_PPNBM': 0, 'UANG_MUKA_PPNBM': 0, 'BLOK': '', 'NOMOR': '', 'RT': '', 'RW': '', 'KECAMATAN': '', 'KELURAHAN': '', 'KABUPATEN': '', 'PROPINSI': '', 'KODE_POS': '', 'JUMLAH_BARANG': 0, 'TARIF_PPNBM': 0, 'PPNBM': 0}
def _generate_efaktur(self, delimiter):
if self.filtered(lambda x: not x.l10n_id_kode_transaksi):
raise UserError(_('Some documents don\'t have a transaction code'))
if self.filtered(lambda x: x.type != 'out_invoice'):
raise UserError(_('Some documents are not Customer Invoices'))
output_head = self._generate_efaktur_invoice(delimiter)
my_utf8 = output_head.encode("utf-8")
out = base64.b64encode(my_utf8)
attachment = self.env['ir.attachment'].create({
'datas': out,
'name': 'efaktur_%s.csv' % (fields.Datetime.to_string(fields.Datetime.now()).replace(" ", "_")),
'type': 'binary',
})
for record in self:
record.message_post(attachment_ids=[attachment.id])
self.l10n_id_attachment_id = attachment.id
return {
'type': 'ir.actions.client',
'tag': 'reload',
}
| agpl-3.0 | -4,605,886,058,610,855,000 | 52.973597 | 308 | 0.577107 | false |
dbarenas/django-scheduler | agenda_template/agenda_template/schedule/periods.py | 1 | 14782 | from __future__ import unicode_literals
from builtins import range
from builtins import object
import pytz
import datetime
import calendar as standardlib_calendar
from django.conf import settings
from django.utils.encoding import python_2_unicode_compatible
from django.template.defaultfilters import date as date_filter
from django.utils.dates import WEEKDAYS, WEEKDAYS_ABBR
from schedule.conf.settings import SHOW_CANCELLED_OCCURRENCES
from schedule.models import Occurrence
from django.utils import timezone
weekday_names = []
weekday_abbrs = []
if settings.FIRST_DAY_OF_WEEK == 1:
# The calendar week starts on Monday
for i in range(7):
weekday_names.append(WEEKDAYS[i])
weekday_abbrs.append(WEEKDAYS_ABBR[i])
else:
# The calendar week starts on Sunday, not Monday
weekday_names.append(WEEKDAYS[6])
weekday_abbrs.append(WEEKDAYS_ABBR[6])
for i in range(6):
weekday_names.append(WEEKDAYS[i])
weekday_abbrs.append(WEEKDAYS_ABBR[i])
class Period(object):
"""
This class represents a period of time. It can return a set of occurrences
based on its events, and its time period (start and end).
"""
def __init__(self, events, start, end, parent_persisted_occurrences=None,
occurrence_pool=None, tzinfo=pytz.utc):
self.utc_start = self._normalize_timezone_to_utc(start, tzinfo)
self.utc_end = self._normalize_timezone_to_utc(end, tzinfo)
self.events = events
self.tzinfo = self._get_tzinfo(tzinfo)
self.occurrence_pool = occurrence_pool
if parent_persisted_occurrences is not None:
self._persisted_occurrences = parent_persisted_occurrences
def _normalize_timezone_to_utc(self, point_in_time, tzinfo):
if point_in_time.tzinfo is not None:
return point_in_time.astimezone(pytz.utc)
if tzinfo is not None:
return tzinfo.localize(point_in_time).astimezone(pytz.utc)
if settings.USE_TZ:
return pytz.utc.localize(point_in_time)
else:
if timezone.is_aware(point_in_time):
return timezone.make_naive(point_in_time, pytz.utc)
else:
return point_in_time
def __eq__(self, period):
return self.utc_start == period.utc_start and self.utc_end == period.utc_end and self.events == period.events
def __ne__(self, period):
return self.utc_start != period.utc_start or self.utc_end != period.utc_end or self.events != period.events
def _get_tzinfo(self, tzinfo):
return tzinfo if settings.USE_TZ else None
def _get_sorted_occurrences(self):
occurrences = []
if hasattr(self, "occurrence_pool") and self.occurrence_pool is not None:
for occurrence in self.occurrence_pool:
if occurrence.start <= self.utc_end and occurrence.end >= self.utc_start:
occurrences.append(occurrence)
return occurrences
for event in self.events:
event_occurrences = event.get_occurrences(self.start, self.end)
occurrences += event_occurrences
return sorted(occurrences)
def cached_get_sorted_occurrences(self):
if hasattr(self, '_occurrences'):
return self._occurrences
occs = self._get_sorted_occurrences()
self._occurrences = occs
return occs
occurrences = property(cached_get_sorted_occurrences)
def get_persisted_occurrences(self):
        if hasattr(self, '_persisted_occurrences'):
return self._persisted_occurrences
else:
self._persisted_occurrences = Occurrence.objects.filter(event__in=self.events)
return self._persisted_occurrences
def classify_occurrence(self, occurrence):
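        # Classify how the occurrence overlaps this period. Based on the
        # branches below, the returned 'class' codes mean: 1 = starts and ends
        # in this period, 0 = only starts here, 3 = only ends here,
        # 2 = continues straight through.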
if occurrence.cancelled and not SHOW_CANCELLED_OCCURRENCES:
return
if occurrence.start > self.end or occurrence.end < self.start:
return None
started = False
ended = False
if self.utc_start <= occurrence.start < self.utc_end:
started = True
if self.utc_start <= occurrence.end < self.utc_end:
ended = True
if started and ended:
return {'occurrence': occurrence, 'class': 1}
elif started:
return {'occurrence': occurrence, 'class': 0}
elif ended:
return {'occurrence': occurrence, 'class': 3}
# it existed during this period but it didn't begin or end within it
# so it must have just continued
return {'occurrence': occurrence, 'class': 2}
def get_occurrence_partials(self):
occurrence_dicts = []
for occurrence in self.occurrences:
occurrence = self.classify_occurrence(occurrence)
if occurrence:
occurrence_dicts.append(occurrence)
return occurrence_dicts
def get_occurrences(self):
return self.occurrences
def has_occurrences(self):
return any(self.classify_occurrence(o) for o in self.occurrences)
def get_time_slot(self, start, end):
if start >= self.start and end <= self.end:
return Period(self.events, start, end)
return None
def create_sub_period(self, cls, start=None, tzinfo=None):
if tzinfo is None:
tzinfo = self.tzinfo
start = start or self.start
return cls(self.events, start, self.get_persisted_occurrences(), self.occurrences, tzinfo)
def get_periods(self, cls, tzinfo=None):
if tzinfo is None:
tzinfo = self.tzinfo
period = self.create_sub_period(cls)
while period.start < self.end:
yield self.create_sub_period(cls, period.start, tzinfo)
period = next(period)
@property
def start(self):
if self.tzinfo is not None:
return self.utc_start.astimezone(self.tzinfo)
return self.utc_start.replace(tzinfo=None)
@property
def end(self):
if self.tzinfo is not None:
return self.utc_end.astimezone(self.tzinfo)
return self.utc_end.replace(tzinfo=None)
@python_2_unicode_compatible
class Year(Period):
def __init__(self, events, date=None, parent_persisted_occurrences=None, tzinfo=pytz.utc):
self.tzinfo = self._get_tzinfo(tzinfo)
if date is None:
date = timezone.now()
start, end = self._get_year_range(date)
super(Year, self).__init__(events, start, end, parent_persisted_occurrences, tzinfo=tzinfo)
def get_months(self):
return self.get_periods(Month)
def next_year(self):
return Year(self.events, self.end, tzinfo=self.tzinfo)
__next__ = next_year
def prev_year(self):
start = datetime.datetime(self.start.year - 1, self.start.month, self.start.day)
return Year(self.events, start, tzinfo=self.tzinfo)
prev = prev_year
def _get_year_range(self, year):
#If tzinfo is not none get the local start of the year and convert it to utc.
naive_start = datetime.datetime(year.year, datetime.datetime.min.month, datetime.datetime.min.day)
naive_end = datetime.datetime(year.year + 1, datetime.datetime.min.month, datetime.datetime.min.day)
start = naive_start
end = naive_end
if self.tzinfo is not None:
local_start = self.tzinfo.localize(naive_start)
local_end = self.tzinfo.localize(naive_end)
start = local_start.astimezone(pytz.utc)
end = local_end.astimezone(pytz.utc)
return start, end
def __str__(self):
        return str(self.start.year)
@python_2_unicode_compatible
class Month(Period):
"""
The month period has functions for retrieving the week periods within this period
and day periods within the date.
"""
def __init__(self, events, date=None, parent_persisted_occurrences=None,
occurrence_pool=None, tzinfo=pytz.utc):
self.tzinfo = self._get_tzinfo(tzinfo)
if date is None:
date = timezone.now()
start, end = self._get_month_range(date)
super(Month, self).__init__(events, start, end,
parent_persisted_occurrences, occurrence_pool, tzinfo=tzinfo)
def get_weeks(self):
return self.get_periods(Week)
def get_days(self):
return self.get_periods(Day)
def get_day(self, daynumber):
date = self.start
if daynumber > 1:
date += datetime.timedelta(days=daynumber - 1)
return self.create_sub_period(Day, date)
def next_month(self):
return Month(self.events, self.end, tzinfo=self.tzinfo)
__next__ = next_month
def prev_month(self):
start = (self.start - datetime.timedelta(days=1)).replace(day=1, tzinfo=self.tzinfo)
return Month(self.events, start, tzinfo=self.tzinfo)
prev = prev_month
def current_year(self):
return Year(self.events, self.start, tzinfo=self.tzinfo)
def prev_year(self):
start = datetime.datetime.min.replace(year=self.start.year - 1, tzinfo=self.tzinfo)
return Year(self.events, start, tzinfo=self.tzinfo)
def next_year(self):
start = datetime.datetime.min.replace(year=self.start.year + 1, tzinfo=self.tzinfo)
return Year(self.events, start, tzinfo=self.tzinfo)
def _get_month_range(self, month):
year = month.year
month = month.month
#If tzinfo is not none get the local start of the month and convert it to utc.
naive_start = datetime.datetime.min.replace(year=year, month=month)
if month == 12:
naive_end = datetime.datetime.min.replace(month=1, year=year + 1, day=1)
else:
naive_end = datetime.datetime.min.replace(month=month + 1, year=year, day=1)
start = naive_start
end = naive_end
if self.tzinfo is not None:
local_start = self.tzinfo.localize(naive_start)
local_end = self.tzinfo.localize(naive_end)
start = local_start.astimezone(pytz.utc)
end = local_end.astimezone(pytz.utc)
return start, end
def __str__(self):
return self.name()
def name(self):
return standardlib_calendar.month_name[self.start.month]
def year(self):
return self.start.year
@python_2_unicode_compatible
class Week(Period):
"""
The Week period that has functions for retrieving Day periods within it
"""
def __init__(self, events, date=None, parent_persisted_occurrences=None,
occurrence_pool=None, tzinfo=pytz.utc):
self.tzinfo = self._get_tzinfo(tzinfo)
if date is None:
date = timezone.now()
start, end = self._get_week_range(date)
super(Week, self).__init__(events, start, end,
parent_persisted_occurrences, occurrence_pool, tzinfo=tzinfo)
def prev_week(self):
return Week(self.events, self.start - datetime.timedelta(days=7), tzinfo=self.tzinfo)
prev = prev_week
def next_week(self):
return Week(self.events, self.end, tzinfo=self.tzinfo)
__next__ = next_week
def current_month(self):
return Month(self.events, self.start, tzinfo=self.tzinfo)
def current_year(self):
return Year(self.events, self.start, tzinfo=self.tzinfo)
def get_days(self):
return self.get_periods(Day)
def _get_week_range(self, week):
if isinstance(week, datetime.datetime):
week = week.date()
# Adjust the start datetime to midnight of the week datetime
naive_start = datetime.datetime.combine(week, datetime.time.min)
# Adjust the start datetime to Monday or Sunday of the current week
if settings.FIRST_DAY_OF_WEEK == 1:
# The week begins on Monday
sub_days = naive_start.isoweekday() - 1
else:
# The week begins on Sunday
sub_days = naive_start.isoweekday()
if sub_days == 7:
sub_days = 0
if sub_days > 0:
naive_start = naive_start - datetime.timedelta(days=sub_days)
naive_end = naive_start + datetime.timedelta(days=7)
if self.tzinfo is not None:
local_start = self.tzinfo.localize(naive_start)
local_end = self.tzinfo.localize(naive_end)
start = local_start.astimezone(pytz.utc)
end = local_end.astimezone(pytz.utc)
else:
start = naive_start
end = naive_end
return start, end
def __str__(self):
date_format = 'l, %s' % settings.DATE_FORMAT
return ugettext('Week: %(start)s-%(end)s') % {
'start': date_filter(self.start, date_format),
'end': date_filter(self.end, date_format),
}
@python_2_unicode_compatible
class Day(Period):
def __init__(self, events, date=None, parent_persisted_occurrences=None,
occurrence_pool=None, tzinfo=pytz.utc):
self.tzinfo = self._get_tzinfo(tzinfo)
if date is None:
date = timezone.now()
start, end = self._get_day_range(date)
super(Day, self).__init__(events, start, end,
parent_persisted_occurrences, occurrence_pool, tzinfo=tzinfo)
def _get_day_range(self, date):
if isinstance(date, datetime.datetime):
date = date.date()
naive_start = datetime.datetime.combine(date, datetime.time.min)
naive_end = datetime.datetime.combine(date + datetime.timedelta(days=1), datetime.time.min)
if self.tzinfo is not None:
local_start = self.tzinfo.localize(naive_start)
local_end = self.tzinfo.localize(naive_end)
start = local_start.astimezone(pytz.utc)
end = local_end.astimezone(pytz.utc)
else:
start = naive_start
end = naive_end
return start, end
def __str__(self):
date_format = 'l, %s' % settings.DATE_FORMAT
return ugettext('Day: %(start)s-%(end)s') % {
'start': date_filter(self.start, date_format),
'end': date_filter(self.end, date_format),
}
def prev_day(self):
return Day(self.events, self.start - datetime.timedelta(days=1), tzinfo=self.tzinfo)
prev = prev_day
def next_day(self):
return Day(self.events, self.end, tzinfo=self.tzinfo)
__next__ = next_day
def current_year(self):
return Year(self.events, self.start, tzinfo=self.tzinfo)
def current_month(self):
return Month(self.events, self.start, tzinfo=self.tzinfo)
def current_week(self):
return Week(self.events, self.start, tzinfo=self.tzinfo)
| gpl-2.0 | 3,281,308,981,689,938,000 | 35.862843 | 117 | 0.625085 | false |
arruda/presente_14 | presente_14/chat_parser/html_parser.py | 1 | 4871 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from bs4 import BeautifulSoup
from .chat_objects import *
def get_emails_html(path_to_html='emails.html'):
"returns the html from the emails file"
html = None
with open(path_to_html, 'r') as emails_file:
html = emails_file.read()
return html
def get_h2s_positions(html):
"return a list of all the index of H2 in the given html"
import re
starts = [match.start() for match in re.finditer(re.escape('<h2>'), html)]
return starts
def get_h3s_positions(html):
"return a list of all the index of H3 in the given html"
import re
starts = [match.start() for match in re.finditer(re.escape('<h3>'), html)]
return starts
def validate_conversation_group_html(html):
parsed_html = BeautifulSoup(html)
h2 = parsed_html.find('h2')
return 'Bate-papo' in h2.get_text()
def get_conversations_groups_html(html):
"returns a list of string that represent each conversations group of this html"
h2s_indexes = get_h2s_positions(html)
conversations_groups = []
last_h2_index = h2s_indexes[0]
for h2_index in h2s_indexes[1:]:
conversation_group_html = html[last_h2_index:h2_index]
if(validate_conversation_group_html(conversation_group_html)):
conversations_groups.append(conversation_group_html)
last_h2_index = h2_index
#: add the last one
conversation_group_html = html[last_h2_index:]
if(validate_conversation_group_html(conversation_group_html)):
conversations_groups.append(conversation_group_html)
return conversations_groups
def get_conversations_html(html):
"returns a list of string that represent each conversation of this html"
h3s_indexes = get_h3s_positions(html)
conversations = []
last_h3_index = h3s_indexes[0]
if len(h3s_indexes) > 1:
for h3_index in h3s_indexes[1:]:
conversation_html = html[last_h3_index:h3_index]
conversations.append(conversation_html)
last_h3_index = h3_index
#: add the last one
conversation_html = html[last_h3_index:]
conversations.append(conversation_html)
else:
conversation_html = html[last_h3_index:]
conversations.append(conversation_html)
return conversations
def get_messages(conversation_html):
"return the list of messages in a html"
parsed_html = BeautifulSoup(conversation_html)
msgs = []
span = parsed_html.find('span')
while span is not None:
msg, next_span = message_and_next_span_from_html(span)
msgs.append(msg)
span = next_span
return msgs
def message_and_next_span_from_html(span_html):
"return the Message object for this html and also the next span html"
author_span = span_html.findNext('span', attrs={'style': 'font-weight:bold'})
author = author_span.get_text().replace('eu', 'felipe').capitalize()
msg = span_html.get_text()
msg = remove_author_from_message(msg)
return Message(author, msg), author_span.findNext('span')
def remove_author_from_message(message_txt):
"removes the author from the message text"
first_ddot = message_txt.find(':')
message_txt = message_txt[first_ddot+2:]
return message_txt
def get_conversation_date(conversation_html):
"returns the date of the conversation html"
parsed_html = BeautifulSoup(conversation_html)
date = parsed_html.findAll('p')[1].get_text()
return date
def get_conversation_group(conversations_group_html):
"returns the conversation group of the given html"
conversation_list = []
for conversation_html in get_conversations_html(conversations_group_html):
msgs = get_messages(conversation_html)
date = get_conversation_date(conversation_html)
# if "Mar 21 2012 23:23:21" in date:
# import pdb;pdb.set_trace()
# print "a"
# 2012-03-21 23:23:21
conversation = Conversation(date, msgs)
conversation_list.append(conversation)
conversation_group = ConversationGroup(conversation_list)
return conversation_group
def perc_done(done, total):
"the percentage done of all the conversations groups"
print "%.f" % (done / total * 100), "%"
def parse_html(path_to_html):
"parse the emails html and return them in python objects"
html = get_emails_html(path_to_html)
conversations_group_list_html = get_conversations_groups_html(html)
total = len(conversations_group_list_html)
done = 0.0
conversations_group_list = []
for conversations_group_html in conversations_group_list_html:
perc_done(done, total)
conversations_group_list.append(get_conversation_group(conversations_group_html))
done = done + 1
perc_done(done, total)
return conversations_group_list
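# Minimal usage sketch (the file name is an assumption):
#   conversation_groups = parse_html('emails.html')
#   print len(conversation_groups), "conversation groups parsed"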
| mit | -3,933,352,032,407,904,000 | 30.425806 | 89 | 0.674605 | false |
GeoMop/GeoMop | src/gm_base/model_data/export_con.py | 1 | 4070 | """Module for exporting the data structure to .con format.
.. codeauthor:: Tomas Krizek <[email protected]>
"""
INDENTATION = ' '
class Exporter:
"""Exporter from data structure to con files."""
def __init__(self):
"""Initialize the class."""
self.lines = ['']
self.indent_level = 0
def export_con(self, root):
"""Create .con text from a root data node.
:param DataNode root: the root of the data structure
:return: text representation of the structure in .con format
:rtype: str
"""
self.lines = ['']
self.indent_level = 0
self._create_node(root)
return '\n'.join(self.lines)
def _print_line(self, text):
"""Append the text as indented line to the buffer.
:param str text: a line of text without the EOL symbol
"""
self.lines.append(self.indent_level * INDENTATION + text)
def _print(self, text):
"""Append the text to the last line."""
self.lines[-1] = self.lines[-1] + text
def _print_new_line(self, indent_change=0):
"""Append new line with the appropriate indentation.
:param int indent_change: +1, 0 or -1 to increase, keep or decrease indentation
"""
self.indent_level += indent_change
self.lines.append(self.indent_level * INDENTATION)
def _create_mapping_node(self, node):
"""Create a mapping node."""
self._print('{')
self._print_new_line(1)
# check for type
if node.type is not None:
self._print('TYPE = "{type}",'.format(type=node.type.value))
self._print_new_line()
# print all keys
for child in node.children:
self._print(child.key.value + ' = ')
self._create_node(child)
self._print(',')
self._print_new_line()
self.lines.pop() # remove last (extra) line
self.lines[-1] = self.lines[-1][:-1] # remove , from end of line
self._print_new_line(-1)
self._print('}')
def _create_node(self, node):
"""Create a node based on its type.
:param DataNode node: node to be create in text
"""
if node.ref is not None:
path = node.ref.absolute_path
self._create_reference(path)
else:
if node.implementation == node.Implementation.mapping:
self._create_mapping_node(node)
elif node.implementation == node.Implementation.scalar:
self._create_scalar_node(node)
elif node.implementation == node.Implementation.sequence:
self._create_sequence_node(node)
def _create_scalar_node(self, node):
"""Create a text representation of scalar node.
:param DataNode node: node
"""
if isinstance(node.value, bool):
self._print('true' if node.value else 'false')
elif isinstance(node.value, int):
self._print(str(node.value))
elif isinstance(node.value, float):
self._print(str(node.value))
else:
self._print('"' + node.value + '"')
def _create_sequence_node(self, node):
"""Create a text representation of sequence node.
:param DataNode node: node
"""
self._print('[')
self._print_new_line(1)
# print all keys
for child in node.children:
self._create_node(child)
self._print(',')
self._print_new_line()
self.lines.pop() # remove last (extra) line
self.lines[-1] = self.lines[-1][:-1] # remove , from end of line
self._print_new_line(-1)
self._print(']')
def _create_reference(self, path):
"""Create a reference node with the given absolute path."""
self._print('{')
self._print_new_line(1)
self._print('REF = "{ref}"'.format(ref=path))
self._print_new_line(-1)
self._print('}')
_exporter = Exporter()
export_con = _exporter.export_con
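# Minimal usage sketch (assuming `root` is the root DataNode of the structure):
#   text = export_con(root)
#   with open('output.con', 'w') as con_file:
#       con_file.write(text)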
| gpl-3.0 | -7,520,589,368,520,169,000 | 29.601504 | 87 | 0.561425 | false |
emencia/emencia-django-forum | forum/settings.py | 1 | 2607 | """
Default Forum settings to import/define in your project settings
"""
# Categories pagination in 'Category index' (=forum index) view
FORUM_CATEGORY_INDEX_PAGINATE = 6
# Threads pagination in 'Last threads' view
FORUM_LAST_THREAD_PAGINATE = 15
# Threads pagination in 'Category detail' view
FORUM_CATEGORY_THREAD_PAGINATE = 15
# Messages pagination in 'Thread detail' view
FORUM_THREAD_DETAIL_PAGINATE = 10
# If True message owner can edit its text, else only admin/moderate
FORUM_OWNER_MESSAGE_CAN_EDIT = True
# If True threadwatch checkbox is checked in thread create and post create forms
FORUM_DEFAULT_THREADWATCH_CHECKBOX = False
# Receiver function for signal when a new Post is created
FORUM_NEW_POST_SIGNAL = 'forum.signals.new_message_posted_receiver'
# Specific email sender address (may be None); used in the default new Post signal receiver
FORUM_EMAIL_SENDER = None
# Add new specific "rstview" parser settings for the Forum app. If you have other apps
# that define parser settings, this can lead to overwrite problems. In that
# case, just define all parser settings in 'RSTVIEW_PARSER_FILTER_SETTINGS' in
# the same settings file.
# WARNING: This should be removed; it must not be a default setting coming from forum.
# Add it as a note in the markup install doc.
RSTVIEW_PARSER_FILTER_SETTINGS = {
'forum':{
'initial_header_level': 5,
'file_insertion_enabled': False,
'raw_enabled': False,
'footnote_references': 'superscript',
'doctitle_xform': False,
},
}
#
# Optionnal text markup settings
#
# Field helper for text in forms
FORUM_TEXT_FIELD_HELPER_PATH = None # Default, just a CharField
#FORUM_TEXT_FIELD_HELPER_PATH = "forum.markup.get_text_field" # Use DjangoCodeMirror
# Validator helper for Post.text in forms
FORUM_TEXT_VALIDATOR_HELPER_PATH = None # Default, no markup validation
#FORUM_TEXT_VALIDATOR_HELPER_PATH = "forum.markup.clean_restructuredtext" # Validation for RST syntax (with Rstview)
# Text markup renderer
FORUM_TEXT_MARKUP_RENDER_TEMPLATE = None # Default, just a CharField
#FORUM_TEXT_MARKUP_RENDER_TEMPLATE = "forum/markup/_text_markup_render.html" # Use Rstview renderer
# Template to init some Javascript for text in forms
FORUM_TEXT_FIELD_JS_TEMPLATE = None # Default, no JS template
#FORUM_TEXT_FIELD_JS_TEMPLATE = "forum/markup/_text_field_djangocodemirror_js.html" # Use DjangoCodeMirror
# Template to display author infos in thread's post list
FORUM_AUTHOR_VCARD_TEMPLATE = None # Default, only display the author username
#FORUM_AUTHOR_VCARD_TEMPLATE = "forum/author/_vcard.html" # Use Gravatar
| mit | -1,560,000,287,349,011,200 | 39.734375 | 116 | 0.754507 | false |
davy39/eric | Graphics/Ui_UMLSceneSizeDialog.py | 1 | 2749 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './Graphics/UMLSceneSizeDialog.ui'
#
# Created: Tue Nov 18 17:53:58 2014
# by: PyQt5 UI code generator 5.3.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_UMLSceneSizeDialog(object):
def setupUi(self, UMLSceneSizeDialog):
UMLSceneSizeDialog.setObjectName("UMLSceneSizeDialog")
UMLSceneSizeDialog.resize(314, 103)
UMLSceneSizeDialog.setSizeGripEnabled(True)
self.gridlayout = QtWidgets.QGridLayout(UMLSceneSizeDialog)
self.gridlayout.setObjectName("gridlayout")
self.buttonBox = QtWidgets.QDialogButtonBox(UMLSceneSizeDialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.gridlayout.addWidget(self.buttonBox, 2, 0, 1, 2)
self.textLabel2 = QtWidgets.QLabel(UMLSceneSizeDialog)
self.textLabel2.setObjectName("textLabel2")
self.gridlayout.addWidget(self.textLabel2, 1, 0, 1, 1)
self.textLabel1 = QtWidgets.QLabel(UMLSceneSizeDialog)
self.textLabel1.setObjectName("textLabel1")
self.gridlayout.addWidget(self.textLabel1, 0, 0, 1, 1)
self.heightSpinBox = QtWidgets.QSpinBox(UMLSceneSizeDialog)
self.heightSpinBox.setMinimum(100)
self.heightSpinBox.setMaximum(100000)
self.heightSpinBox.setObjectName("heightSpinBox")
self.gridlayout.addWidget(self.heightSpinBox, 1, 1, 1, 1)
self.widthSpinBox = QtWidgets.QSpinBox(UMLSceneSizeDialog)
self.widthSpinBox.setMinimum(100)
self.widthSpinBox.setMaximum(100000)
self.widthSpinBox.setObjectName("widthSpinBox")
self.gridlayout.addWidget(self.widthSpinBox, 0, 1, 1, 1)
self.retranslateUi(UMLSceneSizeDialog)
self.buttonBox.accepted.connect(UMLSceneSizeDialog.accept)
self.buttonBox.rejected.connect(UMLSceneSizeDialog.reject)
QtCore.QMetaObject.connectSlotsByName(UMLSceneSizeDialog)
def retranslateUi(self, UMLSceneSizeDialog):
_translate = QtCore.QCoreApplication.translate
UMLSceneSizeDialog.setWindowTitle(_translate("UMLSceneSizeDialog", "Set Size"))
self.textLabel2.setText(_translate("UMLSceneSizeDialog", "Height (in pixels):"))
self.textLabel1.setText(_translate("UMLSceneSizeDialog", "Width (in pixels):"))
self.heightSpinBox.setToolTip(_translate("UMLSceneSizeDialog", "Select the height of the diagram"))
self.widthSpinBox.setToolTip(_translate("UMLSceneSizeDialog", "Select the width of the diagram"))
| gpl-3.0 | -7,273,575,584,148,077,000 | 50.867925 | 107 | 0.731539 | false |
google/clif | clif/testing/derived_in_other_header/python/shared_unique_interop_test.py | 1 | 7351 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
from absl.testing import parameterized
from clif.testing.derived_in_other_header.python import concrete_base
from clif.testing.derived_in_other_header.python import concrete_derived
from clif.testing.derived_in_other_header.python import shared_unique_interop
from clif.testing.derived_in_other_header.python import virtual_derived
# TODO: Restore simple import after OSS setup includes pybind11.
# pylint: disable=g-import-not-at-top
try:
from clif.testing.derived_in_other_header.python import concrete_base_pybind11
from clif.testing.derived_in_other_header.python import concrete_derived_pybind11
from clif.testing.derived_in_other_header.python import shared_unique_interop_pybind11
from clif.testing.derived_in_other_header.python import virtual_derived_pybind11
except ImportError:
concrete_base_pybind11 = None
concrete_derived_pybind11 = None
shared_unique_interop_pybind11 = None
virtual_derived_pybind11 = None
# pylint: enable=g-import-not-at-top
CONCRETE_BASE_EMPTY_GET_RESULT = 90146438
CONCRETE_DERIVED_EMPTY_GET_RESULT = 31607978
VIRTUAL_DERIVED_EMPTY_GET_RESULT = 29852452
class ConcreteTest(parameterized.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.concrete_base = concrete_base
cls.concrete_derived = concrete_derived
cls.shared_unique_interop = shared_unique_interop
def testBaseAndDerived(self):
cbe = self.concrete_base.ConcreteBaseEmpty()
self.assertEqual(cbe.Get(), CONCRETE_BASE_EMPTY_GET_RESULT)
cde = self.concrete_derived.ConcreteDerivedEmpty()
self.assertEqual(cde.Get(), CONCRETE_DERIVED_EMPTY_GET_RESULT)
self.assertEqual(
cde.BaseGet(cbe),
CONCRETE_BASE_EMPTY_GET_RESULT + CONCRETE_DERIVED_EMPTY_GET_RESULT)
self.assertEqual(
cde.BaseGet(cde),
CONCRETE_BASE_EMPTY_GET_RESULT + CONCRETE_DERIVED_EMPTY_GET_RESULT)
@parameterized.named_parameters(
('DefaultDeleter', False),
('CustomDeleter', True))
def testUnableToDisownOriginalShared(self, use_custom_deleter):
cbe = self.shared_unique_interop.make_shared_concrete_derived_empty_up_cast(
use_custom_deleter)
with self.assertRaises(ValueError):
self.shared_unique_interop.pass_unique_concrete_base_empty(cbe)
def testPassUniqueConcreteBaseEmpty(self):
# b/175568410
cbe = (
self.shared_unique_interop.make_unique_concrete_derived_empty_up_cast())
self.assertEqual(cbe.Get(), CONCRETE_BASE_EMPTY_GET_RESULT)
i = self.shared_unique_interop.pass_unique_concrete_base_empty(cbe)
self.assertEqual(i, CONCRETE_BASE_EMPTY_GET_RESULT)
with self.assertRaises(ValueError): # Disowned.
cbe.Get()
def testOriginalUniqueNotDisownedByShared(self):
# b/175568410
cbe = (
self.shared_unique_interop.make_unique_concrete_derived_empty_up_cast())
i = self.shared_unique_interop.pass_shared_concrete_base_empty(cbe)
self.assertEqual(i, CONCRETE_BASE_EMPTY_GET_RESULT)
self.assertEqual(cbe.Get(), CONCRETE_BASE_EMPTY_GET_RESULT)
i = self.shared_unique_interop.pass_unique_concrete_base_empty(cbe)
self.assertEqual(i, CONCRETE_BASE_EMPTY_GET_RESULT)
with self.assertRaises(ValueError): # Disowned.
cbe.Get()
@parameterized.named_parameters(
('DefaultDeleter', False),
('CustomDeleter', True))
def testPassSharedConcreteBaseEmpty(self, use_custom_deleter):
cbe = self.shared_unique_interop.make_shared_concrete_derived_empty_up_cast(
use_custom_deleter)
self.assertEqual(cbe.Get(), CONCRETE_BASE_EMPTY_GET_RESULT)
i = self.shared_unique_interop.pass_shared_concrete_base_empty(cbe)
self.assertEqual(i, CONCRETE_BASE_EMPTY_GET_RESULT)
self.assertEqual(cbe.Get(), CONCRETE_BASE_EMPTY_GET_RESULT)
@absltest.skipIf(not concrete_base_pybind11, 'Failed to import pybind11 module')
class ConcretePybind11Test(ConcreteTest):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.concrete_base = concrete_base_pybind11
cls.concrete_derived = concrete_derived_pybind11
cls.shared_unique_interop = shared_unique_interop_pybind11
class VirtualTest(parameterized.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.shared_unique_interop = shared_unique_interop
cls.virtual_derived = virtual_derived
def testBaseAndDerived(self):
vde = self.virtual_derived.VirtualDerivedEmpty()
self.assertEqual(vde.Get(), VIRTUAL_DERIVED_EMPTY_GET_RESULT)
self.assertEqual(vde.BaseGet(vde), 2 * VIRTUAL_DERIVED_EMPTY_GET_RESULT)
@parameterized.named_parameters(
('DefaultDeleter', False),
('CustomDeleter', True))
def testUnableToDisownOriginalShared(self, use_custom_deleter):
vbe = self.shared_unique_interop.make_shared_virtual_derived_empty_up_cast(
use_custom_deleter)
with self.assertRaises(ValueError):
self.shared_unique_interop.pass_unique_virtual_base_empty(vbe)
def testPassUniqueVirtualBaseEmpty(self):
vbe = self.shared_unique_interop.make_unique_virtual_derived_empty_up_cast()
self.assertEqual(vbe.Get(), VIRTUAL_DERIVED_EMPTY_GET_RESULT)
i = self.shared_unique_interop.pass_unique_virtual_base_empty(vbe)
self.assertEqual(i, VIRTUAL_DERIVED_EMPTY_GET_RESULT)
with self.assertRaises(ValueError): # Disowned.
vbe.Get()
def testOriginalUniqueNotDisownedByShared(self):
vbe = self.shared_unique_interop.make_unique_virtual_derived_empty_up_cast()
i = self.shared_unique_interop.pass_shared_virtual_base_empty(vbe)
self.assertEqual(i, VIRTUAL_DERIVED_EMPTY_GET_RESULT)
self.assertEqual(vbe.Get(), VIRTUAL_DERIVED_EMPTY_GET_RESULT)
i = self.shared_unique_interop.pass_unique_virtual_base_empty(vbe)
self.assertEqual(i, VIRTUAL_DERIVED_EMPTY_GET_RESULT)
with self.assertRaises(ValueError): # Disowned.
vbe.Get()
@parameterized.named_parameters(
('DefaultDeleter', False),
('CustomDeleter', True))
def testPassSharedVirtualBaseEmpty(self, use_custom_deleter):
vbe = self.shared_unique_interop.make_shared_virtual_derived_empty_up_cast(
use_custom_deleter)
self.assertEqual(vbe.Get(), VIRTUAL_DERIVED_EMPTY_GET_RESULT)
i = self.shared_unique_interop.pass_shared_virtual_base_empty(vbe)
self.assertEqual(i, VIRTUAL_DERIVED_EMPTY_GET_RESULT)
self.assertEqual(vbe.Get(), VIRTUAL_DERIVED_EMPTY_GET_RESULT)
@absltest.skipIf(not virtual_derived_pybind11,
'Failed to import pybind11 module')
class VirtualPybind11Test(VirtualTest):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.shared_unique_interop = shared_unique_interop_pybind11
cls.virtual_derived = virtual_derived_pybind11
if __name__ == '__main__':
absltest.main()
| apache-2.0 | -8,905,060,718,889,265,000 | 39.61326 | 88 | 0.744389 | false |
mozilla/mozilla-ignite | apps/challenges/migrations/0017_auto__add_field_phase_start_date__add_field_phase_end_date.py | 1 | 13053 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Phase.start_date'
db.add_column('challenges_phase', 'start_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2011, 12, 13, 17, 6, 37, 831418)), keep_default=False)
# Adding field 'Phase.end_date'
db.add_column('challenges_phase', 'end_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2012, 6, 13, 17, 6, 37, 831477)), keep_default=False)
def backwards(self, orm):
# Deleting field 'Phase.start_date'
db.delete_column('challenges_phase', 'start_date')
# Deleting field 'Phase.end_date'
db.delete_column('challenges_phase', 'end_date')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'challenges.category': {
'Meta': {'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '60', 'db_index': 'True'})
},
'challenges.challenge': {
'Meta': {'object_name': 'Challenge'},
'allow_voting': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {}),
'end_date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'moderate': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['projects.Project']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '60', 'db_index': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'summary': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'})
},
'challenges.externallink': {
'Meta': {'object_name': 'ExternalLink'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'submission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['challenges.Submission']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '255'})
},
'challenges.phase': {
'Meta': {'ordering': "('order',)", 'unique_together': "(('challenge', 'name'),)", 'object_name': 'Phase'},
'challenge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'phases'", 'to': "orm['challenges.Challenge']"}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 6, 13, 17, 6, 37, 831477)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2011, 12, 13, 17, 6, 37, 831418)'})
},
'challenges.submission': {
'Meta': {'ordering': "['-id']", 'object_name': 'Submission'},
'brief_description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['challenges.Category']", 'null': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['users.Profile']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2011, 12, 14, 1, 6, 37, 834376)'}),
'description': ('django.db.models.fields.TextField', [], {}),
'flagged_offensive': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'flagged_offensive_reason': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_live': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_winner': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'phase': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['challenges.Phase']"}),
'sketh_note': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'projects.project': {
'Meta': {'object_name': 'Project'},
'allow_participation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'allow_sub_projects': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'featured_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'projects_following'", 'symmetrical': 'False', 'to': "orm['users.Profile']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'long_description': ('django.db.models.fields.TextField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'parent_project_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'sub_project_label': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'team_members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['users.Profile']", 'symmetrical': 'False'}),
'topics': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['topics.Topic']", 'symmetrical': 'False'})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
},
'topics.topic': {
'Meta': {'object_name': 'Topic'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'draft': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'long_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
},
'users.profile': {
'Meta': {'object_name': 'Profile'},
'avatar': ('django.db.models.fields.files.ImageField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'bio': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'featured_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'})
}
}
complete_apps = ['challenges']
| bsd-3-clause | -5,817,836,040,785,347,000 | 75.782353 | 185 | 0.553742 | false |
akellne/toolshed | plugins/ifi.py | 1 | 4495 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import datetime
import urllib2
import re
import json
from base import Plugin
#URL to the ifi news ics file
URL = "http://webhelper.informatik.uni-goettingen.de/editor/ical/ifinews.ics"
#dateformat used in ics files (date with and without time)
ICS_UTC="%Y%m%dT%H%M%SZ"
ICS_DATE="%Y%m%d"
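#e.g. "20120321T232321Z" matches ICS_UTC and "20120321" matches ICS_DATE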
#hours that need to be shifted to get correct times from the ics file
TIME_SHIFT = datetime.timedelta(hours=2)
class IfINews(Plugin):
"""
class to parse the ics calendar of the IfI webpage
"""
NAME = "IfI News"
AUTHOR = "[email protected]"
VERSION = (0, 0, 1)
ENABLED = True
HELP = "!ifi shows the cureent ifi news"
CHANNELS = []
def __init__(
self, ircbot, cache_time=datetime.timedelta(hours=1),
random_message=[None, None]
):
Plugin.__init__(self, ircbot, cache_time, random_message)
def on_privmsg(self, msg, *params):
Plugin.on_privmsg(self, msg, *params)
if not self.is_in_channel(params[0]):
#plugin not available in the channel => return
return
if msg == "!ifi":
self.ircbot.switch_personality(nick="chiefsec")
#get data from cache
reload_data, self.data = self.load_cache()
if reload_data:
#reload the data, if too old
self.data = self._get_news()
self.save_cache(data=self.data)
else:
self.data = self.data.encode("utf-8")
message = "--- IfI News: ---\n"
if self.data:
message += self.data
else:
message += "there are currently no news!"
#finally, send the message with the
self.ircbot.privmsg(params[0], message)
self.ircbot.reset_personality()
def _get_news(self):
"""
load ifi news from ifi webpage's ics file
"""
#load url and parse it with simple regex
f = urllib2.urlopen(URL)
ics = f.read()
#parse ics data
li = []
for res in re.compile(
r'BEGIN:VEVENT(.*?)END:VEVENT', re.I|re.S
).findall(ics):
#parse every calendar item found
item = {}
for line in res.split("\n"):
if line.strip():
#replace stuff for all day events that use another format
for x in ("DTSTART", "DTEND"):
line = line.replace(
"%s;VALUE=DATE-TIME" % x,
"%s" % x
)
k, _, v = line.partition(":")
if k in ("SUMMARY", "DTSTART", "DTEND"):
if k == "SUMMARY":
item[k.lower()] = v.strip()
else:
try:
#try to parse date and time
item[k.lower()] = datetime.datetime.strptime(
v.strip(), ICS_UTC
) + TIME_SHIFT
item["onlydate"] = False
except Exception:
try:
#try to parse only date
item[k.lower()] = datetime.datetime.strptime(
v.strip(), ICS_DATE
)
item["onlydate"] = True
except Exception:
pass
li.append(item)
#build message
tmp = ""
for item in sorted(li, key=lambda item: item["dtstart"]):
if item["dtstart"] >= datetime.datetime.today():
if item["onlydate"] is False:
tmp += "%sh to %sh: %s\n" % (
item["dtstart"].strftime("%a %d. %b %Y, %H:%M"),
item["dtend"].strftime("%H:%M"),
item["summary"].replace("\\", "")
)
else:
tmp += "%sh %s\n" % (
item["dtstart"].strftime("%a %d. %b %Y"),
item["summary"].replace("\\", "")
)
return tmp.decode("latin-1").encode("utf-8")
| gpl-3.0 | -3,895,263,639,573,684,700 | 32.051471 | 81 | 0.441824 | false |
cherry-wb/SideTools | examples/mainwindows/sdi/sdi.py | 1 | 11026 | #!/usr/bin/env python
############################################################################
#
# Copyright (C) 2004-2005 Trolltech AS. All rights reserved.
#
# This file is part of the example classes of the Qt Toolkit.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file LICENSE.GPL included in the packaging of
# self file. Please review the following information to ensure GNU
# General Public Licensing requirements will be met:
# http://www.trolltech.com/products/qt/opensource.html
#
# If you are unsure which license is appropriate for your use, please
# review the following information:
# http://www.trolltech.com/products/qt/licensing.html or contact the
# sales department at [email protected].
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
############################################################################
# This is only needed for Python v2 but is harmless for Python v3.
#import sip
#sip.setapi('QVariant', 2)
from PySide import QtCore, QtGui
import sdi_rc
class MainWindow(QtGui.QMainWindow):
sequenceNumber = 1
windowList = []
def __init__(self, fileName=None):
super(MainWindow, self).__init__()
self.init()
if fileName:
self.loadFile(fileName)
else:
self.setCurrentFile('')
def closeEvent(self, event):
if self.maybeSave():
self.writeSettings()
event.accept()
else:
event.ignore()
def newFile(self):
other = MainWindow()
MainWindow.windowList.append(other)
other.move(self.x() + 40, self.y() + 40)
other.show()
def open(self):
fileName, filtr = QtGui.QFileDialog.getOpenFileName(self)
if fileName:
existing = self.findMainWindow(fileName)
if existing:
existing.show()
existing.raise_()
existing.activateWindow()
return
if self.isUntitled and self.textEdit.document().isEmpty() and not self.isWindowModified():
self.loadFile(fileName)
else:
other = MainWindow(fileName)
if other.isUntitled:
del other
return
MainWindow.windowList.append(other)
other.move(self.x() + 40, self.y() + 40)
other.show()
def save(self):
if self.isUntitled:
return self.saveAs()
else:
return self.saveFile(self.curFile)
def saveAs(self):
fileName, filtr = QtGui.QFileDialog.getSaveFileName(self, "Save As",
self.curFile)
if not fileName:
return False
return self.saveFile(fileName)
def about(self):
QtGui.QMessageBox.about(self, "About SDI",
"The <b>SDI</b> example demonstrates how to write single "
"document interface applications using Qt.")
def documentWasModified(self):
self.setWindowModified(True)
def init(self):
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.isUntitled = True
self.textEdit = QtGui.QTextEdit()
self.setCentralWidget(self.textEdit)
self.createActions()
self.createMenus()
self.createToolBars()
self.createStatusBar()
self.readSettings()
self.textEdit.document().contentsChanged.connect(self.documentWasModified)
def createActions(self):
self.newAct = QtGui.QAction(QtGui.QIcon(':/images/new.png'), "&New",
self, shortcut=QtGui.QKeySequence.New,
statusTip="Create a new file", triggered=self.newFile)
self.openAct = QtGui.QAction(QtGui.QIcon(':/images/open.png'),
"&Open...", self, shortcut=QtGui.QKeySequence.Open,
statusTip="Open an existing file", triggered=self.open)
self.saveAct = QtGui.QAction(QtGui.QIcon(':/images/save.png'),
"&Save", self, shortcut=QtGui.QKeySequence.Save,
statusTip="Save the document to disk", triggered=self.save)
self.saveAsAct = QtGui.QAction("Save &As...", self,
shortcut=QtGui.QKeySequence.SaveAs,
statusTip="Save the document under a new name",
triggered=self.saveAs)
self.closeAct = QtGui.QAction("&Close", self, shortcut="Ctrl+W",
statusTip="Close this window", triggered=self.close)
self.exitAct = QtGui.QAction("E&xit", self, shortcut="Ctrl+Q",
statusTip="Exit the application",
triggered=QtGui.qApp.closeAllWindows)
self.cutAct = QtGui.QAction(QtGui.QIcon(':/images/cut.png'), "Cu&t",
self, enabled=False, shortcut=QtGui.QKeySequence.Cut,
statusTip="Cut the current selection's contents to the clipboard",
triggered=self.textEdit.cut)
self.copyAct = QtGui.QAction(QtGui.QIcon(':/images/copy.png'),
"&Copy", self, enabled=False, shortcut=QtGui.QKeySequence.Copy,
statusTip="Copy the current selection's contents to the clipboard",
triggered=self.textEdit.copy)
self.pasteAct = QtGui.QAction(QtGui.QIcon(':/images/paste.png'),
"&Paste", self, shortcut=QtGui.QKeySequence.Paste,
statusTip="Paste the clipboard's contents into the current selection",
triggered=self.textEdit.paste)
self.aboutAct = QtGui.QAction("&About", self,
statusTip="Show the application's About box",
triggered=self.about)
self.aboutQtAct = QtGui.QAction("About &Qt", self,
statusTip="Show the Qt library's About box",
triggered=QtGui.qApp.aboutQt)
self.textEdit.copyAvailable.connect(self.cutAct.setEnabled)
self.textEdit.copyAvailable.connect(self.copyAct.setEnabled)
def createMenus(self):
self.fileMenu = self.menuBar().addMenu("&File")
self.fileMenu.addAction(self.newAct)
self.fileMenu.addAction(self.openAct)
self.fileMenu.addAction(self.saveAct)
self.fileMenu.addAction(self.saveAsAct)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.closeAct)
self.fileMenu.addAction(self.exitAct)
self.editMenu = self.menuBar().addMenu("&Edit")
self.editMenu.addAction(self.cutAct)
self.editMenu.addAction(self.copyAct)
self.editMenu.addAction(self.pasteAct)
self.menuBar().addSeparator()
self.helpMenu = self.menuBar().addMenu("&Help")
self.helpMenu.addAction(self.aboutAct)
self.helpMenu.addAction(self.aboutQtAct)
def createToolBars(self):
self.fileToolBar = self.addToolBar("File")
self.fileToolBar.addAction(self.newAct)
self.fileToolBar.addAction(self.openAct)
self.fileToolBar.addAction(self.saveAct)
self.editToolBar = self.addToolBar("Edit")
self.editToolBar.addAction(self.cutAct)
self.editToolBar.addAction(self.copyAct)
self.editToolBar.addAction(self.pasteAct)
def createStatusBar(self):
self.statusBar().showMessage("Ready")
def readSettings(self):
settings = QtCore.QSettings('Trolltech', 'SDI Example')
pos = settings.value('pos', QtCore.QPoint(200, 200))
size = settings.value('size', QtCore.QSize(400, 400))
self.move(pos)
self.resize(size)
def writeSettings(self):
settings = QtCore.QSettings('Trolltech', 'SDI Example')
settings.setValue('pos', self.pos())
settings.setValue('size', self.size())
def maybeSave(self):
if self.textEdit.document().isModified():
ret = QtGui.QMessageBox.warning(self, "SDI",
"The document has been modified.\nDo you want to save "
"your changes?",
QtGui.QMessageBox.Save | QtGui.QMessageBox.Discard |
QtGui.QMessageBox.Cancel)
if ret == QtGui.QMessageBox.Save:
return self.save()
elif ret == QtGui.QMessageBox.Cancel:
return False
return True
def loadFile(self, fileName):
file = QtCore.QFile(fileName)
if not file.open( QtCore.QFile.ReadOnly | QtCore.QFile.Text):
QtGui.QMessageBox.warning(self, "SDI",
"Cannot read file %s:\n%s." % (fileName, file.errorString()))
return
instr = QtCore.QTextStream(file)
QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
self.textEdit.setPlainText(instr.readAll())
QtGui.QApplication.restoreOverrideCursor()
self.setCurrentFile(fileName)
self.statusBar().showMessage("File loaded", 2000)
def saveFile(self, fileName):
file = QtCore.QFile(fileName)
if not file.open( QtCore.QFile.WriteOnly | QtCore.QFile.Text):
QtGui.QMessageBox.warning(self, "SDI",
"Cannot write file %s:\n%s." % (fileName, file.errorString()))
return False
outstr = QtCore.QTextStream(file)
QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
outstr << self.textEdit.toPlainText()
QtGui.QApplication.restoreOverrideCursor()
self.setCurrentFile(fileName)
self.statusBar().showMessage("File saved", 2000)
return True
def setCurrentFile(self, fileName):
self.isUntitled = not fileName
if self.isUntitled:
self.curFile = "document%d.txt" % MainWindow.sequenceNumber
MainWindow.sequenceNumber += 1
else:
self.curFile = QtCore.QFileInfo(fileName).canonicalFilePath()
self.textEdit.document().setModified(False)
self.setWindowModified(False)
self.setWindowTitle("%s[*] - SDI" % self.strippedName(self.curFile))
def strippedName(self, fullFileName):
return QtCore.QFileInfo(fullFileName).fileName()
def findMainWindow(self, fileName):
canonicalFilePath = QtCore.QFileInfo(fileName).canonicalFilePath()
for widget in QtGui.qApp.topLevelWidgets():
if isinstance(widget, MainWindow) and widget.curFile == canonicalFilePath:
return widget
return None
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
mainWin = MainWindow()
mainWin.show()
sys.exit(app.exec_())
| apache-2.0 | -1,914,550,978,136,689,000 | 35.376271 | 102 | 0.597497 | false |
theonion/django-bulbs | bulbs/reading_list/mixins.py | 1 | 8196 | """Logic for reading list behavior across all properties."""
from django.conf import settings
from django.core.cache import cache
from elasticsearch import TransportError
from elasticsearch_dsl import filter as es_filter
from bulbs.content.filters import NegateQueryFilter, SponsoredBoost
from bulbs.content.models import Content
from bulbs.content.search import randomize_es
from bulbs.sections.models import Section
from bulbs.special_coverage.models import SpecialCoverage
from .popular import get_popular_ids, popular_content
from .slicers import FirstSlotSlicer
class ReadingListMixin(object):
"""Mixin for Content-based objects to manage reading lists."""
def _get_reading_list_identifier(self):
# 1. Match content to sponsored Special Coverages
results = self.percolate_special_coverage(sponsored_only=True)
if results:
return results[0]
# 2."Popular" i.e., the content is one of the 25 most popular items.
popular_ids = get_popular_ids()
if popular_ids and self.id in popular_ids:
return "popular"
# 3. Any unsponsored special coverage reading list that contains this item.
results = self.percolate_special_coverage()
if results:
return results[0]
# 4. Any section that contains this item.
try:
results = Content.search_objects.client.percolate(
index=self.mapping.index,
doc_type=self.mapping.doc_type,
id=self.id,
body={"filter": es_filter.Prefix(_id="section").to_dict()}
)
except TransportError:
results = {"total": 0}
if results["total"] > 0:
for result in results["matches"]:
if not result["_id"].endswith("None"):
return result["_id"]
return "recent"
def validate_query(self, query):
"""Confirm query exists given common filters."""
if query is None:
return query
query = self.update_reading_list(query)
return query
def get_validated_augment_query(self, augment_query=None):
"""
Common rules for reading list augmentation hierarchy.
1. Sponsored Content.
2. Video Content.
"""
augment_query = self.validate_query(augment_query)
# Given an invalid query, reach for a Sponsored query.
if not augment_query:
augment_query = self.validate_query(Content.search_objects.sponsored())
# Given an invalid Sponsored query, reach for a Video query.
if not augment_query:
reading_list_config = getattr(settings, "READING_LIST_CONFIG", {})
excluded_channel_ids = reading_list_config.get("excluded_channel_ids", [])
augment_query = self.validate_query(Content.search_objects.evergreen_video(
excluded_channel_ids=excluded_channel_ids
))
return augment_query
def augment_reading_list(self, primary_query, augment_query=None, reverse_negate=False):
"""Apply injected logic for slicing reading lists with additional content."""
primary_query = self.validate_query(primary_query)
augment_query = self.get_validated_augment_query(augment_query=augment_query)
try:
# We use this for cases like recent where queries are vague.
if reverse_negate:
primary_query = primary_query.filter(NegateQueryFilter(augment_query))
else:
augment_query = augment_query.filter(NegateQueryFilter(primary_query))
augment_query = randomize_es(augment_query)
return FirstSlotSlicer(primary_query, augment_query)
except TransportError:
return primary_query
def get_special_coverage_identifiers(self):
cache_key = "special-coverage-identifiers-{}".format(self.id)
identifiers = cache.get(cache_key)
if identifiers is None:
identifiers = self.percolate_special_coverage()
cache.set(cache_key, identifiers, 60 * 5)
return identifiers
def get_reading_list_identifier(self):
cache_key = "reading-list-identifier-{}".format(self.id)
identifier = cache.get(cache_key)
if identifier is None:
identifier = self._get_reading_list_identifier()
cache.set(cache_key, identifier, 60 * 5)
return identifier
def update_reading_list(self, reading_list):
"""Generic behaviors for reading lists before being rendered."""
# remove the current piece of content from the query.
reading_list = reading_list.filter(
~es_filter.Ids(values=[self.id])
)
# remove excluded document types from the query.
reading_list_config = getattr(settings, "READING_LIST_CONFIG", {})
excluded_doc_types = reading_list_config.get("excluded_doc_types", [])
for obj in excluded_doc_types:
reading_list = reading_list.filter(~es_filter.Type(value=obj))
return reading_list
def get_reading_list_context(self, **kwargs):
"""Returns the context dictionary for a given reading list."""
reading_list = None
context = {
"name": "",
"content": reading_list,
"targeting": {},
"videos": []
}
if self.reading_list_identifier == "popular":
reading_list = popular_content()
context.update({"name": self.reading_list_identifier})
# Popular is augmented.
reading_list = self.augment_reading_list(reading_list)
context.update({"content": reading_list})
return context
if self.reading_list_identifier.startswith("specialcoverage"):
special_coverage = SpecialCoverage.objects.get_by_identifier(
self.reading_list_identifier
)
reading_list = special_coverage.get_content().query(
SponsoredBoost(field_name="tunic_campaign_id")
).sort("_score", "-published")
context["targeting"]["dfp_specialcoverage"] = special_coverage.slug
if special_coverage.tunic_campaign_id:
context["tunic_campaign_id"] = special_coverage.tunic_campaign_id
context["targeting"].update({
"dfp_campaign_id": special_coverage.tunic_campaign_id
})
# We do not augment sponsored special coverage lists.
reading_list = self.update_reading_list(reading_list)
else:
reading_list = self.augment_reading_list(reading_list)
context.update({
"name": special_coverage.name,
"videos": special_coverage.videos,
"content": reading_list
})
return context
if self.reading_list_identifier.startswith("section"):
section = Section.objects.get_by_identifier(self.reading_list_identifier)
reading_list = section.get_content()
reading_list = self.augment_reading_list(reading_list)
context.update({
"name": section.name,
"content": reading_list
})
return context
reading_list = Content.search_objects.search()
reading_list = self.augment_reading_list(reading_list, reverse_negate=True)
context.update({
"name": "Recent News",
"content": reading_list
})
return context
def get_reading_list(self, published=True):
"""
This is currently a misnomer, as it actually returns a dictionary object.
The returned object contains the reading list.
"""
        return self.get_reading_list_context(published=published)
@property
def reading_list_identifier(self):
_reading_list_identifier = getattr(self, "_reading_list_identifier", None)
if not _reading_list_identifier:
setattr(self, "_reading_list_identifier", self.get_reading_list_identifier())
return self._reading_list_identifier
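
# --- Illustrative usage sketch (editorial addition, not part of django-bulbs) ---
# A minimal example of how the mixin's context might be consumed, assuming a
# concrete Content subclass mixes in ReadingListMixin and that list items expose
# a ``title`` attribute; both assumptions are for illustration only.
def _example_reading_list_titles(content_obj, limit=10):
    """Return the reading list name and up to ``limit`` item titles.

    ``content_obj`` is assumed to be a saved Content subclass instance that
    mixes in ReadingListMixin; its context carries the list name, the content
    queryset/slicer, targeting data, and any special coverage videos.
    """
    context = content_obj.get_reading_list_context()
    titles = [getattr(item, "title", None) for item in context["content"][:limit]]
    return context["name"], titles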
| mit | 6,193,664,146,447,652,000 | 38.786408 | 92 | 0.618228 | false |
Zefiros-Software/Zefiros-Bot | bot/slack_clients.py | 1 | 8173 | import logging
import re
import time
import json
import traceback
import os
import raven
from slacker import Slacker
from slackclient import SlackClient
from asq.initiators import *
from phabricator import Phabricator
from functools import wraps
class memo:
def __init__(self, fn):
self.fn = fn
self.memo = {}
def __get__(self, obj, objtype):
"""Support instance methods."""
import functools
return functools.partial(self.__call__, obj)
def __call__(self, *args, **kwds):
import pickle
str = pickle.dumps(args[1:], -1)+pickle.dumps(kwds, -1)
if not str in self.memo:
result = self.fn(*args, **kwds)
if result:
self.memo[str] = result
else:
return result
return self.memo[str]
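
# Note on the decorator above (editorial annotation): ``memo`` keys its cache on
# the pickled positional arguments after ``self`` plus the keyword arguments, so
# repeated lookups such as findUsers(...) on one instance hit the remote APIs only
# once. Falsy results are deliberately not cached (see the ``if result`` check),
# which lets failed lookups be retried on the next call.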
logger = logging.getLogger(__name__)
class SlackClients(object):
def __init__(self, token, phabuser, phabtoken, phabhost):
#logger.debug('Starting client with token: {}, {}, {}, {}'.format(token, phabuser, phabtoken, phabhost))
self.token = token
# Slacker is a Slack Web API Client
self.web = Slacker(token)
# SlackClient is a Slack Websocket RTM API Client
self.rtm = SlackClient(token)
sentryKey = str(os.getenv("SENTRY_KEY", "")).strip()
sentrySecret = str(os.getenv("SENTRY_SECRET", "")).strip()
sentryUrl = str(os.getenv("SENTRY_URL", "")).strip()
self.sentry = raven.Client( 'https://{}:{}@{}'.format(sentryKey, sentrySecret, sentryUrl) )
self.phab = Phabricator(username=phabuser, token=phabtoken, host=phabhost)
self.phab.update_interfaces()
self.priorityTranslations = {100: 'Unbreak Now!', 90: 'Needs Triage', 80: 'High', 50: 'Normal', 25: 'Low', 0: 'Wishlist'}
def queryPhabFeed(self, after):
return self.phab.feed.query( after=after, view="text" )
def queryPhabManiphest(self, owners, **kwargs):
return self.phab.maniphest.query(ownerPHIDs=owners, order="order-priority", status="status-open", limit=5, **kwargs)
def findPhabIssue(self, issue):
return self.phab.maniphest.info( task_id=issue )
def createPhabTask(self, title, creators, projects):
auxiliary = { 'std:maniphest:zefiros:isFromSlack': True, 'std:maniphest:zefiros:creators': creators }
if projects:
return self.phab.maniphest.createtask( title=title ,ccPHIDs=creators, projectPHIDs=projects, auxiliary=auxiliary)
return self.phab.maniphest.createtask( title=title, ccPHIDs=creators, auxiliary=auxiliary)
def setPhabTask(self, task, **kwargs):
return self.phab.maniphest.update( phid=task, **kwargs )
def getPriorityLabelByValue(self, priorityValue):
return self.priorityTranslations.get( priorityValue )
def findPhabIssueByPHID(self, issuePHID):
issues = self.phab.maniphest.query( phids=[issuePHID] )
if len( issues ) == 0:
return None
return issues[issuePHID]
def findPhabRepository(self, repositoryPHID):
return query( list( self.phab.phid.lookup( names=[repositoryPHID] ).items() ) ).where( lambda tuple: repositoryPHID in tuple[0] ).select( lambda tuple: tuple[1] ).to_list()[0]
def findPhabRepositoryInfo( self, repositoryPhid):
return query( list(self.phab.diffusion.repository.search( phids=[repositoryPhid] )["data"].items() ) ).select( lambda repo: repo["fields"] ).to_list()[0]
def findPhabCommits( self, commitPHIDs):
commits = self.phab.diffusion.querycommits( phids=commitPHIDs )["data"]
if len( commits ) == 0:
return None
return query( list( commits.items() ) ).select( lambda tuple: tuple[1] ).to_list()
def findWorkboardColumn(self, columnPHID):
return query( list( self.phab.phid.lookup( names=[columnPHID] ).items() ) ).where( lambda tuple: columnPHID in tuple[0] ).select( lambda tuple: tuple[1] ).to_list()[0]
@memo
def findPhabUsers(self, userIds):
return self.phab.user.query( phids=userIds )
@memo
def findPhabProject(self, names):
projects = self.phab.project.query(names=names)
if len( projects ) == 0:
return None
return projects
@memo
def findPhabProjectsByPHID(self, phids):
projects = self.phab.project.query(phids=phids)['data']
if len( projects ) == 0:
return None
return list( projects.values() )
@memo
def findSlackUsersToPhab(self, userNames):
userNames = self.findSlackUserNames( userNames )
users = self.phab.user.query(usernames=userNames)
return query(users).select(lambda u: u['phid'] ).to_list()
@memo
def findSlackUserNames(self, userNames):
userList = json.loads(self.rtm.server.api_call( "users.list", presence=0 ))
return query(userList['members']).where(lambda im: "email" in im['profile'] and im['profile']['email'] is not None) \
.where(lambda im: im['name'] in userNames) \
.select(lambda im: im['profile']['email'].split('@')[0]).to_list()
def findManiphestTransactions(self, taskId, transactionPHIDs):
transactions = self.phab.maniphest.gettasktransactions( ids=[taskId] )
return query(transactions["{}".format(taskId)]).where(lambda t: t['transactionType'] != 'core:customfield' and t['transactionType'] != 'core:edge' and t['transactionType'] != 'core:subscribers' and any(t['transactionPHID'] in transactionPHID for transactionPHID in transactionPHIDs)).to_list()
@memo
def findPhabUsersToSlack(self, userIds):
users = self.findPhabUsers(userIds)
users = self.findUsers( query(users).select(lambda u: u['userName'] ).to_list() )
if users:
return query(users).select(lambda u: u['id'] ).to_list()
else:
return None
@memo
def findUsers(self, users):
userList = json.loads(self.rtm.server.api_call( "users.list", presence=0 ))
users = query(userList['members']).where(lambda im: "email" in im['profile'] and im['profile']['email'] is not None).where(lambda im: im['profile']['email'].split('@')[0] in users).to_list()
if len( users ) == 0:
return None
return users
@memo
def findChannelId(self, names):
channelsList = json.loads(self.rtm.server.api_call( "channels.list", exclude_archived=1 ))
channels = query(channelsList['channels']).where(lambda im: im['name'].lower() in names.lower()).select(lambda u: u['id'] ).to_list()
if len( channels ) == 0:
return None
return channels
def botUserId(self):
return self.rtm.server.login_data['self']['id']
def isMessageFromMe(self, user):
return user == self.rtm.server.login_data['self']['id']
@memo
def isDirectMessage(self, channel):
imList = json.loads(self.rtm.server.api_call( "im.list" ))
return len(query(imList['ims']).where(lambda im: im['id'] == channel).to_list()) > 0
@memo
def isBot(self, user):
userInf = json.loads(self.rtm.server.api_call( "users.info", user=user ))
return "is_bot" in userInf['user'] and userInf['user']['is_bot']
def isBotMention(self, message):
botUserName = self.rtm.server.login_data['self']['id']
if re.search("@{}".format(botUserName), message):
return True
else:
return False
def sendUserTypingPause(self, channelId, sleepTime=3.0):
userTypingJson = {"type": "typing", "channel": channelId}
self.rtm.server.send_to_websocket(userTypingJson)
time.sleep(sleepTime)
def logExceptionToSlack(self, e):
logger.exception( e )
self.sentry.captureException()
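
# --- Illustrative usage sketch (editorial addition, not part of the original bot) ---
# Shows how a caller might go from Slack user names to their open Maniphest tasks
# using only methods defined above. Constructing SlackClients itself needs live
# Slack/Phabricator credentials, so the already-built client is taken as a parameter.
def _example_open_tasks_for_slack_users(clients, slack_user_names):
    """Query up to five open, priority-ordered Maniphest tasks owned by the
    Phabricator accounts matching the given Slack user names."""
    owner_phids = clients.findSlackUsersToPhab(slack_user_names)
    if not owner_phids:
        return None
    return clients.queryPhabManiphest(owner_phids)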
| mit | 7,156,433,444,986,988,000 | 38.261084 | 301 | 0.608345 | false |
Wilsh/goldwarsplus | commerce/models.py | 1 | 13576 | from django.db import models
from django.utils import timezone
from urllib.request import urlretrieve
import hashlib
import os
from django.conf import settings
from math import ceil
# Create your models here.
class Item(models.Model):
'''All items discovered by players in the game'''
item_id = models.PositiveIntegerField(primary_key=True)
chat_link = models.CharField(max_length=120, default='')
name = models.CharField(max_length=200, default='[no name provided]')
icon = models.ForeignKey('Icon', on_delete=models.CASCADE)
description = models.TextField(default='No description provided')
type = models.CharField(max_length=20, default='')
rarity = models.CharField(max_length=10, default='')
level = models.PositiveSmallIntegerField(default=0)
vendor_value = models.PositiveIntegerField(default=0)
seen_on_trading_post = models.BooleanField(default=False)
can_be_crafted = models.BooleanField(default=False)
can_purchase_from_vendor = models.BooleanField(default=False)
vendor_price = models.PositiveIntegerField(default=0)
date_added = models.DateTimeField()
class Meta:
ordering = ["-date_added"]
def __str__(self):
return "Item " + str(self.item_id) + ": " + self.name
def add_details(self, itemdetails):
self.item_id = itemdetails['id']
self.chat_link = itemdetails['chat_link']
if itemdetails['name'] != '':
self.name = itemdetails['name']
try:
if itemdetails['description'] != '':
self.description = itemdetails['description']
except KeyError:
pass
self.type = itemdetails['type']
self.rarity = itemdetails['rarity']
self.level = itemdetails['level']
self.vendor_value = itemdetails['vendor_value']
self.date_added = timezone.now()
def get_market_buy(self, quantity=1):
'''Return the cost of the quantity of this item if bought on the trading post'''
sell_orders = self.selllisting_set.all().order_by('unit_price')
total = 0
count = 0
for order in sell_orders:
if (order.quantity + count) < quantity:
count += order.quantity
total += order.quantity * order.unit_price
else:
total += (quantity - count) * order.unit_price
return total
#quantity not available
return 0
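    # Worked example for get_market_buy (editorial annotation): with sell listings
    # of 5 items at 10 copper and 10 items at 12 copper (walked cheapest first),
    # get_market_buy(8) returns 5 * 10 + 3 * 12 = 86 copper; if fewer than 8 items
    # were listed in total, it would return 0.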
def get_market_sell(self):
'''Return the value of this item if sold immediately on the trading post'''
buy_order = self.buylisting_set.order_by('-unit_price').first()
return buy_order.unit_price if buy_order else 0
def get_market_delay_sell(self):
'''Return the value of this item if sold one copper below the lowest current
selling price on the trading post. Returns 0 if none of these items are listed'''
sell_order = self.selllisting_set.order_by('unit_price').first()
return sell_order.unit_price - 1 if sell_order else 0
def buy_or_craft(self, quantity=1):
'''Return the cheapest method to obtain this Item as a nested list of
Items designated as 'buy' or 'craft' depending upon whether it is cheaper
to buy that Item on the trading post or craft the Item after buying its
base components'''
purchase_price = self.get_market_buy(quantity)
if purchase_price == 0: #not available
purchase_price = 9999999999
if not self.can_be_crafted:
return ['buy', purchase_price, quantity, [self.item_id, self.name]]
recipe_id_list = []
recipe_name_list = []
cheapest_recipe_idx = 0
ingredient_list = []
crafting_price = 0
num_recipes = 0
for recipe in self.recipe_set.all():
ingredient_sublist = []
recipe_id_list.append(recipe.recipe_id)
recipe_name_list.append([recipe.output_item_id, recipe.output_item_id.name])
for ingredient in recipe.recipeingredient_set.all():
should_buy = ingredient.item_id.buy_or_craft(ceil(ingredient.count / recipe.output_item_count))
if should_buy[0] == 'buy':
cost_multiplier = 1
else:
cost_multiplier = ceil(ingredient.count / recipe.output_item_count)
ingredient_sublist.append([should_buy, cost_multiplier])
ingredient_list.append(ingredient_sublist)
num_recipes += 1
if num_recipes > 1:
ingredient_list, cheapest_recipe_idx, crafting_price = self.get_cheapest_recipe(ingredient_list)
else:
ingredient_list = ingredient_list[0]
for ingredient, count in ingredient_list:
crafting_price += self.get_component_cost(ingredient, count)
if crafting_price < purchase_price:
return ['craft', crafting_price, quantity, ingredient_list, recipe_name_list[cheapest_recipe_idx], recipe_id_list[cheapest_recipe_idx]]
else:
return ['buy', purchase_price, quantity, [self.item_id, self.name]]
def get_cheapest_recipe(self, recipe_list):
'''Given a list of lists of ingredients for multiple Recipes, return
the list of Recipe ingredients that are the cheapest to obtain along
with the index of the recipe_list containing the cheapest ingredients
and the total cost of those ingredients.
Intended for Items that can be crafted by more than one Recipe'''
cheapest_idx = 0
current_idx = 0
cheapest_price = 9999999999
for ingredient_list in recipe_list:
crafting_price = 0
for ingredient, count in ingredient_list:
crafting_price += self.get_component_cost(ingredient, count)
if crafting_price < cheapest_price:
cheapest_price = crafting_price
cheapest_idx = current_idx
current_idx += 1
return (recipe_list[cheapest_idx], cheapest_idx, cheapest_price)
def get_component_cost(self, list, num_items):
'''Return the cost of an Item in a list instantiated by buy_or_craft'''
cost = 0
if list[0] == 'buy' or list[0] == 'craft':
cost = list[1] * num_items
return cost
class ItemFlag(models.Model):
'''Flags applying to an Item'''
for_item = models.OneToOneField('Item', on_delete=models.CASCADE)
AccountBindOnUse = models.BooleanField(default=False)
AccountBound = models.BooleanField(default=False)
HideSuffix = models.BooleanField(default=False)
MonsterOnly = models.BooleanField(default=False)
NoMysticForge = models.BooleanField(default=False)
NoSalvage = models.BooleanField(default=False)
NoSell = models.BooleanField(default=False)
NotUpgradeable = models.BooleanField(default=False)
NoUnderwater = models.BooleanField(default=False)
SoulbindOnAcquire = models.BooleanField(default=False)
SoulBindOnUse = models.BooleanField(default=False)
Unique = models.BooleanField(default=False)
class Meta:
ordering = ["for_item"]
def __str__(self):
return "Flags for item " + str(self.for_item.item_id) + ": " + self.for_item.name
def add_details(self, flaglist):
for entry in flaglist:
setattr(self, entry, True)
class EconomicsForItem(models.Model):
'''Economic data applying to an Item that can be found on the trading post'''
for_item = models.OneToOneField('Item', on_delete=models.CASCADE)
price_change_count = models.PositiveIntegerField(default=0)
relist_profit = models.PositiveIntegerField(default=0)
def __str__(self):
return "Economic data for Item " + str(self.for_item.item_id) + ": " + self.for_item.name
class Icon(models.Model):
'''Icons used for Items'''
url = models.CharField(primary_key=True, max_length=120)
static_id = models.CharField(max_length=36, default='unknown.png')
def __str__(self):
return "Icon for Items " + self.static_id
def add_details(self):
self.static_id = hashlib.md5(self.url.encode('utf-8')).hexdigest()
self.static_id += '.png'
urlretrieve(self.url, os.path.join(settings.BASE_DIR, 'commerce/static/commerce/items/') + self.static_id)
class Recipe(models.Model):
'''All recipes for craftable Items discovered by
players in the game'''
recipe_id = models.PositiveIntegerField(primary_key=True)
type = models.CharField(max_length=30, default='')
output_item_id = models.ForeignKey('Item', on_delete=models.CASCADE)
output_item_count = models.PositiveSmallIntegerField(default=0)
min_rating = models.PositiveSmallIntegerField(default=0)
AutoLearned = models.BooleanField(default=False)
LearnedFromItem = models.BooleanField(default=False)
date_added = models.DateTimeField()
class Meta:
ordering = ["-date_added"]
def __str__(self):
return "Recipe for item " + str(self.output_item_id.item_id) + ": " + self.output_item_id.name
def add_details(self, recipedetails):
self.recipe_id = recipedetails['id']
self.type = recipedetails['type']
self.output_item_count = recipedetails['output_item_count']
self.min_rating = recipedetails['min_rating']
for entry in recipedetails['flags']:
setattr(self, entry, True)
self.date_added = timezone.now()
class EconomicsForRecipe(models.Model):
'''Economic data applying to a Recipe'''
for_recipe = models.OneToOneField('Recipe', on_delete=models.CASCADE)
limited_production = models.BooleanField(default=False)
ingredient_cost = models.PositiveIntegerField(default=0)
fast_crafting_profit = models.IntegerField(default=0)
delayed_crafting_profit = models.IntegerField(default=0)
def __str__(self):
return "Economic data for Recipe " + str(self.for_recipe.recipe_id) + ": " + self.for_recipe.output_item_id.name
class RecipeDiscipline(models.Model):
'''Discipline flags applying to a Recipe'''
for_recipe = models.OneToOneField('Recipe', on_delete=models.CASCADE)
Artificer = models.BooleanField(default=False)
Armorsmith = models.BooleanField(default=False)
Chef = models.BooleanField(default=False)
Huntsman = models.BooleanField(default=False)
Jeweler = models.BooleanField(default=False)
Leatherworker = models.BooleanField(default=False)
Tailor = models.BooleanField(default=False)
Weaponsmith = models.BooleanField(default=False)
Scribe = models.BooleanField(default=False)
class Meta:
ordering = ["for_recipe"]
def __str__(self):
return "Disciplines for recipe " + str(self.for_recipe.recipe_id) + ": " + self.for_recipe.output_item_id.name
def add_details(self, disciplines):
for entry in disciplines:
setattr(self, entry, True)
def get_disciplines(self):
disciplines = []
disciplines.append(['Artificer', self.Artificer])
disciplines.append(['Armorsmith', self.Armorsmith])
disciplines.append(['Chef', self.Chef])
disciplines.append(['Huntsman', self.Huntsman])
disciplines.append(['Jeweler', self.Jeweler])
disciplines.append(['Leatherworker', self.Leatherworker])
disciplines.append(['Tailor', self.Tailor])
disciplines.append(['Weaponsmith', self.Weaponsmith])
disciplines.append(['Scribe', self.Scribe])
return disciplines
class RecipeIngredient(models.Model):
'''An Item and its quantity required for a Recipe'''
for_recipe = models.ForeignKey('Recipe', on_delete=models.CASCADE)
item_id = models.ForeignKey('Item', on_delete=models.CASCADE)
count = models.PositiveSmallIntegerField()
class Meta:
ordering = ["for_recipe"]
def __str__(self):
return "Ingredient for recipe " + str(self.for_recipe.recipe_id) + ": " + self.for_recipe.output_item_id.name
def add_details(self, ingredient):
self.count = ingredient['count']
class BuyListing(models.Model):
'''A buy order for an Item listed on the trading post'''
for_item = models.ForeignKey('Item', on_delete=models.CASCADE)
quantity = models.PositiveIntegerField()
unit_price = models.PositiveIntegerField()
date_added = models.DateTimeField()
class Meta:
ordering = ["-unit_price"]
def __str__(self):
return "Buy order for item " + str(self.for_item.item_id) + ": " + self.for_item.name + " at price: " + str(self.unit_price)
def add_details(self, listing):
self.quantity = listing['quantity']
self.unit_price = listing['unit_price']
self.date_added = timezone.now()
class SellListing(models.Model):
'''A sell order for an Item listed on the trading post'''
for_item = models.ForeignKey('Item', on_delete=models.CASCADE)
quantity = models.PositiveIntegerField()
unit_price = models.PositiveIntegerField()
date_added = models.DateTimeField()
class Meta:
ordering = ["unit_price"]
def __str__(self):
return "Sell order for item " + str(self.for_item.item_id) + ": " + self.for_item.name + " at price: " + str(self.unit_price)
def add_details(self, listing):
self.quantity = listing['quantity']
self.unit_price = listing['unit_price']
self.date_added = timezone.now()
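
# --- Illustrative helper (editorial addition, not part of the original app) ---
# Pretty-prints the nested decision list produced by Item.buy_or_craft() above.
# A 'buy' entry looks like [action, price, quantity, [item_id, name]]; a 'craft'
# entry looks like [action, price, quantity, ingredient_list, [recipe_item, recipe_name],
# recipe_id], where each ingredient is a [nested_decision, count] pair.
def _example_print_buy_or_craft(decision, depth=0):
    """Walk a buy_or_craft() result and print one line per buy/craft step."""
    indent = "  " * depth
    action, cost, quantity = decision[0], decision[1], decision[2]
    if action == 'buy':
        item_id, name = decision[3]
        print("%sbuy %sx %s (item %s) for %s copper" % (indent, quantity, name, item_id, cost))
    else:
        recipe_name = decision[4][1]
        print("%scraft %sx %s for %s copper" % (indent, quantity, recipe_name, cost))
        for ingredient_decision, count in decision[3]:
            _example_print_buy_or_craft(ingredient_decision, depth + 1)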
| mit | -482,726,655,842,744,700 | 42.373802 | 147 | 0.648129 | false |
marscher/mdtraj | MDTraj/utils/unit/quantity.py | 1 | 27812 | #!/bin/env python
"""
Module simtk.unit.quantity
Physical quantities with units, intended to produce similar functionality
to Boost.Units package in C++ (but with a runtime cost).
Uses similar API as Scientific.Physics.PhysicalQuantities
but different internals to satisfy our local requirements.
In particular, there is no underlying set of 'canonical' base
units, whereas in Scientific.Physics.PhysicalQuantities all
units are secretly in terms of SI units. Also, it is easier
to add new fundamental dimensions to simtk.dimensions. You
might want to make new dimensions for, say, "currency" or
"information".
Some features of this implementation:
* Quantities are a combination of a value and a unit. The value
part can be any python type, including numbers, lists, numpy
arrays, and anything else. The unit part must be a simtk.unit.Unit.
* Operations like adding incompatible units raises an error.
* Multiplying or dividing units/quantities creates new units.
* Users can create new Units and Dimensions, but most of the useful
ones are predefined.
* Conversion factors between units are applied transitively, so all
possible conversions are available.
* I want dimensioned Quantities that are compatible with numpy arrays,
but do not necessarily require the python numpy package. In other
words, Quantities can be based on either numpy arrays or on built in
python types.
* Units are NOT necessarily stored in terms of SI units internally.
This is very important for me, because one important application
area for us is at the molecular scale. Using SI units internally
can lead to exponent overflow in commonly used molecular force
calculations. Internally, all unit systems are equally fundamental
in SimTK.
Two possible enhancements that have not been implemented are
1) Include uncertainties with propagation of errors
2) Incorporate offsets for celsius <-> kelvin conversion
This is part of the OpenMM molecular simulation toolkit originating from
Simbios, the NIH National Center for Physics-Based Simulation of
Biological Structures at Stanford, funded under the NIH Roadmap for
Medical Research, grant U54 GM072970. See https://simtk.org.
Portions copyright (c) 2012 Stanford University and the Authors.
Authors: Christopher M. Bruns
Contributors: Peter Eastman
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS, CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import division
__author__ = "Christopher M. Bruns"
__version__ = "0.5"
import math
import copy
from .standard_dimensions import *
from .unit import Unit, is_unit, dimensionless
class Quantity(object):
"""Physical quantity, such as 1.3 meters per second.
Quantities contain both a value, such as 1.3; and a unit,
such as 'meters per second'.
Supported value types include:
1 - numbers (float, int, long)
2 - lists of numbers, e.g. [1,2,3]
3 - tuples of numbers, e.g. (1,2,3)
Note - unit conversions will cause tuples to be converted to lists
4 - lists of tuples of numbers, lists of lists of ... etc. of numbers
5 - numpy.arrays
Create numpy.arrays with units using the Quantity constructor, not the
multiply operator. e.g.
Quantity(numpy.array([1,2,3]), centimeters) # correct
*NOT*
numpy.array([1,2,3]) * centimeters # won't work
because numpy.arrays already overload the multiply operator for EVERYTHING.
"""
def __init__(self, value=None, unit=None):
"""
Create a new Quantity from a value and a unit.
Parameters
- value: (any type, usually a number) Measure of this quantity
- unit: (Unit) the physical unit, e.g. simtk.unit.meters.
"""
# When no unit is specified, bend over backwards to handle all one-argument possibilities
if unit == None: # one argument version, copied from UList
if is_unit(value):
# Unit argument creates an empty list with that unit attached
unit = value
value = []
elif is_quantity(value):
# Ulist of a Quantity is just the Quantity itself
unit = value.unit
value = value._value
elif _is_string(value):
unit = dimensionless
else:
# Is value a container?
is_container = True
try:
i = iter(value)
except TypeError:
is_container = False
if is_container:
if len(value) < 1:
unit = dimensionless
else:
                        first_item = next(iter(value))
# Avoid infinite recursion for string, because a one-character
# string is its own first element
if value == first_item:
unit = dimensionless
else:
unit = Quantity(first_item).unit
# Notice that tuples, lists, and numpy.arrays can all be initialized with a list
new_container = Quantity([], unit)
for item in value:
new_container.append(Quantity(item)) # Strips off units into list new_container._value
# __class__ trick does not work for numpy.arrays
try:
import numpy
if isinstance(value, numpy.ndarray):
value = numpy.array(new_container._value)
else:
                            # delegate construction to container class from list
value = value.__class__(new_container._value)
except ImportError:
                        # delegate construction to container class from list
value = value.__class__(new_container._value)
else:
# Non-Quantity, non container
# Wrap in a dimensionless Quantity
unit = dimensionless
# Accept simple scalar quantities as units
if is_quantity(unit):
value = value * unit._value
unit = unit.unit
# Use empty list for unspecified values
if value == None:
value = []
self._value = value
self.unit = unit
def __getstate__(self):
state = dict()
state['_value'] = self._value
state['unit'] = self.unit
return state
def __setstate__(self, state):
self._value = state['_value']
self.unit = state['unit']
return
def __copy__(self):
"""
Shallow copy produces a new Quantity with the shallow copy of value and the same unit.
Because we want copy operations to work just the same way they would on the underlying value.
"""
return Quantity(copy.copy(self._value), self.unit)
def __deepcopy__(self, memo):
"""
Deep copy produces a new Quantity with a deep copy of the value, and the same unit.
Because we want copy operations to work just the same way they would on the underlying value.
"""
return Quantity(copy.deepcopy(self._value, memo), self.unit)
def __getattr__(self, attribute):
"""
Delegate unrecognized attribute calls to the underlying value type.
"""
ret_val = getattr(self._value, attribute)
return ret_val
def __str__(self):
"""Printable string version of this Quantity.
Returns a string consisting of quantity number followed by unit abbreviation.
"""
return str(self._value) + ' ' + str(self.unit.get_symbol())
def __repr__(self):
"""
"""
return (Quantity.__name__ + '(value=' + repr(self._value) + ', unit=' +
str(self.unit) + ')')
def format(self, format_spec):
return format_spec % self._value + ' ' + str(self.unit.get_symbol())
def __add__(self, other):
"""Add two Quantities.
Only Quantities with the same dimensions (e.g. length)
can be added. Raises TypeError otherwise.
Parameters
- self: left hand member of sum
- other: right hand member of sum
Returns a new Quantity that is the sum of the two arguments.
"""
# can only add using like units
if not self.unit.is_compatible(other.unit):
raise TypeError('Cannot add two quantities with incompatible units "%s" and "%s".' % (self.unit, other.unit))
value = self._value + other.value_in_unit(self.unit)
unit = self.unit
return Quantity(value, unit)
def __sub__(self, other):
"""Subtract two Quantities.
Only Quantities with the same dimensions (e.g. length)
can be subtracted. Raises TypeError otherwise.
Parameters
- self: left hand member (a) of a - b.
- other: right hand member (b) of a - b.
Returns a new Quantity that is the difference of the two arguments.
"""
if not self.unit.is_compatible(other.unit):
raise TypeError('Cannot subtract two quantities with incompatible units "%s" and "%s".' % (self.unit, other.unit))
value = self._value - other.value_in_unit(self.unit)
unit = self.unit
return Quantity(value, unit)
def __eq__(self, other):
"""
"""
if not is_quantity(other):
return False
if not self.unit.is_compatible(other.unit):
return False
return self.value_in_unit(other.unit) == other._value
def __ne__(self, other):
"""
"""
return not self.__eq__(other)
def __lt__(self, other):
"""Compares two quantities.
Raises TypeError if the Quantities are of different dimension (e.g. length vs. mass)
Returns True if self < other, False otherwise.
"""
return self._value < other.value_in_unit(self.unit)
def __ge__(self, other):
return self._value >= (other.value_in_unit(self.unit))
def __gt__(self, other):
return self._value > (other.value_in_unit(self.unit))
def __le__(self, other):
return self._value <= (other.value_in_unit(self.unit))
_reduce_cache = {}
def reduce_unit(self, guide_unit=None):
"""
Combine similar component units and scale, to form an
equal Quantity in simpler units.
Returns underlying value type if unit is dimensionless.
"""
key = (self.unit, guide_unit)
if key in Quantity._reduce_cache:
(unit, value_factor) = Quantity._reduce_cache[key]
else:
value_factor = 1.0
canonical_units = {} # dict of dimensionTuple: (Base/ScaledUnit, exponent)
# Bias result toward guide units
if guide_unit != None:
for u, exponent in guide_unit.iter_base_or_scaled_units():
d = u.get_dimension_tuple()
if d not in canonical_units:
canonical_units[d] = [u, 0]
for u, exponent in self.unit.iter_base_or_scaled_units():
d = u.get_dimension_tuple()
# Take first unit found in a dimension as canonical
if d not in canonical_units:
canonical_units[d] = [u, exponent]
else:
value_factor *= (u.conversion_factor_to(canonical_units[d][0])**exponent)
canonical_units[d][1] += exponent
new_base_units = {}
for d in canonical_units:
u, exponent = canonical_units[d]
if exponent != 0:
assert u not in new_base_units
new_base_units[u] = exponent
# Create new unit
if len(new_base_units) == 0:
unit = dimensionless
else:
unit = Unit(new_base_units)
# There might be a factor due to unit conversion, even though unit is dimensionless
# e.g. suppose unit is meter/centimeter
if unit.is_dimensionless():
unit_factor = unit.conversion_factor_to(dimensionless)
if unit_factor != 1.0:
value_factor *= unit_factor
# print "value_factor = %s" % value_factor
unit = dimensionless
Quantity._reduce_cache[key] = (unit, value_factor)
# Create Quantity, then scale (in case value is a container)
# That's why we don't just scale the value.
result = Quantity(self._value, unit)
if value_factor != 1.0:
# __mul__ strips off dimensionless, if appropriate
result = result * value_factor
if unit.is_dimensionless():
assert unit is dimensionless # should have been set earlier in this method
if is_quantity(result):
result = result._value
return result
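    # Example of the reduction described above (editorial annotation): a value of
    # 1.0 with unit meter/centimeter reduces to the bare float 100.0, because the
    # combined unit is dimensionless and the leftover conversion factor is 100:
    #   Quantity(1.0, meter / centimeter).reduce_unit()    # -> 100.0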
def __mul__(self, other):
"""Multiply a quantity by another object
Returns a new Quantity that is the product of the self * other,
unless the resulting unit is dimensionless, in which case the
underlying value type is returned, instead of a Quantity.
"""
if is_unit(other):
# print "quantity * unit"
# Many other mul/div operations delegate to here because I was debugging
# a dimensionless unit conversion problem, which I ended up fixing within
# the reduce_unit() method.
unit = self.unit * other
return Quantity(self._value, unit).reduce_unit(self.unit)
elif is_quantity(other):
# print "quantity * quantity"
# Situations where the units cancel can result in scale factors from the unit cancellation.
# To simplify things, delegate Quantity * Quantity to (Quantity * scalar) * unit
return (self * other._value) * other.unit
else:
# print "quantity * scalar"
return self._change_units_with_factor(self.unit, other, post_multiply=False)
# value type might not be commutative for multiplication
def __rmul__(self, other):
"""Multiply a scalar by a Quantity
Returns a new Quantity with the same units as self, but with the value
multiplied by other.
"""
if is_unit(other):
raise NotImplementedError('programmer is surprised __rmul__ was called instead of __mul__')
# print "R unit * quantity"
elif is_quantity(other):
# print "R quantity * quantity"
raise NotImplementedError('programmer is surprised __rmul__ was called instead of __mul__')
else:
# print "scalar * quantity"
return self._change_units_with_factor(self.unit, other, post_multiply=True)
# return Quantity(other * self._value, self.unit)
def __truediv__(self, other):
"""Divide a Quantity by another object
Returns a new Quantity, unless the resulting unit type is dimensionless,
in which case the underlying value type is returned.
"""
if is_unit(other):
# print "quantity / unit"
return self * pow(other, -1.0)
# unit = self.unit / other
# return Quantity(self._value, unit).reduce_unit(self.unit)
elif is_quantity(other):
# print "quantity / quantity"
# Delegate quantity/quantity to (quantity/scalar)/unit
return (self/other._value) / other.unit
else:
# print "quantity / scalar"
return self * pow(other, -1.0)
# return Quantity(self._value / other, self.unit)
__div__ = __truediv__
def __rtruediv__(self, other):
"""Divide a scalar by a quantity.
Returns a new Quantity. The resulting units are the inverse of the self argument units.
"""
if is_unit(other):
# print "R unit / quantity"
raise NotImplementedError('programmer is surprised __rtruediv__ was called instead of __truediv__')
elif is_quantity(other):
raise NotImplementedError('programmer is surprised __rtruediv__ was called instead of __truediv__')
else:
# print "R scalar / quantity"
return other * pow(self, -1.0)
# return Quantity(other / self._value, pow(self.unit, -1.0))
__rdiv__ = __rtruediv__
def __pow__(self, exponent):
"""Raise a Quantity to a power.
Generally both the value and the unit of the Quantity are affected by this operation.
Returns a new Quantity equal to self**exponent.
"""
return Quantity(pow(self._value, exponent), pow(self.unit, exponent))
def sqrt(self):
"""
Returns square root of a Quantity.
Raises ArithmeticError if component exponents are not even.
This behavior can be changed if you present a reasonable real life case to me.
"""
# There might be a conversion factor from taking the square root of the unit
new_value = math.sqrt(self._value)
new_unit = self.unit.sqrt()
unit_factor = self.unit.conversion_factor_to(new_unit*new_unit)
if unit_factor != 1.0:
new_value *= math.sqrt(unit_factor)
return Quantity(value=new_value, unit=new_unit)
def __abs__(self):
"""
Return absolute value of a Quantity.
The unit is unchanged. A negative value of self will result in a positive value
in the result.
"""
return Quantity(abs(self._value), self.unit)
def __pos__(self):
"""
Returns a reference to self.
"""
return Quantity(+(self._value), self.unit)
def __neg__(self):
"""Negate a Quantity.
Returns a new Quantity with a different sign on the value.
"""
return Quantity(-(self._value), self.unit)
def __nonzero__(self):
"""Returns True if value underlying Quantity is zero, False otherwise.
"""
return bool(self._value)
def __complex__(self):
return Quantity(complex(self._value), self.unit)
def __float__(self):
return Quantity(float(self._value), self.unit)
def __int__(self):
return Quantity(int(self._value), self.unit)
def __long__(self):
return Quantity(int(self._value), self.unit)
def value_in_unit(self, unit):
"""
Returns underlying value, in the specified units.
"""
val = self.in_units_of(unit)
if is_quantity(val):
return val._value
else: # naked dimensionless
return val
def value_in_unit_system(self, system):
"""
Returns the underlying value type, after conversion to a particular unit system.
"""
result = self.in_unit_system(system)
if is_quantity(result):
return result._value
else:
return result # dimensionless
def in_unit_system(self, system):
"""
Returns a new Quantity equal to this one, expressed in a particular unit system.
"""
new_units = system.express_unit(self.unit)
f = self.unit.conversion_factor_to(new_units)
return self._change_units_with_factor(new_units, f)
def in_units_of(self, other_unit):
"""
Returns an equal Quantity expressed in different units.
If the units are the same as those in self, a reference to self is returned.
Raises a TypeError if the new unit is not compatible with the original unit.
The post_multiply argument is used in case the multiplication operation is not commutative.
i.e. result = factor * value when post_multiply is False
and result = value * factor when post_multiply is True
"""
if not self.unit.is_compatible(other_unit):
raise TypeError('Unit "%s" is not compatible with Unit "%s".' % (self.unit, other_unit))
f = self.unit.conversion_factor_to(other_unit)
return self._change_units_with_factor(other_unit, f)
def _change_units_with_factor(self, new_unit, factor, post_multiply=True):
# numpy arrays cannot be compared with 1.0, so just "try"
factor_is_identity = False
try:
if (factor == 1.0):
factor_is_identity = True
except ValueError:
pass
if factor_is_identity:
# No multiplication required
if (self.unit is new_unit):
result = self
else:
result = Quantity(self._value, new_unit)
else:
try:
# multiply operator, if it exists, is preferred
if post_multiply:
value = self._value * factor # works for number, numpy.array, or vec3, e.g.
else:
value = factor * self._value # works for number, numpy.array, or vec3, e.g.
result = Quantity(value, new_unit)
except TypeError:
# list * float fails with TypeError
# Presumably a list type
# deep copy
value = self._value[:] # deep copy
# convert tuple to list
try:
value[0] = value[0] # tuple is immutable
except TypeError:
# convert immutable tuple to list
value = []
for i in self._value:
value.append(i)
result = Quantity(self._scale_sequence(value, factor, post_multiply), new_unit)
if (new_unit.is_dimensionless()):
return result._value
else:
return result
def _scale_sequence(self, value, factor, post_multiply):
try:
if post_multiply:
if isinstance(self._value, tuple):
value = tuple([x*factor for x in value])
else:
for i in range(len(value)):
value[i] = value[i]*factor
else:
if isinstance(self._value, tuple):
value = tuple([factor*x for x in value])
else:
for i in range(len(value)):
value[i] = factor*value[i]
except TypeError as ex:
for i in range(len(value)):
value[i] = self._scale_sequence(value[i], factor, post_multiply)
return value
####################################
### Sequence methods of Quantity ###
### in case value is a sequence ###
####################################
def __len__(self):
"""
Return size of internal value type.
"""
return len(self._value)
def __getitem__(self, key):
"""
Keep the same units on contained elements.
"""
assert not is_quantity(self._value[key])
return Quantity(self._value[key], self.unit)
def __setitem__(self, key, value):
# Delegate slices to one-at-a time ___setitem___
if isinstance(key, slice): # slice
indices = key.indices(len(self))
for i in range(*indices):
self[i] = value[i]
else: # single index
# Check unit compatibility
if self.unit.is_dimensionless() and is_dimensionless(value):
pass # OK
elif not self.unit.is_compatible(value.unit):
raise TypeError('Unit "%s" is not compatible with Unit "%s".' % (self.unit, value.unit))
self._value[key] = value / self.unit
assert not is_quantity(self._value[key])
def __delitem__(self, key):
del(self._value[key])
def __contains__(self, item):
return self._value.__contains__(item.value_in_unit(self.unit))
def __iter__(self):
for item in self._value:
yield Quantity(item, self.unit)
def count(self, item):
return self._value.count(item.value_in_unit(self.unit))
def index(self, item):
return self._value.index(item.value_in_unit(self.unit))
def append(self, item):
if is_quantity(item):
return self._value.append(item.value_in_unit(self.unit))
elif is_dimensionless(self.unit):
return self._value.append(item)
else:
raise TypeError("Cannot append item without units into list with units")
def extend(self, rhs):
self._value.extend(rhs.value_in_unit(self.unit))
def insert(self, index, item):
self._value.insert(index, item.value_in_unit(self.unit))
def remove(self, item):
self._value.remove(item)
def pop(self, *args):
return self._value.pop(*args) * self.unit
# list.reverse will automatically delegate correctly
# list.sort with no arguments will delegate correctly
# list.sort with a comparison function cannot be done correctly
def is_quantity(x):
"""
Returns True if x is a Quantity, False otherwise.
"""
return isinstance(x, Quantity)
def is_dimensionless(x):
"""
"""
if is_unit(x):
return x.is_dimensionless()
elif is_quantity(x):
return x.unit.is_dimensionless()
else:
# everything else in the universe is dimensionless
return True
# Strings can cause trouble
# as can any container that has infinite levels of containment
def _is_string(x):
# step 1) String is always a container
# and its contents are themselves containers.
if isinstance(x, str):
return True
try:
        first_item = next(iter(x))
        inner_item = next(iter(first_item))
if first_item is inner_item:
return True
else:
return False
except TypeError:
return False
except StopIteration:
return False
# run module directly for testing
if __name__=='__main__':
# Test the examples in the docstrings
import doctest, sys
doctest.testmod(sys.modules[__name__])
| lgpl-2.1 | 5,665,115,278,954,907,000 | 38.282486 | 126 | 0.580469 | false |