def login(access_code: str, client_id: str = CLIENT_ID, client_secret: str = CLIENT_SECRET,
          headers: dict = HEADERS, redirect_uri: str = REDIRECT_URI):
    if not CLIENT_ID or not CLIENT_SECRET:
        raise GoogleApiError({"error_message": _("Login with google account is disabled. Contact "
                                                 "with the sysadmins. Maybe they're snoozing in a "
                                                 "secret hideout of the data center.")})
    url = _build_url("login", "access-token")
    params = {"code": access_code,
              "client_id": client_id,
              "client_secret": client_secret,
              "grant_type": "authorization_code",
              "redirect_uri": redirect_uri}
    data = _post(url, params=params, headers=headers)
    return AuthInfo(access_token=data.get("access_token", None))
Get access_token from a user-authorized code, the client id and the client secret key. (See https://developer.google.com/v3/oauth/#web-application-flow).
def get_user_profile(headers: dict = HEADERS):
    url = _build_url("user", "profile")
    data = _get(url, headers=headers)
    return User(id=data.get("id", None),
                username=(data.get("name", None).get("givenName", None) +
                          data.get("name", None).get("familyName", None)),
                full_name=(data.get("displayName", None) or ""),
                email=(data.get("emails", None)[0].get("value", None) or ""),
                bio=(data.get("bio", None) or ""))
Get authenticated user info. (See https://developer.google.com/v3/users/#get-the-authenticated-user).
def get_user_emails(headers: dict = HEADERS) -> list:
    url = _build_url("user", "emails")
    data = _get(url, headers=headers)
    return [Email(email=e.get("email", None), is_primary=e.get("primary", False))
            for e in data]
Get a list with all emails of the authenticated user. (See https://developer.google.com/v3/users/emails/#list-email-addresses-for-a-user).
def me(access_code: str) -> tuple:
    auth_info = login(access_code)
    headers = HEADERS.copy()
    headers["Authorization"] = "Bearer {}".format(auth_info.access_token)
    user = get_user_profile(headers=headers)
    # emails = get_user_emails(headers=headers)
    # primary_email = next(filter(lambda x: x.is_primary, emails))
    # return primary_email.email, user
    return user
Connect to a google account and get all personal info (profile and the primary email).
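A minimal usage sketch, assuming CLIENT_ID/CLIENT_SECRET are configured and an access code was obtained from Google's OAuth redirect (the code below is a placeholder):

    user = me('4/0AX4XfW...placeholder')
    print(user.full_name, user.email)

Note that the email lookup is currently commented out in me(), so only the profile is returned.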
def controls(self, move):
    '''Returns a set of attacked/defended squares'''
    to_move = self.board.turn
    analysis_board = chess.Board(self.board.fen())
    analysis_board.push(move)
    squares = 0
    for square in chess.SQUARES:
        if move.to_square in analysis_board.attackers(to_move, square):
            squares |= chess.BB_SQUARES[square]
    return SquareSet(squares)
Returns a set of attacked/defended squares
def dims_knight(self, move):
    '''Knight on the rim is dim'''
    if self.board.piece_type_at(move.from_square) == chess.KNIGHT:
        rim = SquareSet(
            chess.BB_RANK_1 |
            chess.BB_RANK_8 |
            chess.BB_FILE_A |
            chess.BB_FILE_H)
        return move.to_square in rim
Knight on the rim is dim
def _create_mapping(grammar):
    # type: (Grammar) -> (Dict[int, Set[Type[Rule]]], Dict[int, Set[Type[Rule]]])
    termmap = dict()
    rulemap = dict()
    for r in grammar.rules:
        if len(r.right) == 1:
            # rule to terminal
            h = hash(r.toSymbol)
            if h not in termmap:
                termmap[h] = set()
            termmap[h].add(r)
        else:
            # rule to two nonterminals
            key = hash(tuple(r.right))
            if key not in rulemap:
                rulemap[key] = set()
            rulemap[key].add(r)
    return (termmap, rulemap)
Create mapping between symbols and rules rewritable to these symbols. :param grammar: Grammar to use. :return: Tuple of two dictionaries. The first maps the hash of a terminal to the set of rules rewriting to it; the second maps the hash of a pair of nonterminals to the set of rules rewriting to them.
def perform_request_vote(cmt_id, client_ip_address, value, uid=-1):
    cmt_id = wash_url_argument(cmt_id, 'int')
    client_ip_address = wash_url_argument(client_ip_address, 'str')
    value = wash_url_argument(value, 'int')
    uid = wash_url_argument(uid, 'int')
    if cmt_id > 0 and value in [-1, 1] and check_user_can_vote(cmt_id, client_ip_address, uid):
        action_date = convert_datestruct_to_datetext(time.localtime())
        action_code = CFG_WEBCOMMENT_ACTION_CODE['VOTE']
        # FIXME compatibility with postgresql
        query = """INSERT INTO "cmtACTIONHISTORY" ("id_cmtRECORDCOMMENT",
                   id_bibrec, id_user, client_host, action_time, action_code)
                   VALUES (%s, NULL, %s, inet_aton(%s), %s, %s)"""
        params = (cmt_id, uid, client_ip_address, action_date, action_code)
        run_sql(query, params)
        return query_record_useful_review(cmt_id, value)
    else:
        return 0
Vote positively or negatively for a comment/review :param cmt_id: review id :param value: +1 for voting positively -1 for voting negatively :return: integer 1 if successful, integer 0 if not
def check_user_can_comment(recID, client_ip_address, uid=-1):
    recID = wash_url_argument(recID, 'int')
    client_ip_address = wash_url_argument(client_ip_address, 'str')
    uid = wash_url_argument(uid, 'int')
    max_action_time = time.time() - \
        CFG_WEBCOMMENT_TIMELIMIT_PROCESSING_COMMENTS_IN_SECONDS
    max_action_time = convert_datestruct_to_datetext(
        time.localtime(max_action_time))
    action_code = CFG_WEBCOMMENT_ACTION_CODE['ADD_COMMENT']
    query = """SELECT id_bibrec FROM "cmtACTIONHISTORY"
               WHERE id_bibrec=%s AND action_code=%s AND action_time>%s"""
    params = (recID, action_code, max_action_time)
    if uid < 0:
        query += " AND client_host=inet_aton(%s)"
        params += (client_ip_address,)
    else:
        query += " AND id_user=%s"
        params += (uid,)
    res = run_sql(query, params)
    return len(res) == 0
Check if a user hasn't already commented within the last seconds time limit: CFG_WEBCOMMENT_TIMELIMIT_PROCESSING_COMMENTS_IN_SECONDS :param recID: record id :param client_ip_address: IP => use: str(req.remote_ip) :param uid: user id, as given by invenio.legacy.webuser.getUid(req)
def check_user_can_review(recID, client_ip_address, uid=-1):
    action_code = CFG_WEBCOMMENT_ACTION_CODE['ADD_REVIEW']
    query = """SELECT id_bibrec FROM "cmtACTIONHISTORY"
               WHERE id_bibrec=%s AND action_code=%s"""
    params = (recID, action_code)
    if uid < 0:
        query += " AND client_host=inet_aton(%s)"
        params += (client_ip_address,)
    else:
        query += " AND id_user=%s"
        params += (uid,)
    res = run_sql(query, params)
    return len(res) == 0
Check if a user hasn't already reviewed within the last seconds time limit: CFG_WEBCOMMENT_TIMELIMIT_PROCESSING_REVIEWS_IN_SECONDS :param recID: record ID :param client_ip_address: IP => use: str(req.remote_ip) :param uid: user id, as given by invenio.legacy.webuser.getUid(req)
def check_user_can_vote(cmt_id, client_ip_address, uid=-1):
    cmt_id = wash_url_argument(cmt_id, 'int')
    client_ip_address = wash_url_argument(client_ip_address, 'str')
    uid = wash_url_argument(uid, 'int')
    query = """SELECT "id_cmtRECORDCOMMENT" FROM "cmtACTIONHISTORY"
               WHERE "id_cmtRECORDCOMMENT"=%s"""
    params = (cmt_id,)
    if uid < 0:
        query += " AND client_host=inet_aton(%s)"
        params += (client_ip_address,)
    else:
        query += " AND id_user=%s"
        params += (uid,)
    res = run_sql(query, params)
    return (len(res) == 0)
Checks if a user hasn't already voted :param cmt_id: comment id :param client_ip_address: IP => use: str(req.remote_ip) :param uid: user id, as given by invenio.legacy.webuser.getUid(req)
def get_comment_collection(cmt_id):
    query = """SELECT id_bibrec FROM "cmtRECORDCOMMENT" WHERE id=%s"""
    recid = run_sql(query, (cmt_id,))
    record_primary_collection = guess_primary_collection_of_a_record(recid[0][0])
    return record_primary_collection
Extract the collection where the comment is written
def get_collection_moderators(collection):
    from invenio_access.engine import acc_get_authorized_emails
    res = list(acc_get_authorized_emails('moderatecomments', collection=collection))
    if not res:
        return [CFG_WEBCOMMENT_DEFAULT_MODERATOR, ]
    return res
Return the list of comment moderators for the given collection.
def query_get_user_contact_info(uid):
    # FIXME compatibility with postgresql
    query1 = """SELECT nickname, email, """ + \
             datetime_format('last_login') + \
             """ FROM "user" WHERE id=%s"""
    params1 = (uid,)
    res1 = run_sql(query1, params1)
    if res1:
        return res1[0]
    else:
        return ()
Get the user contact information :return: tuple (nickname, email, last_login), if none found return () Note: for the moment, if no nickname, will return email address up to the ':'
def query_get_user_reports_and_votes(uid):
    query1 = """SELECT nb_votes_yes, nb_votes_total, nb_abuse_reports
                FROM "cmtRECORDCOMMENT" WHERE id_user=%s"""
    params1 = (uid,)
    res1 = run_sql(query1, params1)
    if len(res1) == 0:
        return ()
    nb_votes_yes = nb_votes_total = nb_abuse_reports = 0
    for cmt_tuple in res1:
        nb_votes_yes += int(cmt_tuple[0])
        nb_votes_total += int(cmt_tuple[1])
        nb_abuse_reports += int(cmt_tuple[2])
    return (nb_abuse_reports, nb_votes_yes, nb_votes_total)
Retrieve total number of reports and votes of a particular user :param uid: user id :return: tuple (total_nb_reports, total_nb_votes_yes, total_nb_votes_total) if none found return ()
def query_get_comment(comID):
    query1 = """SELECT id, id_bibrec, id_user, body, """ + \
             datetime_format('date_creation') + ', ' \
             """ star_score, nb_votes_yes, nb_votes_total, title,
                 nb_abuse_reports, round_name, restriction
                 FROM "cmtRECORDCOMMENT" WHERE id=%s"""
    params1 = (comID,)
    res1 = run_sql(query1, params1)
    if len(res1) > 0:
        return res1[0]
    else:
        return ()
Get all fields of a comment :param comID: comment id :return: tuple (comID, id_bibrec, id_user, body, date_creation, star_score, nb_votes_yes, nb_votes_total, title, nb_abuse_reports, round_name, restriction) if none found return ()
def query_record_report_this(comID):
    # retrieve nb_abuse_reports
    query1 = """SELECT nb_abuse_reports FROM "cmtRECORDCOMMENT" WHERE id=%s"""
    params1 = (comID,)
    res1 = run_sql(query1, params1)
    if len(res1) == 0:
        return (-2, 0)
    # increment and update
    nb_abuse_reports = int(res1[0][0]) + 1
    query2 = """UPDATE "cmtRECORDCOMMENT" SET nb_abuse_reports=%s WHERE id=%s"""
    params2 = (nb_abuse_reports, comID)
    res2 = run_sql(query2, params2)
    return (int(res2), nb_abuse_reports)
Increment the number of reports for a comment :param comID: comment id :return: tuple (success, new_total_nb_reports_for_this_comment) where success is integer 1 if success, integer 0 if not, -2 if comment does not exist
def query_record_useful_review(comID, value):
    # retrieve nb_useful votes
    query1 = """SELECT nb_votes_total, nb_votes_yes FROM "cmtRECORDCOMMENT" WHERE id=%s"""
    params1 = (comID,)
    res1 = run_sql(query1, params1)
    if len(res1) == 0:
        return 0
    # modify and insert new nb_useful votes
    nb_votes_yes = int(res1[0][1])
    if value >= 1:
        nb_votes_yes = int(res1[0][1]) + 1
    nb_votes_total = int(res1[0][0]) + 1
    query2 = """UPDATE "cmtRECORDCOMMENT" SET nb_votes_total=%s, nb_votes_yes=%s WHERE id=%s"""
    params2 = (nb_votes_total, nb_votes_yes, comID)
    res2 = run_sql(query2, params2)
    return int(res2)
Private function. Adjust the number of useful votes and number of total votes for a comment. :param comID: comment id :param value: +1 or -1 :return: integer 1 if successful, integer 0 if not
def get_comment_ancestors(comID, depth=None):
    if depth == 0:
        return []
    res = run_sql(
        """SELECT "in_reply_to_id_cmtRECORDCOMMENT"
           FROM "cmtRECORDCOMMENT" WHERE id=%s""", (comID, ))
    if res:
        parent_comID = res[0][0]
        if parent_comID == 0:
            return []
        parent_ancestors = []
        if depth:
            depth -= 1
        parent_ancestors = get_comment_ancestors(parent_comID, depth)
        parent_ancestors.append(parent_comID)
        return parent_ancestors
    else:
        return []
Returns the list of ancestors of the given comment, ordered from oldest to newest ("top-down": direct parent of comID is at last position), up to given depth :param comID: the ID of the comment for which we want to retrieve ancestors :type comID: int :param depth: the maximum of levels up from the given comment we want to retrieve ancestors. None for no limit, 1 for direct parent only, etc. :type depth: int :return the list of ancestors :rtype: list
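To make the depth semantics concrete, consider a hypothetical thread where comment 2 replies to comment 1 and comment 3 replies to comment 2:

    get_comment_ancestors(3)           # -> [1, 2]  (oldest first)
    get_comment_ancestors(3, depth=1)  # -> [2]     (direct parent only)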
def get_reply_order_cache_data(comid):
    return "%s%s%s%s" % (chr((comid >> 24) % 256), chr((comid >> 16) % 256),
                         chr((comid >> 8) % 256), chr(comid % 256))
Prepare a representation of the comment ID given as parameter so that it is suitable for byte ordering in MySQL.
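The representation is simply the comment ID packed as four big-endian bytes; a quick standalone sanity check (not part of the module):

    # 0x00010203 == 66051 should map to the byte values 0, 1, 2, 3
    key = get_reply_order_cache_data(0x00010203)
    assert [ord(c) for c in key] == [0, 1, 2, 3]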
def move_attached_files_to_storage(attached_files, recID, comid):
    for filename, filepath in iteritems(attached_files):
        dest_dir = os.path.join(CFG_COMMENTSDIR, str(recID), str(comid))
        try:
            os.makedirs(dest_dir)
        except:
            # Dir most probably already existed
            pass
        shutil.move(filepath, os.path.join(dest_dir, filename))
Move the files that were just attached to a new comment to their final location. :param attached_files: the mappings of desired filename to attach and path where to find the original file :type attached_files: dict {filename, filepath} :param recID: the record ID to which we attach the files :param comid: the comment ID to which we attach the files
def get_attached_files(recid, comid):
    base_dir = os.path.join(CFG_COMMENTSDIR, str(recid), str(comid))
    if os.path.isdir(base_dir):
        filenames = os.listdir(base_dir)
        return [(filename,
                 os.path.join(CFG_COMMENTSDIR, str(recid), str(comid), filename),
                 CFG_SITE_URL + '/' + CFG_SITE_RECORD + '/' + str(recid) +
                 '/comments/attachments/get/' + str(comid) + '/' + filename)
                for filename in filenames]
    else:
        return []
Returns a list with tuples (filename, filepath, fileurl) :param recid: the recid to which the comment belongs :param comid: the comment id for which we want to retrieve files
def subscribe_user_to_discussion(recID, uid):
    query = """INSERT INTO "cmtSUBSCRIPTION" (id_bibrec, id_user, creation_time)
               VALUES (%s, %s, %s)"""
    params = (recID, uid, convert_datestruct_to_datetext(time.localtime()))
    try:
        run_sql(query, params)
    except:
        return 0
    return 1
Subscribe a user to a discussion, so that she receives by email all new comments for this record. :param recID: record ID corresponding to the discussion we want to subscribe the user :param uid: user id
def unsubscribe_user_from_discussion(recID, uid):
    query = """DELETE FROM "cmtSUBSCRIPTION"
               WHERE id_bibrec=%s AND id_user=%s"""
    params = (recID, uid)
    try:
        res = run_sql(query, params)
    except:
        return 0
    if res > 0:
        return 1
    return 0
Unsubscribe users from a discussion. :param recID: record ID corresponding to the discussion we want to unsubscribe the user :param uid: user id :return 1 if successful, 0 if not
def get_user_subscription_to_discussion(recID, uid):
    user_email = User.query.get(uid).email
    (emails1, emails2) = get_users_subscribed_to_discussion(
        recID, check_authorizations=False)
    if user_email in emails1:
        return 1
    elif user_email in emails2:
        return 2
    else:
        return 0
Returns the type of subscription for the given user to this discussion. This does not check authorizations (for eg. if user was subscribed, but is suddenly no longer authorized). :param recID: record ID :param uid: user id :return: - 0 if user is not subscribed to discussion - 1 if user is subscribed, and is allowed to unsubscribe - 2 if user is subscribed, but cannot unsubscribe
def get_record_status(recid):
    collections_with_rounds = CFG_WEBCOMMENT_ROUND_DATAFIELD.keys()
    commenting_round = ""
    for collection in collections_with_rounds:
        # Find the first collection that defines a rounds field for this record
        if recid in get_collection_reclist(collection):
            commenting_rounds = get_fieldvalues(
                recid, CFG_WEBCOMMENT_ROUND_DATAFIELD.get(collection, ""))
            if commenting_rounds:
                commenting_round = commenting_rounds[0]
            break
    collections_with_restrictions = CFG_WEBCOMMENT_RESTRICTION_DATAFIELD.keys()
    restriction = ""
    for collection in collections_with_restrictions:
        # Find the first collection that defines a restriction field for this record
        if recid in get_collection_reclist(collection):
            restrictions = get_fieldvalues(
                recid, CFG_WEBCOMMENT_RESTRICTION_DATAFIELD.get(collection, ""))
            if restrictions:
                restriction = restrictions[0]
            break
    return (restriction, commenting_round)
Returns the current status of the record, i.e. the current restriction to apply for newly submitted comments, and the current commenting round. The restriction to apply can be found in the record metadata, in field(s) defined by config CFG_WEBCOMMENT_RESTRICTION_DATAFIELD. The restriction is the empty string "" in cases where the restriction has not explicitly been set, even if the record itself is restricted. :param recid: the record id :type recid: int :return tuple(restriction, round_name), where 'restriction' is the empty string when no restriction applies :rtype (string, string)
def group_comments_by_round(comments, ranking=0):
    comment_rounds = {}
    ordered_comment_round_names = []
    for comment in comments:
        comment_round_name = ranking and comment[11] or comment[7]
        if comment_round_name not in comment_rounds:
            comment_rounds[comment_round_name] = []
            ordered_comment_round_names.append(comment_round_name)
        comment_rounds[comment_round_name].append(comment)
    return [(comment_round_name, comment_rounds[comment_round_name])
            for comment_round_name in ordered_comment_round_names]
Group comments by the round to which they belong
def calculate_avg_score(res):
    c_star_score = 6
    avg_score = 0.0
    nb_reviews = 0
    for comment in res:
        if comment[c_star_score] > 0:
            avg_score += comment[c_star_score]
            nb_reviews += 1
    if nb_reviews == 0:
        return 0.0
    avg_score = avg_score / nb_reviews
    avg_score_unit = avg_score - math.floor(avg_score)
    if avg_score_unit < 0.25:
        avg_score = math.floor(avg_score)
    elif avg_score_unit > 0.75:
        avg_score = math.floor(avg_score) + 1
    else:
        avg_score = math.floor(avg_score) + 0.5
    if avg_score > 5:
        avg_score = 5.0
    return avg_score
Private function. Calculate the average score of the reviews present in res. :param res: tuple of tuples returned from query_retrieve_comments_or_remarks :return: a float of the average score rounded to the closest 0.5
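The rounding rule maps a fractional part below 0.25 down, above 0.75 up, and anything in between to .5. For example, with hypothetical review tuples where index 6 (c_star_score) holds the star score:

    reviews = ((0,) * 6 + (4,), (0,) * 6 + (3,))                    # mean 3.5
    assert calculate_avg_score(reviews) == 3.5
    reviews = ((0,) * 6 + (3,), (0,) * 6 + (3,), (0,) * 6 + (4,))   # mean ~3.33
    assert calculate_avg_score(reviews) == 3.5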
def check_int_arg_is_in_range(value, name, gte_value, lte_value=None):
    if not isinstance(value, int):
        try:
            raise InvenioWebCommentError('%s is not a number.' % value)
        except InvenioWebCommentError as exc:
            register_exception()
            body = webcomment_templates.tmpl_error(exc.message)
            return body
        # errors.append(('ERR_WEBCOMMENT_ARGUMENT_NAN', value))
        return 0
    if value < gte_value:
        try:
            raise InvenioWebCommentError('%s invalid argument.' % value)
        except InvenioWebCommentError as exc:
            register_exception()
            body = webcomment_templates.tmpl_error(exc.message)
            return body
        # errors.append(('ERR_WEBCOMMENT_ARGUMENT_INVALID', value))
        return 0
    if lte_value:
        if value > lte_value:
            try:
                raise InvenioWebCommentError('%s invalid argument.' % value)
            except InvenioWebCommentError as exc:
                register_exception()
                body = webcomment_templates.tmpl_error(exc.message)
                return body
            # errors.append(('ERR_WEBCOMMENT_ARGUMENT_INVALID', value))
            return 0
    return 1
Check that the variable with name 'name' is >= gte_value and optionally <= lte_value. :param value: variable value :param name: variable name :param gte_value: greater than or equal to value :param lte_value: less than or equal to value :return: boolean (1=true, 0=false)
def get_mini_reviews(recid, ln=CFG_SITE_LANG):
    if CFG_WEBCOMMENT_ALLOW_SHORT_REVIEWS:
        action = 'SUBMIT'
    else:
        action = 'DISPLAY'
    reviews = query_retrieve_comments_or_remarks(recid, ranking=1)
    return webcomment_templates.tmpl_mini_review(
        recid, ln, action=action,
        avg_score=calculate_avg_score(reviews),
        nb_comments_total=len(reviews))
Returns the web controls to add reviews to a record from the detailed record pages mini-panel. :param recid: the id of the displayed record :param ln: the user's language
def check_user_can_view_comments(user_info, recid):
    # Check user can view the record itself first
    (auth_code, auth_msg) = check_user_can_view_record(user_info, recid)
    if auth_code:
        return (auth_code, auth_msg)
    # Check if user can view the comments
    # But first can we find an authorization for this case action,
    # for this collection?
    record_primary_collection = guess_primary_collection_of_a_record(recid)
    return acc_authorize_action(
        user_info, 'viewcomment',
        authorized_if_no_roles=True,
        collection=record_primary_collection)
Check if the user is authorized to view comments for given recid. Returns the same type as acc_authorize_action
def check_user_can_view_comment(user_info, comid, restriction=None):
    if restriction is None:
        comment = query_get_comment(comid)
        if comment:
            restriction = comment[11]
        else:
            return (1, 'Comment %i does not exist' % comid)
    if restriction == "":
        return (0, '')
    return acc_authorize_action(
        user_info, 'viewrestrcomment', status=restriction)
Check if the user is authorized to view a particular comment, given the comment restriction. Note that this function does not check if the record itself is restricted to the user, which would mean that the user should not see the comment. You can omit 'comid' if you already know the 'restriction' :param user_info: the user info object :param comid: the id of the comment we want to check :param restriction: the restriction applied to given comment (if known. Otherwise retrieved automatically) :return: the same type as acc_authorize_action
def check_user_can_send_comments(user_info, recid):
    # First can we find an authorization for this case, action + collection
    record_primary_collection = guess_primary_collection_of_a_record(recid)
    return acc_authorize_action(
        user_info, 'sendcomment',
        authorized_if_no_roles=True,
        collection=record_primary_collection)
Check if the user is authorized to comment on the given recid. This function does not check that the user can view the record or view the comments. Returns the same type as acc_authorize_action
def check_comment_belongs_to_record(comid, recid):
    query = """SELECT id_bibrec from "cmtRECORDCOMMENT" WHERE id=%s"""
    params = (comid,)
    res = run_sql(query, params)
    if res and res[0][0] == recid:
        return True
    return False
Return True if the comment is indeed part of the given record (even if the comment and/or record have been "deleted"). Else return False. :param comid: the id of the comment to check membership :param recid: the recid of the record we want to check if the comment belongs to
def check_user_can_attach_file_to_comments(user_info, recid):
    # First can we find an authorization for this case action, for
    # this collection?
    record_primary_collection = guess_primary_collection_of_a_record(recid)
    return acc_authorize_action(
        user_info, 'attachcommentfile',
        authorized_if_no_roles=False,
        collection=record_primary_collection)
Check if the user is authorized to attach a file to comments for given recid. This function does not check that user can view the comments or send comments. Returns the same type as acc_authorize_action
def toggle_comment_visibility(uid, comid, collapse, recid):
    # We rely on the client to tell if comment should be collapsed or
    # developed, to ensure consistency between our internal state and
    # client state. Even if not strictly necessary, we store the
    # record ID for quicker retrieval of the collapsed comments of a
    # given discussion page. To prevent unnecessary population of the
    # table, only one distinct tuple (record ID, comment ID, user ID)
    # can be inserted (due to table definition). For the same purpose
    # we also check that comment to collapse exists, and corresponds
    # to an existing record: we cannot rely on the recid found as part
    # of the URL, as no former check is done. This rule is not applied
    # when deleting an entry, as in the worst case no line would be
    # removed. For optimized retrieval of row to delete, the id_bibrec
    # column is used, though not strictly necessary.
    if collapse:
        query = """SELECT id_bibrec from "cmtRECORDCOMMENT" WHERE id=%s"""
        params = (comid,)
        res = run_sql(query, params)
        if res:
            query = """INSERT INTO "cmtCOLLAPSED" (id_bibrec, "id_cmtRECORDCOMMENT", id_user)
                       VALUES (%s, %s, %s)"""
            params = (res[0][0], comid, uid)
            run_sql(query, params)
            return True
    else:
        query = """DELETE FROM "cmtCOLLAPSED"
                   WHERE "id_cmtRECORDCOMMENT"=%s and id_user=%s and id_bibrec=%s"""
        params = (comid, uid, recid)
        run_sql(query, params)
        return False
Toggle the visibility of the given comment (collapse) for the given user. Return the new visibility :param uid: the user id for which the change applies :param comid: the comment id to close/open :param collapse: if the comment is to be closed (1) or opened (0) :param recid: the record id to which the comment belongs :return: if the comment is visible or not after the update
def get_user_collapsed_comments_for_record(uid, recid):
    # Collapsed state is not an attribute of the cmtRECORDCOMMENT table
    # (it varies per user) so it cannot be found when querying for the
    # comment. We must therefore provide an efficient way to retrieve
    # the collapsed state for a given discussion page and user.
    query = """SELECT "id_cmtRECORDCOMMENT" from "cmtCOLLAPSED"
               WHERE id_user=%s and id_bibrec=%s"""
    params = (uid, recid)
    return [res[0] for res in run_sql(query, params)]
Get the comments collapsed for given user on given recid page
def is_comment_deleted(comid):
    query = """SELECT status from "cmtRECORDCOMMENT" WHERE id=%s"""
    params = (comid,)
    res = run_sql(query, params)
    if res and res[0][0] != 'ok':
        return True
    return False
Return True if the comment is deleted, else False. :param comid: ID of comment to check
def _fix_time(self, dt):
    if dt.tzinfo is not None:
        dt = dt.replace(tzinfo=None)
    return dt
Stackdistiller converts all times to UTC. We store timestamps as UTC datetimes. However, the explicit UTC timezone on incoming datetimes causes comparison issues deep in sqlalchemy. We fix this by converting all datetimes to naive UTC timestamps.
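A minimal illustration with the standard library, assuming the incoming datetime is already UTC (as the note above states):

    import datetime
    aware = datetime.datetime(2020, 1, 1, 12, 0, tzinfo=datetime.timezone.utc)
    naive = aware.replace(tzinfo=None)   # what _fix_time does
    assert naive.tzinfo is None and naive.hour == 12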
def strip_leading_comments(text):
    # Look for the longest leading string of spaces and tabs common to
    # all lines.
    margin = None
    text = _whitespace_only_re.sub('', text)
    indents = _leading_whitespace_re.findall(text)
    for indent in indents:
        if margin is None:
            margin = indent
        # Current line more deeply indented than previous winner:
        # no change (previous winner is still on top).
        elif indent.startswith(margin):
            pass
        # Current line consistent with and no deeper than previous winner:
        # it's the new winner.
        elif margin.startswith(indent):
            margin = indent
        # Current line and previous winner have no common whitespace:
        # there is no margin.
        else:
            margin = ""
            break
    # sanity check (testing/debugging only)
    if 0 and margin:
        for line in text.split("\n"):
            assert not line or line.startswith(margin), \
                "line = %r, margin = %r" % (line, margin)
    if margin:
        text = re.sub(r'(?m)^' + margin, '', text)
    return text
Strips the leading whitespace and % characters from the given text. Adapted from textwrap.dedent
def find(name, app=None, components=None, raw=False):
    if components is None:
        if app is None:
            from flask import current_app as app
        components = app.config.get('COMPONENTS', [])
    items = []
    for key in components:
        # Attempt to import the component and access the specified name
        # as an attribute.
        module = import_module(key)
        item = getattr(module, name, None)
        if item is None:
            # Attempt to import a module or package in the component
            # with the specified name.
            try:
                item = import_module('.'.join((key, name)))
            except ImportError:
                # Assume this component has nothing under the specified name.
                continue
        if not raw:
            if isinstance(item, types.ModuleType):
                all_ = getattr(item, '__all__', None)
                if all_:
                    item = {n: getattr(item, n) for n in all_}
                else:
                    item = vars(item)
        items.append(item)
    return items
Discover any named attributes, modules, or packages and coalesce the results. Looks in any module or package declared in the 'COMPONENTS' key in the application config. The order of found results is preserved from the order in which the components were declared. @param[in] components An array of components; overrides any setting in the application config. @param[in] raw If True then no processing is done on the found items.
def unsafe_execute(self, result=None):
    if result:
        self.result += result
    with opentracing.tracer.start_span(
            obj=self, child_of=KserSpan.extract_span(self),
            span_factory=KserSpan) as span:
        self.result = self._onsuccess(self._postrun(self._run()))
        span.obj = self
    return self.result
Un-wrapped execution, can raise exceptions. :return: Execution result :rtype: kser.result.Result
def auto_clear_shopping_cart(self, auto_clear_shopping_cart):
    allowed_values = ["never", "orderCreated", "orderCompleted"]  # noqa: E501
    if auto_clear_shopping_cart is not None and auto_clear_shopping_cart not in allowed_values:
        raise ValueError(
            "Invalid value for `auto_clear_shopping_cart` ({0}), must be one of {1}"  # noqa: E501
            .format(auto_clear_shopping_cart, allowed_values)
        )
    self._auto_clear_shopping_cart = auto_clear_shopping_cart
Sets the auto_clear_shopping_cart of this CartSettings. :param auto_clear_shopping_cart: The auto_clear_shopping_cart of this CartSettings. :type: str
def getView(self, lv):
    if str(lv.GetName())[-1] == 'X':
        return 'X'
    elif str(lv.GetName())[-1] == 'Y':
        return 'Y'
    self.log.error('Cannot determine view for %s', lv.GetName())
    raise ValueError('Cannot determine view for %s' % lv.GetName())
Determine the detector view starting with a G4LogicalVolume
def line_cross(x1, y1, x2, y2, x3, y3, x4, y4):
    # out of the rect
    if min(x1, x2) > max(x3, x4) or max(x1, x2) < min(x3, x4) or \
            min(y1, y2) > max(y3, y4) or max(y1, y2) < min(y3, y4):
        return False
    # same slope rate
    if (y1 - y2) * (x3 - x4) == (x1 - x2) * (y3 - y4):
        return False
    if cross_product(x3, y3, x2, y2, x4, y4) * cross_product(x3, y3, x4, y4, x1, y1) < 0 or \
            cross_product(x1, y1, x4, y4, x2, y2) * cross_product(x1, y1, x2, y2, x3, y3) < 0:
        return False
    # get collide point
    b1 = (y2 - y1) * x1 + (x1 - x2) * y1
    b2 = (y4 - y3) * x3 + (x3 - x4) * y3
    D = (x2 - x1) * (y4 - y3) - (x4 - x3) * (y2 - y1)
    D1 = b2 * (x2 - x1) - b1 * (x4 - x3)
    D2 = b2 * (y2 - y1) - b1 * (y4 - y3)
    return P(D1 / D, D2 / D)
Determine whether two line segments cross. Returns the intersection point if they do, False otherwise.
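For example, the two diagonals of a 2x2 square cross at its center, while parallel segments return False (P is the point type used by this module):

    line_cross(0, 0, 2, 2, 0, 2, 2, 0)   # -> P(1.0, 1.0)
    line_cross(0, 0, 1, 0, 0, 1, 1, 1)   # parallel -> False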
def cross_product(x1, y1, x2, y2, x3, y3):
    return (x2 - x1) * (y3 - y1) - (x3 - x1) * (y2 - y1)
Cross product. vector 1: x1, y1, x2, y2 vector 2: x1, y1, x3, y3
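A quick sanity check of the sign convention (positive when the second vector lies counter-clockwise from the first):

    assert cross_product(0, 0, 1, 0, 0, 1) == 1    # y-axis is CCW from x-axis
    assert cross_product(0, 0, 0, 1, 1, 0) == -1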
def update_collision_rect(self):
    self.min_x = min(self.points[::2])
    self.max_x = max(self.points[::2])
    self.min_y = min(self.points[1::2])
    self.max_y = max(self.points[1::2])
Compute the bounding rectangle.
def collide(self, s2):
    s1 = self
    s1.update_points()
    s2.update_points()
    if not (s1.points and s2.points):
        return False
    t1 = s1.transform
    t2 = s2.transform
    t1.update_points(s1.points)
    t2.update_points(s2.points)
    # update the bounding rectangles
    t1.update_collision_rect()
    t2.update_collision_rect()
    # simple collide rect
    if not (t1.min_x < t2.max_x and t1.max_x > t2.min_x
            and t1.min_y < t2.max_y and t1.max_y > t2.min_y):
        return False
    return points_in_points(t1, t2) or \
        points_in_points(t2, t1) or \
        lines_cross(t1, t2)
Determine whether this shape collides with another shape.
def _FromSpecs(self, specs):
    if isinstance(specs, dict):
        specs_ = []
        for name, value in specs.items():
            specs_.append((name, {"value": value}))
    else:
        specs_ = specs
    for spec in specs_:
        self.params.append(Parameter(spec))
Populates _params using specification Arguments: specs -- either: (a) list as [(name, {...}), ...] (see Parameter.FromSpec() for further information) (b) dictionary as {"name": value, ...}
def AddToLayout(self, layout):
    for param in self.params:
        widget = param.RenderWidget()
        layout.addRow(param.caption, widget)
Arguments: layout -- a QFormLayout instance
def FromSpec(self, spec):
    if isinstance(spec, Parameter):
        self.name = spec.name
        self.caption = spec.caption if spec.caption is not None else spec.name
        self.toolTip = spec.toolTip if spec.toolTip is not None else ""
        self.type = spec.type if spec.type is not None else \
            type(spec.value) if spec.value is not None else int
        self.value = spec.value
    else:
        self.name, d = spec
        self.caption = d.get("caption", self.name)
        self.toolTip = d.get("toolTip", "")
        t = self.type = d.get("type", type(d["value"]) if "value" in d else int)
        if not t in (int, float, bool, str, list):
            raise TypeError("Invalid type: '{0!s}'".format(t.__name__))
        self.value = d.get("value")
        if self.value is None:
            self.value = 0 if self.type == int else \
                0. if self.type == float else \
                False if self.type == bool else ""
Args: spec: (name, {...}), or Parameter object Dict keys: "caption" -- (optional) text for label in editor. Defaults to the keyword argument name "toolTip" (optional) "type" -- (optional) defaults to the type of "value", or int if "value" is not specified. Accepts: - int - float - str - bool - list "value" -- (optional) defaults to 0 if numeric, False if bool, "" if str
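Two illustrative specs (names are hypothetical), one per accepted form, assuming Parameter's constructor forwards to FromSpec() as _FromSpecs() above suggests:

    p1 = Parameter(("threshold", {"caption": "Threshold", "type": float, "value": 0.5}))
    p2 = Parameter(("enabled", {"value": True}))   # type inferred as bool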
def RenderWidget(self):
    t = self.type
    if t == int:
        ret = QSpinBox()
        ret.setMaximum(999999999)
        ret.setValue(self.value)
    elif t == float:
        ret = QLineEdit()
        ret.setText(str(self.value))
    elif t == bool:
        ret = QCheckBox()
        ret.setChecked(self.value)
    else:
        # str, list left
        ret = QLineEdit()
        ret.setText(str(self.value))
    if self.toolTip is not None:
        ret.setToolTip(self.toolTip)
    self.widget = ret
    return ret
Returns a QWidget subclass instance. Exact class depends on self.type
def ScreenGenerator(nfft, r0, nx, ny):
    while 1:
        layers = GenerateTwoScreens(nfft, r0)
        for iLayer in range(2):
            for iy in range(int(nfft / ny)):
                for ix in range(int(nfft / nx)):
                    yield layers[iLayer][iy * ny:iy * ny + ny,
                                         ix * nx:ix * nx + nx]
Generate an infinite series of rectangular phase screens Uses an FFT screen generator to make a large screen and then returns non-overlapping subsections of it
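A usage sketch; GenerateTwoScreens is assumed to come from the same module, and the parameter values are arbitrary:

    gen = ScreenGenerator(nfft=256, r0=0.1, nx=32, ny=32)
    screen = next(gen)    # one 32x32 phase-screen tile
    another = next(gen)   # the next non-overlapping tile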
def parse_dates(d, default='today'):
    if default == 'today':
        default = datetime.datetime.today()
    if d is None:
        return default
    elif isinstance(d, _parsed_date_types):
        return d
    elif is_number(d):
        # Treat as milliseconds since 1970
        d = d if isinstance(d, float) else float(d)
        return datetime.datetime.utcfromtimestamp(d)
    elif not isinstance(d, STRING_TYPES):
        if hasattr(d, '__iter__'):
            return [parse_dates(s, default) for s in d]
        else:
            return default
    elif len(d) == 0:
        # Behaves like dateutil.parser < version 2.5
        return default
    else:
        try:
            return parser.parse(d)
        except (AttributeError, ValueError):
            return default
Parses one or more dates from d
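Some illustrative calls, assuming dateutil's parser as imported by the module:

    parse_dates('14 March 2015')               # -> datetime(2015, 3, 14, 0, 0)
    parse_dates(None)                          # -> the default (today)
    parse_dates(['2015-01-01', 'not a date'])  # element-wise; bad items -> default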
def load_with_classes(filename, classes):
    ok = False
    for class_ in classes:
        obj = class_()
        try:
            obj.load(filename)
            ok = True
        # # cannot let IOError through because pyfits raises IOError!!
        # except IOError:
        #     raise
        # # also cannot let OSError through because astropy.io.fits raises OSError!!
        # except OSError:
        #     raise
        except FileNotFoundError:
            raise
        except Exception as e:  # (ValueError, NotImplementedError):
            # Note: for debugging, switch the below to True
            if a99.logging_level == logging.DEBUG:
                a99.get_python_logger().exception(
                    "Error trying with class \"{0!s}\"".format(class_.__name__))
            pass
        if ok:
            break
    if ok:
        return obj
    return None
Attempts to load file by trial-and-error using a given list of classes. Arguments: filename -- full path to file classes -- list of classes having a load() method Returns: DataFile object if loaded successfully, or None if not. Note: it will stop at the first successful load. Attention: this is not good if there is a bug in any of the file readers, because *all exceptions will be silenced!*
def load_any_file(filename):
    import f311
    # Splits attempts using ((binary X text) file) criterion
    if a99.is_text_file(filename):
        return load_with_classes(filename, f311.classes_txt())
    else:
        return load_with_classes(filename, f311.classes_bin())
Attempts to load filename by trial-and-error. Returns: file: A DataFile descendant, whose specific class depends on the file format detected, or None if the file cannot be loaded
def load_spectrum(filename):
    import f311
    f = load_with_classes(filename, f311.classes_sp())
    if f:
        return f.spectrum
    return None
Attempts to load spectrum as one of the supported types. Returns: a Spectrum, or None
def load_spectrum_fits_messed_x(filename, sp_ref=None):
    import f311.filetypes as ft
    # First tries to load as usual
    f = load_with_classes(filename, (ft.FileSpectrumFits,))
    if f is not None:
        ret = f.spectrum
    else:
        hdul = fits.open(filename)
        hdu = hdul[0]
        if not hdu.header.get("CDELT1"):
            hdu.header["CDELT1"] = 1 if sp_ref is None else sp_ref.delta_lambda
        if not hdu.header.get("CRVAL1"):
            hdu.header["CRVAL1"] = 0 if sp_ref is None else sp_ref.x[0]
        ret = ft.Spectrum()
        ret.from_hdu(hdu)
        ret.filename = filename
        original_shape = ret.y.shape  # Shape of data before squeeze
        # Squeezes to make data of shape e.g. (1, 1, 122) into (122,)
        ret.y = ret.y.squeeze()
        if len(ret.y.shape) > 1:
            raise RuntimeError(
                "Data contains more than 1 dimension (shape is {0!s}), "
                "FITS file is not single spectrum".format(original_shape))
    return ret
Loads FITS file spectrum that does not have the proper headers. Returns a Spectrum
def get_filetypes_info(editor_quote="`", flag_leaf=True):
    NONE_REPL = ""
    import f311
    data = []  # [FileTypeInfo, ...]
    for attr in f311.classes_file(flag_leaf):
        description = a99.get_obj_doc0(attr)
        def_ = NONE_REPL if attr.default_filename is None else attr.default_filename
        ee = attr.editors
        if ee is None:
            ee = NONE_REPL
        else:
            # Example: "``mained.py``, ``x.py``"
            ee = ", ".join(["{0}{1}{0}".format(editor_quote, x, editor_quote) for x in ee])
        data.append({"description": description,
                     "default_filename": def_,
                     "classname": attr.__name__,
                     "editors": ee,
                     "class": attr,
                     "txtbin": "text" if attr.flag_txt else "binary"})
    data.sort(key=lambda x: x["description"])
    return data
Reports available data types Args: editor_quote: character to enclose the name of the editor script between. flag_leaf: see tabulate_filetypes_rest() Returns: list: list of FileTypeInfo
def filetypes_info_to_rows_header(infos, attrnames=None, header=None,
                                  flag_wrap_description=False, description_width=40):
    if attrnames is None:
        attrnames = FILE_TYPE_INFO_ATTRS.keys()
    if header is None:
        header = [FILE_TYPE_INFO_ATTRS[key] for key in attrnames]
    if flag_wrap_description:
        wr = textwrap.TextWrapper(width=description_width, subsequent_indent="|br| ")
    data = []
    for i, info in enumerate(infos):
        row = []
        for j, attrname in enumerate(attrnames):
            if attrname != "description" or not flag_wrap_description:
                row.append(info[attrname])
            else:
                row.append(wr.wrap(info[attrname]))
        data.append(row)
    return data, header
Converts filetype information to a (multiline_rows, header) tuple that can be more easily be tabulated **Attention** uses ReST syntax, using a "|br|" marker for line break. It requires the .rst source file to contain the following bit: .. |br| raw:: html <br /> Args: infos: list of FileTypeInfo attrnames: list of attribute names (keys of FILE_TYPE_INFO_ATTRS). Defaults to all attributes header: list of strings containing headers. If not passed, uses default names flag_wrap_description: whether to wrap the description text description_width: width to wrap the description text (effective only if flag_wrap_description is True) Returns: tuple: (rows, header): rows is a list of lists
def tabulate_filetypes_rest(attrnames=None, header=None,
                            flag_wrap_description=True, description_width=40,
                            flag_leaf=True):
    infos = get_filetypes_info(editor_quote="``", flag_leaf=flag_leaf)
    rows, header = filetypes_info_to_rows_header(
        infos, attrnames, header, flag_wrap_description, description_width)
    ret = a99.rest_table(rows, header)
    return ret
Generates a reST multirow table Args: attrnames: list of attribute names (keys of FILE_TYPE_INFO_ATTRS). Defaults to all attributes header: list of strings containing headers. If not passed, uses default names flag_wrap_description: whether to wrap the description text description_width: width to wrap the description text (effective only if flag_wrap_description is True) flag_leaf: returns only classes that do not have subclasses ("leaf" nodes as in a class tree graph)
def list_all_payment_sources(cls, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._list_all_payment_sources_with_http_info(**kwargs)
    else:
        (data) = cls._list_all_payment_sources_with_http_info(**kwargs)
        return data
List PaymentSources Return a list of PaymentSources This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_payment_sources(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[PaymentSource] If the method is called asynchronously, returns the request thread.
def return_action(self, return_action):
    allowed_values = ["refund", "storeCredit"]  # noqa: E501
    if return_action is not None and return_action not in allowed_values:
        raise ValueError(
            "Invalid value for `return_action` ({0}), must be one of {1}"  # noqa: E501
            .format(return_action, allowed_values)
        )
    self._return_action = return_action
Sets the return_action of this ReturnSettings. :param return_action: The return_action of this ReturnSettings. :type: str
def _create_rule(path, rule):
    # type: (List[Type[Rule]], Type[Rule]) -> Type[ReducedUnitRule]
    created = type('Reduced[' + rule.__name__ + ']',
                   (ReducedUnitRule,),
                   ReducedUnitRule.__dict__.copy())  # type: Type[ReducedUnitRule]
    created.rule = ([path[0].fromSymbol], rule.right)
    created.end_rule = rule
    created.by_rules = path
    return created
Create a ReducedUnitRule based on a sequence of unit rules and the final generating rule. :param path: Sequence of unit rules. :param rule: Rule that is attached after the sequence of unit rules. :return: ReducedUnitRule class.
def remove_unit_rules(grammar, inplace=False):
    # type: (Grammar, bool) -> Grammar
    # copy if needed
    if inplace is False:
        grammar = copy(grammar)
    # get connections
    res = find_nonterminals_reachable_by_unit_rules(grammar)
    # iterate through rules
    for rule in grammar.rules.copy():
        # delete unit rules
        if _is_unit(rule):
            grammar.rules.remove(rule)
            continue
        for nonterm in grammar.nonterminals:
            # find all nonterminals that can rewrite to current rule
            path = res.path_rules(nonterm, rule.fromSymbol)
            # get rid of cyclic paths
            if len(path) > 0 and path[0].fromSymbol != path[-1].toSymbol:
                created = _create_rule(path, rule)
                grammar.rules.add(created)
    return grammar
Remove unit rules from the grammar. :param grammar: Grammar where remove the rules. :param inplace: True if transformation should be performed in place. False by default. :return: Grammar without unit rules.
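To make the transformation concrete, a hypothetical sketch (grammpy-style; the grammar construction API is assumed, not taken from this module):

    # before: S -> A (unit rule), A -> 'a'
    # after : the unit rule S -> A is gone; a ReducedUnitRule S -> 'a'
    #         (recording the path S -> A in by_rules) is added
    new_grammar = remove_unit_rules(grammar)      # works on a copy by default
    remove_unit_rules(grammar, inplace=True)      # or transform in place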
def show_config(config):
    print("\nCurrent Configuration:\n")
    for k, v in sorted(config.config.items()):
        print("{0:15}: {1}".format(k, v))
Show the current configuration.
def create_cloud_user(cfg, args):
    url = cfg['api_server'] + "admin/add-user"
    params = {'user_email': args.user_email,
              'user_name': args.user_name,
              'user_role': args.user_role,
              'email': cfg['email'],
              'api_key': cfg['api_key']}
    headers = {'Content-Type': 'application/json'}
    response = requests.post(url, data=json.dumps(params), headers=headers)
    if response.status_code not in range(200, 299):
        raise Exception("Errors contacting the cloud node: %s" % (response.content))
    loaded = json.loads(response.content)
    return loaded
Attempt to create the user on the cloud node.
def readquery(sqlQuery, dbConn, log, quiet=False):
    log.debug('starting the ``readquery`` function')
    import pymysql
    import warnings
    warnings.filterwarnings('error', category=pymysql.Warning)
    rows = []
    try:
        cursor = dbConn.cursor(pymysql.cursors.DictCursor)
    except Exception as e:
        log.error('could not create the database cursor: %s' % (e,))
        raise IOError('could not create the database cursor: %s' % (e,))
    # EXECUTE THE SQL COMMAND
    try:
        cursor.execute(sqlQuery)
        rows = cursor.fetchall()
    except Exception as e:
        sqlQuery = sqlQuery[:1000]
        if quiet == False:
            log.warning(
                'MySQL raised an error - read command not executed.\n' + str(e) +
                '\nHere is the sqlQuery\n\t%(sqlQuery)s' % locals())
        raise e
    # CLOSE THE CURSOR
    try:
        cursor.close()
    except Exception as e:
        log.warning('could not close the db cursor ' + str(e) + '\n')
    log.debug('completed the ``readquery`` function')
    return rows
Given a mysql query, read the data from the database and return the results as a list of dictionaries (database rows)

**Key Arguments:**

- ``log`` -- the logger.
- ``sqlQuery`` -- the MySQL command to execute
- ``dbConn`` -- the db connection
- ``quiet`` -- ignore mysql warnings and errors and move on. Be careful when setting this to true - damaging errors can easily be missed. Default *False*.

**Return:**

- ``rows`` -- the rows returned by the sql query

**Usage:**

.. code-block:: python

    from fundamentals.mysql import readquery
    rows = readquery(
        log=log,
        sqlQuery=sqlQuery,
        dbConn=dbConn,
        quiet=False
    )
def update_caption(self, mouse):
    caption = "{} x: {}, y: {}".format(self._title, mouse.x, mouse.y)
    super().set_caption(caption)
Add a mouse-coordinate display to the window caption.
def make_store(name, min_length=4, **kwargs):
    if name not in stores:
        raise ValueError('valid stores are {0}'.format(', '.join(stores)))
    if name == 'memcache':
        store = MemcacheStore
    elif name == 'memory':
        store = MemoryStore
    elif name == 'redis':
        store = RedisStore
    return store(min_length=min_length, **kwargs)
Creates a store with a reasonable keygen. .. deprecated:: 2.0.0 Instantiate stores directly e.g. ``shorten.MemoryStore(min_length=4)``
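Both construction styles side by side, per the deprecation note above (the module-level stores list is assumed to contain 'memory'):

    store = make_store('memory', min_length=4)
    # preferred since 2.0.0:
    # store = shorten.MemoryStore(min_length=4)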
def do_work_on(self, repo):
    '''
    Make repo the active one.
    Commands working on a repo will use it as default for repo parameter.
    '''
    self.abort_on_nonexisting_repo(repo, 'work_on')
    self.network.active_repo = repo
Make repo the active one. Commands working on a repo will use it as default for repo parameter.
def do_http_repo(self, repo):
    '''
    [Re]define REPO as http package repository.
    http_repo REPO
    '''
    self.abort_on_missing_effective_repo_name(repo, 'http_repo')
    repo_name = self.get_effective_repo_name(repo)
    try:
        self.network.set(repo_name, REPO.TYPE, REPOTYPE.HTTP)
    except UnknownRepoError:
        self.network.define_http_repo(repo_name)
    self.network.active_repo = repo_name
[Re]define REPO as http package repository. http_repo REPO
def do_directory_repo(self, repo):
    '''
    [Re]define REPO as directory package repository.
    directory_repo REPO
    '''
    self.abort_on_missing_effective_repo_name(repo, 'directory_repo')
    repo_name = self.get_effective_repo_name(repo)
    try:
        self.network.set(repo_name, REPO.TYPE, REPOTYPE.DIRECTORY)
    except UnknownRepoError:
        self.network.define_directory_repo(repo_name)
    self.network.active_repo = repo_name
[Re]define REPO as directory package repository. directory_repo REPO
def do_forget(self, repo):
    '''
    Drop definition of a repo.
    forget REPO
    '''
    self.abort_on_nonexisting_repo(repo, 'forget')
    self.network.forget(repo)
Drop definition of a repo. forget REPO
def do_set(self, line):
    '''
    Set repository attributes on the active repo.
    set attribute=value

    # intended use:

    # directory repos:
    work_on developer-repo
    set type=directory
    set directory=package-directory

    # http repos:
    work_on company-private-repo
    set type=http
    set download-url=http://...
    set upload-url=http://...
    set username=user
    set password=pass
    '''
    self.abort_on_invalid_active_repo('set')
    repo = self.network.active_repo
    attribute, eq, value = line.partition('=')
    if not attribute:
        raise ShellError('command "set" requires a non-empty attribute')
    if not eq:
        raise ShellError('command "set" requires a value')
    self.network.set(repo, attribute, value)
Set repository attributes on the active repo. set attribute=value # intended use: # directory repos: work_on developer-repo set type=directory set directory=package-directory # http repos: work_on company-private-repo set type=http set download-url=http://... set upload-url=http://... set username=user set password=pass
def do_unset(self, attribute):
    '''
    Unset attribute on the active/default repo
    '''
    self.abort_on_invalid_active_repo('unset')
    if not attribute:
        raise ShellError('command "unset" requires a non-empty attribute')
    self.network.unset(self.network.active_repo, attribute)
Unset attribute on the active/default repo
def do_list(self, line):
    '''
    List known repos
    '''
    repo_names = self.network.repo_names
    print('Known repos:')
    print('    ' + '\n    '.join(repo_names))
List known repos
def do_show(self, repo):
    '''
    List repo attributes
    '''
    self.abort_on_nonexisting_effective_repo(repo, 'show')
    repo = self.network.get_repo(repo)
    repo.print_attributes()
List repo attributes
def do_setup_for_pypi_python_org(self, repo):
    '''
    Configure repo to point to the default package index
    https://pypi.python.org.
    '''
    effective_repo_name = self.get_effective_repo_name(repo)
    self.abort_on_nonexisting_repo(
        effective_repo_name, 'setup_for_pypi_python_org')
    self.network.setup_for_pypi_python_org(effective_repo_name)
Configure repo to point to the default package index https://pypi.python.org.
def do_setup_for_pip_local(self, repo):
    '''
    Configure repo to be directory based with directory `~/.pip/local`.
    Also makes that directory if needed.
    '''
    effective_repo_name = self.get_effective_repo_name(repo)
    self.abort_on_nonexisting_repo(
        effective_repo_name, 'setup_for_pip_local')
    self.network.setup_for_pip_local(effective_repo_name)
Configure repo to be directory based with directory `~/.pip/local`. Also makes that directory if needed.
def do_serve(self, repo_name):
    '''
    Serve a local directory over http as a package index (like pypi).
    Intended for quick package exchanges.
    '''
    self.abort_on_nonexisting_effective_repo(repo_name, 'serve')
    repo = self.network.get_repo(repo_name)
    repo.serve()
Serve a local directory over http as a package index (like pypi). Intended for quick package exchanges.
def convert(filename, num_questions=None, solution=False,
            pages_per_q=DEFAULT_PAGES_PER_Q, folder='question_pdfs',
            output='gradescope.pdf', zoom=1):
    check_for_wkhtmltohtml()
    save_notebook(filename)
    nb = read_nb(filename, solution=solution)
    pdf_names = create_question_pdfs(nb, pages_per_q=pages_per_q,
                                     folder=folder, zoom=zoom)
    merge_pdfs(pdf_names, output)
    # The first pdf generated is the email PDF
    n_questions_found = len(pdf_names) - 1
    if num_questions is not None and n_questions_found != num_questions:
        logging.warning(
            'We expected there to be {} questions but there are only {} in '
            'your final PDF. Gradescope will most likely not accept your '
            'submission. Double check that you wrote your answers in the '
            'cells that we provided.'
            .format(num_questions, len(pdf_names))
        )
    try:
        from IPython.display import display, HTML
        display(HTML(DOWNLOAD_HTML.format(output)))
    except ImportError:
        print('Done! The resulting PDF is located in this directory and is '
              'called {}. Upload that PDF to Gradescope for grading.'
              .format(output))
    print()
    print('If the font size of your PDF is too small/large, change the value '
          'of the zoom argument when calling convert. For example, setting '
          'zoom=2 makes everything twice as big.')
Public method that exports nb to PDF and pads all the questions. If num_questions is specified, will also check the final PDF for missing questions. If the output font size is too small/large, increase or decrease the zoom argument until the size looks correct. If solution=True, we'll export solution cells instead of student cells. Use this option to generate the solutions to upload to Gradescope.
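A typical call, assuming a notebook named 'hw1.ipynb' with five tagged questions (both values are placeholders):

    convert('hw1.ipynb', num_questions=5)
    # solutions variant with a larger font:
    convert('hw1.ipynb', num_questions=5, solution=True, zoom=2)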
def check_for_wkhtmltohtml():
    locator = 'where' if sys.platform == 'win32' else 'which'
    wkhtmltopdf = (subprocess.Popen([locator, 'wkhtmltopdf'],
                                    stdout=subprocess.PIPE)
                   .communicate()[0].strip())
    if not os.path.exists(wkhtmltopdf):
        logging.error(
            'No wkhtmltopdf executable found. Please install '
            'wkhtmltopdf before trying again - {}'.format(WKHTMLTOPDF_URL))
        raise ValueError(
            'No wkhtmltopdf executable found. Please install '
            'wkhtmltopdf before trying again - {}'.format(WKHTMLTOPDF_URL))
Checks to see if the wkhtmltopdf binary is installed. Raises an error if not.
def find_student_email(nb) -> str:
    '''
    Looks for the OkPy-generated string:
    "Successfully logged in as <email>"
    and returns the email address. Raises a ValueError if email not found.
    '''
    search = 'Successfully logged in as '
    cells = [cell for cell in nb.cells if 'outputs' in cell]
    for cell in cells:
        for output in cell.outputs:
            if 'text' in output and search in output.text:
                return output.text.split(search)[1].strip()
    raise ValueError('Error: was not able to get email from ok.auth() cell. '
                     'Please run that cell and try again.')
Looks for the OkPy-generated string: "Successfully logged in as <email>" and returns the email address. Raises a ValueError if email not found.
def read_nb(filename, solution) -> nbformat.NotebookNode:
    with open(filename, 'r') as f:
        nb = nbformat.read(f, as_version=4)
    email = find_student_email(nb)
    preamble = nbformat.v4.new_markdown_cell(
        source='# ' + email, metadata={'tags': ['q_email']})
    tags_to_check = TAGS if not solution else SOL_TAGS
    cells = ([preamble] +
             [remove_input(cell) for cell in nb['cells']
              if cell_has_tags(cell, tags_to_check)])
    nb['cells'] = cells
    return nb
Takes in a filename of a notebook and returns a notebook object containing only the cell outputs to export.
def nb_to_html_cells(nb) -> list:
    html_exporter = HTMLExporter()
    html_exporter.template_file = 'basic'
    (body, resources) = html_exporter.from_notebook_node(nb)
    return BeautifulSoup(body, 'html.parser').findAll('div', class_='cell')
Converts notebook to an iterable of BS4 HTML nodes. Images are inline.
def nb_to_q_nums(nb) -> list:
    def q_num(cell):
        assert cell.metadata.tags
        return first(filter(lambda t: 'q' in t, cell.metadata.tags))
    return [q_num(cell) for cell in nb['cells']]
Gets the question number tag from each cell in the notebook.
def pad_pdf_pages(pdf_name, pages_per_q) -> None: pdf = PyPDF2.PdfFileReader(pdf_name) output = PyPDF2.PdfFileWriter() num_pages = pdf.getNumPages() if num_pages > pages_per_q: logging.warning('{} has {} pages. Only the first ' '{} pages will get output.' .format(pdf_name, num_pages, pages_per_q)) # Copy over up to pages_per_q pages for page in range(min(num_pages, pages_per_q)): output.addPage(pdf.getPage(page)) # Pad if necessary if num_pages < pages_per_q: for page in range(pages_per_q - num_pages): output.addBlankPage() # Output the PDF with open(pdf_name, 'wb') as out_file: output.write(out_file)
Checks if PDF has the correct number of pages. If it has too many, warns the user. If it has too few, adds blank pages until the right length is reached.
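For example, to trim or pad a single question PDF to exactly two pages (the path below is hypothetical):

pad_pdf_pages('question_pdfs/q01.pdf', pages_per_q=2)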
def create_question_pdfs(nb, pages_per_q, folder, zoom) -> list: html_cells = nb_to_html_cells(nb) q_nums = nb_to_q_nums(nb) os.makedirs(folder, exist_ok=True) pdf_options = PDF_OPTS.copy() pdf_options['zoom'] = ZOOM_FACTOR * zoom pdf_names = [] for question, cell in zip(q_nums, html_cells): # Create question PDFs pdf_name = os.path.join(folder, '{}.pdf'.format(question)) pdfkit.from_string(cell.prettify(), pdf_name, options=pdf_options) pad_pdf_pages(pdf_name, pages_per_q) print('Created ' + pdf_name) pdf_names.append(pdf_name) return pdf_names
Converts each cell in the notebook to a PDF named something like 'q04c.pdf'. Places PDFs in the specified folder and returns the list of created PDF locations.
def merge_pdfs(pdf_names, output) -> None: merger = PyPDF2.PdfFileMerger() for filename in pdf_names: merger.append(filename) merger.write(output) merger.close()
Merges all PDFs into a single long PDF.
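A sketch of stitching the per-question PDFs back together (the file names here are hypothetical):

pdf_names = ['question_pdfs/email.pdf',
             'question_pdfs/q01.pdf',
             'question_pdfs/q02.pdf']      # hypothetical per-question PDFs
merge_pdfs(pdf_names, 'gradescope.pdf')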
def get_item_abspath(self, identifier): dataset_cache_abspath = os.path.join( self._cache_abspath, self.uuid ) mkdir_parents(dataset_cache_abspath) manifest = self.get_manifest() relpath = manifest['items'][identifier]['relpath'] _, ext = os.path.splitext(relpath) local_item_abspath = os.path.join( dataset_cache_abspath, identifier + ext ) if not os.path.isfile(local_item_abspath): url = self.http_manifest["item_urls"][identifier] r = self._get_request(url, stream=True) tmp_local_item_abspath = local_item_abspath + ".tmp" with open(tmp_local_item_abspath, 'wb') as f: shutil.copyfileobj(r.raw, f) os.rename(tmp_local_item_abspath, local_item_abspath) return local_item_abspath
Return absolute path at which item content can be accessed. :param identifier: item identifier :returns: absolute path from which the item content can be accessed
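Usage might look like this, assuming `dataset` is an instance of the class this method belongs to (the identifier is hypothetical):

item_id = '290d3f1a902c452ce1c184ed793b1d6b83b59164'   # hypothetical identifier
path = dataset.get_item_abspath(item_id)
with open(path, 'rb') as fh:
    content = fh.read()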
def get_overlay(self, overlay_name): url = self.http_manifest["overlays"][overlay_name] return self._get_json_from_url(url)
Return overlay as a dictionary. :param overlay_name: name of the overlay :returns: overlay as a dictionary
def SQLarray_fromfile(filename, **kwargs): Table2array = {'rst': rest_table.Table2array, 'txt': rest_table.Table2array, 'csv': csv_table.Table2array, } # see convert.Autoconverter for the kwargs; *active*/*autoconvert* # is for the Table2array class _kwnames = ('active', 'autoconvert', 'mode', 'mapping', 'sep') kwargsT2a = dict((k,kwargs.pop(k)) for k in _kwnames if k in kwargs) kwargsT2a.setdefault('mode', 'singlet') # Note: sep=False is the only sane choice because we cannot deal yet # with numpy list structures for import into the db kwargsT2a['sep'] = False root, ext = os.path.splitext(filename) if ext.startswith('.'): ext = ext[1:] ext = ext.lower() kwargsT2a['filename'] = filename t = Table2array[ext](**kwargsT2a) kwargs.setdefault('name', t.tablename) kwargs['columns'] = t.names kwargs['records'] = t.records # use records to have sqlite do type conversion return SQLarray(**kwargs)
Create a :class:`SQLarray` from *filename*.

Uses the filename suffix to detect the contents:
  rst, txt
      restructured text (see :mod:`recsql.rest_table`)
  csv
      comma-separated (see :mod:`recsql.csv_table`)

:Arguments:
  *filename*
      name of the file that contains the data with the appropriate file extension
  *kwargs*
      - additional arguments for :class:`SQLarray`
      - additional arguments for :class:`recsql.csv_table.Table2array` or
        :class:`recsql.rest_table.Table2array` such as *mode* or *autoconvert*
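A minimal sketch, assuming a CSV file with a header row (file and table names are hypothetical):

sa = SQLarray_fromfile('grades.csv', name='grades')
print(len(sa))          # number of rows loaded from the file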
def connection_count(self): return self.sql("SELECT value FROM %(master)s WHERE name = 'connection_counter'" % vars(self), cache=False, asrecarray=False)[0][0]
Number of currently open connections to the database. (Stored in table sqlarray_master.)
def merge(self,recarray,columns=None): len_before = len(self) # CREATE TEMP TABLE in database tmparray = SQLarray(self.tmp_table_name, records=recarray, columns=columns, connection=self.connection, is_tmp=True) len_tmp = len(tmparray) # insert into main table SQL = """INSERT OR ABORT INTO __self__ SELECT * FROM %s""" % self.tmp_table_name self.sql(SQL) len_after = len(self) n_inserted = len_after - len_before assert len_tmp == n_inserted del tmparray # also drops the tmp table (keep it at end for debugging) return n_inserted
Merge another recarray with the same columns into this table.

:Arguments:
  recarray
      numpy record array with the same columns as this table; its rows are inserted into the table
:Returns:
  n
      number of inserted rows
:Raises:
  Raises an exception if duplicate and incompatible data exist in the main table and the new one.
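A hedged sketch, assuming `T` is an existing :class:`SQLarray` whose columns match the hypothetical record layout below:

import numpy

new_rows = numpy.rec.fromrecords(
    [('Miller', 'math', 2005, 1.3)],
    names='surname,subject,year,grade')    # hypothetical columns
n_inserted = T.merge(new_rows)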
def merge_table(self,name): l_before = len(self) SQL = """INSERT OR ABORT INTO __self__ SELECT * FROM %s""" % name self.sql(SQL) l_after = len(self) return l_after - l_before
Merge an existing table in the database with the __self__ table. Executes as ``'INSERT INTO __self__ SELECT * FROM <name>'``. However, this method is probably used less often than the simpler :meth:`merge`. :Arguments: name name of the table in the database (must be compatible with __self__) :Returns: n number of inserted rows
def sql_index(self,index_name,column_names,unique=True): if type(column_names) == str: column_names = [column_names] try: if len(column_names) == 0: raise TypeError except TypeError: raise ValueError("Provide a list of column names for an index.") if unique: UNIQUE = "UNIQUE" else: UNIQUE = "" table_name = self.name columns = ",".join(column_names) SQL = """CREATE %(UNIQUE)s INDEX %(index_name)s ON %(table_name)s """\ """(%(columns)s)""" % locals() self.sql(SQL)
Add a named index on given columns to improve performance.
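For instance, assuming `T` is a :class:`SQLarray` with the hypothetical columns below:

T.sql_index('idx_surname_subject', ['surname', 'subject'], unique=False)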
def sql_select(self,fields,*args,**kwargs): SQL = "SELECT "+str(fields)+" FROM __self__ "+ " ".join(args) return self.sql(SQL,**kwargs)
Execute a simple SQL ``SELECT`` statement and return the values as a new numpy rec array. The *fields* argument and the additional optional arguments are simply concatenated with additional SQL statements according to the template::

   SELECT <fields> FROM __self__ [args]

The simplest fields argument is ``"*"``. Example: Create a recarray in which students with average grade less than 3 are listed::

   result = T.SELECT("surname, subject, year, avg(grade) AS avg_grade",
                     "WHERE avg_grade < 3",
                     "GROUP BY surname,subject",
                     "ORDER BY avg_grade,surname")

The resulting SQL would be::

   SELECT surname, subject, year, avg(grade) AS avg_grade FROM __self__
          WHERE avg_grade < 3
          GROUP BY surname,subject
          ORDER BY avg_grade,surname

Note how one can use aggregate functions such as avg(). The string *'__self__'* is automatically replaced with the table name (``T.name``); this can be used for Cartesian products such as::

   LEFT JOIN __self__ WHERE ...

.. Note:: See the documentation for :meth:`~SQLarray.sql` for more details on the available keyword arguments and the use of ``?`` parameter interpolation.
def limits(self,variable): (vmin,vmax), = self.SELECT('min(%(variable)s), max(%(variable)s)' % vars()) return vmin,vmax
Return minimum and maximum of variable across all rows of data.
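For example, assuming `T` is a :class:`SQLarray` and 'grade' a hypothetical numeric column in it:

vmin, vmax = T.limits('grade')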
def _init_sqlite_functions(self): self.connection.create_function("sqrt", 1,sqlfunctions._sqrt) self.connection.create_function("sqr", 1,sqlfunctions._sqr) self.connection.create_function("periodic", 1,sqlfunctions._periodic) self.connection.create_function("pow", 2,sqlfunctions._pow) self.connection.create_function("match",2,sqlfunctions._match) self.connection.create_function("regexp",2,sqlfunctions._regexp) self.connection.create_function("fformat",2,sqlfunctions._fformat) self.connection.create_aggregate("std",1,sqlfunctions._Stdev) self.connection.create_aggregate("stdN",1,sqlfunctions._StdevN) self.connection.create_aggregate("median",1,sqlfunctions._Median) self.connection.create_aggregate("array",1,sqlfunctions._NumpyArray) self.connection.create_aggregate("histogram",4,sqlfunctions._NumpyHistogram) self.connection.create_aggregate("distribution",4,sqlfunctions._NormedNumpyHistogram) self.connection.create_aggregate("meanhistogram",5,sqlfunctions._MeanHistogram) self.connection.create_aggregate("stdhistogram",5,sqlfunctions._StdHistogram) self.connection.create_aggregate("minhistogram",5,sqlfunctions._MinHistogram) self.connection.create_aggregate("maxhistogram",5,sqlfunctions._MaxHistogram) self.connection.create_aggregate("medianhistogram",5,sqlfunctions._MedianHistogram) self.connection.create_aggregate("zscorehistogram",5,sqlfunctions._ZscoreHistogram)
Register additional SQL functions and aggregates with the database connection.
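Once registered, these functions can appear in any query against the table; a hedged sketch with hypothetical column names:

result = T.sql_select('subject, median(grade) AS median_grade',
                      'GROUP BY subject')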