Dataset columns:
  code        string (lengths 12–2.05k)
  label_name  string (5 classes)
  label       int64 (values 0–4)
def toggle_archived(book_id):
    is_archived = change_archived_books(book_id,
                                        message="Book {} archivebit toggled".format(book_id))
    if is_archived:
        remove_synced_book(book_id)
    return ""
Base
1
def show_unit_extensions(request, course_id):
    """
    Shows all of the students which have due date extensions for the given unit.
    """
    course = get_course_by_id(SlashSeparatedCourseKey.from_deprecated_string(course_id))
    unit = find_unit(course, request.GET.get('url'))
    return JsonResponse(dump_module_extensions(course, unit))
Compound
4
def response(self, response, content):
    if "authentication-info" not in response:
        challenge = _parse_www_authenticate(response, "www-authenticate").get(
            "digest", {}
        )
        if "true" == challenge.get("stale"):
            self.challenge["nonce"] = challenge["nonce"]
            self.challenge["nc"] = 1
            return True
    else:
        updated_challenge = _parse_www_authenticate(
            response, "authentication-info"
        ).get("digest", {})

        if "nextnonce" in updated_challenge:
            self.challenge["nonce"] = updated_challenge["nextnonce"]
            self.challenge["nc"] = 1
    return False
Class
2
def reset_due_date(request, course_id):
    """
    Rescinds a due date extension for a student on a particular unit.
    """
    course = get_course_by_id(SlashSeparatedCourseKey.from_deprecated_string(course_id))
    student = require_student_from_identifier(request.GET.get('student'))
    unit = find_unit(course, request.GET.get('url'))
    set_due_date_extension(course, unit, student, None)
    if not getattr(unit, "due", None):
        # It's possible the normal due date was deleted after an extension was granted:
        return JsonResponse(
            _("Successfully removed invalid due date extension (unit has no due date).")
        )

    original_due_date_str = unit.due.strftime('%Y-%m-%d %H:%M')
    return JsonResponse(_(
        'Successfully reset due date for student {0} for {1} '
        'to {2}').format(student.profile.name, _display_unit(unit),
                         original_due_date_str))
Compound
4
def get_pos_tagger(self):
    from nltk.corpus import brown

    regexp_tagger = RegexpTagger(
        [
            (r"^-?[0-9]+(.[0-9]+)?$", "CD"),  # cardinal numbers
            (r"(The|the|A|a|An|an)$", "AT"),  # articles
            (r".*able$", "JJ"),  # adjectives
            (r".*ness$", "NN"),  # nouns formed from adjectives
            (r".*ly$", "RB"),  # adverbs
            (r".*s$", "NNS"),  # plural nouns
            (r".*ing$", "VBG"),  # gerunds
            (r".*ed$", "VBD"),  # past tense verbs
            (r".*", "NN"),  # nouns (default)
        ]
    )
    brown_train = brown.tagged_sents(categories="news")
    unigram_tagger = UnigramTagger(brown_train, backoff=regexp_tagger)
    bigram_tagger = BigramTagger(brown_train, backoff=unigram_tagger)
    trigram_tagger = TrigramTagger(brown_train, backoff=bigram_tagger)

    # Override particular words
    main_tagger = RegexpTagger(
        [(r"(A|a|An|an)$", "ex_quant"), (r"(Every|every|All|all)$", "univ_quant")],
        backoff=trigram_tagger,
    )

    return main_tagger
Base
1
def parse_http_message(kind, buf):
    if buf._end:
        return None
    try:
        start_line = buf.readline()
    except EOFError:
        return None
    msg = kind()
    msg.raw = start_line
    if kind is HttpRequest:
        assert re.match(
            br".+ HTTP/\d\.\d\r\n$", start_line
        ), "Start line does not look like HTTP request: " + repr(start_line)
        msg.method, msg.uri, msg.proto = start_line.rstrip().decode().split(" ", 2)
        assert msg.proto.startswith("HTTP/"), repr(start_line)
    elif kind is HttpResponse:
        assert re.match(
            br"^HTTP/\d\.\d \d+ .+\r\n$", start_line
        ), "Start line does not look like HTTP response: " + repr(start_line)
        msg.proto, msg.status, msg.reason = start_line.rstrip().decode().split(" ", 2)
        msg.status = int(msg.status)
        assert msg.proto.startswith("HTTP/"), repr(start_line)
    else:
        raise Exception("Use HttpRequest or HttpResponse .from_{bytes,buffered}")
    msg.version = msg.proto[5:]

    while True:
        line = buf.readline()
        msg.raw += line
        line = line.rstrip()
        if not line:
            break
        t = line.decode().split(":", 1)
        msg.headers[t[0].lower()] = t[1].lstrip()

    content_length_string = msg.headers.get("content-length", "")
    if content_length_string.isdigit():
        content_length = int(content_length_string)
        msg.body = msg.body_raw = buf.read(content_length)
    elif msg.headers.get("transfer-encoding") == "chunked":
        # NotImplemented is not an exception class; raise the proper error type.
        raise NotImplementedError
    elif msg.version == "1.0":
        msg.body = msg.body_raw = buf.readall()
    else:
        msg.body = msg.body_raw = b""
    msg.raw += msg.body_raw
    return msg
Class
2
def get_vars_next(self):
    next = current.request.vars._next
    host = current.request.env.http_host
    if isinstance(next, (list, tuple)):
        next = next[0]
    if next and self.settings.prevent_open_redirect_attacks:
        return self.prevent_open_redirect(next, host)
    return next or None
Base
1
def test_slot_policy_scrapy_default():
    mw = _get_mw()
    req = scrapy.Request("http://example.com", meta={'splash': {
        'slot_policy': scrapy_splash.SlotPolicy.SCRAPY_DEFAULT
    }})
    req = mw.process_request(req, None)
    assert 'download_slot' not in req.meta
Class
2
def test_received_no_doublecr(self):
    data = b"""\
GET /foobar HTTP/8.4
"""
    result = self.parser.received(data)
    self.assertEqual(result, 21)
    self.assertFalse(self.parser.completed)
    self.assertEqual(self.parser.headers, {})
Base
1
def get_imports(self, *, prefix: str) -> Set[str]:
    """
    Get a set of import strings that should be included when this property is used somewhere

    Args:
        prefix: A prefix to put before any relative (local) module names.
    """
    imports = super().get_imports(prefix=prefix)
    imports.update({"from datetime import datetime", "from typing import cast"})
    return imports
Base
1
def test_login_unknown_code(self):
    response = self.client.post('/accounts/login/code/', {
        'code': 'unknown',
    })

    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.context['form'].errors, {
        'code': ['Login code is invalid. It might have expired.'],
    })
Base
1
def _returndata_encoding(contract_sig):
    if contract_sig.is_from_json:
        return Encoding.JSON_ABI
    return Encoding.ABI
Base
1
def test_underscore_traversal(self):
    # Prevent traversal to names starting with an underscore (_)
    ec = self._makeContext()
    with self.assertRaises(NotFound):
        ec.evaluate("context/__class__")
    with self.assertRaises(NotFound):
        ec.evaluate("nocall: random/_itertools/repeat")
    with self.assertRaises(NotFound):
        ec.evaluate("random/_itertools/repeat/foobar")
Base
1
def serialize(self, bundle, format='application/json', options={}):
    """
    Given some data and a format, calls the correct method to
    serialize the data and returns the result.
    """
    desired_format = None
    for short_format, long_format in self.content_types.items():
        if format == long_format:
            if hasattr(self, "to_%s" % short_format):
                desired_format = short_format
                break

    if desired_format is None:
        raise UnsupportedFormat("The format indicated '%s' had no available serialization method. Please check your ``formats`` and ``content_types`` on your Serializer." % format)

    serialized = getattr(self, "to_%s" % desired_format)(bundle, options)
    return serialized
Class
2
def publish(request, category_id=None):
    if category_id:
        get_object_or_404(
            Category.objects.visible(),
            pk=category_id)

    user = request.user
    form = TopicForm(
        user=user,
        data=post_data(request),
        initial={'category': category_id})
    cform = CommentForm(
        user=user,
        data=post_data(request))
    if (is_post(request) and
            all([form.is_valid(), cform.is_valid()]) and
            not request.is_limited()):
        if not user.st.update_post_hash(form.get_topic_hash()):
            return redirect(
                request.POST.get('next', None) or
                form.get_category().get_absolute_url())

        # wrap in transaction.atomic?
        topic = form.save()
        cform.topic = topic
        comment = cform.save()
        comment_posted(comment=comment, mentions=cform.mentions)
        return redirect(topic.get_absolute_url())

    return render(
        request=request,
        template_name='spirit/topic/publish.html',
        context={'form': form, 'cform': cform})
Base
1
def sql_insert(self, sentence):
    self.cursor.execute(sentence)
    self.conn.commit()
    return True
Base
1
def test_list_entrance_exam_instructor_tasks_all_student(self):
    """ Test list task history for entrance exam AND all students. """
    url = reverse('list_entrance_exam_instructor_tasks',
                  kwargs={'course_id': unicode(self.course.id)})
    response = self.client.get(url, {})
    self.assertEqual(response.status_code, 200)

    # check response
    tasks = json.loads(response.content)['tasks']
    self.assertEqual(len(tasks), 0)
Compound
4
def from_plist(self, content):
    """
    Given some binary plist data, returns a Python dictionary of the decoded data.
    """
    if biplist is None:
        raise ImproperlyConfigured("Usage of the plist aspects requires biplist.")

    return biplist.readPlistFromString(content)
Class
2
def is_writable(dir):
    """Determine whether a given directory is writable in a portable manner.

    Parameters
    ----------
    dir : str
        A string representing a path to a directory on the filesystem.

    Returns
    -------
    res : bool
        True or False.
    """
    if not os.path.isdir(dir):
        return False

    # Do NOT use a hardcoded name here due to the danger from race conditions
    # on NFS when multiple processes are accessing the same base directory in
    # parallel. We use both hostname and process id for the prefix in an
    # attempt to ensure that there can really be no name collisions (tempfile
    # appends 6 random chars to this prefix).
    prefix = 'dummy_%s_%s_' % (socket.gethostname(), os.getpid())
    try:
        tmp = tempfile.TemporaryFile(prefix=prefix, dir=dir)
    except OSError:
        return False
    # The underlying file is destroyed upon closing the file object (under
    # *nix, it was unlinked at creation time)
    tmp.close()
    return True
Class
2
def test_reset_entrance_exam_student_attempts_deletall(self):
    """ Make sure no one can delete all students' state on entrance exam. """
    url = reverse('reset_student_attempts_for_entrance_exam',
                  kwargs={'course_id': unicode(self.course.id)})
    response = self.client.get(url, {
        'all_students': True,
        'delete_module': True,
    })
    self.assertEqual(response.status_code, 400)
Compound
4
def _checknetloc(netloc):
    if not netloc or netloc.isascii():
        return
    # looking for characters like \u2100 that expand to 'a/c'
    # IDNA uses NFKC equivalence, so normalize for this check
    import unicodedata
    n = netloc.rpartition('@')[2]  # ignore anything to the left of '@'
    n = n.replace(':', '')  # ignore characters already included
    n = n.replace('#', '')  # but not the surrounding text
    n = n.replace('?', '')
    netloc2 = unicodedata.normalize('NFKC', n)
    if n == netloc2:
        return
    for c in '/?#@:':
        if c in netloc2:
            raise ValueError("netloc '" + netloc + "' contains invalid " +
                             "characters under NFKC normalization")
Class
2
def list_users():
    off = int(request.args.get("offset") or 0)
    limit = int(request.args.get("limit") or 10)
    search = request.args.get("search")
    sort = request.args.get("sort", "id")
    state = None
    if sort == "state":
        state = json.loads(request.args.get("state", "[]"))
    else:
        if sort not in ub.User.__table__.columns.keys():
            sort = "id"
    order = request.args.get("order", "").lower()

    if sort != "state" and order:
        order = text(sort + " " + order)
    elif not state:
        order = ub.User.id.asc()

    all_user = ub.session.query(ub.User)
    if not config.config_anonbrowse:
        all_user = all_user.filter(ub.User.role.op('&')(constants.ROLE_ANONYMOUS) != constants.ROLE_ANONYMOUS)

    total_count = filtered_count = all_user.count()

    if search:
        all_user = all_user.filter(or_(func.lower(ub.User.name).ilike("%" + search + "%"),
                                       func.lower(ub.User.kindle_mail).ilike("%" + search + "%"),
                                       func.lower(ub.User.email).ilike("%" + search + "%")))
    if state:
        users = calibre_db.get_checkbox_sorted(all_user.all(), state, off, limit,
                                               request.args.get("order", "").lower())
    else:
        users = all_user.order_by(order).offset(off).limit(limit).all()
    if search:
        filtered_count = len(users)

    for user in users:
        if user.default_language == "all":
            user.default = _("All")
        else:
            user.default = LC.parse(user.default_language).get_language_name(get_locale())

    table_entries = {'totalNotFiltered': total_count, 'total': filtered_count, "rows": users}
    js_list = json.dumps(table_entries, cls=db.AlchemyEncoder)
    response = make_response(js_list)
    response.headers["Content-Type"] = "application/json; charset=utf-8"
    return response
Base
1
def __init__(self, hs):
    super().__init__(hs)
    self.hs = hs
    self.is_mine_id = hs.is_mine_id

    self.http_client = hs.get_simple_http_client()

    self._presence_enabled = hs.config.use_presence

    # The number of ongoing syncs on this process, by user id.
    # Empty if _presence_enabled is false.
    self._user_to_num_current_syncs = {}  # type: Dict[str, int]

    self.notifier = hs.get_notifier()
    self.instance_id = hs.get_instance_id()

    # user_id -> last_sync_ms. Lists the users that have stopped syncing
    # but we haven't notified the master of that yet
    self.users_going_offline = {}

    self._bump_active_client = ReplicationBumpPresenceActiveTime.make_client(hs)
    self._set_state_client = ReplicationPresenceSetState.make_client(hs)

    self._send_stop_syncing_loop = self.clock.looping_call(
        self.send_stop_syncing, UPDATE_SYNCING_USERS_MS
    )

    hs.get_reactor().addSystemEventTrigger(
        "before",
        "shutdown",
        run_as_background_process,
        "generic_presence.on_shutdown",
        self._on_shutdown,
    )
Base
1
def check_prereg_key_and_redirect(
    request: HttpRequest,
    confirmation_key: str,
    full_name: Optional[str] = REQ(default=None)
Base
1
def test_spinal_case():
    assert utils.spinal_case("keep_alive") == "keep-alive"
Base
1
def parse(source, filename='<unknown>', mode='exec'):
    """
    Parse the source into an AST node.
    Equivalent to compile(source, filename, mode, PyCF_ONLY_AST).
    """
    return compile(source, filename, mode, PyCF_ONLY_AST)
Base
1
def stmt(self, stmt, msg=None):
    mod = ast.Module([stmt])
    self.mod(mod, msg)
Base
1
def __init__(self):
    self.reqparse = reqparse.RequestParser()
    super(AuthenticatedService, self).__init__()
    self.auth_dict = dict()
    if current_user.is_authenticated():
        roles_marshal = []
        for role in current_user.roles:
            roles_marshal.append(marshal(role.__dict__, ROLE_FIELDS))
        roles_marshal.append({"name": current_user.role})
        for role in RBACRole.roles[current_user.role].get_parents():
            roles_marshal.append({"name": role.name})
        self.auth_dict = {
            "authenticated": True,
            "user": current_user.email,
            "roles": roles_marshal
        }
    else:
        if app.config.get('FRONTED_BY_NGINX'):
            url = "https://{}:{}{}".format(app.config.get('FQDN'), app.config.get('NGINX_PORT'), '/login')
        else:
            url = "http://{}:{}{}".format(app.config.get('FQDN'), app.config.get('API_PORT'), '/login')
        self.auth_dict = {
            "authenticated": False,
            "user": None,
            "url": url
        }
Base
1
def close_or_open(request, pk, close=True):
    # todo: moderators should be able to close it
    poll = get_object_or_404(
        CommentPoll,
        pk=pk,
        comment__user=request.user
    )

    if close:
        close_at = timezone.now()
    else:
        close_at = None

    (CommentPoll.objects
     .filter(pk=poll.pk)
     .update(close_at=close_at))

    return redirect(request.GET.get('next', poll.get_absolute_url()))
Base
1
def testSpoofedHeadersDropped(self):
    data = b"""\
GET /foobar HTTP/8.4
x-auth_user: bob
content-length: 7

Hello.
"""
    self.feed(data)
    self.assertTrue(self.parser.completed)
    self.assertEqual(self.parser.headers, {"CONTENT_LENGTH": "7"})
Base
1
def _get_insert_token(token):
    """Returns either a whitespace or the line breaks from token."""
    # See issue484 why line breaks should be preserved.
    m = re.search(r'((\r\n|\r|\n)+) *$', token.value)
    if m is not None:
        return sql.Token(T.Whitespace.Newline, m.groups()[0])
    else:
        return sql.Token(T.Whitespace, ' ')
Class
2
def show_book(book_id):
    entries = calibre_db.get_book_read_archived(book_id, config.config_read_column, allow_show_archived=True)
    if entries:
        read_book = entries[1]
        archived_book = entries[2]
        entry = entries[0]
        entry.read_status = read_book == ub.ReadBook.STATUS_FINISHED
        entry.is_archived = archived_book
        for index in range(0, len(entry.languages)):
            entry.languages[index].language_name = isoLanguages.get_language_name(
                get_locale(), entry.languages[index].lang_code)
        cc = get_cc_columns(filter_config_custom_read=True)
        book_in_shelfs = []
        shelfs = ub.session.query(ub.BookShelf).filter(ub.BookShelf.book_id == book_id).all()
        for sh in shelfs:
            book_in_shelfs.append(sh.shelf)

        entry.tags = sort(entry.tags, key=lambda tag: tag.name)

        entry.ordered_authors = calibre_db.order_authors([entry])

        entry.kindle_list = check_send_to_kindle(entry)
        entry.reader_list = check_read_formats(entry)

        entry.audioentries = []
        for media_format in entry.data:
            if media_format.format.lower() in constants.EXTENSIONS_AUDIO:
                entry.audioentries.append(media_format.format.lower())

        return render_title_template('detail.html',
                                     entry=entry,
                                     cc=cc,
                                     is_xhr=request.headers.get('X-Requested-With') == 'XMLHttpRequest',
                                     title=entry.title,
                                     books_shelfs=book_in_shelfs,
                                     page="book")
    else:
        log.debug(u"Oops! Selected book title is unavailable. File does not exist or is not accessible")
        flash(_(u"Oops! Selected book title is unavailable. File does not exist or is not accessible"),
              category="error")
        return redirect(url_for("web.index"))
Base
1
def _load_from(self, data: bytes) -> None:
    if data.strip() == b'':
        data = XMP_EMPTY  # on some platforms lxml chokes on empty documents

    def basic_parser(xml):
        return parse(BytesIO(xml))

    def strip_illegal_bytes_parser(xml):
        return parse(BytesIO(re_xml_illegal_bytes.sub(b'', xml)))

    def recovery_parser(xml):
        parser = XMLParser(recover=True)
        return parse(BytesIO(xml), parser)

    def replace_with_empty_xmp(_xml=None):
        log.warning("Error occurred parsing XMP, replacing with empty XMP.")
        return basic_parser(XMP_EMPTY)

    if self.overwrite_invalid_xml:
        parsers: Iterable[Callable] = [
            basic_parser,
            strip_illegal_bytes_parser,
            recovery_parser,
            replace_with_empty_xmp,
        ]
    else:
        parsers = [basic_parser]

    for parser in parsers:
        try:
            self._xmp = parser(data)
        except (XMLSyntaxError if self.overwrite_invalid_xml else NeverRaise) as e:
            if str(e).startswith("Start tag expected, '<' not found") or str(
                e
            ).startswith("Document is empty"):
                self._xmp = replace_with_empty_xmp()
                break
        else:
            break

    try:
        pis = self._xmp.xpath('/processing-instruction()')
        for pi in pis:
            etree.strip_tags(self._xmp, pi.tag)
        self._get_rdf_root()
    except (Exception if self.overwrite_invalid_xml else NeverRaise) as e:
        log.warning("Error occurred parsing XMP", exc_info=e)
        self._xmp = replace_with_empty_xmp()
    return
Base
1
def edit_all_cc_data(book_id, book, to_save):
    cc = calibre_db.session.query(db.Custom_Columns)\
        .filter(db.Custom_Columns.datatype.notin_(db.cc_exceptions)).all()
    return edit_cc_data(book_id, book, to_save, cc)
Base
1
def test_parse_header_11_expect_continue(self):
    data = b"GET /foobar HTTP/1.1\nexpect: 100-continue"
    self.parser.parse_header(data)
    self.assertEqual(self.parser.expect_continue, True)
Base
1
def auth_ldap_server(self):
    return self.appbuilder.get_app.config["AUTH_LDAP_SERVER"]
Class
2
def testPeek(self):
    with ops.Graph().as_default() as G:
        with ops.device('/cpu:0'):
            x = array_ops.placeholder(dtypes.int32, name='x')
            pi = array_ops.placeholder(dtypes.int64)
            gi = array_ops.placeholder(dtypes.int64)
            p = array_ops.placeholder(dtypes.int32, name='p')
        with ops.device(test.gpu_device_name()):
            stager = data_flow_ops.MapStagingArea(
                [
                    dtypes.int32,
                ], shapes=[[]])
            stage = stager.put(pi, [x], [0])
            peek = stager.peek(gi)
            size = stager.size()
        G.finalize()

        n = 10

        with self.session(graph=G) as sess:
            for i in range(n):
                sess.run(stage, feed_dict={x: i, pi: i})

            for i in range(n):
                self.assertTrue(sess.run(peek, feed_dict={gi: i})[0] == i)

            self.assertTrue(sess.run(size) == 10)
Base
1
def set_bookmark(book_id, book_format):
    bookmark_key = request.form["bookmark"]
    ub.session.query(ub.Bookmark).filter(and_(ub.Bookmark.user_id == int(current_user.id),
                                              ub.Bookmark.book_id == book_id,
                                              ub.Bookmark.format == book_format)).delete()
    if not bookmark_key:
        ub.session_commit()
        return "", 204

    lbookmark = ub.Bookmark(user_id=current_user.id,
                            book_id=book_id,
                            format=book_format,
                            bookmark_key=bookmark_key)
    ub.session.merge(lbookmark)
    ub.session_commit("Bookmark for user {} in book {} created".format(current_user.id, book_id))
    return "", 201
Base
1
def _iterate_over_text(
    tree: "etree.Element",
    *tags_to_ignore: Union[str, "etree.Comment"]
Class
2
def model_from_config(config, custom_objects=None):
    """Instantiates a Keras model from its config.

    Usage:
    ```
    # for a Functional API model
    tf.keras.Model().from_config(model.get_config())

    # for a Sequential model
    tf.keras.Sequential().from_config(model.get_config())
    ```

    Args:
        config: Configuration dictionary.
        custom_objects: Optional dictionary mapping names (strings) to custom
            classes or functions to be considered during deserialization.

    Returns:
        A Keras model instance (uncompiled).

    Raises:
        TypeError: if `config` is not a dictionary.
    """
    if isinstance(config, list):
        raise TypeError('`model_from_config` expects a dictionary, not a list. '
                        'Maybe you meant to use '
                        '`Sequential.from_config(config)`?')
    from tensorflow.python.keras.layers import deserialize  # pylint: disable=g-import-not-at-top
    return deserialize(config, custom_objects=custom_objects)
Base
1
def require_post_params(*args, **kwargs):
    """
    Checks for required parameters or renders a 400 error.
    (decorator with arguments)

    Functions like 'require_query_params', but checks for
    POST parameters rather than GET parameters.
    """
    required_params = []
    required_params += [(arg, None) for arg in args]
    required_params += [(key, kwargs[key]) for key in kwargs]
    # required_params = e.g. [('action', 'enroll or unenroll'), ['emails', None]]

    def decorator(func):  # pylint: disable=missing-docstring
        def wrapped(*args, **kwargs):  # pylint: disable=missing-docstring
            request = args[0]
            error_response_data = {
                'error': 'Missing required query parameter(s)',
                'parameters': [],
                'info': {},
            }

            for (param, extra) in required_params:
                default = object()
                if request.POST.get(param, default) == default:
                    error_response_data['parameters'].append(param)
                    error_response_data['info'][param] = extra

            if len(error_response_data['parameters']) > 0:
                return JsonResponse(error_response_data, status=400)
            else:
                return func(*args, **kwargs)
        return wrapped
    return decorator
Compound
4
def group_title(value: str) -> str:
    value = re.sub(
        r"([A-Z]{2,})([A-Z][a-z]|[ -_]|$)",
        lambda m: m.group(1).title() + m.group(2),
        value.strip())
    value = re.sub(
        r"(^|[ _-])([A-Z])",
        lambda m: m.group(1) + m.group(2).lower(),
        value)
    return value
Base
1
def __init__(self, text, book):
    self.text = text
    self.book = book
Base
1
def main(req: func.HttpRequest) -> func.HttpResponse:
    response = ok(
        Info(
            resource_group=get_base_resource_group(),
            region=get_base_region(),
            subscription=get_subscription(),
            versions=versions(),
            instance_id=get_instance_id(),
            insights_appid=get_insights_appid(),
            insights_instrumentation_key=get_insights_instrumentation_key(),
        )
    )
    return response
Class
2
def _glob_matches(glob: str, value: str, word_boundary: bool = False) -> bool:
    """Tests if value matches glob.

    Args:
        glob
        value: String to test against glob.
        word_boundary: Whether to match against word boundaries or entire string.
            Defaults to False.
    """
    try:
        r = regex_cache.get((glob, True, word_boundary), None)
        if not r:
            r = _glob_to_re(glob, word_boundary)
            regex_cache[(glob, True, word_boundary)] = r
        return bool(r.search(value))
    except re.error:
        logger.warning("Failed to parse glob to regex: %r", glob)
        return False
Base
1
def testPartialIndexGets(self):
    with ops.Graph().as_default() as G:
        with ops.device('/cpu:0'):
            x = array_ops.placeholder(dtypes.float32)
            f = array_ops.placeholder(dtypes.float32)
            v = array_ops.placeholder(dtypes.float32)
            pi = array_ops.placeholder(dtypes.int64)
            pei = array_ops.placeholder(dtypes.int64)
            gi = array_ops.placeholder(dtypes.int64)
        with ops.device(test.gpu_device_name()):
            # Test again with partial index gets
            stager = data_flow_ops.MapStagingArea(
                [dtypes.float32, dtypes.float32, dtypes.float32])
            stage_xvf = stager.put(pi, [x, v, f], [0, 1, 2])
            key_xf, get_xf = stager.get(gi, [0, 2])
            key_v, get_v = stager.get(gi, [1])
            size = stager.size()
            isize = stager.incomplete_size()
        G.finalize()

        with self.session(graph=G) as sess:
            # Stage complete tuple
            sess.run(stage_xvf, feed_dict={pi: 0, x: 1, f: 2, v: 3})

            self.assertTrue(sess.run([size, isize]) == [1, 0])

            # Partial get using indices
            self.assertTrue(
                sess.run([key_xf, get_xf], feed_dict={
                    gi: 0
                }) == [0, [1, 2]])

            # Still some of key 0 left
            self.assertTrue(sess.run([size, isize]) == [1, 0])

            # Partial get of remaining index
            self.assertTrue(sess.run([key_v, get_v], feed_dict={gi: 0}) == [0, [3]])

            # All gone
            self.assertTrue(sess.run([size, isize]) == [0, 0])
Base
1
def extension_element_from_string(xml_string):
    element_tree = ElementTree.fromstring(xml_string)
    return _extension_element_from_element_tree(element_tree)
Base
1
def to_html(self, data, options=None):
    """
    Reserved for future usage.

    The desire is to provide HTML output of a resource, making an API
    available to a browser. This is on the TODO list but not currently
    implemented.
    """
    options = options or {}
    return 'Sorry, not implemented yet. Please append "?format=json" to your URL.'
Class
2
def test_keepalive_http10_explicit(self):
    # If header Connection: Keep-Alive is explicitly sent,
    # we want to keep the connection open, we also need to return
    # the corresponding header
    data = "Keep me alive"
    s = tobytes(
        "GET / HTTP/1.0\n"
        "Connection: Keep-Alive\n"
        "Content-Length: %d\n"
        "\n"
        "%s" % (len(data), data)
    )
    self.connect()
    self.sock.send(s)
    response = httplib.HTTPResponse(self.sock)
    response.begin()
    self.assertEqual(int(response.status), 200)
    connection = response.getheader("Connection", "")
    self.assertEqual(connection, "Keep-Alive")
Base
1
def test_filelike_longcl_http11(self):
    to_send = "GET /filelike_longcl HTTP/1.1\n\n"
    to_send = tobytes(to_send)

    self.connect()

    for t in range(0, 2):
        self.sock.send(to_send)
        fp = self.sock.makefile("rb", 0)
        line, headers, response_body = read_http(fp)
        self.assertline(line, "200", "OK", "HTTP/1.1")
        cl = int(headers["content-length"])
        self.assertEqual(cl, len(response_body))
        ct = headers["content-type"]
        self.assertEqual(ct, "image/jpeg")
        self.assertTrue(b"\377\330\377" in response_body)
Base
1
def rename_all_authors(first_author, renamed_author, calibre_path="", localbook=None, gdrive=False):
    # Create new_author_dir from parameter or from database
    # Create new title_dir from database and add id
    if first_author:
        new_authordir = get_valid_filename(first_author, chars=96)
        for r in renamed_author:
            new_author = calibre_db.session.query(db.Authors).filter(db.Authors.name == r).first()
            old_author_dir = get_valid_filename(r, chars=96)
            new_author_rename_dir = get_valid_filename(new_author.name, chars=96)
            if gdrive:
                gFile = gd.getFileFromEbooksFolder(None, old_author_dir)
                if gFile:
                    gd.moveGdriveFolderRemote(gFile, new_author_rename_dir)
            else:
                if os.path.isdir(os.path.join(calibre_path, old_author_dir)):
                    try:
                        old_author_path = os.path.join(calibre_path, old_author_dir)
                        new_author_path = os.path.join(calibre_path, new_author_rename_dir)
                        shutil.move(os.path.normcase(old_author_path), os.path.normcase(new_author_path))
                    except OSError as ex:
                        log.error("Rename author from: %s to %s: %s", old_author_path, new_author_path, ex)
                        log.debug(ex, exc_info=True)
                        return _("Rename author from: '%(src)s' to '%(dest)s' failed with error: %(error)s",
                                 src=old_author_path, dest=new_author_path, error=str(ex))
    else:
        new_authordir = get_valid_filename(localbook.authors[0].name, chars=96)
    return new_authordir
Base
1
def extract_messages(obj_list):
    """
    Extract "messages" from a list of exceptions or other objects.

    For ValidationErrors, `messages` are flattened into the output.
    For Exceptions, `args[0]` is added into the output.
    For other objects, `force_text` is called.

    :param obj_list: List of exceptions etc.
    :type obj_list: Iterable[object]
    :rtype: Iterable[str]
    """
    for obj in obj_list:
        if isinstance(obj, ValidationError):
            for msg in obj.messages:
                yield force_text(msg)
            continue
        if isinstance(obj, Exception):
            if len(obj.args):
                yield force_text(obj.args[0])
                continue
        yield force_text(obj)
Base
1
def make_homeserver(self, reactor, clock):
    hs = self.setup_test_homeserver("server", http_client=None)
    self.store = hs.get_datastore()
    return hs
Base
1
def update(request, pk):
    comment = Comment.objects.for_update_or_404(pk, request.user)
    form = CommentForm(data=post_data(request), instance=comment)
    if is_post(request) and form.is_valid():
        pre_comment_update(comment=Comment.objects.get(pk=comment.pk))
        comment = form.save()
        post_comment_update(comment=comment)
        return redirect(request.POST.get('next', comment.get_absolute_url()))
    return render(
        request=request,
        template_name='spirit/comment/update.html',
        context={'form': form})
Base
1
def test_without_crlf(self):
    data = "Echo\nthis\r\nplease"
    s = tobytes(
        "GET / HTTP/1.0\n"
        "Connection: close\n"
        "Content-Length: %d\n"
        "\n"
        "%s" % (len(data), data)
    )
    self.connect()
    self.sock.send(s)
    fp = self.sock.makefile("rb", 0)
    line, headers, echo = self._read_echo(fp)
    self.assertline(line, "200", "OK", "HTTP/1.0")
    self.assertEqual(int(echo.content_length), len(data))
    self.assertEqual(len(echo.body), len(data))
    self.assertEqual(echo.body, tobytes(data))
Base
1
def test_started_typing_remote_send(self):
    self.room_members = [U_APPLE, U_ONION]

    self.get_success(
        self.handler.started_typing(
            target_user=U_APPLE,
            requester=create_requester(U_APPLE),
            room_id=ROOM_ID,
            timeout=20000,
        )
    )

    put_json = self.hs.get_http_client().put_json
    put_json.assert_called_once_with(
        "farm",
        path="/_matrix/federation/v1/send/1000000",
        data=_expect_edu_transaction(
            "m.typing",
            content={
                "room_id": ROOM_ID,
                "user_id": U_APPLE.to_string(),
                "typing": True,
            },
        ),
        json_data_callback=ANY,
        long_retries=True,
        backoff_on_404=True,
        try_trailing_slash_on_400=True,
    )
Base
1
def test_reset_student_attempts_single(self):
    """ Test reset single student attempts. """
    url = reverse('reset_student_attempts',
                  kwargs={'course_id': self.course.id.to_deprecated_string()})
    response = self.client.get(url, {
        'problem_to_reset': self.problem_urlname,
        'unique_student_identifier': self.student.email,
    })
    self.assertEqual(response.status_code, 200)

    # make sure problem attempts have been reset.
    changed_module = StudentModule.objects.get(pk=self.module_to_reset.pk)
    self.assertEqual(
        json.loads(changed_module.state)['attempts'],
        0
    )
Compound
4
def test_request_body_too_large_with_wrong_cl_http10(self):
    body = "a" * self.toobig
    to_send = "GET / HTTP/1.0\nContent-Length: 5\n\n"
    to_send += body
    to_send = tobytes(to_send)
    self.connect()
    self.sock.send(to_send)
    fp = self.sock.makefile("rb")
    # first request succeeds (content-length 5)
    line, headers, response_body = read_http(fp)
    self.assertline(line, "200", "OK", "HTTP/1.0")
    cl = int(headers["content-length"])
    self.assertEqual(cl, len(response_body))
    # server trusts the content-length header; no pipelining,
    # so request fulfilled, extra bytes are thrown away
    # connection has been closed
    self.send_check_error(to_send)
    self.assertRaises(ConnectionClosed, read_http, fp)
Base
1
def test_before_start_response_http_11_close(self):
    to_send = tobytes(
        "GET /before_start_response HTTP/1.1\n"
        "Connection: close\n\n"
    )
    self.connect()
    self.sock.send(to_send)
    fp = self.sock.makefile("rb", 0)
    line, headers, response_body = read_http(fp)
    self.assertline(line, "500", "Internal Server Error", "HTTP/1.1")
    cl = int(headers["content-length"])
    self.assertEqual(cl, len(response_body))
    self.assertTrue(response_body.startswith(b"Internal Server Error"))
    self.assertEqual(
        sorted(headers.keys()),
        ["connection", "content-length", "content-type", "date", "server"],
    )
    self.assertEqual(headers["connection"], "close")
    # connection has been closed
    self.send_check_error(to_send)
    self.assertRaises(ConnectionClosed, read_http, fp)
Base
1
def testInputPreProcessFormats(self):
    input_str = 'input1=/path/file.txt[ab3];input2=file2'
    input_expr_str = 'input3=np.zeros([2,2]);input4=[4,5]'
    input_dict = saved_model_cli.preprocess_inputs_arg_string(input_str)
    input_expr_dict = saved_model_cli.preprocess_input_exprs_arg_string(
        input_expr_str)
    self.assertTrue(input_dict['input1'] == ('/path/file.txt', 'ab3'))
    self.assertTrue(input_dict['input2'] == ('file2', None))
    print(input_expr_dict['input3'])
    self.assertAllClose(input_expr_dict['input3'], np.zeros([2, 2]))
    self.assertAllClose(input_expr_dict['input4'], [4, 5])
    self.assertTrue(len(input_dict) == 2)
    self.assertTrue(len(input_expr_dict) == 2)
Base
1
def get_info(path: str, root: pathlib.Path) -> typing.Tuple[
        pathlib.Path, dict]:
Base
1
def is_safe_url(url, host=None):
    """
    Return ``True`` if the url is a safe redirection (i.e. it doesn't point to
    a different host).

    Always returns ``False`` on an empty url.
    """
    if not url:
        return False
    netloc = urllib_parse.urlparse(url)[1]
    return not netloc or netloc == host
Base
1
def build_key(self, filename, entry, metadata):
    """ generates a new key according to the specification """
    type = self.key_specs[entry.get('name')]['type']
    bits = self.key_specs[entry.get('name')]['bits']
    if type == 'rsa':
        cmd = "openssl genrsa %s " % bits
    elif type == 'dsa':
        cmd = "openssl dsaparam -noout -genkey %s" % bits
    key = Popen(cmd, shell=True, stdout=PIPE).stdout.read()
    return key
Class
2
def __init__(
    self,
    cache,
    safe=safename
Class
2
def _handle_carbon_received(self, msg):
    self.xmpp.event('carbon_received', msg)
Class
2
def feed_booksindex():
    shift = 0
    off = int(request.args.get("offset") or 0)
    entries = calibre_db.session.query(func.upper(func.substr(db.Books.sort, 1, 1)).label('id'))\
        .filter(calibre_db.common_filters()).group_by(func.upper(func.substr(db.Books.sort, 1, 1))).all()

    elements = []
    if off == 0:
        elements.append({'id': "00", 'name': _("All")})
        shift = 1
    for entry in entries[off + shift - 1:int(off + int(config.config_books_per_page) - shift)]:
        elements.append({'id': entry.id, 'name': entry.id})

    pagination = Pagination((int(off) / (int(config.config_books_per_page)) + 1),
                            config.config_books_per_page,
                            len(entries) + 1)
    return render_xml_template('feed.xml',
                               letterelements=elements,
                               folder='opds.feed_letter_books',
                               pagination=pagination)
Base
1
def _create_database(self, last_upgrade_to_run):
    """
    Make sure that the database is created and sets the file permissions.
    This should be done before storing any sensitive data in it.
    """
    # Create the tables in the database
    conn = self._connect()
    try:
        with conn:
            self._create_tables(conn, last_upgrade_to_run)
    finally:
        conn.close()

    # Set the file permissions
    os.chmod(self.filename, stat.S_IRUSR | stat.S_IWUSR)
Base
1
def main(req: func.HttpRequest) -> func.HttpResponse:
    response = ok(
        Info(
            resource_group=get_base_resource_group(),
            region=get_base_region(),
            subscription=get_subscription(),
            versions=versions(),
            instance_id=get_instance_id(),
            insights_appid=get_insights_appid(),
            insights_instrumentation_key=get_insights_instrumentation_key(),
        )
    )
    return response
Class
2
def _get_element_ptr_tuplelike(parent, key):
    typ = parent.typ
    assert isinstance(typ, TupleLike)

    if isinstance(typ, StructType):
        assert isinstance(key, str)
        subtype = typ.members[key]
        attrs = list(typ.tuple_keys())
        index = attrs.index(key)
        annotation = key
    else:
        assert isinstance(key, int)
        subtype = typ.members[key]
        attrs = list(range(len(typ.members)))
        index = key
        annotation = None

    # generated by empty() + make_setter
    if parent.value == "~empty":
        return IRnode.from_list("~empty", typ=subtype)

    if parent.value == "multi":
        assert parent.encoding != Encoding.ABI, "no abi-encoded literals"
        return parent.args[index]

    ofst = 0  # offset from parent start

    if parent.encoding in (Encoding.ABI, Encoding.JSON_ABI):
        if parent.location == STORAGE:
            raise CompilerPanic("storage variables should not be abi encoded")  # pragma: notest

        member_t = typ.members[attrs[index]]

        for i in range(index):
            member_abi_t = typ.members[attrs[i]].abi_type
            ofst += member_abi_t.embedded_static_size()

        return _getelemptr_abi_helper(parent, member_t, ofst)

    if parent.location.word_addressable:
        for i in range(index):
            ofst += typ.members[attrs[i]].storage_size_in_words
    elif parent.location.byte_addressable:
        for i in range(index):
            ofst += typ.members[attrs[i]].memory_bytes_required
    else:
        raise CompilerPanic(f"bad location {parent.location}")  # pragma: notest

    return IRnode.from_list(
        add_ofst(parent, ofst),
        typ=subtype,
        location=parent.location,
        encoding=parent.encoding,
        annotation=annotation,
    )
Class
2
def parse_soap_enveloped_saml_thingy(text, expected_tags):
    """Parses a SOAP enveloped SAML thing and returns the thing as a string.

    :param text: The SOAP object as XML string
    :param expected_tags: What the tag of the SAML thingy is expected to be.
    :return: SAML thingy as a string
    """
    envelope = ElementTree.fromstring(text)

    # Make sure it's a SOAP message
    assert envelope.tag == '{%s}Envelope' % soapenv.NAMESPACE

    assert len(envelope) >= 1

    body = None
    for part in envelope:
        if part.tag == '{%s}Body' % soapenv.NAMESPACE:
            assert len(part) == 1
            body = part
            break

    if body is None:
        return ""

    saml_part = body[0]
    if saml_part.tag in expected_tags:
        return ElementTree.tostring(saml_part, encoding="UTF-8")
    else:
        raise WrongMessageType("Was '%s' expected one of %s" % (saml_part.tag, expected_tags))
Base
1
def test_can_read_token_from_query_parameters(self):
    """Tests that Sydent correctly extracts an auth token from query parameters"""
    self.sydent.run()
    request, _ = make_request(
        self.sydent.reactor, "GET",
        "/_matrix/identity/v2/hash_details?access_token=" + self.test_token
    )

    token = tokenFromRequest(request)

    self.assertEqual(token, self.test_token)
Base
1
def contains(field: Term, value: str) -> Criterion:
    return field.like(f"%{value}%")
Base
1
def dataReceived(self, data: bytes) -> None:
    self.stream.write(data)
    self.length += len(data)
    if self.max_size is not None and self.length >= self.max_size:
        self.deferred.errback(
            SynapseError(
                502,
                "Requested file is too large > %r bytes" % (self.max_size,),
                Codes.TOO_LARGE,
            )
        )
        self.deferred = defer.Deferred()
        self.transport.loseConnection()
Class
2
def test_datetime_parsing(value, result):
    if result == errors.DateTimeError:
        with pytest.raises(errors.DateTimeError):
            parse_datetime(value)
    else:
        assert parse_datetime(value) == result
Base
1
def test_basic_two_credentials():
    # Test Basic Authentication with multiple sets of credentials
    http = httplib2.Http()
    password1 = tests.gen_password()
    password2 = tests.gen_password()
    allowed = [("joe", password1)]  # exploit shared mutable list
    handler = tests.http_reflect_with_auth(
        allow_scheme="basic", allow_credentials=allowed
    )
    with tests.server_request(handler, request_count=7) as uri:
        http.add_credentials("fred", password2)
        response, content = http.request(uri, "GET")
        assert response.status == 401
        http.add_credentials("joe", password1)
        response, content = http.request(uri, "GET")
        assert response.status == 200
        allowed[0] = ("fred", password2)
        response, content = http.request(uri, "GET")
        assert response.status == 200
Class
2
def test_login_get_non_idempotent(self):
    login_code = LoginCode.objects.create(user=self.user, code='foobar', next='/private/')

    response = self.client.get('/accounts/login/code/', {
        'code': login_code.code,
    })

    self.assertEqual(response.status_code, 302)
    self.assertEqual(response['Location'], '/private/')
    self.assertEqual(response.wsgi_request.user, self.user)
    self.assertFalse(LoginCode.objects.filter(pk=login_code.pk).exists())
Base
1
def test_urlsplit_normalization(self):
    # Certain characters should never occur in the netloc,
    # including under normalization.
    # Ensure that ALL of them are detected and cause an error
    illegal_chars = '/:#?@'
    hex_chars = {'{:04X}'.format(ord(c)) for c in illegal_chars}
    denorm_chars = [
        c for c in map(chr, range(128, sys.maxunicode))
        if (hex_chars & set(unicodedata.decomposition(c).split()))
        and c not in illegal_chars
    ]
    # Sanity check that we found at least one such character
    self.assertIn('\u2100', denorm_chars)
    self.assertIn('\uFF03', denorm_chars)

    # bpo-36742: Verify port separators are ignored when they
    # existed prior to decomposition
    urllib.parse.urlsplit('http://\u30d5\u309a:80')
    with self.assertRaises(ValueError):
        urllib.parse.urlsplit('http://\u30d5\u309a\ufe1380')

    for scheme in ["http", "https", "ftp"]:
        for c in denorm_chars:
            url = "{}://netloc{}false.netloc/path".format(scheme, c)
            with self.subTest(url=url, char='{:04X}'.format(ord(c))):
                with self.assertRaises(ValueError):
                    urllib.parse.urlsplit(url)
Class
2
def formatType(self):
    format_type = self.type.lower()
    if format_type == 'amazon':
        return u"Amazon"
    elif format_type.startswith("amazon_"):
        return u"Amazon.{0}".format(format_type[7:])
    elif format_type == "isbn":
        return u"ISBN"
    elif format_type == "doi":
        return u"DOI"
    elif format_type == "douban":
        return u"Douban"
    elif format_type == "goodreads":
        return u"Goodreads"
    elif format_type == "babelio":
        return u"Babelio"
    elif format_type == "google":
        return u"Google Books"
    elif format_type == "kobo":
        return u"Kobo"
    elif format_type == "litres":
        return u"ЛитРес"
    elif format_type == "issn":
        return u"ISSN"
    elif format_type == "isfdb":
        return u"ISFDB"
    elif format_type == "lubimyczytac":
        return u"Lubimyczytac"
    else:
        return self.type
Base
1
def _w(self, content):
    self.output.append(format_data(content, format_money_values=True))
Base
1
def test_change_to_invalid_due_date(self):
    url = reverse('change_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
    response = self.client.get(url, {
        'student': self.user1.username,
        'url': self.week1.location.to_deprecated_string(),
        'due_datetime': '01/01/2009 00:00'
    })
    self.assertEqual(response.status_code, 400, response.content)
    self.assertEqual(
        None,
        get_extended_due(self.course, self.week1, self.user1)
    )
Compound
4
def __init__(
    self,
    credentials,
    host,
    request_uri,
    headers,
    response,
    content,
    http
Class
2
def test_basic():
    # Test Basic Authentication
    http = httplib2.Http()
    password = tests.gen_password()
    handler = tests.http_reflect_with_auth(
        allow_scheme="basic", allow_credentials=(("joe", password),)
    )
    with tests.server_request(handler, request_count=3) as uri:
        response, content = http.request(uri, "GET")
        assert response.status == 401
        http.add_credentials("joe", password)
        response, content = http.request(uri, "GET")
        assert response.status == 200
Class
2
def test_received_headers_finished_expect_continue_true_sent_true(self):
    inst, sock, map = self._makeOneWithMap()
    inst.server = DummyServer()
    preq = DummyParser()
    inst.request = preq
    preq.expect_continue = True
    preq.headers_finished = True
    preq.completed = False
    preq.empty = False
    inst.sent_continue = True
    inst.received(b"GET / HTTP/1.1\n\n")
    self.assertEqual(inst.request, preq)
    self.assertEqual(inst.server.tasks, [])
    self.assertEqual(sock.sent, b"")
    self.assertEqual(inst.sent_continue, True)
    self.assertEqual(preq.completed, False)
Base
1
def glob_to_regex(glob):
    """Converts a glob to a compiled regex object.

    The regex is anchored at the beginning and end of the string.

    Args:
        glob (str)

    Returns:
        re.RegexObject
    """
    res = ""
    for c in glob:
        if c == "*":
            res = res + ".*"
        elif c == "?":
            res = res + "."
        else:
            res = res + re.escape(c)

    # \A anchors at start of string, \Z at end of string
    return re.compile(r"\A" + res + r"\Z", re.IGNORECASE)
Base
1
def parse_line(s):
    s = s.rstrip()
    r = re.sub(REG_LINE_GPERF, '', s)
    if r != s:
        return r
    r = re.sub(REG_HASH_FUNC, 'hash(OnigCodePoint codes[])', s)
    if r != s:
        return r
    r = re.sub(REG_STR_AT, 'onig_codes_byte_at(codes, \\1)', s)
    if r != s:
        return r
    r = re.sub(REG_UNFOLD_KEY, 'unicode_unfold_key(OnigCodePoint code)', s)
    if r != s:
        return r
    r = re.sub(REG_ENTRY, '{\\1, \\2, \\3}', s)
    if r != s:
        return r
    r = re.sub(REG_EMPTY_ENTRY, '{0xffffffff, \\1, \\2}', s)
    if r != s:
        return r
    r = re.sub(REG_IF_LEN, 'if (0 == 0)', s)
    if r != s:
        return r
    r = re.sub(REG_GET_HASH, 'int key = hash(&code);', s)
    if r != s:
        return r
    r = re.sub(REG_GET_CODE, 'OnigCodePoint gcode = wordlist[key].code;', s)
    if r != s:
        return r
    r = re.sub(REG_CODE_CHECK, 'if (code == gcode)', s)
    if r != s:
        return r
    return s
Base
1
def move(request, topic_id):
    topic = get_object_or_404(Topic, pk=topic_id)
    form = CommentMoveForm(topic=topic, data=request.POST)

    if form.is_valid():
        comments = form.save()

        for comment in comments:
            comment_posted(comment=comment, mentions=None)
            topic.decrease_comment_count()
            post_comment_move(comment=comment, topic=topic)
    else:
        messages.error(request, render_form_errors(form))

    return redirect(request.POST.get('next', topic.get_absolute_url()))
Base
1
def get_recipe_from_file(self, file):
    recipe_html = file.getvalue().decode("utf-8")

    recipe_json, recipe_tree, html_data, images = get_recipe_from_source(recipe_html, 'CookBookApp', self.request)

    recipe = Recipe.objects.create(
        name=recipe_json['name'].strip(),
        created_by=self.request.user,
        internal=True,
        space=self.request.space)

    try:
        recipe.servings = re.findall('([0-9])+', recipe_json['recipeYield'])[0]
    except Exception as e:
        pass

    try:
        recipe.working_time = iso_duration_to_minutes(recipe_json['prepTime'])
        recipe.waiting_time = iso_duration_to_minutes(recipe_json['cookTime'])
    except Exception:
        pass

    step = Step.objects.create(instruction=recipe_json['recipeInstructions'], space=self.request.space)

    if 'nutrition' in recipe_json:
        step.instruction = step.instruction + '\n\n' + recipe_json['nutrition']
        step.save()

    recipe.steps.add(step)

    ingredient_parser = IngredientParser(self.request, True)
    for ingredient in recipe_json['recipeIngredient']:
        f = ingredient_parser.get_food(ingredient['ingredient']['text'])
        u = ingredient_parser.get_unit(ingredient['unit']['text'])
        step.ingredients.add(Ingredient.objects.create(
            food=f,
            unit=u,
            amount=ingredient['amount'],
            note=ingredient['note'],
            space=self.request.space,
        ))

    if len(images) > 0:
        try:
            response = requests.get(images[0])
            self.import_recipe_image(recipe, BytesIO(response.content))
        except Exception as e:
            print('failed to import image ', str(e))

    recipe.save()
    return recipe
Base
1
def test_received_headers_finished_expect_continue_true(self):
    inst, sock, map = self._makeOneWithMap()
    inst.server = DummyServer()
    preq = DummyParser()
    inst.request = preq
    preq.expect_continue = True
    preq.headers_finished = True
    preq.completed = False
    preq.empty = False
    inst.received(b"GET / HTTP/1.1\n\n")
    self.assertEqual(inst.request, preq)
    self.assertEqual(inst.server.tasks, [])
    self.assertEqual(sock.sent, b"HTTP/1.1 100 Continue\r\n\r\n")
    self.assertEqual(inst.sent_continue, True)
    self.assertEqual(preq.completed, False)
Base
1
def read_config(self, config, **kwargs):
    consent_config = config.get("user_consent")
    self.terms_template = self.read_templates(["terms.html"], autoescape=True)[0]

    if consent_config is None:
        return
    self.user_consent_version = str(consent_config["version"])
    self.user_consent_template_dir = self.abspath(consent_config["template_dir"])
    if not path.isdir(self.user_consent_template_dir):
        raise ConfigError(
            "Could not find template directory '%s'"
            % (self.user_consent_template_dir,)
        )
    self.user_consent_server_notice_content = consent_config.get(
        "server_notice_content"
    )
    self.block_events_without_consent_error = consent_config.get(
        "block_events_error"
    )
    self.user_consent_server_notice_to_guests = bool(
        consent_config.get("send_server_notice_to_guests", False)
    )
    self.user_consent_at_registration = bool(
        consent_config.get("require_at_registration", False)
    )
    self.user_consent_policy_name = consent_config.get(
        "policy_name", "Privacy Policy"
    )
Class
2
def edit_book_series_index(series_index, book):
    # Add default series_index to book
    modif_date = False
    series_index = series_index or '1'
    if not series_index.replace('.', '', 1).isdigit():
        flash(_("%(seriesindex)s is not a valid number, skipping", seriesindex=series_index), category="warning")
        return False
    if str(book.series_index) != series_index:
        book.series_index = series_index
        modif_date = True
    return modif_date
Base
1
def request(
    self,
    uri,
    method="GET",
    body=None,
    headers=None,
    redirections=DEFAULT_MAX_REDIRECTS,
    connection_type=None,
Class
2
def validate_and_sanitize_search_inputs(fn, instance, args, kwargs):
    kwargs.update(dict(zip(fn.__code__.co_varnames, args)))
    sanitize_searchfield(kwargs['searchfield'])
    kwargs['start'] = cint(kwargs['start'])
    kwargs['page_len'] = cint(kwargs['page_len'])

    if kwargs['doctype'] and not frappe.db.exists('DocType', kwargs['doctype']):
        return []

    return fn(**kwargs)
Base
1
def adv_search_extension(q, include_extension_inputs, exclude_extension_inputs):
    for extension in include_extension_inputs:
        q = q.filter(db.Books.data.any(db.Data.format == extension))
    for extension in exclude_extension_inputs:
        q = q.filter(not_(db.Books.data.any(db.Data.format == extension)))
    return q
Base
1
def opds_download_link(book_id, book_format):
    # I gave up with this: With enabled ldap login, the user doesn't get logged in, therefore it's always guest
    # workaround: load the user from the request and check its download rights here
    # in case of anonymous browsing user is None
    user = load_user_from_request(request) or current_user
    if not user.role_download():
        return abort(403)
    if "Kobo" in request.headers.get('User-Agent'):
        client = "kobo"
    else:
        client = ""
    return get_download_link(book_id, book_format.lower(), client)
Base
1
async def ignore(self, ctx, command: str.lower):
    """
    Ignore or unignore the specified action.

    The bot will no longer respond to these actions.
    """
    try:
        custom = await self.config.guild(ctx.guild).get_raw("custom", command)
    except KeyError:
        custom = NotImplemented

    if custom is None:
        await self.config.guild(ctx.guild).clear_raw("custom", command)
        await ctx.send("I will no longer ignore the {command} action".format(command=command))
    else:
        await self.config.guild(ctx.guild).set_raw("custom", command, value=None)
        await ctx.send("I will now ignore the {command} action".format(command=command))
Base
1
def parse(source, filename='<unknown>', mode='exec'):
    """
    Parse the source into an AST node.
    Equivalent to compile(source, filename, mode, PyCF_ONLY_AST).
    """
    return compile(source, filename, mode, PyCF_ONLY_AST)
Base
1
def __init__(self, sourceName: str):
    self.sourceName = sourceName
    self.type = "file"
    self.content = None
Base
1
def feed_author(book_id):
    off = request.args.get("offset") or 0
    entries, __, pagination = calibre_db.fill_indexpage((int(off) / (int(config.config_books_per_page)) + 1), 0,
                                                        db.Books,
                                                        db.Books.authors.any(db.Authors.id == book_id),
                                                        [db.Books.timestamp.desc()])
    return render_xml_template('feed.xml', entries=entries, pagination=pagination)
Base
1
def test_roundtrip_file(self):
    f = open(self.filename, 'wb')
    self.x.tofile(f)
    f.close()
    # NB. doesn't work with flush+seek, due to use of C stdio
    f = open(self.filename, 'rb')
    y = np.fromfile(f, dtype=self.dtype)
    f.close()
    assert_array_equal(y, self.x.flat)
    os.unlink(self.filename)
Class
2
def publisher_list():
    if current_user.get_view_property('publisher', 'dir') == 'desc':
        order = db.Publishers.name.desc()
        order_no = 0
    else:
        order = db.Publishers.name.asc()
        order_no = 1
    if current_user.check_visibility(constants.SIDEBAR_PUBLISHER):
        entries = calibre_db.session.query(db.Publishers, func.count('books_publishers_link.book').label('count')) \
            .join(db.books_publishers_link).join(db.Books).filter(calibre_db.common_filters()) \
            .group_by(text('books_publishers_link.publisher')).order_by(order).all()
        charlist = calibre_db.session.query(func.upper(func.substr(db.Publishers.name, 1, 1)).label('char')) \
            .join(db.books_publishers_link).join(db.Books).filter(calibre_db.common_filters()) \
            .group_by(func.upper(func.substr(db.Publishers.name, 1, 1))).all()
        return render_title_template('list.html', entries=entries, folder='web.books_list', charlist=charlist,
                                     title=_(u"Publishers"), page="publisherlist", data="publisher", order=order_no)
    else:
        abort(404)
Base
1