desc (string, 3 to 26.7k chars) | decl (string, 11 to 7.89k chars) | bodies (string, 8 to 553k chars)
---|---|---|
'The commit_manually function also works with a using argument.'
| @skipUnlessDBFeature('supports_transactions')
def test_manually_managed_with_using(self):
| with self.assertRaises(transaction.TransactionManagementError):
with transaction.commit_manually(using='default'):
Reporter.objects.create(first_name='Walter', last_name='Cronkite')
|
'Regression for #11900: If a block wrapped by commit_on_success
writes a transaction that can\'t be committed, that transaction should
be rolled back. The bug is only visible using the psycopg2 backend,
though the fix is generally a good idea.'
| @skipUnlessDBFeature('requires_rollback_on_dirty_transaction')
def test_bad_sql(self):
| with self.assertRaises(IntegrityError):
with transaction.commit_on_success():
cursor = connection.cursor()
cursor.execute("INSERT INTO transactions_reporter (first_name, last_name) VALUES ('Douglas', 'Adams');")
transaction.set_dirty()
transaction.rollback()
|
'Returns the default WSGI handler for the runner.'
| def get_handler(self, *args, **options):
| return get_internal_wsgi_application()
|
'Runs the server, using the autoreloader if needed'
| def run(self, *args, **options):
| use_reloader = options.get('use_reloader')
if use_reloader:
autoreload.main(self.inner_run, args, options)
else:
self.inner_run(*args, **options)
|
'Pre-parse the command line to extract the value of the --testrunner
option. This allows a test runner to define additional command line
arguments.'
| def run_from_argv(self, argv):
| option = '--testrunner='
for arg in argv[2:]:
if arg.startswith(option):
self.test_runner = arg[len(option):]
break
super(Command, self).run_from_argv(argv)
|
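The pre-parsing in this row exists so a custom runner can contribute its own options before full parsing happens. A hedged illustration of the intended invocation; the runner path and extra flag below are hypothetical.

```python
# Hypothetical invocation that relies on the pre-parsing above:
#   ./manage.py test --testrunner=myapp.runners.CustomRunner --verbose-failures
# The runner class is extracted before OptionParser sees the arguments, so an
# option defined only by the custom runner (here --verbose-failures) can be
# registered in time to be accepted during the real parse.
```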
'Modify the column name to make it Python-compatible as a field name'
| def normalize_col_name(self, col_name, used_column_names, is_relation):
| field_params = {}
field_notes = []
new_name = col_name.lower()
if (new_name != col_name):
field_notes.append(u'Field name made lowercase.')
if is_relation:
if new_name.endswith(u'_id'):
new_name = new_name[:(-3)]
else:
field_params[u'db_column'] = col_name
(new_name, num_repl) = re.subn(u'\\W', u'_', new_name)
if (num_repl > 0):
field_notes.append(u'Field renamed to remove unsuitable characters.')
if (new_name.find(u'__') >= 0):
while (new_name.find(u'__') >= 0):
new_name = new_name.replace(u'__', u'_')
if (col_name.lower().find(u'__') >= 0):
field_notes.append(u"Field renamed because it contained more than one '_' in a row.")
if new_name.startswith(u'_'):
new_name = (u'field%s' % new_name)
field_notes.append(u"Field renamed because it started with '_'.")
if new_name.endswith(u'_'):
new_name = (u'%sfield' % new_name)
field_notes.append(u"Field renamed because it ended with '_'.")
if keyword.iskeyword(new_name):
new_name += u'_field'
field_notes.append(u'Field renamed because it was a Python reserved word.')
if new_name[0].isdigit():
new_name = (u'number_%s' % new_name)
field_notes.append(u"Field renamed because it wasn't a valid Python identifier.")
if (new_name in used_column_names):
num = 0
while ((u'%s_%d' % (new_name, num)) in used_column_names):
num += 1
new_name = (u'%s_%d' % (new_name, num))
field_notes.append(u'Field renamed because of name conflict.')
if ((col_name != new_name) and field_notes):
field_params[u'db_column'] = col_name
return (new_name, field_params, field_notes)
|
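A rough trace of the normalization above on two hypothetical column names; the expected return values are inferred from the body shown in this row.

```python
# normalize_col_name('User ID', used_column_names=[], is_relation=False)
#   -> ('user_id',
#       {u'db_column': 'User ID'},
#       [u'Field name made lowercase.',
#        u'Field renamed to remove unsuitable characters.'])
#
# normalize_col_name('author_id', used_column_names=[], is_relation=True)
#   -> ('author', {}, [])   # the trailing '_id' is dropped for relations
```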
'Given the database connection, the table name, and the cursor row
description, this routine will return the given field type name, as
well as any additional keyword parameters and notes for the field.'
| def get_field_type(self, connection, table_name, row):
| field_params = {}
field_notes = []
try:
field_type = connection.introspection.get_field_type(row[1], row)
except KeyError:
field_type = u'TextField'
field_notes.append(u'This field type is a guess.')
if (type(field_type) is tuple):
(field_type, new_params) = field_type
field_params.update(new_params)
if ((field_type == u'CharField') and row[3]):
field_params[u'max_length'] = row[3]
if (field_type == u'DecimalField'):
field_params[u'max_digits'] = row[4]
field_params[u'decimal_places'] = row[5]
return (field_type, field_params, field_notes)
|
'Return a sequence comprising the lines of code necessary
to construct the inner Meta class for the model corresponding
to the given database table name.'
| def get_meta(self, table_name):
| return [u' class Meta:', (u" db_table = '%s'" % table_name), u'']
|
'Return the Django version, which should be correct for all
built-in Django commands. User-supplied commands should
override this method.'
| def get_version(self):
| return django.get_version()
|
'Return a brief description of how to use this command, by
default from the attribute ``self.help``.'
| def usage(self, subcommand):
| usage = ('%%prog %s [options] %s' % (subcommand, self.args))
if self.help:
return ('%s\n\n%s' % (usage, self.help))
else:
return usage
|
'Create and return the ``OptionParser`` which will be used to
parse the arguments to this command.'
| def create_parser(self, prog_name, subcommand):
| return OptionParser(prog=prog_name, usage=self.usage(subcommand), version=self.get_version(), option_list=self.option_list)
|
'Print the help message for this command, derived from
``self.usage()``.'
| def print_help(self, prog_name, subcommand):
| parser = self.create_parser(prog_name, subcommand)
parser.print_help()
|
'Set up any environment changes requested (e.g., Python path
and Django settings), then run this command. If the
command raises a ``CommandError``, intercept it and print it sensibly
to stderr.'
| def run_from_argv(self, argv):
| parser = self.create_parser(argv[0], argv[1])
(options, args) = parser.parse_args(argv[2:])
handle_default_options(options)
try:
self.execute(*args, **options.__dict__)
except Exception as e:
stderr = getattr(self, 'stderr', OutputWrapper(sys.stderr, self.style.ERROR))
if options.traceback:
stderr.write(traceback.format_exc())
else:
stderr.write(('%s: %s' % (e.__class__.__name__, e)))
sys.exit(1)
|
'Try to execute this command, performing model validation if
needed (as controlled by the attribute
``self.requires_model_validation``, except if force-skipped).'
| def execute(self, *args, **options):
| saved_lang = None
self.stdout = OutputWrapper(options.get('stdout', sys.stdout))
self.stderr = OutputWrapper(options.get('stderr', sys.stderr), self.style.ERROR)
if self.can_import_settings:
from django.utils import translation
saved_lang = translation.get_language()
translation.activate('en-us')
try:
if (self.requires_model_validation and (not options.get('skip_validation'))):
self.validate()
output = self.handle(*args, **options)
if output:
if self.output_transaction:
from django.db import connections, DEFAULT_DB_ALIAS
connection = connections[options.get('database', DEFAULT_DB_ALIAS)]
if connection.ops.start_transaction_sql():
self.stdout.write(self.style.SQL_KEYWORD(connection.ops.start_transaction_sql()))
self.stdout.write(output)
if self.output_transaction:
self.stdout.write(('\n' + self.style.SQL_KEYWORD('COMMIT;')))
finally:
if (saved_lang is not None):
translation.activate(saved_lang)
|
'Validates the given app, raising CommandError for any errors.
If app is None, then this will validate all installed apps.'
| def validate(self, app=None, display_num_errors=False):
| from django.core.management.validation import get_validation_errors
s = StringIO()
num_errors = get_validation_errors(s, app)
if num_errors:
s.seek(0)
error_text = s.read()
raise CommandError(('One or more models did not validate:\n%s' % error_text))
if display_num_errors:
self.stdout.write(('%s error%s found' % (num_errors, (((num_errors != 1) and 's') or ''))))
|
'The actual logic of the command. Subclasses must implement
this method.'
| def handle(self, *args, **options):
| raise NotImplementedError()
|
'Perform the command\'s actions for ``app``, which will be the
Python module corresponding to an application name given on
the command line.'
| def handle_app(self, app, **options):
| raise NotImplementedError()
|
'Perform the command\'s actions for ``label``, which will be the
string as given on the command line.'
| def handle_label(self, label, **options):
| raise NotImplementedError()
|
'Perform this command\'s actions.'
| def handle_noargs(self, **options):
| raise NotImplementedError()
|
'Output nothing.
The lax options are included in the normal option parser, so under
normal usage, we don\'t need to print the lax options.'
| def print_help(self):
| pass
|
'Output the basic options available to every command.
This just redirects to the default print_help() behavior.'
| def print_lax_help(self):
| OptionParser.print_help(self)
|
'Overrides OptionParser._process_args to exclusively handle default
options and ignore args and other options.
This overrides the behavior of the superclass, which stops parsing
at the first unrecognized option.'
| def _process_args(self, largs, rargs, values):
| while rargs:
arg = rargs[0]
try:
if ((arg[0:2] == '--') and (len(arg) > 2)):
self._process_long_opt(rargs, values)
elif ((arg[:1] == '-') and (len(arg) > 1)):
self._process_short_opts(rargs, values)
else:
del rargs[0]
raise Exception
except:
largs.append(arg)
|
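A sketch of the behaviour difference this override is after, assuming it is wired into the LaxOptionParser used by the management utility; the option names below are hypothetical.

```python
# A stock OptionParser aborts on the first unrecognised option:
#   OptionParser(option_list=BaseCommand.option_list).parse_args(
#       ['--settings=prod', '--made-up-flag'])
#   -> SystemExit: "no such option: --made-up-flag"
#
# The override above instead catches the failure, appends '--made-up-flag' to
# largs, and keeps scanning, so default options such as --settings and
# --pythonpath are still honoured even when subcommand-specific options appear.
```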
'Returns the script\'s main help text, as a string.'
| def main_help_text(self, commands_only=False):
| if commands_only:
usage = sorted(get_commands().keys())
else:
usage = ['', ("Type '%s help <subcommand>' for help on a specific subcommand." % self.prog_name), '', 'Available subcommands:']
commands_dict = collections.defaultdict((lambda : []))
for (name, app) in six.iteritems(get_commands()):
if (app == 'django.core'):
app = 'django'
else:
app = app.rpartition('.')[(-1)]
commands_dict[app].append(name)
style = color_style()
for app in sorted(commands_dict.keys()):
usage.append('')
usage.append(style.NOTICE(('[%s]' % app)))
for name in sorted(commands_dict[app]):
usage.append((' %s' % name))
return '\n'.join(usage)
|
'Tries to fetch the given subcommand, printing a message with the
appropriate command called from the command line (usually
"django-admin.py" or "manage.py") if it can\'t be found.'
| def fetch_command(self, subcommand):
| try:
app_name = get_commands()[subcommand]
except KeyError:
sys.stderr.write(("Unknown command: %r\nType '%s help' for usage.\n" % (subcommand, self.prog_name)))
sys.exit(1)
if isinstance(app_name, BaseCommand):
klass = app_name
else:
klass = load_command_class(app_name, subcommand)
return klass
|
'Output completion suggestions for BASH.
The output of this function is passed to BASH\'s `COMPREPLY` variable and
treated as completion suggestions. `COMPREPLY` expects a space-separated
string as the result.
The `COMP_WORDS` and `COMP_CWORD` BASH environment variables are used
to get information about the CLI input. Please refer to the BASH
man page for more information about these variables.
Subcommand options are saved as pairs. A pair consists of
the long option string (e.g. \'--exclude\') and a boolean
value indicating if the option requires arguments. When printing to
stdout, an equal sign is appended to options which require arguments.
Note: If debugging this function, it is recommended to write the debug
output to a separate file. Otherwise the debug output will be treated
and formatted as potential completion suggestions.'
| def autocomplete(self):
| if ('DJANGO_AUTO_COMPLETE' not in os.environ):
return
cwords = os.environ['COMP_WORDS'].split()[1:]
cword = int(os.environ['COMP_CWORD'])
try:
curr = cwords[(cword - 1)]
except IndexError:
curr = ''
subcommands = (list(get_commands()) + ['help'])
options = [('--help', None)]
if (cword == 1):
print ' '.join(sorted(filter((lambda x: x.startswith(curr)), subcommands)))
elif ((cwords[0] in subcommands) and (cwords[0] != 'help')):
subcommand_cls = self.fetch_command(cwords[0])
if (cwords[0] == 'runfcgi'):
from django.core.servers.fastcgi import FASTCGI_OPTIONS
options += [(k, 1) for k in FASTCGI_OPTIONS]
elif (cwords[0] in ('dumpdata', 'sql', 'sqlall', 'sqlclear', 'sqlcustom', 'sqlindexes', 'sqlsequencereset', 'test')):
try:
from django.conf import settings
options += [(a.split('.')[(-1)], 0) for a in settings.INSTALLED_APPS]
except ImportError:
pass
options += [(s_opt.get_opt_string(), s_opt.nargs) for s_opt in subcommand_cls.option_list]
prev_opts = [x.split('=')[0] for x in cwords[1:(cword - 1)]]
options = [opt for opt in options if (opt[0] not in prev_opts)]
options = sorted([(k, v) for (k, v) in options if k.startswith(curr)])
for option in options:
opt_label = option[0]
if option[1]:
opt_label += '='
print opt_label
sys.exit(1)
|
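A hedged sketch of driving this hook by hand; the project layout is hypothetical and the environment protocol is the one described in the docstring above.

```python
import os
import subprocess

env = dict(os.environ,
           DJANGO_AUTO_COMPLETE='1',
           COMP_WORDS='manage.py sql',   # words typed on the command line so far
           COMP_CWORD='1')               # index of the word being completed
# Prints the subcommands starting with 'sql' (space separated), then exits
# with status 1, as the body above does for cword == 1.
subprocess.call(['python', 'manage.py'], env=env)
```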
'Given the command-line arguments, this figures out which subcommand is
being run, creates a parser appropriate to that command, and runs it.'
| def execute(self):
| parser = LaxOptionParser(usage='%prog subcommand [options] [args]', version=get_version(), option_list=BaseCommand.option_list)
self.autocomplete()
try:
(options, args) = parser.parse_args(self.argv)
handle_default_options(options)
except:
pass
try:
subcommand = self.argv[1]
except IndexError:
subcommand = 'help'
if (subcommand == 'help'):
if (len(args) <= 2):
parser.print_lax_help()
sys.stdout.write((self.main_help_text() + '\n'))
elif (args[2] == '--commands'):
sys.stdout.write((self.main_help_text(commands_only=True) + '\n'))
else:
self.fetch_command(args[2]).print_help(self.prog_name, args[2])
elif (subcommand == 'version'):
sys.stdout.write((parser.get_version() + '\n'))
elif (self.argv[1:] == ['--version']):
pass
elif (self.argv[1:] in (['--help'], ['-h'])):
parser.print_lax_help()
sys.stdout.write((self.main_help_text() + '\n'))
else:
self.fetch_command(subcommand).run_from_argv(self.argv)
|
'Determines where the app or project templates are.
Use django.__path__[0] as the default because we don\'t
know into which directory Django has been installed.'
| def handle_template(self, template, subdir):
| if (template is None):
return path.join(django.__path__[0], 'conf', subdir)
else:
if template.startswith('file://'):
template = template[7:]
expanded_template = path.expanduser(template)
expanded_template = path.normpath(expanded_template)
if path.isdir(expanded_template):
return expanded_template
if self.is_url(template):
absolute_path = self.download(template)
else:
absolute_path = path.abspath(expanded_template)
if path.exists(absolute_path):
return self.extract(absolute_path)
raise CommandError(("couldn't handle %s template %s." % (self.app_or_project, template)))
|
'Downloads the given URL and returns the file name.'
| def download(self, url):
| def cleanup_url(url):
tmp = url.rstrip('/')
filename = tmp.split('/')[(-1)]
if url.endswith('/'):
display_url = (tmp + '/')
else:
display_url = url
return (filename, display_url)
prefix = ('django_%s_template_' % self.app_or_project)
tempdir = tempfile.mkdtemp(prefix=prefix, suffix='_download')
self.paths_to_remove.append(tempdir)
(filename, display_url) = cleanup_url(url)
if (self.verbosity >= 2):
self.stdout.write(('Downloading %s\n' % display_url))
try:
(the_path, info) = urlretrieve(url, path.join(tempdir, filename))
except IOError as e:
raise CommandError(("couldn't download URL %s to %s: %s" % (url, filename, e)))
used_name = the_path.split('/')[(-1)]
content_disposition = info.get('content-disposition')
if content_disposition:
(_, params) = cgi.parse_header(content_disposition)
guessed_filename = (params.get('filename') or used_name)
else:
guessed_filename = used_name
ext = self.splitext(guessed_filename)[1]
content_type = info.get('content-type')
if ((not ext) and content_type):
ext = mimetypes.guess_extension(content_type)
if ext:
guessed_filename += ext
if (used_name != guessed_filename):
guessed_path = path.join(tempdir, guessed_filename)
shutil.move(the_path, guessed_path)
return guessed_path
return the_path
|
'Like os.path.splitext, but takes off .tar, too'
| def splitext(self, the_path):
| (base, ext) = posixpath.splitext(the_path)
if base.lower().endswith('.tar'):
ext = (base[(-4):] + ext)
base = base[:(-4)]
return (base, ext)
|
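A quick check of the '.tar' special case, with hypothetical file names; the standard-library result is shown for contrast.

```python
import posixpath

posixpath.splitext('archive.tar.gz')   # -> ('archive.tar', '.gz')
# The helper above folds '.tar' back into the extension instead:
# splitext('archive.tar.gz')           # -> ('archive', '.tar.gz')
```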
'Extracts the given file to a temporary directory and returns
the path of the directory with the extracted content.'
| def extract(self, filename):
| prefix = ('django_%s_template_' % self.app_or_project)
tempdir = tempfile.mkdtemp(prefix=prefix, suffix='_extract')
self.paths_to_remove.append(tempdir)
if (self.verbosity >= 2):
self.stdout.write(('Extracting %s\n' % filename))
try:
archive.extract(filename, tempdir)
return tempdir
except (archive.ArchiveException, IOError) as e:
raise CommandError(("couldn't extract file %s to %s: %s" % (filename, tempdir, e)))
|
'Returns True if the name looks like a URL'
| def is_url(self, template):
| if (':' not in template):
return False
scheme = template.split(':', 1)[0].lower()
return (scheme in self.url_schemes)
|
'Make sure that the file is writeable.
Useful if our source is read-only.'
| def make_writeable(self, filename):
| if sys.platform.startswith('java'):
return
if (not os.access(filename, os.W_OK)):
st = os.stat(filename)
new_permissions = (stat.S_IMODE(st.st_mode) | stat.S_IWUSR)
os.chmod(filename, new_permissions)
|
'\'write()\' callable as specified by PEP 3333'
| def write(self, data):
| assert isinstance(data, bytes), u'write() argument must be bytestring'
if (not self.status):
raise AssertionError(u'write() before start_response()')
elif (not self.headers_sent):
self.bytes_sent = len(data)
self.send_headers()
else:
self.bytes_sent += len(data)
length = len(data)
if (length > 33554432):
offset = 0
while (offset < length):
chunk_size = min(33554432, length)
self._write(data[offset:(offset + chunk_size)])
self._flush()
offset += chunk_size
else:
self._write(data)
self._flush()
|
'Override server_bind to store the server name.'
| def server_bind(self):
| try:
super(WSGIServer, self).server_bind()
except Exception as e:
raise WSGIServerException(e)
self.setup_environ()
|
'Populate middleware lists from settings.MIDDLEWARE_CLASSES.
Must be called after the environment is fixed (see __call__ in subclasses).'
| def load_middleware(self):
| self._view_middleware = []
self._template_response_middleware = []
self._response_middleware = []
self._exception_middleware = []
request_middleware = []
for middleware_path in settings.MIDDLEWARE_CLASSES:
try:
(mw_module, mw_classname) = middleware_path.rsplit(u'.', 1)
except ValueError:
raise exceptions.ImproperlyConfigured((u"%s isn't a middleware module" % middleware_path))
try:
mod = import_module(mw_module)
except ImportError as e:
raise exceptions.ImproperlyConfigured((u'Error importing middleware %s: "%s"' % (mw_module, e)))
try:
mw_class = getattr(mod, mw_classname)
except AttributeError:
raise exceptions.ImproperlyConfigured((u'Middleware module "%s" does not define a "%s" class' % (mw_module, mw_classname)))
try:
mw_instance = mw_class()
except exceptions.MiddlewareNotUsed:
continue
if hasattr(mw_instance, u'process_request'):
request_middleware.append(mw_instance.process_request)
if hasattr(mw_instance, u'process_view'):
self._view_middleware.append(mw_instance.process_view)
if hasattr(mw_instance, u'process_template_response'):
self._template_response_middleware.insert(0, mw_instance.process_template_response)
if hasattr(mw_instance, u'process_response'):
self._response_middleware.insert(0, mw_instance.process_response)
if hasattr(mw_instance, u'process_exception'):
self._exception_middleware.insert(0, mw_instance.process_exception)
self._request_middleware = request_middleware
|
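A minimal sketch of a middleware class that this loader would pick up; the class name, module path, and header are hypothetical.

```python
import time

class TimingMiddleware(object):
    """Hypothetical middleware that times each request."""

    def process_request(self, request):
        # Appended to _request_middleware and run in settings order.
        request._started = time.time()
        return None                      # None means continue processing

    def process_response(self, request, response):
        # Inserted at position 0 of _response_middleware, so it runs in
        # reverse settings order on the way out.
        response['X-Elapsed'] = '%.3f' % (time.time() - request._started)
        return response

# settings.MIDDLEWARE_CLASSES would then list 'myapp.middleware.TimingMiddleware'.
```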
'Returns an HttpResponse object for the given HttpRequest'
| def get_response(self, request):
| try:
urlconf = settings.ROOT_URLCONF
urlresolvers.set_urlconf(urlconf)
resolver = urlresolvers.RegexURLResolver(u'^/', urlconf)
try:
response = None
for middleware_method in self._request_middleware:
response = middleware_method(request)
if response:
break
if (response is None):
if hasattr(request, u'urlconf'):
urlconf = request.urlconf
urlresolvers.set_urlconf(urlconf)
resolver = urlresolvers.RegexURLResolver(u'^/', urlconf)
resolver_match = resolver.resolve(request.path_info)
(callback, callback_args, callback_kwargs) = resolver_match
request.resolver_match = resolver_match
for middleware_method in self._view_middleware:
response = middleware_method(request, callback, callback_args, callback_kwargs)
if response:
break
if (response is None):
try:
response = callback(request, *callback_args, **callback_kwargs)
except Exception as e:
for middleware_method in self._exception_middleware:
response = middleware_method(request, e)
if response:
break
if (response is None):
raise
if (response is None):
if isinstance(callback, types.FunctionType):
view_name = callback.__name__
else:
view_name = (callback.__class__.__name__ + u'.__call__')
raise ValueError((u"The view %s.%s didn't return an HttpResponse object." % (callback.__module__, view_name)))
if (hasattr(response, u'render') and callable(response.render)):
for middleware_method in self._template_response_middleware:
response = middleware_method(request, response)
response = response.render()
except http.Http404 as e:
logger.warning(u'Not Found: %s', request.path, extra={u'status_code': 404, u'request': request})
if settings.DEBUG:
response = debug.technical_404_response(request, e)
else:
try:
(callback, param_dict) = resolver.resolve404()
response = callback(request, **param_dict)
except:
signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
except exceptions.PermissionDenied:
logger.warning(u'Forbidden (Permission denied): %s', request.path, extra={u'status_code': 403, u'request': request})
try:
(callback, param_dict) = resolver.resolve403()
response = callback(request, **param_dict)
except:
signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
except SystemExit:
raise
except:
signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
finally:
urlresolvers.set_urlconf(None)
try:
for middleware_method in self._response_middleware:
response = middleware_method(request, response)
response = self.apply_response_fixes(request, response)
except:
signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
return response
|
'Processing for any otherwise uncaught exceptions (those that will
generate HTTP 500 responses). Can be overridden by subclasses who want
customised 500 handling.
Be *very* careful when overriding this because the error could be
caused by anything, so assuming something like the database is always
available would be an error.'
| def handle_uncaught_exception(self, request, resolver, exc_info):
| if settings.DEBUG_PROPAGATE_EXCEPTIONS:
raise
logger.error(u'Internal Server Error: %s', request.path, exc_info=exc_info, extra={u'status_code': 500, u'request': request})
if settings.DEBUG:
return debug.technical_500_response(request, *exc_info)
if (resolver.urlconf_module is None):
six.reraise(*exc_info)
(callback, param_dict) = resolver.resolve500()
return callback(request, **param_dict)
|
'Applies each of the functions in self.response_fixes to the request and
response, modifying the response in the process. Returns the new
response.'
| def apply_response_fixes(self, request, response):
| for func in self.response_fixes:
response = func(request, response)
return response
|
'Parses media types according to RFC 2616, section 3.7.
Returns the data type and parameters. For example:
Input: "text/plain; charset=iso-8859-1"
Output: (\'text/plain\', {\'charset\': \'iso-8859-1\'})'
| def _parse_content_type(self, ctype):
| (content_type, _, params) = ctype.partition(u';')
content_params = {}
for parameter in params.split(u';'):
(k, _, v) = parameter.strip().partition(u'=')
content_params[k] = v
return (content_type, content_params)
|
'Open a network connection.
This method can be overridden by backend implementations to
open a network connection.
It\'s up to the backend implementation to track the status of
a network connection if it\'s needed by the backend.
This method can be called by applications to force a single
network connection to be used when sending mail. See the
send_messages() method of the SMTP backend for a reference
implementation.
The default implementation does nothing.'
| def open(self):
| pass
|
'Close a network connection.'
| def close(self):
| pass
|
'Sends one or more EmailMessage objects and returns the number of email
messages sent.'
| def send_messages(self, email_messages):
| raise NotImplementedError
|
'Write all messages to the stream in a thread-safe way.'
| def send_messages(self, email_messages):
| if (not email_messages):
return
with self._lock:
try:
stream_created = self.open()
for message in email_messages:
self.stream.write(('%s\n' % message.message().as_string()))
self.stream.write(('-' * 79))
self.stream.write('\n')
self.stream.flush()
if stream_created:
self.close()
except:
if (not self.fail_silently):
raise
return len(email_messages)
|
'Return a unique file name.'
| def _get_filename(self):
| if (self._fname is None):
timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
fname = ('%s-%s.log' % (timestamp, abs(id(self))))
self._fname = os.path.join(self.file_path, fname)
return self._fname
|
'Ensures we have a connection to the email server. Returns whether or
not a new connection was required (True or False).'
| def open(self):
| if self.connection:
return False
try:
self.connection = smtplib.SMTP(self.host, self.port, local_hostname=DNS_NAME.get_fqdn())
if self.use_tls:
self.connection.ehlo()
self.connection.starttls()
self.connection.ehlo()
if (self.username and self.password):
self.connection.login(self.username, self.password)
return True
except:
if (not self.fail_silently):
raise
|
'Closes the connection to the email server.'
| def close(self):
| if (self.connection is None):
return
try:
self.connection.quit()
except (ssl.SSLError, smtplib.SMTPServerDisconnected):
self.connection.close()
except:
if self.fail_silently:
return
raise
finally:
self.connection = None
|
'Sends one or more EmailMessage objects and returns the number of email
messages sent.'
| def send_messages(self, email_messages):
| if (not email_messages):
return
with self._lock:
new_conn_created = self.open()
if (not self.connection):
return
num_sent = 0
for message in email_messages:
sent = self._send(message)
if sent:
num_sent += 1
if new_conn_created:
self.close()
return num_sent
|
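A usage sketch of the connection reuse described above, going through the public mail API; addresses are placeholders.

```python
from django.core import mail

connection = mail.get_connection()       # whatever EMAIL_BACKEND is configured
messages = [
    mail.EmailMessage('Subject %d' % i, 'Body', 'from@example.com',
                      ['to@example.com'])
    for i in range(3)
]
# A single open()/close() cycle wraps the whole batch; the return value is the
# number of messages actually sent.
sent = connection.send_messages(messages)
```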
'A helper method that does the actual sending.'
| def _send(self, email_message):
| if (not email_message.recipients()):
return False
from_email = sanitize_address(email_message.from_email, email_message.encoding)
recipients = [sanitize_address(addr, email_message.encoding) for addr in email_message.recipients()]
message = email_message.message()
charset = (message.get_charset().get_output_charset() if message.get_charset() else 'utf-8')
try:
self.connection.sendmail(from_email, recipients, force_bytes(message.as_string(), charset))
except:
if (not self.fail_silently):
raise
return False
return True
|
'Redirect messages to the dummy outbox'
| def send_messages(self, messages):
| for message in messages:
message.message()
mail.outbox.extend(messages)
return len(messages)
|
'Return the entire formatted message as a string.
If the optional `unixfrom\' argument is True, the Unix From_ envelope
header is included.
This overrides the default as_string() implementation to not mangle
lines that begin with \'From \'. See bug #13433 for details.'
| def as_string(self, unixfrom=False):
| fp = six.StringIO()
g = Generator(fp, mangle_from_=False)
if ((sys.version_info < (2, 6, 6)) and isinstance(self._payload, six.text_type)):
self._payload = self._payload.encode(self._charset.output_charset)
g.flatten(self, unixfrom=unixfrom)
return fp.getvalue()
|
'Return the entire formatted message as a string.
If the optional `unixfrom\' argument is True, the Unix From_ envelope
header is included.
This overrides the default as_string() implementation to not mangle
lines that begin with \'From \'. See bug #13433 for details.'
| def as_string(self, unixfrom=False):
| fp = six.StringIO()
g = Generator(fp, mangle_from_=False)
g.flatten(self, unixfrom=unixfrom)
return fp.getvalue()
|
'Initialize a single email message (which can be sent to multiple
recipients).
All strings used to create the message can be unicode strings
(or UTF-8 bytestrings). The SafeMIMEText class will handle any
necessary encoding conversions.'
| def __init__(self, subject=u'', body=u'', from_email=None, to=None, bcc=None, connection=None, attachments=None, headers=None, cc=None):
| if to:
assert (not isinstance(to, six.string_types)), u'"to" argument must be a list or tuple'
self.to = list(to)
else:
self.to = []
if cc:
assert (not isinstance(cc, six.string_types)), u'"cc" argument must be a list or tuple'
self.cc = list(cc)
else:
self.cc = []
if bcc:
assert (not isinstance(bcc, six.string_types)), u'"bcc" argument must be a list or tuple'
self.bcc = list(bcc)
else:
self.bcc = []
self.from_email = (from_email or settings.DEFAULT_FROM_EMAIL)
self.subject = subject
self.body = body
self.attachments = (attachments or [])
self.extra_headers = (headers or {})
self.connection = connection
|
'Returns a list of all recipients of the email (includes direct
addressees as well as Cc and Bcc entries).'
| def recipients(self):
| return ((self.to + self.cc) + self.bcc)
|
'Sends the email message.'
| def send(self, fail_silently=False):
| if (not self.recipients()):
return 0
return self.get_connection(fail_silently).send_messages([self])
|
'Attaches a file with the given filename and content. The filename can
be omitted, and the mimetype will be guessed if not provided.
If the first parameter is a MIMEBase subclass, it is inserted directly
into the resulting message attachments.'
| def attach(self, filename=None, content=None, mimetype=None):
| if isinstance(filename, MIMEBase):
assert (content == mimetype == None)
self.attachments.append(filename)
else:
assert (content is not None)
self.attachments.append((filename, content, mimetype))
|
'Attaches a file from the filesystem.'
| def attach_file(self, path, mimetype=None):
| filename = os.path.basename(path)
with open(path, u'rb') as f:
content = f.read()
self.attach(filename, content, mimetype)
|
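A short example of both attachment paths above; the file path, content, and addresses are hypothetical.

```python
from django.core.mail import EmailMessage

msg = EmailMessage(u'Report', u'See attached.', u'from@example.com',
                   [u'to@example.com'])
msg.attach(u'numbers.csv', u'1,2,3\n', u'text/csv')   # explicit content and mimetype
msg.attach_file(u'/tmp/report.pdf')                    # read from disk, mimetype guessed
msg.send()
```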
'Converts the content, mimetype pair into a MIME attachment object.'
| def _create_mime_attachment(self, content, mimetype):
| (basetype, subtype) = mimetype.split(u'/', 1)
if (basetype == u'text'):
encoding = (self.encoding or settings.DEFAULT_CHARSET)
attachment = SafeMIMEText(content, subtype, encoding)
else:
attachment = MIMEBase(basetype, subtype)
attachment.set_payload(content)
Encoders.encode_base64(attachment)
return attachment
|
'Converts the filename, content, mimetype triple into a MIME attachment
object.'
| def _create_attachment(self, filename, content, mimetype=None):
| if (mimetype is None):
(mimetype, _) = mimetypes.guess_type(filename)
if (mimetype is None):
mimetype = DEFAULT_ATTACHMENT_MIME_TYPE
attachment = self._create_mime_attachment(content, mimetype)
if filename:
try:
filename.encode(u'ascii')
except UnicodeEncodeError:
if (not six.PY3):
filename = filename.encode(u'utf-8')
filename = (u'utf-8', u'', filename)
attachment.add_header(u'Content-Disposition', u'attachment', filename=filename)
return attachment
|
'Initialize a single email message (which can be sent to multiple
recipients).
All strings used to create the message can be unicode strings (or UTF-8
bytestrings). The SafeMIMEText class will handle any necessary encoding
conversions.'
| def __init__(self, subject=u'', body=u'', from_email=None, to=None, bcc=None, connection=None, attachments=None, headers=None, alternatives=None, cc=None):
| super(EmailMultiAlternatives, self).__init__(subject, body, from_email, to, bcc, connection, attachments, headers, cc)
self.alternatives = (alternatives or [])
|
'Attach an alternative content representation.'
| def attach_alternative(self, content, mimetype):
| assert (content is not None)
assert (mimetype is not None)
self.alternatives.append((content, mimetype))
|
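The usual plain-text-plus-HTML pattern built on the constructor and attach_alternative above; content and addresses are illustrative.

```python
from django.core.mail import EmailMultiAlternatives

msg = EmailMultiAlternatives(u'Hello', u'Plain-text body', u'from@example.com',
                             [u'to@example.com'])
msg.attach_alternative(u'<p>HTML body</p>', u'text/html')
msg.send()
```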
'Returns the full path of this file.'
| def temporary_file_path(self):
| return self.file.name
|
'Creates a SimpleUploadedFile object from
a dictionary object with the following keys:
- filename
- content-type
- content'
| def from_dict(cls, file_dict):
| return cls(file_dict['filename'], file_dict['content'], file_dict.get('content-type', 'text/plain'))
|
'Read the file and yield chunks of ``chunk_size`` bytes (defaults to
``UploadedFile.DEFAULT_CHUNK_SIZE``).'
| def chunks(self, chunk_size=None):
| if (not chunk_size):
chunk_size = self.DEFAULT_CHUNK_SIZE
try:
self.seek(0)
except (AttributeError, UnsupportedOperation):
pass
while True:
data = self.read(chunk_size)
if (not data):
break
(yield data)
|
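Typical consumption of an uploaded file through the iterator above; the destination path and view wiring are hypothetical.

```python
def handle_uploaded_file(f):
    # f is an UploadedFile; iterating chunks() keeps memory use bounded
    # even for very large uploads.
    with open('/tmp/destination.bin', 'wb') as destination:
        for chunk in f.chunks():
            destination.write(chunk)
```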
'Returns ``True`` if you can expect multiple chunks.
NB: If a particular file representation is in memory, subclasses should
always return ``False`` -- there\'s no good reason to read from memory in
chunks.'
| def multiple_chunks(self, chunk_size=None):
| if (not chunk_size):
chunk_size = self.DEFAULT_CHUNK_SIZE
return (self.size > chunk_size)
|
'If ``connection_reset`` is ``True``, Django will halt the upload
without consuming the rest of it. This will cause the browser to
show a "connection reset" error.'
| def __init__(self, connection_reset=False):
| self.connection_reset = connection_reset
|
'Handle the raw input from the client.
Parameters:
:input_data:
An object that supports reading via .read().
:META:
``request.META``.
:content_length:
The (integer) value of the Content-Length header from the
client.
:boundary: The boundary from the Content-Type header. Be sure to
prepend two \'--\'.'
| def handle_raw_input(self, input_data, META, content_length, boundary, encoding=None):
| pass
|
'Signal that a new file has been started.
Warning: As with any data from the client, you should not trust
content_length (and sometimes won\'t even get it).'
| def new_file(self, field_name, file_name, content_type, content_length, charset=None):
| self.field_name = field_name
self.file_name = file_name
self.content_type = content_type
self.content_length = content_length
self.charset = charset
|
'Receive data from the streamed upload parser. ``start`` is the position
in the file of the chunk.'
| def receive_data_chunk(self, raw_data, start):
| raise NotImplementedError()
|
'Signal that a file has completed. File size corresponds to the actual
size accumulated by all the chunks.
Subclasses should return a valid ``UploadedFile`` object.'
| def file_complete(self, file_size):
| raise NotImplementedError()
|
'Signal that the upload is complete. Subclasses should perform cleanup
that is necessary for this handler.'
| def upload_complete(self):
| pass
|
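A minimal custom handler wiring together the hooks described in the rows above; the class is hypothetical and simply counts bytes while passing each chunk on.

```python
from django.core.files.uploadhandler import FileUploadHandler

class ByteCountingUploadHandler(FileUploadHandler):
    """Hypothetical handler: counts bytes, defers storage to later handlers."""

    def new_file(self, *args, **kwargs):
        super(ByteCountingUploadHandler, self).new_file(*args, **kwargs)
        self.total = 0

    def receive_data_chunk(self, raw_data, start):
        self.total += len(raw_data)
        return raw_data              # returning the data hands it to the next handler

    def file_complete(self, file_size):
        return None                  # None lets a later handler build the UploadedFile
```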
'Create the file object to append to as data is coming in.'
| def new_file(self, file_name, *args, **kwargs):
| super(TemporaryFileUploadHandler, self).new_file(file_name, *args, **kwargs)
self.file = TemporaryUploadedFile(self.file_name, self.content_type, 0, self.charset)
|
'Use the content_length to signal whether or not this handler should be in use.'
| def handle_raw_input(self, input_data, META, content_length, boundary, encoding=None):
| if (content_length > settings.FILE_UPLOAD_MAX_MEMORY_SIZE):
self.activated = False
else:
self.activated = True
|
'Add the data to the BytesIO file.'
| def receive_data_chunk(self, raw_data, start):
| if self.activated:
self.file.write(raw_data)
else:
return raw_data
|
'Return a file object if we\'re activated.'
| def file_complete(self, file_size):
| if (not self.activated):
return
self.file.seek(0)
return InMemoryUploadedFile(file=self.file, field_name=self.field_name, name=self.file_name, content_type=self.content_type, size=file_size, charset=self.charset)
|
'Retrieves the specified file from storage.'
| def open(self, name, mode='rb'):
| return self._open(name, mode)
|
'Saves new content to the file specified by name. The content should be a
proper File object, ready to be read from the beginning.'
| def save(self, name, content):
| if (name is None):
name = content.name
name = self.get_available_name(name)
name = self._save(name, content)
return force_text(name.replace('\\', '/'))
|
'Returns a filename, based on the provided filename, that\'s suitable for
use in the target storage system.'
| def get_valid_name(self, name):
| return get_valid_filename(name)
|
'Returns a filename that\'s free on the target storage system, and
available for new content to be written to.'
| def get_available_name(self, name):
| (dir_name, file_name) = os.path.split(name)
(file_root, file_ext) = os.path.splitext(file_name)
count = itertools.count(1)
while self.exists(name):
name = os.path.join(dir_name, ('%s_%s%s' % (file_root, next(count), file_ext)))
return name
|
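A small sketch of the renaming loop above against the filesystem backend; it assumes a configured MEDIA_ROOT, and the file names and contents are hypothetical.

```python
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage

storage = FileSystemStorage()
storage.save('cat.jpg', ContentFile(b'first'))    # stored as 'cat.jpg'
storage.save('cat.jpg', ContentFile(b'second'))   # renamed to 'cat_1.jpg'
```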
'Returns a local filesystem path where the file can be retrieved using
Python\'s built-in open() function. Storage systems that can\'t be
accessed using open() should *not* implement this method.'
| def path(self, name):
| raise NotImplementedError("This backend doesn't support absolute paths.")
|
'Deletes the specified file from the storage system.'
| def delete(self, name):
| raise NotImplementedError()
|
'Returns True if a file referenced by the given name already exists in the
storage system, or False if the name is available for a new file.'
| def exists(self, name):
| raise NotImplementedError()
|
'Lists the contents of the specified path, returning a 2-tuple of lists;
the first item being directories, the second item being files.'
| def listdir(self, path):
| raise NotImplementedError()
|
'Returns the total size, in bytes, of the file specified by name.'
| def size(self, name):
| raise NotImplementedError()
|
'Returns an absolute URL where the file\'s contents can be accessed
directly by a Web browser.'
| def url(self, name):
| raise NotImplementedError()
|
'Returns the last accessed time (as datetime object) of the file
specified by name.'
| def accessed_time(self, name):
| raise NotImplementedError()
|
'Returns the creation time (as datetime object) of the file
specified by name.'
| def created_time(self, name):
| raise NotImplementedError()
|
'Returns the last modified time (as datetime object) of the file
specified by name.'
| def modified_time(self, name):
| raise NotImplementedError()
|
'Returns a compiled regular expression, depending upon the activated
language-code.'
| @property
def regex(self):
| language_code = get_language()
if (language_code not in self._regex_dict):
if isinstance(self._regex, six.string_types):
regex = self._regex
else:
regex = force_text(self._regex)
try:
compiled_regex = re.compile(regex, re.UNICODE)
except re.error as e:
raise ImproperlyConfigured((u'"%s" is not a valid regular expression: %s' % (regex, six.text_type(e))))
self._regex_dict[language_code] = compiled_regex
return self._regex_dict[language_code]
|
'Adds the prefix string to a string-based callback.'
| def add_prefix(self, prefix):
| if ((not prefix) or (not hasattr(self, u'_callback_str'))):
return
self._callback_str = ((prefix + u'.') + self._callback_str)
|
'Validates the given 1-based page number.'
| def validate_number(self, number):
| try:
number = int(number)
except (TypeError, ValueError):
raise PageNotAnInteger('That page number is not an integer')
if (number < 1):
raise EmptyPage('That page number is less than 1')
if (number > self.num_pages):
if ((number == 1) and self.allow_empty_first_page):
pass
else:
raise EmptyPage('That page contains no results')
return number
|
'Returns a Page object for the given 1-based page number.'
| def page(self, number):
| number = self.validate_number(number)
bottom = ((number - 1) * self.per_page)
top = (bottom + self.per_page)
if ((top + self.orphans) >= self.count):
top = self.count
return Page(self.object_list[bottom:top], number, self)
|
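A usage sketch of validate_number() and page() above; the object list is a throwaway range.

```python
from django.core.paginator import EmptyPage, Paginator

p = Paginator(range(23), per_page=10)
p.page(3).object_list          # the final, shorter page: objects 20, 21 and 22
try:
    p.validate_number('4')     # coerced to 4, which is > num_pages (3)
except EmptyPage:
    pass
```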
'Returns the total number of objects, across all pages.'
| def _get_count(self):
| if (self._count is None):
try:
self._count = self.object_list.count()
except (AttributeError, TypeError):
self._count = len(self.object_list)
return self._count
|
'Returns the total number of pages.'
| def _get_num_pages(self):
| if (self._num_pages is None):
if ((self.count == 0) and (not self.allow_empty_first_page)):
self._num_pages = 0
else:
hits = max(1, (self.count - self.orphans))
self._num_pages = int(ceil((hits / float(self.per_page))))
return self._num_pages
|
'Returns a 1-based range of pages for iterating through within
a template for loop.'
| def _get_page_range(self):
| return range(1, (self.num_pages + 1))
|
'Returns the 1-based index of the first object on this page,
relative to total objects in the paginator.'
| def start_index(self):
| if (self.paginator.count == 0):
return 0
return ((self.paginator.per_page * (self.number - 1)) + 1)
|
'Returns the 1-based index of the last object on this page,
relative to total objects found (hits).'
| def end_index(self):
| if (self.number == self.paginator.num_pages):
return self.paginator.count
return (self.number * self.paginator.per_page)
|
'Serialize a queryset.'
| def serialize(self, queryset, **options):
| self.options = options
self.stream = options.pop('stream', six.StringIO())
self.selected_fields = options.pop('fields', None)
self.use_natural_keys = options.pop('use_natural_keys', False)
self.start_serialization()
self.first = True
for obj in queryset:
self.start_object(obj)
concrete_model = obj._meta.concrete_model
for field in concrete_model._meta.local_fields:
if field.serialize:
if (field.rel is None):
if ((self.selected_fields is None) or (field.attname in self.selected_fields)):
self.handle_field(obj, field)
elif ((self.selected_fields is None) or (field.attname[:(-3)] in self.selected_fields)):
self.handle_fk_field(obj, field)
for field in concrete_model._meta.many_to_many:
if field.serialize:
if ((self.selected_fields is None) or (field.attname in self.selected_fields)):
self.handle_m2m_field(obj, field)
self.end_object(obj)
if self.first:
self.first = False
self.end_serialization()
return self.getvalue()
|
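A typical call that ends up in this method, going through the public serializers API; the model is a placeholder (Reporter is borrowed from the test row at the top of the table) and the keyword options map onto the ones popped above.

```python
from django.core import serializers

# Reporter stands in for any installed model.
data = serializers.serialize('json', Reporter.objects.all(),
                             fields=('first_name', 'last_name'),
                             use_natural_keys=False)
```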
'Called when serialization of the queryset starts.'
| def start_serialization(self):
| raise NotImplementedError
|
'Called when serialization of the queryset ends.'
| def end_serialization(self):
| pass
|
'Called when serialization of an object starts.'
| def start_object(self, obj):
| raise NotImplementedError
|