INSTRUCTION
stringlengths
1
46.3k
RESPONSE
stringlengths
75
80.2k
Returns True if the running system's terminal supports color. Borrowed from Django https://github.com/django/django/blob/master/django/core/management/color.py
def file_supports_color(file_obj): """ Returns True if the running system's terminal supports color. Borrowed from Django https://github.com/django/django/blob/master/django/core/management/color.py """ plat = sys.platform supported_platform = plat != 'Pocket PC' and (plat != 'win32' or 'ANSICON' in os.environ) is_a_tty = file_is_a_tty(file_obj) return (supported_platform and is_a_tty)
Returns the session referred to by identifier
def load_report(identifier=None): ''' Returns the session referred to by identifier ''' path = os.path.join( report_dir(), identifier + '.pyireport' ) return ProfilerSession.load(path)
Saves the session to a temp file, and returns that path. Also prunes the number of reports to 10 so there aren't loads building up.
def save_report(session): ''' Saves the session to a temp file, and returns that path. Also prunes the number of reports to 10 so there aren't loads building up. ''' # prune this folder to contain the last 10 sessions previous_reports = glob.glob(os.path.join(report_dir(), '*.pyireport')) previous_reports.sort(reverse=True) while len(previous_reports) > 10: report_file = previous_reports.pop() os.remove(report_file) identifier = time.strftime('%Y-%m-%dT%H-%M-%S', time.localtime(session.start_time)) path = os.path.join( report_dir(), identifier + '.pyireport' ) session.save(path) return path, identifier
The first frame when using the command line is always the __main__ function. I want to remove that from the output.
def remove_first_pyinstrument_frame_processor(frame, options): ''' The first frame when using the command line is always the __main__ function. I want to remove that from the output. ''' if frame is None: return None if 'pyinstrument' in frame.file_path and len(frame.children) == 1: frame = frame.children[0] frame.remove_from_parent() return frame return frame
Parses the internal frame records and returns a tree of Frame objects
def root_frame(self, trim_stem=True): ''' Parses the internal frame records and returns a tree of Frame objects ''' root_frame = None frame_stack = [] for frame_tuple in self.frame_records: identifier_stack = frame_tuple[0] time = frame_tuple[1] # now we must create a stack of frame objects and assign this time to the leaf for stack_depth, frame_identifier in enumerate(identifier_stack): if stack_depth < len(frame_stack): if frame_identifier != frame_stack[stack_depth].identifier: # trim any frames after and including this one del frame_stack[stack_depth:] if stack_depth >= len(frame_stack): frame = Frame(frame_identifier) frame_stack.append(frame) if stack_depth == 0: # There should only be one root frame, as far as I know assert root_frame is None, ASSERTION_MESSAGE root_frame = frame else: parent = frame_stack[stack_depth-1] parent.add_child(frame) # trim any extra frames del frame_stack[stack_depth+1:] # pylint: disable=W0631 # assign the time to the final frame frame_stack[-1].add_child(SelfTimeFrame(self_time=time)) if root_frame is None: return None if trim_stem: root_frame = self._trim_stem(root_frame) return root_frame
Removes this frame from its parent, and nulls the parent link
def remove_from_parent(self): ''' Removes this frame from its parent, and nulls the parent link ''' if self.parent: self.parent._children.remove(self) self.parent._invalidate_time_caches() self.parent = None
The total amount of self time in this frame (including self time recorded by SelfTimeFrame children)
def total_self_time(self): ''' The total amount of self time in this frame (including self time recorded by SelfTimeFrame children) ''' self_time = self.self_time for child in self.children: if isinstance(child, SelfTimeFrame): self_time += child.self_time return self_time
Adds a child frame, updating the parent link. Optionally, insert the frame in a specific position by passing the frame to insert this one after.
def add_child(self, frame, after=None): ''' Adds a child frame, updating the parent link. Optionally, insert the frame in a specific position by passing the frame to insert this one after. ''' frame.remove_from_parent() frame.parent = self if after is None: self._children.append(frame) else: index = self._children.index(after) + 1 self._children.insert(index, frame) self._invalidate_time_caches()
Convenience method to add multiple frames at once.
def add_children(self, frames, after=None): ''' Convenience method to add multiple frames at once. ''' if after is not None: # if there's an 'after' parameter, add the frames in reverse so the order is # preserved. for frame in reversed(frames): self.add_child(frame, after=after) else: for frame in frames: self.add_child(frame)
Return the path resolved against the closest entry in sys.path
def file_path_short(self): """ Return the path resolved against the closest entry in sys.path """ if not hasattr(self, '_file_path_short'): if self.file_path: result = None for path in sys.path: # On Windows, if self.file_path and path are on different drives, relpath # will result in exception, because it cannot compute a relpath in this case. # The root cause is that on Windows, there is no root dir like '/' on Linux. try: candidate = os.path.relpath(self.file_path, path) except ValueError: continue if not result or (len(candidate.split(os.sep)) < len(result.split(os.sep))): result = candidate self._file_path_short = result else: self._file_path_short = None return self._file_path_short
Returns a list of frames whose children include a frame outside of the group
def exit_frames(self): ''' Returns a list of frames whose children include a frame outside of the group ''' if self._exit_frames is None: exit_frames = [] for frame in self.frames: if any(c.group != self for c in frame.children): exit_frames.append(frame) self._exit_frames = exit_frames return self._exit_frames
Traverse down the frame hierarchy until a frame is found with more than one child
def first_interesting_frame(self): """ Traverse down the frame hierarchy until a frame is found with more than one child """ root_frame = self.root_frame() frame = root_frame while len(frame.children) <= 1: if frame.children: frame = frame.children[0] else: # there are no branches return root_frame return frame
Converts a timeline into a time-aggregate summary. Adds together calls along the same call stack, so that repeated calls appear as the same frame. Removes time-linearity - frames are sorted according to total time spent. Useful for outputs that display a summary of execution (e.g. text and html outputs)
def aggregate_repeated_calls(frame, options): ''' Converts a timeline into a time-aggregate summary. Adds together calls along the same call stack, so that repeated calls appear as the same frame. Removes time-linearity - frames are sorted according to total time spent. Useful for outputs that display a summary of execution (e.g. text and html outputs) ''' if frame is None: return None children_by_identifier = {} # iterate over a copy of the children since it's going to mutate while we're iterating for child in frame.children: if child.identifier in children_by_identifier: aggregate_frame = children_by_identifier[child.identifier] # combine the two frames, putting the children and self_time into the aggregate frame. aggregate_frame.self_time += child.self_time if child.children: aggregate_frame.add_children(child.children) # remove this frame, it's been incorporated into aggregate_frame child.remove_from_parent() else: # never seen this identifier before. It becomes the aggregate frame. children_by_identifier[child.identifier] = child # recurse into the children for child in frame.children: aggregate_repeated_calls(child, options=options) # sort the children by time # it's okay to use the internal _children list, sinde we're not changing the tree # structure. frame._children.sort(key=methodcaller('time'), reverse=True) # pylint: disable=W0212 return frame
Combines consecutive 'self time' frames
def merge_consecutive_self_time(frame, options): ''' Combines consecutive 'self time' frames ''' if frame is None: return None previous_self_time_frame = None for child in frame.children: if isinstance(child, SelfTimeFrame): if previous_self_time_frame: # merge previous_self_time_frame.self_time += child.self_time child.remove_from_parent() else: # keep a reference, maybe it'll be added to on the next loop previous_self_time_frame = child else: previous_self_time_frame = None for child in frame.children: merge_consecutive_self_time(child, options=options) return frame
When a frame has only one child, and that is a self-time frame, remove that node, since it's unnecessary - it clutters the output and offers no additional information.
def remove_unnecessary_self_time_nodes(frame, options): ''' When a frame has only one child, and that is a self-time frame, remove that node, since it's unnecessary - it clutters the output and offers no additional information. ''' if frame is None: return None if len(frame.children) == 1 and isinstance(frame.children[0], SelfTimeFrame): child = frame.children[0] frame.self_time += child.self_time child.remove_from_parent() for child in frame.children: remove_unnecessary_self_time_nodes(child, options=options) return frame
Remove nodes that represent less than e.g. 1% of the output
def remove_irrelevant_nodes(frame, options, total_time=None): ''' Remove nodes that represent less than e.g. 1% of the output ''' if frame is None: return None if total_time is None: total_time = frame.time() filter_threshold = options.get('filter_threshold', 0.01) for child in frame.children: proportion_of_total = child.time() / total_time if proportion_of_total < filter_threshold: frame.self_time += child.time() child.remove_from_parent() for child in frame.children: remove_irrelevant_nodes(child, options=options, total_time=total_time) return frame
decorate(func, caller) decorates a function using a caller.
def decorate(func, caller, extras=()): """ decorate(func, caller) decorates a function using a caller. """ evaldict = dict(_call_=caller, _func_=func) es = '' for i, extra in enumerate(extras): ex = '_e%d_' % i evaldict[ex] = extra es += ex + ', ' fun = FunctionMaker.create( func, "return _call_(_func_, %s%%(shortsignature)s)" % es, evaldict, __wrapped__=func) if hasattr(func, '__qualname__'): fun.__qualname__ = func.__qualname__ return fun
Factory of decorators turning a function into a generic function dispatching on the given arguments.
def dispatch_on(*dispatch_args): """ Factory of decorators turning a function into a generic function dispatching on the given arguments. """ assert dispatch_args, 'No dispatch args passed' dispatch_str = '(%s,)' % ', '.join(dispatch_args) def check(arguments, wrong=operator.ne, msg=''): """Make sure one passes the expected number of arguments""" if wrong(len(arguments), len(dispatch_args)): raise TypeError('Expected %d arguments, got %d%s' % (len(dispatch_args), len(arguments), msg)) def gen_func_dec(func): """Decorator turning a function into a generic function""" # first check the dispatch arguments argset = set(getfullargspec(func).args) if not set(dispatch_args) <= argset: raise NameError('Unknown dispatch arguments %s' % dispatch_str) typemap = {} def vancestors(*types): """ Get a list of sets of virtual ancestors for the given types """ check(types) ras = [[] for _ in range(len(dispatch_args))] for types_ in typemap: for t, type_, ra in zip(types, types_, ras): if issubclass(t, type_) and type_ not in t.mro(): append(type_, ra) return [set(ra) for ra in ras] def ancestors(*types): """ Get a list of virtual MROs, one for each type """ check(types) lists = [] for t, vas in zip(types, vancestors(*types)): n_vas = len(vas) if n_vas > 1: raise RuntimeError( 'Ambiguous dispatch for %s: %s' % (t, vas)) elif n_vas == 1: va, = vas mro = type('t', (t, va), {}).mro()[1:] else: mro = t.mro() lists.append(mro[:-1]) # discard t and object return lists def register(*types): """ Decorator to register an implementation for the given types """ check(types) def dec(f): check(getfullargspec(f).args, operator.lt, ' in ' + f.__name__) typemap[types] = f return f return dec def dispatch_info(*types): """ An utility to introspect the dispatch algorithm """ check(types) lst = [] for anc in itertools.product(*ancestors(*types)): lst.append(tuple(a.__name__ for a in anc)) return lst def _dispatch(dispatch_args, *args, **kw): types = tuple(type(arg) for arg in dispatch_args) try: # fast path f = typemap[types] except KeyError: pass else: return f(*args, **kw) combinations = itertools.product(*ancestors(*types)) next(combinations) # the first one has been already tried for types_ in combinations: f = typemap.get(types_) if f is not None: return f(*args, **kw) # else call the default implementation return func(*args, **kw) return FunctionMaker.create( func, 'return _f_(%s, %%(shortsignature)s)' % dispatch_str, dict(_f_=_dispatch), register=register, default=func, typemap=typemap, vancestors=vancestors, ancestors=ancestors, dispatch_info=dispatch_info, __wrapped__=func) gen_func_dec.__name__ = 'dispatch_on' + dispatch_str return gen_func_dec
Open the rendered HTML in a webbrowser. If output_filename=None (the default), a tempfile is used. The filename of the HTML file is returned.
def open_in_browser(self, session, output_filename=None): """ Open the rendered HTML in a webbrowser. If output_filename=None (the default), a tempfile is used. The filename of the HTML file is returned. """ if output_filename is None: output_file = tempfile.NamedTemporaryFile(suffix='.html', delete=False) output_filename = output_file.name with codecs.getwriter('utf-8')(output_file) as f: f.write(self.render(session)) else: with codecs.open(output_filename, 'w', 'utf-8') as f: f.write(self.render(session)) from pyinstrument.vendor.six.moves import urllib url = urllib.parse.urlunparse(('file', '', output_filename, '', '', '')) webbrowser.open(url) return output_filename
compile the JS, then run superclass implementation
def run(self): '''compile the JS, then run superclass implementation''' if subprocess.call(['npm', '--version']) != 0: raise RuntimeError('npm is required to build the HTML renderer.') self.check_call(['npm', 'install'], cwd=HTML_RENDERER_DIR) self.check_call(['npm', 'run', 'build'], cwd=HTML_RENDERER_DIR) self.copy_file(HTML_RENDERER_DIR+'/dist/js/app.js', 'pyinstrument/renderers/html_resources/app.js') setuptools.command.build_py.build_py.run(self)
Marks a function as deprecated.
def deprecated(func, *args, **kwargs): ''' Marks a function as deprecated. ''' warnings.warn( '{} is deprecated and should no longer be used.'.format(func), DeprecationWarning, stacklevel=3 ) return func(*args, **kwargs)
Marks an option as deprecated.
def deprecated_option(option_name, message=''): ''' Marks an option as deprecated. ''' def caller(func, *args, **kwargs): if option_name in kwargs: warnings.warn( '{} is deprecated. {}'.format(option_name, message), DeprecationWarning, stacklevel=3 ) return func(*args, **kwargs) return decorator(caller)
Set the size as a 2-tuple for thumbnailed images after uploading them.
def THUMBNAIL_OPTIONS(self): """ Set the size as a 2-tuple for thumbnailed images after uploading them. """ from django.core.exceptions import ImproperlyConfigured size = self._setting('DJNG_THUMBNAIL_SIZE', (200, 200)) if not (isinstance(size, (list, tuple)) and len(size) == 2 and isinstance(size[0], int) and isinstance(size[1], int)): raise ImproperlyConfigured("'DJNG_THUMBNAIL_SIZE' must be a 2-tuple of integers.") return {'crop': True, 'size': size}
Some widgets require a modified rendering context, if they contain angular directives.
def get_context(self, name, value, attrs): """ Some widgets require a modified rendering context, if they contain angular directives. """ context = super(NgWidgetMixin, self).get_context(name, value, attrs) if callable(getattr(self._field, 'update_widget_rendering_context', None)): self._field.update_widget_rendering_context(context) return context
Returns a TupleErrorList for this field. This overloaded method adds additional error lists to the errors as detected by the form validator.
def errors(self): """ Returns a TupleErrorList for this field. This overloaded method adds additional error lists to the errors as detected by the form validator. """ if not hasattr(self, '_errors_cache'): self._errors_cache = self.form.get_field_errors(self) return self._errors_cache
Returns a string of space-separated CSS classes for the wrapping element of this input field.
def css_classes(self, extra_classes=None): """ Returns a string of space-separated CSS classes for the wrapping element of this input field. """ if hasattr(extra_classes, 'split'): extra_classes = extra_classes.split() extra_classes = set(extra_classes or []) # field_css_classes is an optional member of a Form optimized for django-angular field_css_classes = getattr(self.form, 'field_css_classes', None) if hasattr(field_css_classes, 'split'): extra_classes.update(field_css_classes.split()) elif isinstance(field_css_classes, (list, tuple)): extra_classes.update(field_css_classes) elif isinstance(field_css_classes, dict): extra_field_classes = [] for key in ('*', self.name): css_classes = field_css_classes.get(key) if hasattr(css_classes, 'split'): extra_field_classes = css_classes.split() elif isinstance(css_classes, (list, tuple)): if '__default__' in css_classes: css_classes.remove('__default__') extra_field_classes.extend(css_classes) else: extra_field_classes = css_classes extra_classes.update(extra_field_classes) return super(NgBoundField, self).css_classes(extra_classes)
Renders the field.
def as_widget(self, widget=None, attrs=None, only_initial=False): """ Renders the field. """ if not widget: widget = self.field.widget if DJANGO_VERSION > (1, 10): # so that we can refer to the field when building the rendering context widget._field = self.field # Make sure that NgWidgetMixin is not already part of the widget's bases so it doesn't get added twice. if not isinstance(widget, NgWidgetMixin): widget.__class__ = type(widget.__class__.__name__, (NgWidgetMixin, widget.__class__), {}) return super(NgBoundField, self).as_widget(widget, attrs, only_initial)
Return server side errors. Shall be overridden by derived forms to add their extra errors for AngularJS.
def get_field_errors(self, field): """ Return server side errors. Shall be overridden by derived forms to add their extra errors for AngularJS. """ identifier = format_html('{0}[\'{1}\']', self.form_name, field.name) errors = self.errors.get(field.html_name, []) return self.error_class([SafeTuple( (identifier, self.field_error_css_classes, '$pristine', '$pristine', 'invalid', e)) for e in errors])
Updated the widget attributes which shall be added to the widget when rendering this field.
def update_widget_attrs(self, bound_field, attrs): """ Updated the widget attributes which shall be added to the widget when rendering this field. """ if bound_field.field.has_subwidgets() is False: widget_classes = getattr(self, 'widget_css_classes', None) if widget_classes: if 'class' in attrs: attrs['class'] += ' ' + widget_classes else: attrs.update({'class': widget_classes}) return attrs
During form initialization, some widgets have to be replaced by a counterpart suitable to be rendered the AngularJS way.
def convert_widgets(self): """ During form initialization, some widgets have to be replaced by a counterpart suitable to be rendered the AngularJS way. """ warnings.warn("Will be removed after dropping support for Django-1.10", PendingDeprecationWarning) widgets_module = getattr(self, 'widgets_module', 'djng.widgets') for field in self.base_fields.values(): if hasattr(field, 'get_converted_widget'): new_widget = field.get_converted_widget(widgets_module) if new_widget: field.widget = new_widget
If a widget was converted and the Form data was submitted through a multipart request, then these data fields must be converted to suit the Django Form validation
def rectify_multipart_form_data(self, data): """ If a widget was converted and the Form data was submitted through a multipart request, then these data fields must be converted to suit the Django Form validation """ for name, field in self.base_fields.items(): try: field.implode_multi_values(name, data) except AttributeError: pass return data
If a widget was converted and the Form data was submitted through an Ajax request, then these data fields must be converted to suit the Django Form validation
def rectify_ajax_form_data(self, data): """ If a widget was converted and the Form data was submitted through an Ajax request, then these data fields must be converted to suit the Django Form validation """ for name, field in self.base_fields.items(): try: data[name] = field.convert_ajax_data(data.get(name, {})) except AttributeError: pass return data
Determine the kind of input field and create a list of potential errors which may occur during validation of that field. This list is returned to be displayed in '$dirty' state if the field does not validate for that criteria.
def get_field_errors(self, bound_field): """ Determine the kind of input field and create a list of potential errors which may occur during validation of that field. This list is returned to be displayed in '$dirty' state if the field does not validate for that criteria. """ errors = super(NgFormValidationMixin, self).get_field_errors(bound_field) if bound_field.is_hidden: return errors identifier = format_html('{0}[\'{1}\']', self.form_name, self.add_prefix(bound_field.name)) potential_errors = bound_field.field.get_potential_errors() errors.extend([SafeTuple((identifier, self.field_error_css_classes, '$dirty', pe[0], 'invalid', force_text(pe[1]))) for pe in potential_errors]) if not isinstance(bound_field.field.widget, widgets.PasswordInput): # all valid fields shall display OK tick after changed into dirty state errors.append(SafeTuple((identifier, self.field_error_css_classes, '$dirty', '$valid', 'valid', ''))) if bound_field.value(): # valid bound fields shall display OK tick, even in pristine state errors.append(SafeTuple((identifier, self.field_error_css_classes, '$pristine', '$valid', 'valid', ''))) return errors
Returns this form rendered as HTML with <div class="form-group">s for each form field.
def as_div(self): """ Returns this form rendered as HTML with <div class="form-group">s for each form field. """ # wrap non-field-errors into <div>-element to prevent re-boxing error_row = '<div class="djng-line-spreader">%s</div>' div_element = self._html_output( normal_row='<div%(html_class_attr)s>%(label)s%(field)s%(help_text)s%(errors)s</div>', error_row=error_row, row_ender='</div>', help_text_html='<span class="help-block">%s</span>', errors_on_separate_row=False) return div_element
Conditionally switch between AngularJS and Django variable expansion for ``{{`` and ``}}`` keeping Django's expansion for ``{%`` and ``%}`` Usage:: {% angularjs 1 %} or simply {% angularjs %} {% process variables through the AngularJS template engine %} {% endangularjs %} {% angularjs 0 %} {% process variables through the Django template engine %} {% endangularjs %} Instead of 0 and 1, it is possible to use a context variable.
def angularjs(parser, token): """ Conditionally switch between AngularJS and Django variable expansion for ``{{`` and ``}}`` keeping Django's expansion for ``{%`` and ``%}`` Usage:: {% angularjs 1 %} or simply {% angularjs %} {% process variables through the AngularJS template engine %} {% endangularjs %} {% angularjs 0 %} {% process variables through the Django template engine %} {% endangularjs %} Instead of 0 and 1, it is possible to use a context variable. """ bits = token.contents.split() if len(bits) < 2: bits.append('1') values = [parser.compile_filter(bit) for bit in bits[1:]] django_nodelist = parser.parse(('endangularjs',)) angular_nodelist = NodeList() for node in django_nodelist: # convert all occurrences of VariableNode into a TextNode using the # AngularJS double curly bracket notation if isinstance(node, VariableNode): # convert Django's array notation into JS array notation tokens = node.filter_expression.token.split('.') token = tokens[0] for part in tokens[1:]: if part.isdigit(): token += '[%s]' % part else: token += '.%s' % part node = TextNode('{{ %s }}' % token) angular_nodelist.append(node) parser.delete_first_token() return AngularJsNode(django_nodelist, angular_nodelist, values[0])
Returns a script tag for including the proper locale script in any HTML page. This tag determines the current language with its locale. Usage: <script src="{% static 'node_modules/angular-i18n/' %}{% djng_locale_script %}"></script> or, if used with a default language: <script src="{% static 'node_modules/angular-i18n/' %}{% djng_locale_script 'de' %}"></script>
def djng_locale_script(context, default_language='en'): """ Returns a script tag for including the proper locale script in any HTML page. This tag determines the current language with its locale. Usage: <script src="{% static 'node_modules/angular-i18n/' %}{% djng_locale_script %}"></script> or, if used with a default language: <script src="{% static 'node_modules/angular-i18n/' %}{% djng_locale_script 'de' %}"></script> """ language = get_language_from_request(context['request']) if not language: language = default_language return format_html('angular-locale_{}.js', language.lower())
Update the dictionary of attributes used while rendering the input widget
def update_widget_attrs(self, bound_field, attrs): """ Update the dictionary of attributes used while rendering the input widget """ bound_field.form.update_widget_attrs(bound_field, attrs) widget_classes = self.widget.attrs.get('class', None) if widget_classes: if 'class' in attrs: attrs['class'] += ' ' + widget_classes else: attrs.update({'class': widget_classes}) return attrs
Return a regex pattern matching valid email addresses. Uses the same logic as the django validator, with the folowing exceptions: - Internationalized domain names not supported - IP addresses not supported - Strips lookbehinds (not supported in javascript regular expressions)
def get_email_regex(self): """ Return a regex pattern matching valid email addresses. Uses the same logic as the django validator, with the folowing exceptions: - Internationalized domain names not supported - IP addresses not supported - Strips lookbehinds (not supported in javascript regular expressions) """ validator = self.default_validators[0] user_regex = validator.user_regex.pattern.replace('\Z', '@') domain_patterns = ([re.escape(domain) + '$' for domain in validator.domain_whitelist] + [validator.domain_regex.pattern.replace('\Z', '$')]) domain_regex = '({0})'.format('|'.join(domain_patterns)) email_regex = user_regex + domain_regex return re.sub(r'\(\?\<[^()]*?\)', '', email_regex)
Add only the required message, but no 'ng-required' attribute to the input fields, otherwise all Checkboxes of a MultipleChoiceField would require the property "checked".
def get_multiple_choices_required(self): """ Add only the required message, but no 'ng-required' attribute to the input fields, otherwise all Checkboxes of a MultipleChoiceField would require the property "checked". """ errors = [] if self.required: msg = _("At least one checkbox has to be selected.") errors.append(('$error.multifield', msg)) return errors
Due to the way Angular organizes it model, when Form data is sent via a POST request, then for this kind of widget, the posted data must to be converted into a format suitable for Django's Form validation.
def implode_multi_values(self, name, data): """ Due to the way Angular organizes it model, when Form data is sent via a POST request, then for this kind of widget, the posted data must to be converted into a format suitable for Django's Form validation. """ mkeys = [k for k in data.keys() if k.startswith(name + '.')] mvls = [data.pop(k)[0] for k in mkeys] if mvls: data.setlist(name, mvls)
Due to the way Angular organizes it model, when this Form data is sent using Ajax, then for this kind of widget, the sent data has to be converted into a format suitable for Django's Form validation.
def convert_ajax_data(self, field_data): """ Due to the way Angular organizes it model, when this Form data is sent using Ajax, then for this kind of widget, the sent data has to be converted into a format suitable for Django's Form validation. """ data = [key for key, val in field_data.items() if val] return data
Reads url name, args, kwargs from GET parameters, reverses the url and resolves view function Returns the result of resolved view function, called with provided args and kwargs Since the view function is called directly, it isn't ran through middlewares, so the middlewares must be added manually The final result is exactly the same as if the request was for the resolved view. Parametrized urls: djangoUrl.reverse can be used with parametrized urls of $resource In that case the reverse url is something like: /angular/reverse/?djng_url_name=orders&djng_url_kwarg_id=:id $resource can either replace the ':id' part with say 2 and we can proceed as usual, reverse with reverse('orders', kwargs={'id': 2}). If it's not replaced we want to reverse to url we get a request to url '/angular/reverse/?djng_url_name=orders&djng_url_kwarg_id=' which gives a request.GET QueryDict {u'djng_url_name': [u'orders'], u'djng_url_kwarg_id': [u'']} In that case we want to ignore the id param and only reverse to url with name 'orders' and no params. So we ignore args and kwargs that are empty strings.
def process_request(self, request): """ Reads url name, args, kwargs from GET parameters, reverses the url and resolves view function Returns the result of resolved view function, called with provided args and kwargs Since the view function is called directly, it isn't ran through middlewares, so the middlewares must be added manually The final result is exactly the same as if the request was for the resolved view. Parametrized urls: djangoUrl.reverse can be used with parametrized urls of $resource In that case the reverse url is something like: /angular/reverse/?djng_url_name=orders&djng_url_kwarg_id=:id $resource can either replace the ':id' part with say 2 and we can proceed as usual, reverse with reverse('orders', kwargs={'id': 2}). If it's not replaced we want to reverse to url we get a request to url '/angular/reverse/?djng_url_name=orders&djng_url_kwarg_id=' which gives a request.GET QueryDict {u'djng_url_name': [u'orders'], u'djng_url_kwarg_id': [u'']} In that case we want to ignore the id param and only reverse to url with name 'orders' and no params. So we ignore args and kwargs that are empty strings. """ if request.path == self.ANGULAR_REVERSE: url_name = request.GET.get('djng_url_name') url_args = request.GET.getlist('djng_url_args', []) url_kwargs = {} # Remove falsy values (empty strings) url_args = filter(lambda x: x, url_args) # Read kwargs for param in request.GET: if param.startswith('djng_url_kwarg_'): # Ignore kwargs that are empty strings if request.GET[param]: url_kwargs[param[15:]] = request.GET[param] # [15:] to remove 'djng_url_kwarg' prefix url = unquote(reverse(url_name, args=url_args, kwargs=url_kwargs)) assert not url.startswith(self.ANGULAR_REVERSE), "Prevent recursive requests" # rebuild the request object with a different environ request.path = request.path_info = url request.environ['PATH_INFO'] = url query = request.GET.copy() for key in request.GET: if key.startswith('djng_url'): query.pop(key, None) if six.PY3: request.environ['QUERY_STRING'] = query.urlencode() else: request.environ['QUERY_STRING'] = query.urlencode().encode('utf-8') # Reconstruct GET QueryList in the same way WSGIRequest.GET function works request.GET = http.QueryDict(request.environ['QUERY_STRING'])
Returns a dictionary to be used for calling ``djangoCall.configure()``, which itself extends the Angular API to the client, offering him to call remote methods.
def get_all_remote_methods(resolver=None, ns_prefix=''): """ Returns a dictionary to be used for calling ``djangoCall.configure()``, which itself extends the Angular API to the client, offering him to call remote methods. """ if not resolver: resolver = get_resolver(get_urlconf()) result = {} for name in resolver.reverse_dict.keys(): if not isinstance(name, six.string_types): continue try: url = reverse(ns_prefix + name) resmgr = resolve(url) ViewClass = import_string('{0}.{1}'.format(resmgr.func.__module__, resmgr.func.__name__)) if isclass(ViewClass) and issubclass(ViewClass, JSONResponseMixin): result[name] = _get_remote_methods_for(ViewClass, url) except (NoReverseMatch, ImproperlyConfigured): pass for namespace, ns_pattern in resolver.namespace_dict.items(): sub_res = get_all_remote_methods(ns_pattern[1], ns_prefix + namespace + ':') if sub_res: result[namespace] = sub_res return result
Override dispatch to call appropriate methods: * $query - ng_query * $get - ng_get * $save - ng_save * $delete and $remove - ng_delete
def dispatch(self, request, *args, **kwargs): """ Override dispatch to call appropriate methods: * $query - ng_query * $get - ng_get * $save - ng_save * $delete and $remove - ng_delete """ allowed_methods = self.get_allowed_methods() try: if request.method == 'GET' and 'GET' in allowed_methods: if 'pk' in request.GET or self.slug_field in request.GET: return self.ng_get(request, *args, **kwargs) return self.ng_query(request, *args, **kwargs) elif request.method == 'POST' and 'POST' in allowed_methods: return self.ng_save(request, *args, **kwargs) elif request.method == 'DELETE' and 'DELETE' in allowed_methods: return self.ng_delete(request, *args, **kwargs) except self.model.DoesNotExist as e: return self.error_json_response(e.args[0], 404) except NgMissingParameterError as e: return self.error_json_response(e.args[0]) except JSONResponseException as e: return self.error_json_response(e.args[0], e.status_code) except ValidationError as e: if hasattr(e, 'error_dict'): return self.error_json_response('Form not valid', detail=e.message_dict) else: return self.error_json_response(e.message) return self.error_json_response('This view can not handle method {0}'.format(request.method), 405)
Return serialized queryset or single object as python dictionary serialize() only works on iterables, so to serialize a single object we put it in a list
def serialize_queryset(self, queryset): """ Return serialized queryset or single object as python dictionary serialize() only works on iterables, so to serialize a single object we put it in a list """ object_data = [] is_queryset = False query_fields = self.get_fields() try: iter(queryset) is_queryset = True raw_data = serializers.serialize(self.serializer_name, queryset, fields=query_fields, use_natural_keys=self.serialize_natural_keys) except TypeError: # Not iterable raw_data = serializers.serialize(self.serializer_name, [queryset, ], fields=query_fields, use_natural_keys=self.serialize_natural_keys) for obj in raw_data: # Add pk to fields obj['fields']['pk'] = obj['pk'] object_data.append(obj['fields']) if is_queryset: return object_data return object_data[0]
Called on $save() Use modelform to save new object or modify an existing one
def ng_save(self, request, *args, **kwargs): """ Called on $save() Use modelform to save new object or modify an existing one """ form = self.get_form(self.get_form_class()) if form.is_valid(): obj = form.save() return self.build_json_response(obj) raise ValidationError(form.errors)
Delete object and return it's data in JSON encoding The response is build before the object is actually deleted so that we can still retrieve a serialization in the response even with a m2m relationship
def ng_delete(self, request, *args, **kwargs): """ Delete object and return it's data in JSON encoding The response is build before the object is actually deleted so that we can still retrieve a serialization in the response even with a m2m relationship """ if 'pk' not in request.GET: raise NgMissingParameterError("Object id is required to delete.") obj = self.get_object() response = self.build_json_response(obj) obj.delete() return response
Outputs a <div ng-form="name"> for this set of choice fields to nest an ngForm.
def render(self): """ Outputs a <div ng-form="name"> for this set of choice fields to nest an ngForm. """ start_tag = format_html('<div {}>', mark_safe(' '.join(self.field_attrs))) output = [start_tag] for widget in self: output.append(force_text(widget)) output.append('</div>') return mark_safe('\n'.join(output))
Rewrite the error dictionary, so that its keys correspond to the model fields.
def _post_clean(self): """ Rewrite the error dictionary, so that its keys correspond to the model fields. """ super(NgModelFormMixin, self)._post_clean() if self._errors and self.prefix: self._errors = ErrorDict((self.add_prefix(name), value) for name, value in self._errors.items())
Return a dictionary specifying the defaults for this form. This dictionary can be used to inject the initial values for an Angular controller using the directive: ``ng-init={{ thisform.get_initial_data|js|safe }}``.
def get_initial_data(self): """ Return a dictionary specifying the defaults for this form. This dictionary can be used to inject the initial values for an Angular controller using the directive: ``ng-init={{ thisform.get_initial_data|js|safe }}``. """ data = {} ng_models = hasattr(self, 'Meta') and getattr(self.Meta, 'ng_models', []) or [] for name, field in self.fields.items(): if 'ng-model' in self.ng_directives or name in ng_models: data[name] = self.initial.get(name) if self.initial else field.initial return data
Tries to catch resize signals sent from the terminal.
def _handle_resize(self, signum=None, frame=None): 'Tries to catch resize signals sent from the terminal.' w, h = utils.get_terminal_size() self.term_width = w
(re)initialize values to original state so the progressbar can be used (again)
def init(self): ''' (re)initialize values to original state so the progressbar can be used (again) ''' self.previous_value = None self.last_update_time = None self.start_time = None self.updates = 0 self.end_time = None self.extra = dict() self._last_update_timer = timeit.default_timer()
Return current percentage, returns None if no max_value is given >>> progress = ProgressBar() >>> progress.max_value = 10 >>> progress.min_value = 0 >>> progress.value = 0 >>> progress.percentage 0.0 >>> >>> progress.value = 1 >>> progress.percentage 10.0 >>> progress.value = 10 >>> progress.percentage 100.0 >>> progress.min_value = -10 >>> progress.percentage 100.0 >>> progress.value = 0 >>> progress.percentage 50.0 >>> progress.value = 5 >>> progress.percentage 75.0 >>> progress.value = -5 >>> progress.percentage 25.0 >>> progress.max_value = None >>> progress.percentage
def percentage(self): '''Return current percentage, returns None if no max_value is given >>> progress = ProgressBar() >>> progress.max_value = 10 >>> progress.min_value = 0 >>> progress.value = 0 >>> progress.percentage 0.0 >>> >>> progress.value = 1 >>> progress.percentage 10.0 >>> progress.value = 10 >>> progress.percentage 100.0 >>> progress.min_value = -10 >>> progress.percentage 100.0 >>> progress.value = 0 >>> progress.percentage 50.0 >>> progress.value = 5 >>> progress.percentage 75.0 >>> progress.value = -5 >>> progress.percentage 25.0 >>> progress.max_value = None >>> progress.percentage ''' if self.max_value is None or self.max_value is base.UnknownLength: return None elif self.max_value: todo = self.value - self.min_value total = self.max_value - self.min_value percentage = todo / total else: percentage = 1 return percentage * 100
Returns whether the ProgressBar should redraw the line.
def _needs_update(self): 'Returns whether the ProgressBar should redraw the line.' if self.poll_interval: delta = timeit.default_timer() - self._last_update_timer poll_status = delta > self.poll_interval.total_seconds() else: delta = 0 poll_status = False # Do not update if value increment is not large enough to # add more bars to progressbar (according to current # terminal width) try: divisor = self.max_value / self.term_width # float division if self.value // divisor == self.previous_value // divisor: return poll_status or self.end_time else: return True except Exception: # ignore any division errors pass return poll_status or self.end_time
Updates the ProgressBar to a new value.
def update(self, value=None, force=False, **kwargs): 'Updates the ProgressBar to a new value.' if self.start_time is None: self.start() return self.update(value, force=force, **kwargs) if value is not None and value is not base.UnknownLength: if self.max_value is base.UnknownLength: # Can't compare against unknown lengths so just update pass elif self.min_value <= value <= self.max_value: # pragma: no cover # Correct value, let's accept pass elif self.max_error: raise ValueError( 'Value %s is out of range, should be between %s and %s' % (value, self.min_value, self.max_value)) else: self.max_value = value self.previous_value = self.value self.value = value minimum_update_interval = self._MINIMUM_UPDATE_INTERVAL delta = timeit.default_timer() - self._last_update_timer if delta < minimum_update_interval and not force: # Prevent updating too often return # Save the updated values for dynamic messages for key in kwargs: if key in self.dynamic_messages: self.dynamic_messages[key] = kwargs[key] else: raise TypeError( 'update() got an unexpected keyword ' + 'argument {0!r}'.format(key)) if self._needs_update() or force: self.updates += 1 ResizableMixin.update(self, value=value) ProgressBarBase.update(self, value=value) StdRedirectMixin.update(self, value=value) # Only flush if something was actually written self.fd.flush()
Starts measuring time, and prints the bar at 0%. It returns self so you can use it like this: Args: max_value (int): The maximum value of the progressbar reinit (bool): Initialize the progressbar, this is useful if you wish to reuse the same progressbar but can be disabled if data needs to be passed along to the next run >>> pbar = ProgressBar().start() >>> for i in range(100): ... # do something ... pbar.update(i+1) ... >>> pbar.finish()
def start(self, max_value=None, init=True): '''Starts measuring time, and prints the bar at 0%. It returns self so you can use it like this: Args: max_value (int): The maximum value of the progressbar reinit (bool): Initialize the progressbar, this is useful if you wish to reuse the same progressbar but can be disabled if data needs to be passed along to the next run >>> pbar = ProgressBar().start() >>> for i in range(100): ... # do something ... pbar.update(i+1) ... >>> pbar.finish() ''' if init: self.init() # Prevent multiple starts if self.start_time is not None: # pragma: no cover return self if max_value is not None: self.max_value = max_value if self.max_value is None: self.max_value = self._DEFAULT_MAXVAL StdRedirectMixin.start(self, max_value=max_value) ResizableMixin.start(self, max_value=max_value) ProgressBarBase.start(self, max_value=max_value) # Constructing the default widgets is only done when we know max_value if self.widgets is None: self.widgets = self.default_widgets() if self.prefix: self.widgets.insert(0, widgets.FormatLabel( self.prefix, new_style=True)) if self.suffix: self.widgets.append(widgets.FormatLabel( self.suffix, new_style=True)) for widget in self.widgets: interval = getattr(widget, 'INTERVAL', None) if interval is not None: self.poll_interval = min( self.poll_interval or interval, interval, ) self.num_intervals = max(100, self.term_width) if self.max_value is not base.UnknownLength and self.max_value < 0: raise ValueError('Value out of range') self.start_time = self.last_update_time = datetime.now() self._last_update_timer = timeit.default_timer() self.update(self.min_value, force=True) return self
Puts the ProgressBar bar in the finished state. Also flushes and disables output buffering if this was the last progressbar running. Args: end (str): The string to end the progressbar with, defaults to a newline dirty (bool): When True the progressbar kept the current state and won't be set to 100 percent
def finish(self, end='\n', dirty=False): ''' Puts the ProgressBar bar in the finished state. Also flushes and disables output buffering if this was the last progressbar running. Args: end (str): The string to end the progressbar with, defaults to a newline dirty (bool): When True the progressbar kept the current state and won't be set to 100 percent ''' if not dirty: self.end_time = datetime.now() self.update(self.max_value, force=True) StdRedirectMixin.finish(self, end=end) ResizableMixin.finish(self) ProgressBarBase.finish(self)
Wrap the examples so they generate readable output
def example(fn): '''Wrap the examples so they generate readable output''' @functools.wraps(fn) def wrapped(): try: sys.stdout.write('Running: %s\n' % fn.__name__) fn() sys.stdout.write('\n') except KeyboardInterrupt: sys.stdout.write('\nSkipping example.\n\n') # Sleep a bit to make killing the script easier time.sleep(0.2) examples.append(wrapped) return wrapped
Updates the widget to show the ETA or total time when finished.
def _calculate_eta(self, progress, data, value, elapsed): '''Updates the widget to show the ETA or total time when finished.''' if elapsed: # The max() prevents zero division errors per_item = elapsed.total_seconds() / max(value, 1e-6) remaining = progress.max_value - data['value'] eta_seconds = remaining * per_item else: eta_seconds = 0 return eta_seconds
Load standard graph validation sets For each size (from 6 to 32 graph nodes) the dataset consists of 100 graphs drawn from the Erdős-Rényi ensemble with edge probability 50%.
def load_stdgraphs(size: int) -> List[nx.Graph]: """Load standard graph validation sets For each size (from 6 to 32 graph nodes) the dataset consists of 100 graphs drawn from the Erdős-Rényi ensemble with edge probability 50%. """ from pkg_resources import resource_stream if size < 6 or size > 32: raise ValueError('Size out of range.') filename = 'datasets/data/graph{}er100.g6'.format(size) fdata = resource_stream('quantumflow', filename) return nx.read_graph6(fdata)
Download and rescale the MNIST database of handwritten digits MNIST is a dataset of 60,000 28x28 grayscale images handwritten digits, along with a test set of 10,000 images. We use Keras to download and access the dataset. The first invocation of this method may take a while as the dataset has to be downloaded and cached. If size is None, then we return the original MNIST data. For rescaled MNIST, we chop off the border, downsample to the desired size with Lanczos resampling, and then (optionally) zero out the corner pixels. Returns (x_train, y_train, x_test, y_test) x_train ndarray of shape (60000, size, size) y_train ndarray of shape (60000,) x_test ndarray of shape (10000, size, size) y_test ndarray of shape (10000,)
def load_mnist(size: int = None, border: int = _MNIST_BORDER, blank_corners: bool = False, nums: List[int] = None) \ -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: """Download and rescale the MNIST database of handwritten digits MNIST is a dataset of 60,000 28x28 grayscale images handwritten digits, along with a test set of 10,000 images. We use Keras to download and access the dataset. The first invocation of this method may take a while as the dataset has to be downloaded and cached. If size is None, then we return the original MNIST data. For rescaled MNIST, we chop off the border, downsample to the desired size with Lanczos resampling, and then (optionally) zero out the corner pixels. Returns (x_train, y_train, x_test, y_test) x_train ndarray of shape (60000, size, size) y_train ndarray of shape (60000,) x_test ndarray of shape (10000, size, size) y_test ndarray of shape (10000,) """ # DOCME: Fix up formatting above, # DOCME: Explain nums argument # JIT import since keras startup is slow from keras.datasets import mnist def _filter_mnist(x: np.ndarray, y: np.ndarray, nums: List[int] = None) \ -> Tuple[np.ndarray, np.ndarray]: xt = [] yt = [] items = len(y) for n in range(items): if nums is not None and y[n] in nums: xt.append(x[n]) yt.append(y[n]) xt = np.stack(xt) yt = np.stack(yt) return xt, yt def _rescale(imgarray: np.ndarray, size: int) -> np.ndarray: N = imgarray.shape[0] # Chop off border imgarray = imgarray[:, border:-border, border:-border] rescaled = np.zeros(shape=(N, size, size), dtype=np.float) for n in range(0, N): img = Image.fromarray(imgarray[n]) img = img.resize((size, size), Image.LANCZOS) rsc = np.asarray(img).reshape((size, size)) rsc = 256.*rsc/rsc.max() rescaled[n] = rsc return rescaled.astype(dtype=np.uint8) def _blank_corners(imgarray: np.ndarray) -> None: # Zero out corners sz = imgarray.shape[1] corner = (sz//2)-1 for x in range(0, corner): for y in range(0, corner-x): imgarray[:, x, y] = 0 imgarray[:, -(1+x), y] = 0 imgarray[:, -(1+x), -(1+y)] = 0 imgarray[:, x, -(1+y)] = 0 (x_train, y_train), (x_test, y_test) = mnist.load_data() if nums: x_train, y_train = _filter_mnist(x_train, y_train, nums) x_test, y_test = _filter_mnist(x_test, y_test, nums) if size: x_train = _rescale(x_train, size) x_test = _rescale(x_test, size) if blank_corners: _blank_corners(x_train) _blank_corners(x_test) return x_train, y_train, x_test, y_test
Covert numpy array to tensorflow tensor
def astensor(array: TensorLike) -> BKTensor: """Covert numpy array to tensorflow tensor""" tensor = tf.convert_to_tensor(value=array, dtype=CTYPE) return tensor
Return the inner product between two states
def inner(tensor0: BKTensor, tensor1: BKTensor) -> BKTensor: """Return the inner product between two states""" # Note: Relying on fact that vdot flattens arrays N = rank(tensor0) axes = list(range(N)) return tf.tensordot(tf.math.conj(tensor0), tensor1, axes=(axes, axes))
A QAOA circuit for the Quadratic Unconstrained Binary Optimization problem (i.e. an Ising model). Args: graph : a networkx graph instance with optional edge and node weights steps : number of QAOA steps beta : driver parameters (One per step) gamma : cost parameters (One per step)
def qubo_circuit( graph: nx.Graph, steps: int, beta: Sequence, gamma: Sequence) -> Circuit: """ A QAOA circuit for the Quadratic Unconstrained Binary Optimization problem (i.e. an Ising model). Args: graph : a networkx graph instance with optional edge and node weights steps : number of QAOA steps beta : driver parameters (One per step) gamma : cost parameters (One per step) """ qubits = list(graph.nodes()) # Initialization circ = Circuit() for q0 in qubits: circ += H(q0) # Run for given number of QAOA steps for p in range(0, steps): # Cost for q0, q1 in graph.edges(): weight = graph[q0][q1].get('weight', 1.0) # Note factor of pi due to parameterization of ZZ gate circ += ZZ(-weight * gamma[p] / np.pi, q0, q1) for q0 in qubits: node_weight = graph.nodes[q0].get('weight', None) if node_weight is not None: circ += RZ(node_weight, q0) # Drive for q0 in qubits: circ += RX(beta[p], q0) return circ
For the given graph, return the cut value for all binary assignments of the graph.
def graph_cuts(graph: nx.Graph) -> np.ndarray: """For the given graph, return the cut value for all binary assignments of the graph. """ N = len(graph) diag_hamiltonian = np.zeros(shape=([2]*N), dtype=np.double) for q0, q1 in graph.edges(): for index, _ in np.ndenumerate(diag_hamiltonian): if index[q0] != index[q1]: weight = graph[q0][q1].get('weight', 1) diag_hamiltonian[index] += weight return diag_hamiltonian
Return the circuit depth. Args: local: If True include local one-qubit gates in depth calculation. Else return the multi-qubit gate depth.
def depth(self, local: bool = True) -> int: """Return the circuit depth. Args: local: If True include local one-qubit gates in depth calculation. Else return the multi-qubit gate depth. """ G = self.graph if not local: def remove_local(dagc: DAGCircuit) \ -> Generator[Operation, None, None]: for elem in dagc: if dagc.graph.degree[elem] > 2: yield elem G = DAGCircuit(remove_local(self)).graph return nx.dag_longest_path_length(G) - 1
Split DAGCircuit into independent components
def components(self) -> List['DAGCircuit']: """Split DAGCircuit into independent components""" comps = nx.weakly_connected_component_subgraphs(self.graph) return [DAGCircuit(comp) for comp in comps]
Split DAGCircuit into layers, where the operations within each layer operate on different qubits (and therefore commute). Returns: A Circuit of Circuits, one Circuit per layer
def layers(self) -> Circuit: """Split DAGCircuit into layers, where the operations within each layer operate on different qubits (and therefore commute). Returns: A Circuit of Circuits, one Circuit per layer """ node_depth: Dict[Qubit, int] = {} G = self.graph for elem in self: depth = np.max(list(node_depth.get(prev, -1) + 1 for prev in G.predecessors(elem))) node_depth[elem] = depth depth_nodes = invert_map(node_depth, one_to_one=False) layers = [] for nd in range(0, self.depth()): elements = depth_nodes[nd] circ = Circuit(list(elements)) layers.append(circ) return Circuit(layers)
Return the all-zero state on N qubits
def zero_state(qubits: Union[int, Qubits]) -> State: """Return the all-zero state on N qubits""" N, qubits = qubits_count_tuple(qubits) ket = np.zeros(shape=[2] * N) ket[(0,) * N] = 1 return State(ket, qubits)
Return a W state on N qubits
def w_state(qubits: Union[int, Qubits]) -> State: """Return a W state on N qubits""" N, qubits = qubits_count_tuple(qubits) ket = np.zeros(shape=[2] * N) for n in range(N): idx = np.zeros(shape=N, dtype=int) idx[n] += 1 ket[tuple(idx)] = 1 / sqrt(N) return State(ket, qubits)
Return a GHZ state on N qubits
def ghz_state(qubits: Union[int, Qubits]) -> State: """Return a GHZ state on N qubits""" N, qubits = qubits_count_tuple(qubits) ket = np.zeros(shape=[2] * N) ket[(0, ) * N] = 1 / sqrt(2) ket[(1, ) * N] = 1 / sqrt(2) return State(ket, qubits)
Return a random state from the space of N qubits
def random_state(qubits: Union[int, Qubits]) -> State: """Return a random state from the space of N qubits""" N, qubits = qubits_count_tuple(qubits) ket = np.random.normal(size=([2] * N)) \ + 1j * np.random.normal(size=([2] * N)) return State(ket, qubits).normalize()
Join two state vectors into a larger qubit state
def join_states(*states: State) -> State: """Join two state vectors into a larger qubit state""" vectors = [ket.vec for ket in states] vec = reduce(outer_product, vectors) return State(vec.tensor, vec.qubits)
Print a state vector
def print_state(state: State, file: TextIO = None) -> None: """Print a state vector""" state = state.vec.asarray() for index, amplitude in np.ndenumerate(state): ket = "".join([str(n) for n in index]) print(ket, ":", amplitude, file=file)
Pretty print state probabilities. Args: state: ndigits: Number of digits of accuracy file: Output stream (Defaults to stdout)
def print_probabilities(state: State, ndigits: int = 4, file: TextIO = None) -> None: """ Pretty print state probabilities. Args: state: ndigits: Number of digits of accuracy file: Output stream (Defaults to stdout) """ prob = bk.evaluate(state.probabilities()) for index, prob in np.ndenumerate(prob): prob = round(prob, ndigits) if prob == 0.0: continue ket = "".join([str(n) for n in index]) print(ket, ":", prob, file=file)
Returns the completely mixed density matrix
def mixed_density(qubits: Union[int, Qubits]) -> Density: """Returns the completely mixed density matrix""" N, qubits = qubits_count_tuple(qubits) matrix = np.eye(2**N) / 2**N return Density(matrix, qubits)
Returns: A randomly sampled Density from the Hilbert–Schmidt ensemble of quantum states Ref: "Induced measures in the space of mixed quantum states" Karol Zyczkowski, Hans-Juergen Sommers, J. Phys. A34, 7111-7125 (2001) https://arxiv.org/abs/quant-ph/0012101
def random_density(qubits: Union[int, Qubits]) -> Density: """ Returns: A randomly sampled Density from the Hilbert–Schmidt ensemble of quantum states Ref: "Induced measures in the space of mixed quantum states" Karol Zyczkowski, Hans-Juergen Sommers, J. Phys. A34, 7111-7125 (2001) https://arxiv.org/abs/quant-ph/0012101 """ N, qubits = qubits_count_tuple(qubits) size = (2**N, 2**N) ginibre_ensemble = (np.random.normal(size=size) + 1j * np.random.normal(size=size)) / np.sqrt(2.0) matrix = ginibre_ensemble @ np.transpose(np.conjugate(ginibre_ensemble)) matrix /= np.trace(matrix) return Density(matrix, qubits=qubits)
Return a copy of this state with new qubits
def relabel(self, qubits: Qubits) -> 'State': """Return a copy of this state with new qubits""" return State(self.vec.tensor, qubits, self._memory)
Return a copy of this state with qubit labels permuted
def permute(self, qubits: Qubits) -> 'State': """Return a copy of this state with qubit labels permuted""" vec = self.vec.permute(qubits) return State(vec.tensor, vec.qubits, self._memory)
Normalize the state
def normalize(self) -> 'State': """Normalize the state""" tensor = self.tensor / bk.ccast(bk.sqrt(self.norm())) return State(tensor, self.qubits, self._memory)
Returns: The state probabilities
def probabilities(self) -> bk.BKTensor: """ Returns: The state probabilities """ value = bk.absolute(self.tensor) return value * value
Measure the state in the computational basis the the given number of trials, and return the counts of each output configuration.
def sample(self, trials: int) -> np.ndarray: """Measure the state in the computational basis the the given number of trials, and return the counts of each output configuration. """ # TODO: Can we do this within backend? probs = np.real(bk.evaluate(self.probabilities())) res = np.random.multinomial(trials, probs.ravel()) res = res.reshape(probs.shape) return res
Return the expectation of a measurement. Since we can only measure our computer in the computational basis, we only require the diagonal of the Hermitian in that basis. If the number of trials is specified, we sample the given number of times. Else we return the exact expectation (as if we'd performed an infinite number of trials. )
def expectation(self, diag_hermitian: bk.TensorLike, trials: int = None) -> bk.BKTensor: """Return the expectation of a measurement. Since we can only measure our computer in the computational basis, we only require the diagonal of the Hermitian in that basis. If the number of trials is specified, we sample the given number of times. Else we return the exact expectation (as if we'd performed an infinite number of trials. ) """ if trials is None: probs = self.probabilities() else: probs = bk.real(bk.astensorproduct(self.sample(trials) / trials)) diag_hermitian = bk.astensorproduct(diag_hermitian) return bk.sum(bk.real(diag_hermitian) * probs)
Measure the state in the computational basis. Returns: A [2]*bits array of qubit states, either 0 or 1
def measure(self) -> np.ndarray: """Measure the state in the computational basis. Returns: A [2]*bits array of qubit states, either 0 or 1 """ # TODO: Can we do this within backend? probs = np.real(bk.evaluate(self.probabilities())) indices = np.asarray(list(np.ndindex(*[2] * self.qubit_nb))) res = np.random.choice(probs.size, p=probs.ravel()) res = indices[res] return res
Convert a pure state to a density matrix
def asdensity(self) -> 'Density': """Convert a pure state to a density matrix""" matrix = bk.outer(self.tensor, bk.conj(self.tensor)) return Density(matrix, self.qubits, self._memory)
Return the partial trace over the specified qubits
def partial_trace(self, qubits: Qubits) -> 'Density': """Return the partial trace over the specified qubits""" vec = self.vec.partial_trace(qubits) return Density(vec.tensor, vec.qubits, self._memory)
Return a copy of this state with new qubits
def relabel(self, qubits: Qubits) -> 'Density': """Return a copy of this state with new qubits""" return Density(self.vec.tensor, qubits, self._memory)
Return a copy of this state with qubit labels permuted
def permute(self, qubits: Qubits) -> 'Density': """Return a copy of this state with qubit labels permuted""" vec = self.vec.permute(qubits) return Density(vec.tensor, vec.qubits, self._memory)
Normalize state
def normalize(self) -> 'Density': """Normalize state""" tensor = self.tensor / self.trace() return Density(tensor, self.qubits, self._memory)
Returns: The state probabilities
def probabilities(self) -> bk.BKTensor: """Returns: The state probabilities """ prob = bk.productdiag(self.tensor) return prob
Create and run a circuit with N qubits and given number of gates
def benchmark(N, gates): """Create and run a circuit with N qubits and given number of gates""" qubits = list(range(0, N)) ket = qf.zero_state(N) for n in range(0, N): ket = qf.H(n).run(ket) for _ in range(0, (gates-N)//3): qubit0, qubit1 = random.sample(qubits, 2) ket = qf.X(qubit0).run(ket) ket = qf.T(qubit1).run(ket) ket = qf.CNOT(qubit0, qubit1).run(ket) return ket.vec.tensor
Return benchmark performance in GOPS (Gate operations per second)
def benchmark_gops(N, gates, reps): """Return benchmark performance in GOPS (Gate operations per second)""" t = timeit.timeit(lambda: benchmark(N, gates), number=reps) gops = (GATES*REPS)/t gops = int((gops * 100) + 0.5) / 100.0 return gops
Create composite gates, decompose, and return a list of canonical coordinates
def sandwich_decompositions(coords0, coords1, samples=SAMPLES): """Create composite gates, decompose, and return a list of canonical coordinates""" decomps = [] for _ in range(samples): circ = qf.Circuit() circ += qf.CANONICAL(*coords0, 0, 1) circ += qf.random_gate([0]) circ += qf.random_gate([1]) circ += qf.CANONICAL(*coords1, 0, 1) gate = circ.asgate() coords = qf.canonical_coords(gate) decomps.append(coords) return decomps
Construct and display 3D plot of canonical coordinates
def display_weyl(decomps): """Construct and display 3D plot of canonical coordinates""" tx, ty, tz = list(zip(*decomps)) rcParams['axes.labelsize'] = 24 rcParams['font.family'] = 'serif' rcParams['font.serif'] = ['Computer Modern Roman'] rcParams['text.usetex'] = True fig = pyplot.figure() ax = Axes3D(fig) ax.scatter(tx, ty, tz) ax.plot((1,), (1,), (1,)) ax.plot((0, 1, 1/2, 0, 1/2, 1, 1/2, 1/2), (0, 0, 1/2, 0, 1/2, 0, 1/2, 1/2), (0, 0, 0, 0, 1/2, 0, 0, 1/2)) ax.plot((0, 1/2, 1, 1/2, 1/2), (0, 1/4, 0, 1/4, 1/2), (0, 1/4, 0, 1/4, 0)) points = [(0, 0, 0), (1/4, 0, 0), (1/2, 0, 0), (3/4, 0, 0), (1, 0, 0), (1/4, 1/4, 0), (1/2, 1/4, 0), (3/4, 1/4, 0), (1/2, 1/2, 0), (1/4, 1/4, 1/4), (1/2, 1/4, 1/4), (3/4, 1/4, 1/4), (1/2, 1/2, 1/4), (1/2, 1/2, 1/2)] ax.scatter(*zip(*points)) eps = 0.04 ax.text(0, 0, 0-2*eps, 'I', ha='center') ax.text(1, 0, 0-2*eps, 'I', ha='center') ax.text(1/2, 1/2, 0-2*eps, 'iSWAP', ha='center') ax.text(1/2, 1/2, 1/2+eps, 'SWAP', ha='center') ax.text(1/2, 0, 0-2*eps, 'CNOT', ha='center') # More coordinate labels # ax.text(1/4-eps, 1/4, 1/4, '$\sqrt{SWAP}$', ha='right') # ax.text(3/4+eps, 1/4, 1/4, '$\sqrt{SWAP}^\dagger$', ha='left') # ax.text(1/4, 0, 0-2*eps, '$\sqrt{{CNOT}}$', ha='center') # ax.text(3/4, 0, 0-2*eps, '$\sqrt{{CNOT}}$', ha='center') # ax.text(1/2, 1/4, 0-2*eps, 'B', ha='center') # ax.text(1/2, 1/4, 1/4+eps, 'ECP', ha='center') # ax.text(1/4, 1/4, 0-2*eps, '$\sqrt{iSWAP}$', ha='center') # ax.text(3/4, 1/4, 0-2*eps, '$\sqrt{iSWAP}$', ha='center') # ax.text(1/2, 1/2+eps, 1/4, 'PSWAP(1/2)', ha='left') ax.set_xlim(0, 1) ax.set_ylim(-1/4, 3/4) ax.set_zlim(-1/4, 3/4) # Get rid of the panes ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0)) ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0)) ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0)) # Get rid of the spines ax.w_xaxis.line.set_color((1.0, 1.0, 1.0, 0.0)) ax.w_yaxis.line.set_color((1.0, 1.0, 1.0, 0.0)) ax.w_zaxis.line.set_color((1.0, 1.0, 1.0, 0.0)) # Get rid of the ticks ax.set_xticks([]) ax.set_yticks([]) ax.set_zticks([]) pyplot.show()
Return the Pauli sigma_X operator acting on the given qubit
def sX(qubit: Qubit, coefficient: complex = 1.0) -> Pauli: """Return the Pauli sigma_X operator acting on the given qubit""" return Pauli.sigma(qubit, 'X', coefficient)
Return the Pauli sigma_Y operator acting on the given qubit
def sY(qubit: Qubit, coefficient: complex = 1.0) -> Pauli: """Return the Pauli sigma_Y operator acting on the given qubit""" return Pauli.sigma(qubit, 'Y', coefficient)
Return the Pauli sigma_Z operator acting on the given qubit
def sZ(qubit: Qubit, coefficient: complex = 1.0) -> Pauli: """Return the Pauli sigma_Z operator acting on the given qubit""" return Pauli.sigma(qubit, 'Z', coefficient)
Return the Pauli sigma_I (identity) operator. The qubit is irrelevant, but kept as an argument for consistency
def sI(qubit: Qubit, coefficient: complex = 1.0) -> Pauli: """Return the Pauli sigma_I (identity) operator. The qubit is irrelevant, but kept as an argument for consistency""" return Pauli.sigma(qubit, 'I', coefficient)
Return the sum of elements of the Pauli algebra
def pauli_sum(*elements: Pauli) -> Pauli: """Return the sum of elements of the Pauli algebra""" terms = [] key = itemgetter(0) for term, grp in groupby(heapq.merge(*elements, key=key), key=key): coeff = sum(g[1] for g in grp) if not isclose(coeff, 0.0): terms.append((term, coeff)) return Pauli(tuple(terms))
Return the product of elements of the Pauli algebra
def pauli_product(*elements: Pauli) -> Pauli: """Return the product of elements of the Pauli algebra""" result_terms = [] for terms in product(*elements): coeff = reduce(mul, [term[1] for term in terms]) ops = (term[0] for term in terms) out = [] key = itemgetter(0) for qubit, qops in groupby(heapq.merge(*ops, key=key), key=key): res = next(qops)[1] # Operator: X Y Z for op in qops: pair = res + op[1] res, rescoeff = PAULI_PROD[pair] coeff *= rescoeff if res != 'I': out.append((qubit, res)) p = Pauli(((tuple(out), coeff),)) result_terms.append(p) return pauli_sum(*result_terms)