<SYSTEM_TASK:> Update the widget's value without firing a changed signal <END_TASK> <USER_TASK:> Description: def update_internal(self, value): """Update the widget's value without firing a changed signal """
    self.block()
    self.set_widget_value(value)
    self.unblock()
<SYSTEM_TASK:> Perform the initial connection of the widget <END_TASK> <USER_TASK:> Description: def connect_widget(self): """Perform the initial connection of the widget. The default implementation will connect to the widget's signal based on self.signal_name. """
    if self.signal_name is not None:  # None for read only widgets
        sid = self.widget.connect(self.signal_name, self.widget_changed)
        self.connections.append(sid)
<SYSTEM_TASK:> Create a proxy for a widget and add it to this group <END_TASK> <USER_TASK:> Description: def add_proxy_for(self, name, widget): """Create a proxy for a widget and add it to this group :param name: The name or key of the proxy, which will be emitted with the changed signal :param widget: The widget to create a proxy for """
    proxy = proxy_for(widget)
    self.add_proxy(name, proxy)
<SYSTEM_TASK:> Join two URLs. <END_TASK> <USER_TASK:> Description: def _urljoin(left, right): """Join two URLs. Takes URLs specified by left and right and joins them into a single URL. If right is an absolute URL, it is returned directly. This differs from urlparse.urljoin() in that the latter always chops off the left-most component of left unless it is trailed by '/', which is not the behavior we want. """
    # Handle the tricky case of right being a full URL
    tmp = urlparse.urlparse(right)
    if tmp.scheme or tmp.netloc:
        # Go ahead and use urlparse.urljoin()
        return urlparse.urljoin(left, right)

    # Check for slashes
    joincond = (left[-1:], right[:1])
    if joincond == ('/', '/'):
        # Too many, preserve only one
        return left + right[1:]
    elif '/' in joincond:
        # Just one; great!
        return left + right
    else:
        # Not enough; add one
        return left + '/' + right
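A short usage sketch (illustrative only, not part of the original module) covering the joining cases handled above:

    _urljoin('http://example.com/v1', 'status')     # 'http://example.com/v1/status'
    _urljoin('http://example.com/v1/', '/status')   # 'http://example.com/v1/status'
    _urljoin('http://example.com/v1', 'http://other.example.org/x')  # 'http://other.example.org/x'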
<SYSTEM_TASK:> Decorate a method to inject an HTTPRequest. <END_TASK> <USER_TASK:> Description: def restmethod(method, reluri, *qargs, **headers): """Decorate a method to inject an HTTPRequest. Generates an HTTPRequest using the given HTTP method and relative URI. If additional positional arguments are present, they are expected to be strings that name function arguments that should be included as the query parameters of the URL. If additional keyword arguments are present, the keywords are expected to name function arguments and the values are expected to name headers to set from those values. The request is injected as the first function argument after the 'self' argument. Note that two attributes must exist on the object the method is called on: the '_baseurl' attribute specifies the URL that reluri is relative to; and the '_make_req' attribute specifies a method that instantiates an HTTPRequest from a method and full url (which will include query arguments). """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Process the arguments against the original function
            argmap, theSelf, req_name = _getcallargs(func, args, kwargs)

            # Build the URL
            url = _urljoin(theSelf._baseurl, reluri.format(**argmap))

            # Build the query string, as needed
            if qargs:
                query = dict([(k, argmap[k]) for k in qargs
                              if argmap[k] is not None])
                if query:
                    url += '?%s' % urllib.urlencode(query)

            # Build the headers, if needed
            hlist = None
            if headers:
                hlist = hdrs.HeaderDict()
                for aname, hname in headers.items():
                    if argmap[aname]:
                        hlist[hname] = argmap[aname]
                if not hlist:
                    # If there are no headers, don't send any
                    hlist = None

            # Now, build the request and pass it to the method
            argmap[req_name] = theSelf._make_req(method, url, func.__name__, hlist)

            # Call the method
            return func(**argmap)

        # Return the function wrapper
        return wrapper

    # Return the actual decorator
    return decorator
<SYSTEM_TASK:> Apply some addons to a widget. <END_TASK> <USER_TASK:> Description: def apply_addons(widget, *addon_types, **named_addon_types): """Apply some addons to a widget. :param widget: The widget to apply addons to. :param addon_types: A list of addon types, which will be instantiated and applied to the widget with the default name of the addon. :param named_addon_types: A named list of addons; the keywords will be the name of the addon when loaded and will override the default addon name. This allows loading the same addon multiple times for the same widget under different names. Plugins should conform to the GObjectPlugin interface or be a subclass of it. Once loaded, addons will be available as widget.addons.<addon_name> with standard attribute access. """
    for addon_type in addon_types:
        addon_type(widget)
    for name, addon_type in named_addon_types.items():
        addon_type(widget, addon_name=name)
<SYSTEM_TASK:> Create a path based on component configuration. <END_TASK> <USER_TASK:> Description: def make_path(config, *endings): """ Create a path based on component configuration. All paths are relative to the component's configuration directory; usually this will be the same for an entire session, but this function supports component-specific configuration directories. Arguments: config - the configuration object for a component endings - a list of file paths to append to the component's configuration directory """
    config_dir = config.get("dp.config_dir")
    return os.path.join(config_dir, *endings)
<SYSTEM_TASK:> Convert the JSON expression of contact information into an instance <END_TASK> <USER_TASK:> Description: def from_json(payload): """ Convert the JSON expression of contact information into an instance `Contact`. @param payload: a JSON expression representing contact information: [ type:ContactName, value:string, [is_primary:boolean, [is_verified:boolean]] ] @return: an instance `Contact`. @raise AssertionError: if the specified type of this contact information is not a string representation of an item of the enumeration `ContactName`. @raise ValueError: if the value of this contact information is null. """
    contact_item_number = len(payload)
    assert 2 <= contact_item_number <= 4, 'Invalid contact information format'

    is_primary = is_verified = None

    # Unpack the contact information into its components.
    if contact_item_number == 2:
        (name, value) = payload
    elif contact_item_number == 3:
        (name, value, is_primary) = payload
    else:
        (name, value, is_primary, is_verified) = payload

    return Contact(
        cast.string_to_enum(name, Contact.ContactName),
        value,
        is_primary=is_primary and cast.string_to_boolean(is_primary, strict=True),
        is_verified=is_verified and cast.string_to_boolean(is_verified, strict=True))
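An illustrative call (not part of the original module); it assumes ``EMAIL`` is an item of the enumeration ``Contact.ContactName``:

    # Hypothetical payload: name, value, and an optional is_primary flag.
    contact = Contact.from_json(['EMAIL', 'jane.doe@example.com', 'true'])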
<SYSTEM_TASK:> Convert an object representing contact information to an instance <END_TASK> <USER_TASK:> Description: def from_object(obj): """ Convert an object representing contact information to an instance `Contact`. @param obj: an object containing the following attributes: * `name`: an item of the enumeration `ContactName` representing the type of this contact information. * `value`: value of this contact information represented by a string, such as ``+84.01272170781``, the formatted value for a telephone number property. * `is_primary`: indicate whether this contact property is the first to be used to contact the entity that this contact information corresponds to. There is only one primary contact property for a given property name (e.g., `EMAIL`, `PHONE`, `WEBSITE`). * `is_verified`: indicate whether this contact information has been verified, whether it has been grabbed from a trusted Social Networking Service (SNS), or whether it has been verified through a challenge/response process. @raise ValueError: if the value of this contact information is null. """
    return obj if isinstance(obj, Contact) \
        else Contact(
            cast.string_to_enum(obj.name, Contact.ContactName),
            obj.value,
            is_primary=obj.is_primary and cast.string_to_boolean(obj.is_primary, strict=True),
            is_verified=obj.is_verified and cast.string_to_boolean(obj.is_verified, strict=True))
<SYSTEM_TASK:> Use this Function to submit. <END_TASK> <USER_TASK:> Description: def post(self, request, question_level, question_number, format=None): """ Use this Function to submit. """
    user = User.objects.get(username=request.user.username)
    question = Question.objects.filter(question_level=question_level).filter(question_number=question_number)
    if int(user.profile.user_access_level) < int(question_level):
        content = {'user_nick': user.profile.user_nick,
                   'please move along': 'Nothing to see here'}
        return Response(content, status=status.HTTP_406_NOT_ACCEPTABLE)
    time_last = None
    time_last_query = Submission.objects.filter(submission_user__username=request.user.username) \
        .filter(submission_question__question_level=question_level) \
        .filter(submission_question__question_number=question_number) \
        .filter(submission_state='WA').order_by('submission_timestamp').last()
    check_resubmission = AcceptedQuestion.objects.filter(record_user=request.user).filter(record_question=question)
    if check_resubmission:
        content = {'user_nick': user.profile.user_nick,
                   'Do not try to resubmit': 'Already Accepted'}
        return Response(content, status=status.HTTP_406_NOT_ACCEPTABLE)
    if time_last_query:
        time_last = time_last_query.submission_timestamp
    time_limit = self.get_contest_consecutive_submission_halt_time()
    # print time_last, time_limit, datetime.datetime.now(), time_limit <= datetime.datetime.now()
    if time_last is None or time_last + time_limit <= datetime.datetime.now():
        type_of_contest = self.get_contest_type()
        type_of_submission = self.get_question_type()
        if type_of_submission != STRING:
            content = {'user_nick': user.profile.user_nick,
                       'please move along': 'WRONG TYPE SUBMISSION'}
            return Response(content, status=status.HTTP_405_METHOD_NOT_ALLOWED)
        serializer = SubmissionSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save(submission_user=request.user,
                            submission_question=self.get_object(question_level, question_number)[0])
            # checker_queue.delay(int(serializer.data['id']))
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        else:
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    else:
        content = {'user_nick': user.profile.user_nick,
                   'Try to submit after some time': 'Nothing to see here'}
        return Response(content, status=status.HTTP_406_NOT_ACCEPTABLE)
<SYSTEM_TASK:> Use this Function to submit Comment. <END_TASK> <USER_TASK:> Description: def post(self, request, question_level, question_number, format=None): """ Use this Function to submit Comment. """
    user = User.objects.get(username=request.user.username)
    question = Question.objects.filter(question_level=question_level).filter(question_number=question_number)
    if int(user.profile.user_access_level) < int(question_level):
        content = {'user_nick': user.profile.user_nick,
                   'please move along': 'Nothing to see here'}
        return Response(content, status=status.HTTP_406_NOT_ACCEPTABLE)
    time_last = None
    time_last_query = Comment.objects.filter(comment_user__username=request.user.username).order_by('comment_timestamp').last()
    if time_last_query:
        time_last = time_last_query.comment_timestamp
    # the halt time for comments is kept the same as the submission halt time
    time_limit = self.get_contest_consecutive_submission_halt_time()
    # print time_last, time_limit, datetime.datetime.now(), time_limit <= datetime.datetime.now()
    if time_last is None or time_last + time_limit <= datetime.datetime.now():
        serializer = CommentSerializer(data=request.data)
        if not request.data['comment_message'] or len(str(request.data['comment_message'])) >= 255:
            content = {'user_nick': user.profile.user_nick,
                       'Try to submit something small': 'Comment is empty or too long'}
            return Response(content, status=status.HTTP_406_NOT_ACCEPTABLE)
        if serializer.is_valid():
            serializer.save(comment_user=request.user,
                            comment_question=question[0])
            checker_queue.delay(int(serializer.data['id']))
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        else:
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    else:
        content = {'user_nick': user.profile.user_nick,
                   'Try to submit after some time': 'Nothing to see here'}
        return Response(content, status=status.HTTP_406_NOT_ACCEPTABLE)
<SYSTEM_TASK:> Return a sequence of characters corresponding to the specified base. <END_TASK> <USER_TASK:> Description: def calculate_base_sequence(base_offsets): """ Return a sequence of characters corresponding to the specified base. Example:: >>> calculate_base_sequence([ ('0', '9'), ('A', 'F') ]) '0123456789ABCDEF' @param base_offsets: a list of character offsets in the form of tuples ``(x, y)`` where ``x`` corresponds to the first character of a sequence and ``y`` to the last character of this sequence. @return a sequence of characters, i.e., a string, corresponding to the specified base. """
    return ''.join([
        chr(c) for c in list(itertools.chain.from_iterable(
            [range(ord(x), ord(y) + 1) for (x, y) in base_offsets]))])
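``BASE62`` itself is not defined in this excerpt; presumably it is built with this very helper, along the lines of the following sketch (the exact character ordering is an assumption):

    # Hypothetical definition: digits, then upper-case, then lower-case letters (62 characters).
    BASE62 = calculate_base_sequence([('0', '9'), ('A', 'Z'), ('a', 'z')])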
<SYSTEM_TASK:> Generate a secured key composed of the integer value encoded in Base62 <END_TASK> <USER_TASK:> Description: def generate_secured_key(value, key_nonce_separator='.', nonce_length=4, base=BASE62): """ Generate a secured key composed of the integer value encoded in Base62 and a nonce. @param value: an integer value. @param key_nonce_separator: the character that is used to separate the key and the nonce to form the secured key. @param nonce_length: the number of characters to compose the nonce. @param base: a sequence of characters that is used to encode the integer value. @return: a tuple ``(key, nonce, secured_key)``: * ``key``: string representation of the integer value in Base62. * ``nonce``: "number used once", a pseudo-random number to ensure that the key cannot be reused in replay attacks. * ``secured_key``: a string composed of the key concatenated with the nonce. """
    if not isinstance(value, (int, long)):
        raise ValueError()

    posix_time = int(time.mktime(datetime.datetime.now().timetuple()))
    nonce = hashlib.md5(str(posix_time)).hexdigest()[:nonce_length]

    key = int_to_key(value, base=base)

    return key, nonce, '%s%s%s' % (key, key_nonce_separator, nonce)
<SYSTEM_TASK:> Convert the specified integer to a key using the given base. <END_TASK> <USER_TASK:> Description: def int_to_key(value, base=BASE62): """ Convert the specified integer to a key using the given base. @param value: a positive integer. @param base: a sequence of characters that is used to encode the integer value. @return: a key expressed in the specified base. """
    def key_sequence_generator(value, base):
        """
        Generator for producing the sequence of characters of a key, given an
        integer value and a base of characters for encoding, such as Base62.

        @param value: a positive integer.

        @param base: a sequence of characters that is used to encode the
            integer value.

        @return: the next character of the object's key encoded with the
            specified base.
        """
        base_length = len(base)
        while True:
            yield base[value % base_length]
            if value < base_length:
                break
            value /= base_length

    return ''.join([c for c in key_sequence_generator(value, base)])
<SYSTEM_TASK:> Convert the following key to an integer. <END_TASK> <USER_TASK:> Description: def key_to_int(key, base=BASE62): """ Convert the following key to an integer. @param key: a key. @param base: a sequence of characters that was used to encode the integer value. @return: the integer value corresponding to the given key. @raise ValueError: if one character of the specified key doesn't match any character of the specified base. """
    base_length = len(base)

    value = 0
    for c in reversed(key):
        value = (value * base_length) + base.index(c)

    return value
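A round-trip sketch (illustrative only): the exact key string depends on the character ordering of ``BASE62``, so only the round trip is asserted here.

    key = int_to_key(123456789)
    assert key_to_int(key) == 123456789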
<SYSTEM_TASK:> Parse a given secured key and return its associated integer, the key <END_TASK> <USER_TASK:> Description: def parse_secured_key(secured_key, key_nonce_separator='.', nonce_length=4, base=BASE62): """ Parse a given secured key and return its associated integer, the key itself, and the embedded nonce. @param secured_key a string representation of a secured key composed of a key in Base62, a separator character, and a nonce. @param key_nonce_separator: the character that is used to separate the key and the nonce to form the secured key. @param nonce_length: the number of characters to compose the nonce. @param base: a sequence of characters that is used to encode the integer value. @return: a tuple ``(value, key, nonce)``: * ``value``: the integer value of the key. * ``key``: the plain-text key. * ``nonce``: "number used once", a pseudo-random number to ensure that the key cannot be reused in replay attacks. @raise ValueError: if the format of the secured key is invalid, or if the embedded nonce is of the wrong length. """
    parts = secured_key.split(key_nonce_separator)
    if len(parts) != 2:
        raise ValueError('Invalid secured key format')

    (key, nonce) = parts
    if len(nonce) != nonce_length:
        raise ValueError('Invalid length of the key nonce')

    return key_to_int(key, base=base), key, nonce
<SYSTEM_TASK:> Get a template by name. <END_TASK> <USER_TASK:> Description: def get(args): """ Get a template by name. """
    m = TemplateManager(args.hosts)
    t = m.get(args.name)
    if t:
        print(json.dumps(t, indent=2))
    else:
        sys.exit(1)
<SYSTEM_TASK:> Delete a template by name <END_TASK> <USER_TASK:> Description: def delete(args): """ Delete a template by name """
    m = TemplateManager(args.hosts)
    m.delete(args.name)
<SYSTEM_TASK:> Parses an item object <END_TASK> <USER_TASK:> Description: def item_parser(item): """ Parses an item object. It gives None for properties that are not present. Used when some item types do not get parsed easily when using gevent. """
    if __is_deleted(item):
        return deleted_parser(item)
    return Item(
        __check_key('id', item),
        __check_key('deleted', item),
        __check_key('type', item),
        __check_key('by', item),
        __check_key('time', item),
        __check_key('text', item),
        __check_key('dead', item),
        __check_key('parent', item),
        __check_key('kids', item),
        __check_key('url', item),
        __check_key('score', item),
        __check_key('title', item),
        __check_key('parts', item),
    )
<SYSTEM_TASK:> Return a string representation of the specified JSON object. <END_TASK> <USER_TASK:> Description: def json_to_string(value, null_string_repr='[]', trimable=False): """ Return a string representation of the specified JSON object. @param value: a JSON object. @param null_string_repr: the string representation of the null object. @return: a string representation of the specified JSON object. """
    return null_string_repr if is_undefined(value) \
        else obj.jsonify(value, trimable=trimable)
<SYSTEM_TASK:> Return a boolean with a value represented by the specified string. <END_TASK> <USER_TASK:> Description: def string_to_boolean(value, strict=False, default_value=False): """ Return a boolean with a value represented by the specified string. @param value: a string representation of a boolean. @param strict: indicate whether the specified string MUST be of a valid boolean representation. @return: the boolean value represented by the string. @raise ValueError: if the string doesn't represent a valid boolean, while the argument ``strict`` equals ``True``. """
    if is_undefined(value) and default_value:
        return default_value

    if isinstance(value, bool):
        return value

    if isinstance(value, basestring):
        value = value.lower()

    is_true = value in ('yes', 'true', 't', '1')
    if not is_true and strict and value not in ('no', 'false', 'f', '0'):
        raise ValueError("The specified string doesn't represent a boolean value")

    return is_true
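Illustrative usage, based on the accepted literals above:

    string_to_boolean('Yes')                 # True
    string_to_boolean('0', strict=True)      # False
    string_to_boolean('maybe', strict=True)  # raises ValueError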
<SYSTEM_TASK:> Return a Python date that corresponds to the specified string <END_TASK> <USER_TASK:> Description: def string_to_date(value): """ Return a Python date that corresponds to the specified string representation. @param value: string representation of a date. @return: an instance ``datetime.datetime`` represented by the string. """
    if isinstance(value, datetime.date):
        return value
    return dateutil.parser.parse(value).date()
<SYSTEM_TASK:> Return a Python date that corresponds to the specified string <END_TASK> <USER_TASK:> Description: def string_to_date(value): """ Return a Python date that corresponds to the specified string representation. @param value: string representation of a date. @return: an instance ``datetime.date`` represented by the string. """
    if is_undefined(value):
        if strict:
            raise ValueError('The value cannot be null')
        return None

    try:
        return float(value)
    except ValueError:
        raise ValueError(
            'The specified string "%s" does not represent a decimal number' % value)
<SYSTEM_TASK:> Return the item of an enumeration that corresponds to the specified <END_TASK> <USER_TASK:> Description: def string_to_enum(value, enumeration, strict=True, default_value=None): """ Return the item of an enumeration that corresponds to the specified string representation. @param value: string representation of an item of a Python enumeration. @param enumeration: a Python enumeration. @param strict: indicate whether the value must correspond to an item of the specified Python enumeration or if ``None`` value is accepted. @return: the item of the Python enumeration the specified string representation corresponds to. @raise ValueError: if the enumeration is not an instance of ``Enum``, or if the string representation doesn't correspond to any item of the given Python enumeration, or if the default value is not an item of the given Python enumeration. """
    if not isinstance(enumeration, Enum):
        raise ValueError('The specified enumeration is not an instance of Enum')

    if is_undefined(value):
        if strict:
            raise ValueError('The value cannot be null')
        if default_value is not None and default_value not in enumeration:
            raise ValueError('The default value must be an item of the specified enumeration')
        return default_value

    item = [item for item in enumeration if str(item) == value]
    if len(item) == 0:
        raise ValueError('The specified string "%s" does not represent any item of the enumeration' % value)

    return item[0]
<SYSTEM_TASK:> Return an integer corresponding to the string representation of a <END_TASK> <USER_TASK:> Description: def string_to_integer(value, strict=False): """ Return an integer corresponding to the string representation of a number. @param value: a string representation of an integer number. @param strict: indicate whether the specified string MUST be of a valid integer number representation. @return: the integer value represented by the string. @raise ValueError: if the string doesn't represent a valid integer, while the argument ``strict`` equals ``True``. """
    if is_undefined(value):
        if strict:
            raise ValueError('The value cannot be null')
        return None

    try:
        return int(value)
    except ValueError:
        raise ValueError('The specified string "%s" does not represent an integer' % value)
<SYSTEM_TASK:> Return a tuple corresponding to the string representation of an IPv4 <END_TASK> <USER_TASK:> Description: def string_to_ipv4(value, strict=False): """ Return a tuple corresponding to the string representation of an IPv4 address. An IPv4 address is canonically represented in dot-decimal notation, which consists of four decimal numbers, each ranging from 0 to 255, separated by dots, e.g., ``172.16.254.1``. @param value: a dotted-decimal notation of an IPv4 address, consisting of four decimal numbers, each ranging from ``0`` to ``255``, separated by dots. @param strict: indicate whether the ``None`` value is accepted. @return: a tuple of four decimal numbers ``(byte1, byte2, byte3, byte4)``, each ranging from ``0`` to ``255``. """
    if is_undefined(value):
        if strict:
            raise ValueError('The value cannot be null')
        return None

    if not REGEX_IPV4.match(value):
        raise ValueError('The specified string "%s" does not represent an IPv4 address' % value)

    ipv4 = [int(byte) for byte in value.split('.') if int(byte) < 256]
    if len(ipv4) != 4:
        raise ValueError('The IPv4 "%s" has invalid byte(s)' % value)

    return ipv4
<SYSTEM_TASK:> Return an instance ``datetime.time`` that corresponds to the specified <END_TASK> <USER_TASK:> Description: def string_to_time(value): """ Return an instance ``datetime.time`` that corresponds to the specified string representation. @param value: a string representation of a time ``HH:MM[:SS]``, where: * ``HH``: hour (24-hour clock) as a zero-padded decimal number. * ``MM``: minute as a zero-padded decimal number. * ``SS``: second as a zero-padded decimal number. @return: an instance ``datetime.time`` represented by the string. @raise ValueError: if the string representation doesn't correspond to a valid time. """
    try:
        return datetime.datetime.strptime(value, '%H:%M:%S').time()
    except ValueError:
        return datetime.datetime.strptime(value, '%H:%M').time()
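Illustrative usage:

    string_to_time('08:30')     # datetime.time(8, 30)
    string_to_time('08:30:15')  # datetime.time(8, 30, 15)
    string_to_time('25:00')     # raises ValueError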
<SYSTEM_TASK:> Return the ISO 8601 date time that corresponds to the specified string <END_TASK> <USER_TASK:> Description: def string_to_timestamp(value, second_digits=3, default_utc_offset=None, rounding_method=TimestampRoundingMethod.ceiling): """ Return the ISO 8601 date time that corresponds to the specified string representation. When the required precision is lower than microsecond, the function rounds the sub-second time to the specified number of digits. By default, when possible, it returns the smallest value greater than or equal to the sub-second time for specified precision, also know as "ceiling conversion"; for instance, the function converts ``918845`` microseconds to ``919`` milliseconds. Otherwise, the function return the largest value equal to the sub-second time for specified precision, also known as "floor conversion"; for instance, the function converts ``999916`` microseconds to ``999`` milliseconds. @param value: string representation of an ISO date time. @param second_digits: indicate the number of second digits after the decimal point, to determine the time precision. For instance, 3 digits corresponds to milliseconds, 6 digits corresponds to microseconds. @param default_utc_offset: if the specified string representation doesn't mention any time zone, use this offset as the time zone of the corresponding ISO 8601 date time, or use the time zone of the machine this code is running on as the default time zone. @param rounding_method: one of the rounding method, as declared in ``TimestampRoundingMethod``, when the required precision is lower than microsecond. @return: an instance ``ISO8601DateTime`` represented by the string. @raise ValueError: if the specified number of second digits after the decimal point is not in 0..6, or if the string representation doesn't correspond to a valid ISO 8601 date time. """
    if second_digits < 0 or second_digits > 6:
        raise ValueError('The number of second digits after the decimal point must be in 0..6')

    if not value:
        return None

    if isinstance(value, datetime.date):
        pydatetime = ISO8601DateTime.from_datetime(value)
    else:
        if default_utc_offset is not None:
            value = '%s%s%02d' % (value[:19], '-' if default_utc_offset < 0 else '+', default_utc_offset)

        d = dateutil.parser.parse(value)

        if second_digits >= 6:
            microsecond = d.microsecond
        else:
            f = 10 ** (6 - second_digits)
            if rounding_method == TimestampRoundingMethod.ceiling:
                m = int(math.ceil(float(d.microsecond) / f))
                if m >= f:
                    m -= 1
            else:
                m = int(math.floor(float(d.microsecond) / f))
            microsecond = m * f

        pydatetime = ISO8601DateTime(
            d.year, d.month, d.day, d.hour, d.minute, d.second, microsecond,
            d.tzinfo or dateutil.tz.tzlocal())

    pydatetime.set_second_digits(second_digits)

    return pydatetime
<SYSTEM_TASK:> Create the flask app <END_TASK> <USER_TASK:> Description: def create_app(debug=False): """ Create the flask app :param debug: Use debug mode :type debug: bool :return: Created app :rtype: flask.Flask """
    app = Flask(__name__)
    app.secret_key = str(uuid.uuid4())
    app.json_encoder = DateTimeEncoder
    app.register_blueprint(page)
    Bower(app)

    api = Api(app)
    api.add_resource(
        PluginListAPI,
        api_path + "plugins/",
        endpoint="APIPlugins"
    )
    api.add_resource(
        PluginAPI,
        api_path + "plugins/<plugin_key>",
        endpoint="APIPlugin"
    )
    api.add_resource(
        PluginResourceAPI,
        api_path + "plugins/<plugin_key>/resources/",
        endpoint="APIPluginResource"
    )

    if debug:
        # Setup app for real debugging mode
        app.debug = True

        # Force update of static files (even in dev mode, browsers still cache)
        app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0

        @app.after_request
        def add_header(response):
            response.headers['Cache-Control'] = "public, max-age=0"
            return response

    return app
<SYSTEM_TASK:> Given three collinear points p, q, r, the function checks if point q <END_TASK> <USER_TASK:> Description: def on_segment(point_p, point_q, point_r): """ Given three collinear points p, q, r, the function checks if point q lies on line segment "pr" :param point_p: :type point_p: models.Point :param point_q: :type point_q: models.Point :param point_r: :type point_r: models.Point :return: whether point q lies on line segment "pr" :rtype: bool """
    if (point_q.x <= max(point_p.x, point_r.x) and
            point_q.x >= min(point_p.x, point_r.x) and
            point_q.y <= max(point_p.y, point_r.y) and
            point_q.y >= min(point_p.y, point_r.y)):
        return True
    return False
<SYSTEM_TASK:> Determine if line_a intersects with line_b <END_TASK> <USER_TASK:> Description: def is_intersect(line_a, line_b): """ Determine if line_a intersects with line_b :param line_a: :type line_a: models.Line :param line_b: :type line_b: models.Line :return: :rtype: bool """
    # Find the four orientations needed for general and special cases
    orientation_1 = orientation(line_a.endpoint_a, line_a.endpoint_b, line_b.endpoint_a)
    orientation_2 = orientation(line_a.endpoint_a, line_a.endpoint_b, line_b.endpoint_b)
    orientation_3 = orientation(line_b.endpoint_a, line_b.endpoint_b, line_a.endpoint_a)
    orientation_4 = orientation(line_b.endpoint_a, line_b.endpoint_b, line_a.endpoint_b)

    # General case
    if orientation_1 != orientation_2 and orientation_3 != orientation_4:
        return True

    # Special cases
    if orientation_1 == 0 and on_segment(line_a.endpoint_a, line_b.endpoint_a, line_a.endpoint_b):
        return True
    if orientation_2 == 0 and on_segment(line_a.endpoint_a, line_b.endpoint_b, line_a.endpoint_b):
        return True
    if orientation_3 == 0 and on_segment(line_b.endpoint_a, line_a.endpoint_a, line_b.endpoint_b):
        return True
    if orientation_4 == 0 and on_segment(line_b.endpoint_a, line_a.endpoint_b, line_b.endpoint_b):
        return True

    return False
<SYSTEM_TASK:> Determine if point is in region <END_TASK> <USER_TASK:> Description: def is_inside(point, region): """ Determine if point is in region :param point: :type point: models.Point :param region: :type region: Region """
    points = region.vertices
    extreme = models.Point(x=1000000, y=point.y)
    points = points + [points[0]]
    intersect_count = 0
    for i in range(len(points) - 1):
        if is_intersect(models.Line(point, extreme),
                        models.Line(points[i], points[i + 1])):
            intersect_count += 1
    return intersect_count % 2 == 1
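A hypothetical usage sketch: it assumes ``models.Point`` takes x/y keywords (as above) and that a ``Region`` exposes its corner points through a ``vertices`` attribute; the ``Region(vertices=...)`` constructor shown here is an assumption.

    square = Region(vertices=[models.Point(x=0, y=0), models.Point(x=4, y=0),
                              models.Point(x=4, y=4), models.Point(x=0, y=4)])
    is_inside(models.Point(x=2, y=2), square)  # True
    is_inside(models.Point(x=5, y=5), square)  # False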
<SYSTEM_TASK:> Determine refractive index and radius of a spherical object <END_TASK> <USER_TASK:> Description: def analyze(qpi, r0, method="edge", model="projection", edgekw={}, imagekw={}, ret_center=False, ret_pha_offset=False, ret_qpi=False): """Determine refractive index and radius of a spherical object Parameters ---------- qpi: qpimage.QPImage Quantitative phase image data r0: float Approximate radius of the sphere [m] method: str The method used to determine the refractive index can either be "edge" (determine the radius from the edge detected in the phase image) or "image" (perform a 2D phase image fit). model: str The light-scattering model used by `method`. If `method` is "edge", only "projection" is allowed. If `method` is "image", `model` can be one of "mie", "projection", "rytov", or "rytov-sc". edgekw: dict Keyword arguments for tuning the edge detection algorithm, see :func:`qpsphere.edgefit.contour_canny`. imagekw: dict Keyword arguments for tuning the image fitting algorithm, see :func:`qpsphere.imagefit.alg.match_phase` ret_center: bool If True, return the center coordinate of the sphere. ret_pha_offset: bool If True, return the phase image background offset. ret_qpi: bool If True, return the modeled data as a :class:`qpimage.QPImage`. Returns ------- n: float Computed refractive index r: float Computed radius [m] c: tuple of floats Only returned if `ret_center` is True; Center position of the sphere [px] pha_offset: float Only returned if `ret_pha_offset` is True; Phase image background offset qpi_sim: qpimage.QPImage Only returned if `ret_qpi` is True; Modeled data Notes ----- If `method` is "image", then the "edge" method is used as a first step to estimate initial parameters for radius, refractive index, and position of the sphere using `edgekw`. If this behavior is not desired, please make use of the method :func:`qpsphere.imagefit.analyze`. """
    if method == "edge":
        if model != "projection":
            raise ValueError("`method='edge'` requires `model='projection'`!")

        n, r, c = edgefit.analyze(qpi=qpi,
                                  r0=r0,
                                  edgekw=edgekw,
                                  ret_center=True,
                                  ret_edge=False,
                                  )
        res = [n, r]
        if ret_center:
            res.append(c)
        if ret_pha_offset:
            res.append(0)
        if ret_qpi:
            qpi_sim = simulate(radius=r,
                               sphere_index=n,
                               medium_index=qpi["medium index"],
                               wavelength=qpi["wavelength"],
                               grid_size=qpi.shape,
                               model="projection",
                               pixel_size=qpi["pixel size"],
                               center=c)
            res.append(qpi_sim)
    elif method == "image":
        n0, r0, c0 = edgefit.analyze(qpi=qpi,
                                     r0=r0,
                                     edgekw=edgekw,
                                     ret_center=True,
                                     ret_edge=False,
                                     )
        res = imagefit.analyze(qpi=qpi,
                               model=model,
                               n0=n0,
                               r0=r0,
                               c0=c0,
                               imagekw=imagekw,
                               ret_center=ret_center,
                               ret_pha_offset=ret_pha_offset,
                               ret_qpi=ret_qpi
                               )
    else:
        raise NotImplementedError("`method` must be 'edge' or 'image'!")

    return res
<SYSTEM_TASK:> Return the background phase mask of a qpsphere simulation <END_TASK> <USER_TASK:> Description: def bg_phase_mask_from_sim(sim, radial_clearance=1.1): """Return the background phase mask of a qpsphere simulation Parameters ---------- sim: qpimage.QPImage Quantitative phase data simulated with qpsphere; The simulation keyword arguments "sim center", "sim radius", and "pixel size" must be present in `sim.meta`. radial_clearance: float Multiplicator to the fitted radius of the sphere; modifies the size of the mask; set to "1" to use the radius determined by :func:`qpsphere.analyze`. The circular area containing the phase object is set to `False` in the output `mask` image. Returns ------- mask: boolean 2d np.ndarray The mask is `True` for background regions and `False` for object regions. """
    # Mask values around the object
    cx, cy = sim["sim center"]
    radius = sim["sim radius"]
    px_um = sim["pixel size"]
    x = np.arange(sim.shape[0]).reshape(-1, 1)
    y = np.arange(sim.shape[1]).reshape(1, -1)
    rsq = (x - cx)**2 + (y - cy)**2
    mask = rsq > (radius / px_um * radial_clearance)**2
    return mask
<SYSTEM_TASK:> Determine the background phase mask for a spherical phase object <END_TASK> <USER_TASK:> Description: def bg_phase_mask_for_qpi(qpi, r0, method="edge", model="projection", edgekw={}, imagekw={}, radial_clearance=1.1): """Determine the background phase mask for a spherical phase object The position and radius of the phase object are determined with :func:`analyze`, to which the corresponding keyword arguments are passed. A binary mask is created from the simulation results via :func:`bg_phase_mask_from_sim`. Parameters ---------- qpi: qpimage.QPImage Quantitative phase image data r0: float Approximate radius of the sphere [m] method: str The method used to determine the refractive index can either be "edge" (determine the radius from the edge detected in the phase image) or "image" (perform a 2D phase image fit). model: str The light-scattering model used by `method`. If `method` is "edge", only "projection" is allowed. If `method` is "image", `model` can be one of "mie", "projection", "rytov", or "rytov-sc". edgekw: dict Keyword arguments for tuning the edge detection algorithm, see :func:`qpsphere.edgefit.contour_canny`. imagekw: dict Keyword arguments for tuning the image fitting algorithm, see :func:`qpsphere.imagefit.alg.match_phase` radial_clearance: float Multiplicator to the fitted radius of the sphere; modifies the size of the mask; set to "1" to use the radius determined by :func:`qpsphere.analyze`. The circular area containing the phase object is set to `False` in the output `mask` image. Returns ------- mask: boolean 2d np.ndarray The mask is `True` for background regions and `False` for object regions. """
    # fit sphere
    _, _, sim = analyze(qpi=qpi,
                        r0=r0,
                        method=method,
                        model=model,
                        edgekw=edgekw,
                        imagekw=imagekw,
                        ret_qpi=True)
    # determine mask
    mask = bg_phase_mask_from_sim(sim=sim,
                                  radial_clearance=radial_clearance)
    return mask
<SYSTEM_TASK:> Add an organisation segment. <END_TASK> <USER_TASK:> Description: def with_organisation(self, organisation): """Add an organisation segment. Args: organisation (str): Official name of an administrative body holding an election. Returns: IdBuilder Raises: ValueError """
    if organisation is None:
        organisation = ''
    organisation = slugify(organisation)
    self._validate_organisation(organisation)
    self.organisation = organisation
    return self
<SYSTEM_TASK:> Create a modified environment. <END_TASK> <USER_TASK:> Description: def create_environment(component_config): """ Create a modified environment. Arguments component_config - The configuration for a component. """
    ret = os.environ.copy()
    for env in component_config.get_list("dp.env_list"):
        real_env = env.upper()
        value = os.environ.get(real_env)
        value = _prepend_env(component_config, env, value)
        value = _append_env(component_config, env, value)
        _apply_change(ret, real_env, value, component_config)
    return ret
<SYSTEM_TASK:> expects Block from Compressor <END_TASK> <USER_TASK:> Description: def process_data(self, block): """expects Block from Compressor"""
    if hasattr(block, 'send_destinations') and block.send_destinations:
        self.fire(events.FileProcessed(block))
        self._log_in_db(block)
        if self._sent_log_file:
            self._log_in_sent_log(block)
        self.log.info("Sent to '%s' file '%s' containing files: %s",
                      str(block.send_destinations),
                      block.processed_data_file_info.basename,
                      str([file_info.path for file_info in block.content_file_infos]))
    else:
        self.log.info("File %s wasn't sent",
                      block.processed_data_file_info.basename)
    return block
<SYSTEM_TASK:> Returns whether or not the request should be sent to the <END_TASK> <USER_TASK:> Description: def should_send(self, request): """Returns whether or not the request should be sent to the modules, based on the filters."""
    if self.filters.get('whitelist', None):
        return request.tree.type in self.filters['whitelist']
    elif self.filters.get('blacklist', None):
        return request.tree.type not in self.filters['blacklist']
    else:
        return True
<SYSTEM_TASK:> Get a river by name. <END_TASK> <USER_TASK:> Description: def get(args): """ Get a river by name. """
    m = RiverManager(args.hosts)
    r = m.get(args.name)
    if r:
        print(json.dumps(r, indent=2))
    else:
        sys.exit(1)
<SYSTEM_TASK:> Create a river. This command expects to be fed a JSON document on STDIN. <END_TASK> <USER_TASK:> Description: def create(args): """ Create a river. This command expects to be fed a JSON document on STDIN. """
    data = json.load(sys.stdin)
    m = RiverManager(args.hosts)
    m.create(args.name, data)
<SYSTEM_TASK:> Delete a river by name <END_TASK> <USER_TASK:> Description: def delete(args): """ Delete a river by name """
    m = RiverManager(args.hosts)
    m.delete(args.name)
<SYSTEM_TASK:> Compare the extant river with the given name to the passed JSON. The <END_TASK> <USER_TASK:> Description: def compare(args): """ Compare the extant river with the given name to the passed JSON. The command will exit with a return code of 0 if the named river is configured as specified, and 1 otherwise. """
    data = json.load(sys.stdin)
    m = RiverManager(args.hosts)
    if m.compare(args.name, data):
        sys.exit(0)
    else:
        sys.exit(1)
<SYSTEM_TASK:> Fit refractive index and radius to a phase image of a sphere <END_TASK> <USER_TASK:> Description: def analyze(qpi, model, n0, r0, c0=None, imagekw={}, ret_center=False, ret_pha_offset=False, ret_qpi=False): """Fit refractive index and radius to a phase image of a sphere Parameters ---------- qpi: QPImage Quantitative phase image information model: str Name of the light-scattering model (see :const:`qpsphere.models.available`) n0: float Approximate refractive index of the sphere r0: float Approximate radius of the sphere [m] c0: tuple of (float, float) Approximate center position in ndarray index coordinates [px]; if set to `None` (default), the center of the image is used. imagekw: dict Additional keyword arguments to :func:`qpsphere.imagefit.alg.match_phase`. ret_center: bool Return the center coordinate of the sphere ret_pha_offset: bool If True, return the phase image background offset. ret_qpi: bool If True, return the modeled data as a :class:`qpimage.QPImage`. Returns ------- n: float Computed refractive index r: float Computed radius [m] c: tuple of floats Only returned if `ret_center` is True Center position of the sphere [px] pha_offset: float Only returned if `ret_pha_offset` is True Phase image background offset qpi_sim: qpimage.QPImage Only returned if `ret_qpi` is True Modeled data """
    res = match_phase(qpi, model=model, n0=n0, r0=r0, c0=c0,
                      ret_center=ret_center,
                      ret_pha_offset=ret_pha_offset,
                      ret_qpi=ret_qpi,
                      **imagekw)
    return res
<SYSTEM_TASK:> Environment variables should be bytes not unicode on Windows. <END_TASK> <USER_TASK:> Description: def __convert_env(env, encoding): """Environment variables should be bytes not unicode on Windows."""
    d = dict(os.environ, **(oget(env, {})))
    # workaround for Windows+Python3 environment
    if not SHOULD_NOT_ENCODE_ARGS:
        return dict((k.encode(encoding), v.encode(encoding)) for k, v in d.items())
    else:
        return d
<SYSTEM_TASK:> Calculate total length of the polyline <END_TASK> <USER_TASK:> Description: def length(self): """ Calculate total length of the polyline :return: total length in meters :rtype: float """
    total_length = 0
    for location_a, location_b in zip(
            self.locations[:-1], self.locations[1:]):
        total_length += Line(location_a, location_b).length
    return total_length
<SYSTEM_TASK:> install the configured exception hook wrapping the old exception hook <END_TASK> <USER_TASK:> Description: def install_hook(dialog=SimpleExceptionDialog, invoke_old_hook=False, **extra): """ Install the configured exception hook, wrapping the old exception hook. Don't use it twice. :oparam dialog: a different exception dialog class :oparam invoke_old_hook: should we invoke the old exception hook? """
    global _old_hook
    assert _old_hook is None

    def new_hook(etype, eval, trace):
        gobject.idle_add(dialog_handler, dialog, etype, eval, trace, extra)
        if invoke_old_hook:
            _old_hook(etype, eval, trace)

    _old_hook = sys.excepthook
    sys.excepthook = new_hook
<SYSTEM_TASK:> Return a dictionary with various information about this root_action. <END_TASK> <USER_TASK:> Description: def _summarize_action(self, root_action): """Return a dictionary with various information about this root_action. Note: Scoring assumes that each actor makes the "best" choices in their turn based on the simulation available. """
    def is_target_node(node):
        return isinstance(node, base.EOT) or (node is root_action)

    # store per-turn results from the bottom up.
    realistic_ends_by_node = dict()
    for node in root_action.post_order_nodes():  # bottom up to this action
        # only work with EOT or the root action
        if not is_target_node(node):
            continue
        # by the time this node is reached, the results of all children
        # have been stored under it in realistic_ends (except leaves)
        # so choose the best of the available results and send it
        # up to the next valid node
        try:
            # get the results stored previously in the deeper turn
            realistic_ends = realistic_ends_by_node[node]
        except KeyError:
            # leaves are own realistic end
            realistic_ends = [node]
        # identify the "best" end for this node
        if node is root_action:
            active = node.parent.active
            passive = node.parent.passive
        else:
            active = node.parent.passive
            passive = node.parent.active
        ends_by_score = dict()
        for realistic_end in realistic_ends:
            # determine the relative score. i.e. if delta is positive
            # then the end result is better for active than passive
            relative_score = self._relative_score(node, realistic_end,
                                                  active, passive)
            ends_by_score[relative_score] = realistic_end
        best_end = ends_by_score[max(ends_by_score.keys())]
        # done after determining realistic result for root action
        if node is root_action:
            return self._summarize_result(root_action, best_end)
        # not done: place best end on the parent EOT's list of possibilities
        parent = node.parent
        while parent:
            if is_target_node(parent):
                break
            parent = parent.parent  # keep moving up until target found
        # at this point the parent is either root_action or another EOT
        realistic_ends_by_node.setdefault(parent, list()).append(best_end)
<SYSTEM_TASK:> Return the balance of perception between the two nodes. <END_TASK> <USER_TASK:> Description: def _relative_score(self, start_eot, end_eot, active, passive): """Return the balance of perception between the two nodes. A positive score indicates the result is relatively better for active. """
    active_start = self._score_eot_for_actor(start_eot, active)
    passive_start = self._score_eot_for_actor(start_eot, passive)
    active_end = self._score_eot_for_actor(end_eot, active)
    passive_end = self._score_eot_for_actor(end_eot, passive)
    return (active_end - passive_end) - (active_start - passive_start)
<SYSTEM_TASK:> Have the actor evaluate the end of turn for itself only. <END_TASK> <USER_TASK:> Description: def _score_eot_for_actor(self, eot, actor): """Have the actor evaluate the end of turn for itself only."""
    # currently just simple sum of own attributes
    # could be much more sophisticated in both analysis (e.g. formulas)
    # and breadth of items analyzed (e.g. require other actor, the board)
    end_state = eot.parent
    a = {'player': end_state.player,
         'opponent': end_state.opponent}[actor.name]
    # simple prioritization without regard to character attributes
    health = a.health * 2
    r, g, b, y = a.r, a.g, a.b, a.y
    x, m = 0.5 * a.x, 0.5 * a.m
    return sum((health, r, g, b, y, x, m))
<SYSTEM_TASK:> Return a dict with useful information that summarizes this action. <END_TASK> <USER_TASK:> Description: def _summarize_result(self, root_action, leaf_eot): """Return a dict with useful information that summarizes this action."""
    root_board = root_action.parent.board
    action_detail = root_action.position_pair
    score = self._relative_score(root_action, leaf_eot,
                                 root_action.parent.player,
                                 root_action.parent.opponent)
    # mana drain info
    total_leaves = 0
    mana_drain_leaves = 0
    for leaf in root_action.leaves():
        total_leaves += 1
        if leaf.is_mana_drain:
            mana_drain_leaves += 1
    summary = base.Summary(root_board, action_detail, score,
                           mana_drain_leaves, total_leaves)
    return summary
<SYSTEM_TASK:> Export the phase error to an hdf5 file <END_TASK> <USER_TASK:> Description: def export_phase_error_hdf5(h5path, identifier, index, phase, mphase, model, n0, r0, spi_params): """Export the phase error to an hdf5 file Parameters ---------- h5path: str or pathlib.Path path to hdf5 output file identifier: str unique identifier of the input phase (e.g. `qpimage.QPImage["identifier"]`) index: int iteration index phase: 2d real-valued np.ndarray phase image mphase: 2d real-valued np.ndarray reference phase image model: str sphere model name n0: float initial object index r0: float initial object radius [m] spi_params: dict parameter dictionary of :func:`SpherePhaseInterpolator` """
    with h5py.File(h5path, mode="a") as h5:
        if identifier in h5:
            grp = h5[identifier]
        else:
            grp = h5.create_group(identifier)
        ds = grp.create_dataset("phase_error_{:05d}".format(index),
                                data=mphase - phase)
        ds.attrs["index initial"] = n0
        ds.attrs["radius initial"] = r0
        ds.attrs["sim model"] = model
        ds.attrs["sim index"] = spi_params["sphere_index"]
        ds.attrs["sim radius"] = spi_params["radius"]
        ds.attrs["sim center"] = spi_params["center"]
        ds.attrs["fit iteration"] = index
<SYSTEM_TASK:> This class method is the preferred way to create SceneScript objects. <END_TASK> <USER_TASK:> Description: def scripts(cls, pkg, metadata, paths=[], **kwargs): """This class method is the preferred way to create SceneScript objects. :param str pkg: The dotted name of the package containing the scripts. :param metadata: A mapping or data object. This parameter permits searching among scripts against particular criteria. Its use is application specific. :param list(str) paths: A sequence of file paths to the scripts relative to the package. You can satisfy all parameter requirements by passing in a :py:class:`~turberfield.dialogue.model.SceneScript.Folder` object like this:: SceneScript.scripts(**folder._asdict()) The method generates a sequence of :py:class:`~turberfield.dialogue.model.SceneScript` objects. """
    for path in paths:
        try:
            fP = pkg_resources.resource_filename(pkg, path)
        except ImportError:
            cls.log.warning(
                "No package called {}".format(pkg)
            )
        else:
            if not os.path.isfile(fP):
                cls.log.warning(
                    "No script file at {}".format(os.path.join(*pkg.split(".") + [path]))
                )
            else:
                yield cls(fP, metadata)
<SYSTEM_TASK:> Read a block of text as a docutils document. <END_TASK> <USER_TASK:> Description: def read(text, name=None): """Read a block of text as a docutils document. :param str text: Scene script text. :param str name: An optional name for the document. :return: A document object. """
    doc = docutils.utils.new_document(name, SceneScript.settings)
    parser = docutils.parsers.rst.Parser()
    parser.parse(text, doc)
    return doc
<SYSTEM_TASK:> Select a persona for each entity declared in the scene. <END_TASK> <USER_TASK:> Description: def select(self, personae, relative=False, roles=1): """Select a persona for each entity declared in the scene. :param personae: A sequence of Personae. :param bool relative: Affects imports from namespace packages. Used for testing only. :param int roles: The maximum number of roles allocated to each persona. :return: An OrderedDict of {Entity: Persona}. """
    def constrained(entity):
        return (
            len(entity["options"].get("types", [])) +
            len(entity["options"].get("states", []))
        )

    rv = OrderedDict()
    performing = defaultdict(set)
    pool = list(personae)
    self.log.debug(pool)
    entities = OrderedDict([
        ("".join(entity.attributes["names"]), entity)
        for entity in sorted(
            group_by_type(self.doc)[EntityDirective.Declaration],
            key=constrained,
            reverse=True
        )
    ])
    for e in entities.values():
        types = tuple(filter(
            None,
            (e.string_import(t, relative) for t in e["options"].get("types", []))
        ))
        states = tuple(filter(
            None,
            (int(t) if t.isdigit() else e.string_import(t, relative)
             for t in e["options"].get("states", []))
        ))
        otherRoles = {i.lower() for i in e["options"].get("roles", [])}
        typ = types or object
        persona = next(
            (i for i in pool
             if isinstance(i, typ) and
             getattr(i, "get_state", not states) and
             all(str(i.get_state(type(s))).startswith(str(s)) for s in states) and
             (performing[i].issubset(otherRoles) or not otherRoles)),
            None
        )
        rv[e] = persona
        performing[persona].update(set(e.attributes["names"]))
        if not otherRoles or list(rv.values()).count(persona) == roles:
            try:
                pool.remove(persona)
            except ValueError:
                self.log.debug(
                    "No persona for type {0} and states {1} with {2} {3}.".format(
                        typ, states, roles, "role" if roles == 1 else "roles"
                    )
                )
    return rv
<SYSTEM_TASK:> Allocate the scene script a cast of personae for each of its entities. <END_TASK> <USER_TASK:> Description: def cast(self, mapping): """Allocate the scene script a cast of personae for each of its entities. :param mapping: A dictionary of {Entity, Persona} :return: The SceneScript object. """
    # See the 'citation' method in
    # http://docutils.sourceforge.net/docutils/parsers/rst/states.py
    for c, p in mapping.items():
        self.doc.note_citation(c)
        self.doc.note_explicit_target(c, c)
        c.persona = p
        self.log.debug("{0} to be played by {1}".format(
            c["names"][0].capitalize(), p)
        )
    return self
<SYSTEM_TASK:> The starting point for your app. <END_TASK> <USER_TASK:> Description: def my_main(context): """ The starting point for your app."""
    print('starting MyApp...')
    if context['debug']:
        print('Context:')
        for k in context:
            print('Key: {}\nValue: {}'.format(k, context[k]))
    print('Done!')
    return 0
<SYSTEM_TASK:> Return summaries of the likely results of each available action. <END_TASK> <USER_TASK:> Description: def versus_summaries(turns=2, sims_to_average=2, async_results_q=None): """Return summaries of the likely results of each available action. Arguments: - turns: how many turns to simulate. - in 2013, 1 is fast (seconds), 2 is slow (seconds), 3 who knows - sims_to_average: how many times to run the simulation to get more representative average results of each action. - async_results_q: provide a multiprocessing Queue on which the summaries of each turn will be placed. This is an asynchronous alternative to waiting for the final return value. """
    board, player, opponent, extra_actions = _state_investigator.get_versus()
    if extra_actions:
        extra_actions = 1  # limit value for realistic time
    if board is None:
        return tuple()
    averaged_summaries = list()  # default return value is empty
    # keep a separate advisor for each simulation to average
    advisors = list()
    for i in range(sims_to_average):
        advisor = versus.Advisor()
        advisor.reset(board, player, opponent, extra_actions)
        advisors.append(advisor)
    # provide async sim results per turn; final results as return value
    for turn in range(turns):
        # store {action: list of results from each simulation}
        summaries_by_action = dict()
        for i in range(sims_to_average):
            advisor = advisors[i]
            advisor.simulate_next_turn()
            for s in advisor.sorted_current_summaries():
                summaries_by_action.setdefault(s.action, list()).append(s)
        # now all sims and analysis for this turn have been completed
        averaged_summaries = list()
        for action, summaries in summaries_by_action.items():
            board = summaries[0].board  # any board. they are all the same
            action = summaries[0].action  # any action. they are all the same
            score_sum = sum(s.score for s in summaries)
            score_avg = score_sum / len(summaries)
            manadrain_sum = sum(s.mana_drain_leaves for s in summaries)
            leaves_sum = sum(s.total_leaves for s in summaries)
            avg_summary = base.Summary(board, action, score_avg,
                                       manadrain_sum, leaves_sum)
            averaged_summaries.append(avg_summary)
        averaged_summaries.sort(key=lambda s: s.score, reverse=True)
        # option to provide the results asynchronously
        if async_results_q is not None:
            async_results_q.put(averaged_summaries)
    return averaged_summaries
<SYSTEM_TASK:> Return a pathname built of the specified number of sub-directories, <END_TASK> <USER_TASK:> Description: def build_tree_pathname(filename, directory_depth=8, pathname_separator_character=os.sep): """ Return a pathname built of the specified number of sub-directories, and where each directory is named after the nth letter of the filename corresponding to the directory depth. Examples:: >>> build_tree_pathname('foo.txt', 2, '/') 'f/o/' >>> build_tree_pathname('0123456789abcdef') '0/1/2/3/4/5/6/7/' @param filename: name of a file, with or without extension. @param directory_depth: number of sub-directories to be generated. @param pathname_separator_character: character to be used to separate pathname components, such as '/' for POSIX and '\\' for Windows. If not defined, the default is the character used by the operating system ``os.sep``. @return: a file pathname. """
    pathname = ''
    (filename_without_extension, _file_extension_) = os.path.splitext(filename)
    for i in range(min(directory_depth, len(filename_without_extension))):
        pathname += filename_without_extension[i] + pathname_separator_character
    return pathname
<SYSTEM_TASK:> Return a file pathname which pathname is built of the specified <END_TASK> <USER_TASK:> Description: def build_tree_file_pathname(filename, directory_depth=8, pathname_separator_character=os.sep): """ Return a file pathname which pathname is built of the specified number of sub-directories, and where each directory is named after the nth letter of the filename corresponding to the directory depth. Examples:: >>> build_tree_file_pathname('foo.txt', 2, '/') 'f/o/foo.txt' >>> build_tree_file_pathname('0123456789abcdef') '0/1/2/3/4/5/6/7/0123456789abcdef' @param filename: name of a file, with or without extension. @param directory_depth: number of sub-directories to be generated. @param pathname_separator_character: character to be used to separate pathname components, such as '/' for POSIX and '\\' for Windows. If not defined, the default is the character used by the operating system ``os.sep``. @return: a file pathname. """
return build_tree_pathname(filename, directory_depth, pathname_separator_character) + filename
<SYSTEM_TASK:> Return the root path of a path relative to an absolute path. <END_TASK> <USER_TASK:> Description: def find_root_path(absolute_path, relative_path): """ Return the root path of a path relative to an absolute path. Example:: >>> find_root_path('/usr/local/lib/python', 'lib/python') '/usr/local/' @param absolute_path: an absolute path that ends with the specified relative path. @param relative_path: a relative path that ends the specified absolute path. @return: the root path of the relative path. """
_absolute_path = os.path.normpath(absolute_path) _relative_path = os.path.normpath(relative_path) index = _absolute_path.rfind(_relative_path) if index == -1 or len(_relative_path) + index < len(_absolute_path): raise ValueError('The relative path does not end the specified absolute path') return _absolute_path[:index]
<SYSTEM_TASK:>
Generate the MD5 checksum of the specified file.
<END_TASK>
<USER_TASK:>
Description:
def get_file_checksum(file_path_name, hash_algorithm_name='md5'):
    """
    Generate the MD5 checksum of the specified file.

    @param file_path_name: the absolute path and name of the file to
        generate its MD5 checksum.

    @param hash_algorithm_name: specify the hash algorithm to use.  Refer to
        ``hashlib.algorithms`` to get the names of the hash algorithms
        guaranteed to be supported by this module.

    @return: hash digest returned as a string of double length, containing
        only hexadecimal digits.  This may be used to exchange the value
        safely in email or other non-binary environments.

    @note: the file cannot be read entirely into memory; it needs to be read
        in chunks of memory that are freed after each iteration.  Note that
        the file must be opened in binary mode.  The function breaks the
        file into chunks using a block size of any multiple of 128 (say
        8192, 32768, etc.) and feeds them to MD5 consecutively using
        ``update()``.  This takes advantage of the fact that MD5 has
        128-byte digest blocks.  The function actually uses a block size
        that depends on the block size of the filesystem to avoid
        performance issues.

    @note: The ``F_FRSIZE`` value is the actual minimum allocation unit of
        the filesystem, while the ``F_BSIZE`` is the block size that would
        lead to the most efficient use of the disk with io calls.
    """
hash_algorithm = hashlib.new(hash_algorithm_name) if sys.platform == "win32": import ctypes sectors_per_cluster = ctypes.c_ulonglong(0) bytes_per_sector = ctypes.c_ulonglong(0) root_path_name = ctypes.c_wchar_p(u"C:\\") ctypes.windll.kernel32.GetDiskFreeSpaceW(root_path_name, ctypes.pointer(sectors_per_cluster), ctypes.pointer(bytes_per_sector), None, None) block_size = bytes_per_sector.value else: import statvfs block_size = os.statvfs('/')[statvfs.F_BSIZE] with open(file_path_name, 'rb') as handle: for chunk in iter(lambda: handle.read(block_size), b''): hash_algorithm.update(chunk) return hash_algorithm.hexdigest()
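A minimal usage sketch: the file path below is hypothetical, and any algorithm name accepted by hashlib.new can be passed.

# hypothetical file; returns a hex digest string (64 hex digits for SHA-256)
digest = get_file_checksum('/tmp/report.pdf', hash_algorithm_name='sha256')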
<SYSTEM_TASK:>
Create the specified path, making all intermediate-level directories
<END_TASK>
<USER_TASK:>
Description:
def make_directory_if_not_exists(path):
    """
    Create the specified path, making all intermediate-level directories
    needed to contain the leaf directory.  Ignore any error that would occur
    if the leaf directory already exists.

    @note: all the intermediate-level directories are created with the
        default mode 0777 (octal).

    @param path: the path to create.

    @raise OSError: an error that would occur if the path cannot be created.
    """
try:
    os.makedirs(path)
except OSError as error:
    # Ignore if the directory has already been created.
    if error.errno != errno.EEXIST:
        raise error
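A hedged sketch of how this could pair with build_tree_file_pathname defined earlier to lay files out in a sharded directory tree; the storage root below is a made-up example and os is assumed to be imported as elsewhere in this module.

storage_root = '/var/data'  # hypothetical storage root
target = os.path.join(storage_root, build_tree_file_pathname('0123456789abcdef', 4))
# '/var/data/0/1/2/3/0123456789abcdef' on POSIX systems
make_directory_if_not_exists(os.path.dirname(target))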
<SYSTEM_TASK:>
Move the specified file to another location. If the destination
<END_TASK>
<USER_TASK:>
Description:
def move_file(source_file_pathname, destination_file_pathname):
    """
    Move the specified file to another location.  If the destination already
    exists, it is replaced silently.

    This function is an alternative to ``shutil.move(src, dst)``, which
    might raise ``OSError`` if the destination already exists.

    @param source_file_pathname: the complete path and name of the file to
        move.

    @param destination_file_pathname: the complete path and name of the file
        once moved.
    """
if os.path.exists(destination_file_pathname): os.remove(destination_file_pathname) shutil.move(source_file_pathname, destination_file_pathname)
<SYSTEM_TASK:>
Locate a program file in the user's path.
<END_TASK>
<USER_TASK:>
Description:
def which(exe_name):
    """
    Locate a program file in the user's path.

    @param exe_name: name of the executable file.

    @return: ``None`` if the executable has not been found in the user's
        path, or the path for the executable file.
    """
def is_exe(file_path_name):
    return os.path.isfile(file_path_name) and os.access(file_path_name, os.X_OK)

is_platform_windows = (platform.system() == 'Windows')

fpath, _fname = os.path.split(exe_name)
if fpath:
    if is_exe(exe_name):
        return exe_name
else:
    for path in os.environ['PATH'].split(os.pathsep):
        exe_file_path_name = os.path.join(path, exe_name)
        if is_exe(exe_file_path_name):
            return exe_file_path_name
        if is_platform_windows:
            windows_exe_file_path_name = '%s.exe' % exe_file_path_name
            if is_exe(windows_exe_file_path_name):
                return windows_exe_file_path_name
            windows_com_file_path_name = '%s.com' % exe_file_path_name
            if is_exe(windows_com_file_path_name):
                return windows_com_file_path_name
return None
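A brief illustration; the executable name and the returned path are examples, not guaranteed on any particular system.

git_path = which('git')
# e.g. '/usr/bin/git' on a typical Linux install, or None when git is absent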
<SYSTEM_TASK:> Get most reliable timestamp from a picture, running down a couple of options. <END_TASK> <USER_TASK:> Description: def get_time(filename, tags): """ Get most reliable timestamp from a picture, running down a couple of options. Filename, exif tags, modification time. """
# use exif 'Image DateTime' field as the time
if 'Image DateTime' in tags.keys():
    return time.strptime(str(tags['Image DateTime']), '%Y:%m:%d %H:%M:%S')
# very fuzzy time matching on filename
# TODO: very fuzzy part, now it just matches the iphone naming convention
m = re.match('^(\d{4}-\d{2}-\d{2} \d{2}\.\d{2}\.\d{2}).*', filename)
if m:
    return time.strptime(m.group(1), '%Y-%m-%d %H.%M.%S')
# if all else fails use stat().st_mtime (consistent on windows/linux as last modification time)
return time.localtime(Path(filename).stat().st_mtime)
<SYSTEM_TASK:> Create a request object for the specified method and url. <END_TASK> <USER_TASK:> Description: def _make_req(self, method, url, methname, headers=None): """Create a request object for the specified method and url."""
# Build up headers hset = hdrs.HeaderDict() # Walk through our global headers for hdr, value in self._headers.items(): # If it's a callable, call it if callable(value): value = value(methname) else: # OK, just stringify it value = str(value) # If it's meaningful, attach it if value: hset[hdr] = value # Were headers passed in? if headers is not None: # Update from specified headers hset.update(headers) # Hook method to instantiate requests self._debug("Creating request %s.%s(%r, %r, headers=%r)", self._req_class.__module__, self._req_class.__name__, method, url, hset) return self._req_class(method, url, self._client, self._procstack, headers=hset, debug=self._debug)
<SYSTEM_TASK:>
Find everything with a specific entry_point. Results will be returned as a
<END_TASK>
<USER_TASK:>
Description:
def query_plugins(entry_point_name):
    """
    Find everything with a specific entry_point.  Results will be returned
    as a dictionary, with the name as the key and the loaded entry point as
    the value.

    Arguments:
    entry_point_name - the name of the entry_point group to query
    """
entries = {} for entry_point in pkg_resources.iter_entry_points(entry_point_name): entries[entry_point.name] = entry_point.load() return entries
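A usage sketch, assuming other packages have registered plugins under a hypothetical entry point group named 'myapp.exporters':

exporters = query_plugins('myapp.exporters')
# e.g. {'csv': <CsvExporter class>, 'json': <JsonExporter class>}
for name, exporter_cls in exporters.items():
    exporter_cls().export()  # hypothetical plugin API, for illustration only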
<SYSTEM_TASK:>
!~~prefixedDec
<END_TASK>
<USER_TASK:>
Description:
def prefixedDec(nstr, schema):
    """
    !~~prefixedDec
    Corresponding strings in documents must begin with the associated string
    in the schema, and the remaining part of the string must be decimal.
    """
if not nstr.startswith(schema): return False postfix = nstr[len(schema):] try: int(postfix) except ValueError: return False return True
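For example, with 'user-' as the schema string (values derived directly from the code above):

prefixedDec('user-42', 'user-')    # True: prefix matches and '42' is decimal
prefixedDec('user-4x', 'user-')    # False: '4x' is not decimal
prefixedDec('admin-42', 'user-')   # False: wrong prefix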
<SYSTEM_TASK:> Transform a schema, once loaded from its YAML representation, to its <END_TASK> <USER_TASK:> Description: def _transschema(x): """ Transform a schema, once loaded from its YAML representation, to its final internal representation """
if isinstance(x, tuple): return x.__class__(_transschema(x[0]), *x[1:]) elif isinstance(x, dict): return dict((_qualify_map(key, _transschema(val)) for key, val in x.iteritems())) elif isinstance(x, list): return map(_transschema, x) else: return x
<SYSTEM_TASK:> Populate entries dictionaries from index. <END_TASK> <USER_TASK:> Description: def _fill_entries(self): """Populate entries dictionaries from index."""
for file in self.index.files:
    path = self.index.paths[file.pathIndex]
    data_offset = file.dataOffset
    data_size = file.dataSize
    obj = RAFEntry(self._data_handle, path, data_offset, data_size)
    # Add to full path dictionary
    assert path.lower() not in self.entries_full
    self.entries_full[path.lower()] = obj
    # Add to name dictionary
    name = os.path.basename(path).lower()
    if name not in self.entries_name:
        self.entries_name[name] = []
    self.entries_name[name].append(obj)
<SYSTEM_TASK:> Returns most recent version of the matching file. <END_TASK> <USER_TASK:> Description: def find(self, path=None, name=None): """ Returns most recent version of the matching file. If there are multiple files of the same name and version, a random one is used. """
if path is None and name is None: # TODO: Correct error type raise ValueError("Path or name is required.") # Entries are sorted by version number, so the last will be the most recent # TODO: Reduce redundancy with RAFArchive if path: return self.entries_full[path.lower()][-1] else: return self.entries_name[name.lower()][-1][-1]
<SYSTEM_TASK:> Find the most recent versions of all entries whose path matches a given pattern. <END_TASK> <USER_TASK:> Description: def find_re(self, pattern): """Find the most recent versions of all entries whose path matches a given pattern."""
# TODO: Reduce redundancy with RAFArchive pattern = re.compile(pattern, re.I) for k, v in six.iteritems(self.entries_full): if pattern.search(k): # Most recent version will be last yield v[-1]
<SYSTEM_TASK:> Implements string formatting along with color specified in colorama.Fore <END_TASK> <USER_TASK:> Description: def colorize(string, color, *args, **kwargs): """ Implements string formatting along with color specified in colorama.Fore """
string = string.format(*args, **kwargs) return color + string + colorama.Fore.RESET
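A short usage sketch; the message text is arbitrary, and colorama.init() is only needed to enable ANSI colors on Windows terminals.

import colorama
colorama.init()
print(colorize('Processed {} of {} files', colorama.Fore.GREEN, 3, 10))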
<SYSTEM_TASK:> Converts a string to a valid filename. <END_TASK> <USER_TASK:> Description: def legitimize(text): """Converts a string to a valid filename. """
import platform os_ = platform.system() # POSIX systems text = text.translate({ 0: None, ord('/'): '-', ord('|'): '-', }) if os_ == 'Windows': # Windows (non-POSIX namespace) text = text.translate({ # Reserved in Windows VFAT and NTFS ord(':'): '-', ord('*'): '-', ord('?'): '-', ord('\\'): '-', ord('\"'): '\'', # Reserved in Windows VFAT ord('+'): '-', ord('<'): '-', ord('>'): '-', ord('['): '(', ord(']'): ')', }) else: # *nix if os_ == 'Darwin': # Mac OS HFS+ text = text.translate({ ord(':'): '-', }) # Remove leading . if text.startswith("."): text = text[1:] text = text[:82] # Trim to 82 Unicode characters long return text
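An illustrative call; the exact result depends on the platform because the function branches on platform.system().

legitimize('.notes|2024/plan.txt')
# Linux: 'notes-2024-plan.txt' (leading '.' removed, '/' and '|' replaced)
# Windows additionally replaces reserved characters such as ':', '*', '?', '<', '>'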
<SYSTEM_TASK:>
Get an estimation of the channel width.
<END_TASK>
<USER_TASK:>
Description:
def channel_width(im, chanangle=None, *, chanapproxangle=None, isccsedge=False):
    """Get an estimation of the channel width.

    Parameters:
    -----------
    im: 2d array
        The channel image
    chanangle: number, optional
        The angle of the channel (None if unknown)
    chanapproxangle: number, optional
        If chanangle is None, the approximate channel angle
    isccsedge: boolean, default False
        Set to True if im is the dft of edge.
        False if it is an image of a channel.

    Returns:
    --------
    width: number
        The channel width
    angle: number
        The corresponding angle

    Notes:
    ------
    This function assumes two parallel lines along angle chanangle.
    The perpendicular line in the Fourier plane will carry a mark of this,
    in the form of an oscillation at a frequency corresponding to the
    distance between the two parallel lines.
    This can be extracted by another fft.

    This second fft might have large components at low frequency,
    so the first few frequencies are neglected.
    The threshold is the first position below the mean.

    If chanangle is not specified, the direction with the highest
    contribution will be picked.

    If chanapproxangle is given, only angles close to this angle are taken
    into account.
    """
# check input is numpy array im = np.asarray(im) # Compute the dft if it is not already done if not isccsedge: im = reg.dft_optsize(np.float32(edge(im))) # save the truesize for later use truesize = im.shape # get centered magnitude squared (This changes the size) im = reg.centered_mag_sq_ccs(im) # if the channel direction is not given, deduce it from channel_angle if chanangle is None: chanangle = channel_angle(im, isshiftdftedge=True, chanapproxangle=chanapproxangle, truesize=truesize) # get vector perpendicular to angle fdir = np.asarray([math.cos(chanangle), -math.sin(chanangle)]) # y,x = 0,1 # need to be in the RHS of the cadran for rfft if fdir[1] < 0: fdir *= -1 # get center of shifted fft center = np.asarray([im.shape[0] // 2, 0]) # get size shape = np.asarray([im.shape[0] // 2, im.shape[1]]) # get evenly spaced positions between 0 and 1 (not included) # should be replaced with linspace pos = np.r_[:1:(shape.min() + 1) * 1j][:-1] # get index of a line of length 1 in normalized units from center # in direction of chdir idx = ((fdir * shape)[:, np.newaxis].dot(pos[np.newaxis]) + center[:, np.newaxis]) # get the line idx = np.float32(idx) f = cv2.remap(np.float32(im), idx[1, :], idx[0, :], cv2.INTER_LINEAR) f = np.squeeze(f) # The central line of the fft will have a periodic feature for parallel # lines which we can detect with fft f = abs(irfft(f**2)) # filter to avoid "interferences" f = gaussian_filter(f, 1) # the offset is determined by the first pixel below mean wmin = np.nonzero(f - f.mean() < 0)[0][0] """ import matplotlib.pyplot as plt plt.figure() plt.plot(f,'x') plt.plot([wmin,wmin],[0,f.max()]) plt.plot([0,500],[f.mean()+3*f.std(),f.mean()+3*f.std()]) #""" # find max excluding the first few points ret = reg.get_peak_pos(f[wmin:f.size // 2]) # return max and corresponding angle return (wmin + ret), chanangle
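The oscillation argument in the Notes can be checked with a tiny 1-D numpy sketch that is independent of this module: two impulses separated by w pixels produce a squared spectrum whose inverse transform (the autocorrelation) peaks at lag w, which is essentially what the second fft above extracts.

import numpy as np

N, w = 256, 40
profile = np.zeros(N)
profile[60] = 1.0        # first "edge"
profile[60 + w] = 1.0    # second "edge", w pixels away

power = np.abs(np.fft.rfft(profile)) ** 2   # oscillates with period N/w
autocorr = np.fft.irfft(power)
lag = np.argmax(autocorr[1:N // 2]) + 1     # skip lag 0, always the maximum
# lag == 40 == w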
<SYSTEM_TASK:>
Extract the channel angle from the rfft
<END_TASK>
<USER_TASK:>
Description:
def channel_angle(im, chanapproxangle=None, *, isshiftdftedge=False, truesize=None):
    """Extract the channel angle from the rfft

    Parameters:
    -----------
    im: 2d array
        The channel image
    chanapproxangle: number, optional
        If not None, an approximation of the result
    isshiftdftedge: boolean, default False
        If the image has already been treated (edge, dft, fftshift),
        set to True
    truesize: 2 numbers, required if isshiftdftedge is True
        The true size of the image

    Returns:
    --------
    angle: number
        The channel angle
    """
im = np.asarray(im) # Compute edge if not isshiftdftedge: im = edge(im) return reg.orientation_angle(im, isshiftdft=isshiftdftedge, approxangle=chanapproxangle, truesize=truesize)
<SYSTEM_TASK:> Register the images assuming they are channels <END_TASK> <USER_TASK:> Description: def register_channel(im0, im1, scale=None, ch0angle=None, chanapproxangle=None): """Register the images assuming they are channels Parameters: ----------- im0: 2d array The first image im1: 2d array The second image scale: number, optional The scale difference if known ch0angle: number, optional The angle of the channel in the first image if known chanapproxangle: number, optional The approximate angle for both images if known Returns: -------- angle: number The angle difference scale: number The scale difference [y, x]: 2 numbers The offset e2: 2d array The second image rotated and translated for performances reasons """
im0 = np.asarray(im0) im1 = np.asarray(im1) # extract the channels edges e0 = edge(im0) e1 = edge(im1) fe0, fe1 = reg.dft_optsize_same(np.float32(e0), np.float32(e1)) # compute the angle and channel width of biggest angular feature w0, a0 = channel_width( fe0, isccsedge=True, chanapproxangle=chanapproxangle) w1, a1 = channel_width( fe1, isccsedge=True, chanapproxangle=chanapproxangle) # get angle diff angle = reg.clamp_angle(a0 - a1) if ch0angle is not None: a0 = ch0angle a1 = a0 - angle # if the scale is unknown, ratio of the channels if scale is None: scale = w1 / w0 #scale and rotate e2 = reg.rotate_scale(e1, angle, scale) # get edge from scaled and rotated im1 fe2 = reg.dft_optsize(np.float32(e2), shape=fe0.shape) # find offset y, x = reg.find_shift_dft(fe0, fe2, isccs=True) # return all infos return angle, scale, [y, x], e2
<SYSTEM_TASK:> Scale the image to uint8 <END_TASK> <USER_TASK:> Description: def uint8sc(im): """Scale the image to uint8 Parameters: ----------- im: 2d array The image Returns: -------- im: 2d array (dtype uint8) The scaled image to uint8 """
im = np.asarray(im) immin = im.min() immax = im.max() imrange = immax - immin return cv2.convertScaleAbs(im - immin, alpha=255 / imrange)
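A small worked example, assuming numpy and cv2 are imported as in the function above; the output values are approximately what cv2.convertScaleAbs rounding gives for this input.

im = np.array([[-1.0, 0.0],
               [ 1.0, 3.0]])
uint8sc(im)
# approximately array([[  0,  64],
#                      [128, 255]], dtype=uint8)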
<SYSTEM_TASK:> Greet each person by name. <END_TASK> <USER_TASK:> Description: def handle(self, args): """ Greet each person by name. """
salutation = { 'french': 'Bonjour', 'spanish': 'Hola', 'english': 'Hello', }[args.lang.lower()] output = [] for name in args.name: output.append("{} {}!".format(salutation, name)) return "\n".join(output)
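A hedged usage sketch: argparse.Namespace stands in for parsed CLI arguments, and GreetCommand is a hypothetical class exposing this handle method.

from argparse import Namespace

args = Namespace(lang='French', name=['Ada', 'Grace'])
GreetCommand().handle(args)   # hypothetical command class
# 'Bonjour Ada!\nBonjour Grace!'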
<SYSTEM_TASK:>
Try to solve the given board.
<END_TASK>
<USER_TASK:>
Description:
def capture(board):
    """Try to solve the given board.

    Return a sequence of summaries that describe how to get to the solution.
    """
game = Game() v = (0, 0) stub_actor = base.Actor('capture', v, v, v, v, v, v, v, v, v) root = base.State(board, stub_actor, stub_actor, turn=1, actions_remaining=1) solution_node = None for eot in game.all_ends_of_turn(root): # check for a solution if eot.is_mana_drain: # quick check before checking whole board if eot.parent.board.is_empty(): solution_node = eot break # if solution found, build the list of swaps solution_sequence = list() # empty sequence (no solution) by default if solution_node: node = solution_node while node: # record each swap in the path to the root if not isinstance(node, base.Swap): node = node.parent continue summary = base.Summary(node.parent.board, node.position_pair, None, None, None) solution_sequence.append(summary) node = node.parent return tuple(reversed(solution_sequence))
<SYSTEM_TASK:> Disallow states that are not useful to continue simulating. <END_TASK> <USER_TASK:> Description: def _disallow_state(self, state): """Disallow states that are not useful to continue simulating."""
disallow_methods = (self._is_duplicate_board, self._is_impossible_by_count) for disallow_method in disallow_methods: if disallow_method(state): return True return False
<SYSTEM_TASK:> Disallow any board that has insufficient tile count to solve. <END_TASK> <USER_TASK:> Description: def _is_impossible_by_count(self, state): """Disallow any board that has insufficient tile count to solve."""
# count all the tile types and name them for readability counts = {tile_type: 0 for tile_type in base.Tile._all_types} standard_wildcard_type = '2' for p, tile in state.board.positions_with_tile(): # count all wildcards as one value tile_type = tile._type try: int(tile_type) counts[standard_wildcard_type] += 1 except ValueError: counts[tile_type] += 1 skullbomb = counts['*'] skull = counts['s'] wildcard = counts[standard_wildcard_type] red = counts['r'] green = counts['g'] blue = counts['b'] yellow = counts['y'] exp = counts['x'] money = counts['m'] # always allow skullbomb with enough skulls if skullbomb and skullbomb + skull >= 3: return False # always allow wildcard with enough of one color if wildcard: if any(wildcard + color >= 3 for color in (red, green, blue, yellow)): return False # disallow simple cases since special cases didn't occur if any(tile and tile < 3 for tile in (red, green, blue, yellow, exp, money, skull)): return True # allow the state if counts seem ok return False
<SYSTEM_TASK:> Build a tree with each level corresponding to a fixed position on <END_TASK> <USER_TASK:> Description: def find_or_graft(self, board): """Build a tree with each level corresponding to a fixed position on board. A path of tiles is stored for each board. If any two boards have the same path, then they are the same board. If there is any difference, a new branch will be created to store that path. Return: True if board already exists in the tree; False otherwise """
is_duplicate_board = True # assume same until find a difference # compare each position node = self for p, new_tile in board.positions_with_tile(): found_tile = False # assume no tile in same position until found for child in node.children: if child.tile == new_tile: # same tile found in this position --> continue this branch node = child found_tile = True break if found_tile: pass # go on to the next position else: # different tile --> start new branch and mark not exact match child = _DuplicateTree(new_tile) node.graft_child(child) node = child is_duplicate_board = False # this will get set many times. ok return is_duplicate_board
<SYSTEM_TASK:> Return the capture board or None if can't find it. <END_TASK> <USER_TASK:> Description: def get_capture(self): """Return the capture board or None if can't find it."""
# game game_image = self._game_image_from_screen('capture') if game_image is None: return # board board = self._board_from_game_image(game_image) if board is None: return if board.is_empty(): return return board
<SYSTEM_TASK:> Return the versus board, player, opponent and extra actions. <END_TASK> <USER_TASK:> Description: def get_versus(self): """Return the versus board, player, opponent and extra actions. Return None for any parts that can't be found. """
# game game_image = self._game_image_from_screen('versus') if game_image is None: return None, None, None, None # nothing else will work # board board = self._board_from_game_image(game_image) # may be None # safety check. there should be no blanks in a versus board if board: for p, tile in board.positions_with_tile(): if tile.is_blank(): board = None # actors player = self._actor_from_game_image('player', game_image) opponent = self._actor_from_game_image('opponent', game_image) # extra actions extra_actions = self._count_extra_actions(game_image) return board, player, opponent, extra_actions
<SYSTEM_TASK:> Return the image of the given game type from the screen. <END_TASK> <USER_TASK:> Description: def _game_image_from_screen(self, game_type): """Return the image of the given game type from the screen. Return None if no game is found. """
# screen screen_img = self._screen_shot() # game image game_rect = self._game_finders[game_type].locate_in(screen_img) if game_rect is None: return t, l, b, r = game_rect game_img = screen_img[t:b, l:r] return game_img
<SYSTEM_TASK:> Return a board object matching the board in the game image. <END_TASK> <USER_TASK:> Description: def _board_from_game_image(self, game_image): """Return a board object matching the board in the game image. Return None if any tiles are not identified. """
# board image board_rect = self._board_tools['board_region'].region_in(game_image) t, l, b, r = board_rect board_image = game_image[t:b, l:r] # board grid and tiles --> fill in a Board object board = Board() grid = self._board_tools['grid'] tile_id = self._board_tools['tile_id'] for p, borders in grid.borders_by_grid_position(board_image): t, l, b, r = borders tile = board_image[t:b, l:r] tile_character = tile_id.identify(tile) if tile_character is None: return None # soft failure board[p] = Tile.singleton(tile_character) return board
<SYSTEM_TASK:> Return an actor object matching the one in the game image. <END_TASK> <USER_TASK:> Description: def _actor_from_game_image(self, name, game_image): """Return an actor object matching the one in the game image. Note: Health and mana are based on measured percentage of a fixed maximum rather than the actual maximum in the game. Arguments: name: must be 'player' or 'opponent' game_image: opencv image of the main game area """
HEALTH_MAX = 100 MANA_MAX = 40 # get the set of tools for investigating this actor tools = {'player': self._player_tools, 'opponent': self._oppnt_tools}[name] # setup the arguments to be set: args = [name] # health: t, l, b, r = tools['health_region'].region_in(game_image) health_image = game_image[t:b, l:r] health_image = numpy.rot90(health_image) # upright for the TankLevel how_full = tools['health_tank'].how_full(health_image) if how_full is None: return None # failure health = int(round(HEALTH_MAX * how_full)) args.append((health, HEALTH_MAX)) # mana for color in ('r', 'g', 'b', 'y'): t, l, b, r = tools[color + '_region'].region_in(game_image) mana_image = game_image[t:b, l:r] how_full = tools[color + '_tank'].how_full(mana_image) if how_full is None: return None # failure mana = int(round(MANA_MAX * how_full)) args.append((mana, MANA_MAX)) # experience and coins simply start at zero x_m = (0, 1000), (0, 1000) args.extend(x_m) # hammer and scroll are unused h_c = (0, 0), (0, 0) args.extend(h_c) # build the actor and return it return Actor(*args)
<SYSTEM_TASK:> Count the number of extra actions for player in this turn. <END_TASK> <USER_TASK:> Description: def _count_extra_actions(self, game_image): """Count the number of extra actions for player in this turn."""
proportional = self._bonus_tools['extra_action_region'] # Use ProportionalRegion to isolate the extra actions area t, l, b, r = proportional.region_in(game_image) token_region = game_image[t:b, l:r] # Use TemplateFinder (multiple) to check for extra actions game_h, game_w = game_image.shape[0:2] token_h = int(round(game_h * 27.0 / 960)) token_w = int(round(game_w * 22.0 / 1280)) sizes = (token_h, token_w), # sizes change every time so just remake it. # thresholds are tight since need to count conservatively finder = v.TemplateFinder(pq_data.extra_action_template, sizes=sizes, acceptable_threshold=0.1, immediate_threshold=0.1) found_tokens = finder.locate_multiple_in(token_region) return len(found_tokens)
<SYSTEM_TASK:> Simulate a complete turn from one state only and generate each <END_TASK> <USER_TASK:> Description: def ends_of_one_state(self, root=None, root_eot=None): """Simulate a complete turn from one state only and generate each end of turn reached in the simulation. Arguments: Exactly one of: root: a start state with no parent or root_eot: an EOT or ManaDrain transition in the simulation """
# basic confirmation of valid arguments self._argument_gauntlet(root_eot, root) # setup the starting state if root: start_state = root else: start_state = State(root_eot.parent.board.copy(), root_eot.parent.player.copy(), root_eot.parent.opponent.copy(), root_eot.parent.turn + 1, 1) root_eot.graft_child(start_state) # track states that are stable - i.e. no remaining chain reactions ready_for_action = [start_state] # simulate all actions for each state until reaching EOTs while ready_for_action: ready_state = ready_for_action.pop() # handle states that have run out of actions (end of turn) if ready_state.actions_remaining <= 0: root_eot = self._simulated_EOT(ready_state) yield root_eot continue # no more simulation for an EOT # handle swaps when there are actions remaining for swap_result in self._simulated_swap_results(ready_state): # handle any chain reactions if swap_result.actions_remaining \ >= ready_state.actions_remaining: already_used_bonus = True else: already_used_bonus = False chain_result = self._simulated_chain_result(swap_result, already_used_bonus) # chain results may be filtered so test first if chain_result: ready_for_action.append(chain_result) #at this point all swaps have been tried #if nothing was valid, it's a manadrain if not tuple(ready_state.children): mana_drain_eot = self._simulated_mana_drain(ready_state) yield mana_drain_eot continue # if something was valid, now spells can be simulated else: pass
<SYSTEM_TASK:> Simulate one complete turn to completion and generate each end of <END_TASK> <USER_TASK:> Description: def ends_of_next_whole_turn(self, root): """Simulate one complete turn to completion and generate each end of turn reached during the simulation. Note on mana drain: Generates but does not continue simulation of mana drains. Arguments: root: a start state with no parent """
# simple confirmation that the root is actually a root. # otherwise it may seem to work but would be totally out of spec if root.parent: raise ValueError('Unexpectedly received a node with a parent for' ' root:\n{}'.format(root)) # build the list of eots (or just the root if first turn) to be run leaves = list(root.leaves()) kw_starts = list() if leaves[0] is root: # build ends of state kwargs as only the root kw_starts.append({'root': root}) else: # build ends of state kwargs as eots in the tree for leaf in leaves: # ignore mana drains if not leaf.is_mana_drain: kw_starts.append({'root_eot': leaf}) # run a single turn for each starting point for kw_start in kw_starts: for eot in self.ends_of_one_state(**kw_start): yield eot
<SYSTEM_TASK:> Simulate the root and continue generating ends of turn until <END_TASK> <USER_TASK:> Description: def all_ends_of_turn(self, root): """Simulate the root and continue generating ends of turn until everything has reached mana drain. Warning on random fill: If random fill is used together with this method, it will generate basically forever due to the huge number of possibilities it introduces. Arguments: root: a start state with no parent Note on mana drain: Generates but does not continue simulation of mana drains. Note on run time: This simulates a complete turn for each eot provided, rather than just one branch at a time. The method will only stop generating when all possibilities have been simulated or filtered. """
# simple confirmation that the root is actually a root. # otherwise it may seem to work but would be totally out of spec if root.parent: raise ValueError('Unexpectedly received a node with a parent for' ' root:\n{}'.format(root)) # run a single turn for each eot from a stack jobs = [root] while jobs: random_job_index = random.randint(0, len(jobs) - 1) start_eot = jobs.pop(random_job_index) # special case: handle the root once if start_eot is root: kw_root = {'root': start_eot} else: kw_root = {'root_eot': start_eot} for eot in self.ends_of_one_state(**kw_root): # only continue simulating non-mana drains if not eot.is_mana_drain: jobs.append(eot) yield eot
<SYSTEM_TASK:> Simulate any chain reactions. <END_TASK> <USER_TASK:> Description: def _simulated_chain_result(self, potential_chain, already_used_bonus): """Simulate any chain reactions. Arguments: potential_chain: a state to be tested for chain reactions already_used_bonus: boolean indicating whether a bonus turn was already applied during this action Return: final result state or None (if state is filtered out in capture) Note that if there is no chain reaction, the final result is the same as the original state received. """
while potential_chain: # hook for capture game optimizations. no effect in base # warning: only do this ONCE for any given state or it will # always filter the second time if self._disallow_state(potential_chain): potential_chain.graft_child(Filtered()) return None # no more simulation for this filtered state result_board, destroyed_groups = \ potential_chain.board.execute_once(random_fill= self.random_fill) # yield the state if nothing happened during execution (chain done) if not destroyed_groups: # yield this state as the final result of the chain return potential_chain # attach the transition chain = ChainReaction() potential_chain.graft_child(chain) # attach the result state if already_used_bonus: # disallow bonus action if already applied bonus_action = 0 else: # allow bonus action once and then flag as used bonus_action = any(len(group) >= 4 for group in destroyed_groups) already_used_bonus = True cls = potential_chain.__class__ chain_result = cls(board=result_board, turn=potential_chain.turn, actions_remaining= potential_chain.actions_remaining + bonus_action, player=potential_chain.player.copy(), opponent=potential_chain.opponent.copy()) # update the player and opponent base_attack = \ chain_result.active.apply_tile_groups(destroyed_groups) chain_result.passive.apply_attack(base_attack) chain.graft_child(chain_result) # prepare to try for another chain reaction potential_chain = chain_result
<SYSTEM_TASK:> Simulate a normal or mana drain EOT and return it. <END_TASK> <USER_TASK:> Description: def _simulated_EOT(self, state): """Simulate a normal or mana drain EOT and return it."""
# determine if this is a manadrain or just end of turn is_manadrain = True # default mana drain until valid swap found for swap_pair in state.board.potential_swaps(): result_board, destroyed_groups = \ state.board.execute_once(swap=swap_pair, random_fill=self.random_fill) if destroyed_groups: is_manadrain = False break # stop when the first valid swap found # attach appropriate EOT or ManaDrain if is_manadrain: end = self._simulated_mana_drain(state) else: end = EOT(False) state.graft_child(end) return end
<SYSTEM_TASK:> Apply mana drain effects to this state, attach a mana drain EOT <END_TASK> <USER_TASK:> Description: def _simulated_mana_drain(self, mana_drain_state): """Apply mana drain effects to this state, attach a mana drain EOT and return the mana drain EOT."""
# clear all mana mana_drain_state.player.apply_mana_drain() mana_drain_state.opponent.apply_mana_drain() # force change of turn mana_drain_state.actions_remaining = 0 # randomize the board if this game uses random fill if self.random_fill: random_start_board = mana_drain_state.board.random_start_board() mana_drain_state.board = random_start_board # attach the mana drain EOT mana_drain = EOT(True) mana_drain_state.graft_child(mana_drain) return mana_drain
<SYSTEM_TASK:> Execute the board only one time. Do not execute chain reactions. <END_TASK> <USER_TASK:> Description: def execute_once(self, swap=None, spell_changes=None, spell_destructions=None, random_fill=False): """Execute the board only one time. Do not execute chain reactions. Arguments: swap - pair of adjacent positions spell_changes - sequence of (position, tile) changes spell_destructions - sequence of positions to be destroyed Return: (copy of the board, destroyed tile groups) """
bcopy = self.copy() # work with a copy, not self total_destroyed_tile_groups = list() # swap if any bcopy._swap(swap) # spell changes if any bcopy._change(spell_changes) # spell destructions and record if any # first convert simple positions to groups spell_destructions = spell_destructions or tuple() destruction_groups = [[p] for p in spell_destructions] destroyed_tile_groups = bcopy._destroy(destruction_groups) total_destroyed_tile_groups.extend(destroyed_tile_groups) # execute one time only # look for matched groups matched_position_groups = bcopy._match() # destroy and record matched groups destroyed_tile_groups = bcopy._destroy(matched_position_groups) total_destroyed_tile_groups.extend(destroyed_tile_groups) bcopy._fall() if random_fill: bcopy._random_fill() return bcopy, total_destroyed_tile_groups
<SYSTEM_TASK:> Simulate swapping as in PQ. <END_TASK> <USER_TASK:> Description: def _swap(self, swap): """Simulate swapping as in PQ. swap should be a sequence of two positions with a square distance of exactly 1. Non-adjacent swaps cause a ValueError. """
if swap is None: return p1, p2 = swap square_distance = abs(p1[0] - p2[0]) + abs(p1[1] - p2[1]) if square_distance != 1: raise ValueError('Positions unexpectedly not adjacent: square' ' distance between {} and {} is' ' {}'.format(p1, p2, square_distance)) a = self._array a[p1], a[p2] = a[p2], a[p1]
<SYSTEM_TASK:> Apply the given changes to the board. <END_TASK> <USER_TASK:> Description: def _change(self, changes): """Apply the given changes to the board. changes: sequence of (position, new tile) pairs or None """
if changes is None: return for position, new_tile in changes: self._array[position] = new_tile
<SYSTEM_TASK:> Find all matches and generate a position group for each match. <END_TASK> <USER_TASK:> Description: def _match(self): """Find all matches and generate a position group for each match."""
#disable optimized matching optimized_rows = None optimized_columns = None for match in self.__match_rows(optimized_rows): #match in rows yield match for match in self.__match_rows(optimized_columns, transpose=True): #match in columns and transpose coordinates yield match